From ec326f99406c0173d7a4e3ab95ac0b5e759a24c7 Mon Sep 17 00:00:00 2001 From: savitha-eng Date: Wed, 12 Nov 2025 08:23:42 +0000 Subject: [PATCH 01/11] Dataset class and tests for LLAMA3; support for streaming, parquet Signed-off-by: savitha-eng --- bionemo-recipes/recipes/llama3/dataset.py | 176 +++++++++++ .../recipes/llama3/distributed_config.py | 47 +++ .../recipes/llama3/tests/conftest.py | 107 +++++++ .../recipes/llama3/tests/test_dataset.py | 295 ++++++++++++++++++ 4 files changed, 625 insertions(+) create mode 100644 bionemo-recipes/recipes/llama3/dataset.py create mode 100644 bionemo-recipes/recipes/llama3/distributed_config.py create mode 100644 bionemo-recipes/recipes/llama3/tests/conftest.py create mode 100644 bionemo-recipes/recipes/llama3/tests/test_dataset.py diff --git a/bionemo-recipes/recipes/llama3/dataset.py b/bionemo-recipes/recipes/llama3/dataset.py new file mode 100644 index 000000000..4e5fb2f9e --- /dev/null +++ b/bionemo-recipes/recipes/llama3/dataset.py @@ -0,0 +1,176 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-Apache2 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from pathlib import Path + +import datasets +import datasets.distributed +from torch.utils.data import DistributedSampler +from torchdata.stateful_dataloader import StatefulDataLoader +from transformers import AutoTokenizer +from transformers.data.data_collator import DataCollatorForLanguageModeling + +from distributed_config import DistributedConfig + + +logger = logging.getLogger(__name__) + + +def create_tokenized_dataset( + distributed_config: DistributedConfig, + tokenizer_path: str, + load_dataset_kwargs: dict, + max_seq_length: int = 8192, + stride: int = 200, + buffer_size: int = 500_000, + use_lazy_tokenization: bool = True, +): + """Create a tokenized dataset with windowing. + + Args: + distributed_config: The distributed configuration. + tokenizer_path: Path to the nucleotide tokenizer directory. + load_dataset_kwargs: Keyword arguments to pass to `load_dataset`. + max_seq_length: The maximum length of sequences (window size). + stride: The stride for windowing (overlap = stride tokens). + buffer_size: The buffer size for shuffle. + use_lazy_tokenization: Whether to use datasets.set_transform for tokenization. + + Returns: + Tuple of (tokenized_dataset, tokenizer). + """ + logger.info(f"Loading dataset with kwargs: {load_dataset_kwargs}") + dataset = datasets.load_dataset(**load_dataset_kwargs) + logger.info(f"Loaded dataset: {dataset}") + + # Handle DatasetDict (extract "train" split if present) + if isinstance(dataset, (datasets.DatasetDict, datasets.IterableDatasetDict)): + if "train" in dataset: + dataset = dataset["train"] + else: + raise ValueError(f"Dataset has splits {list(dataset.keys())} but no 'train' split found. 
" + "Please specify split='train' in load_dataset_kwargs or ensure your dataset has a 'train' split.") + + # Normalize column names - rename 'nt_sequence' to 'sequence' if present + # Only do this for non-streaming datasets (streaming datasets don't have column_names attribute) + if hasattr(dataset, "column_names") and dataset.column_names is not None: + if "nt_sequence" in dataset.column_names and "sequence" not in dataset.column_names: + logger.info("Renaming column 'nt_sequence' to 'sequence' for consistency") + dataset = dataset.rename_column("nt_sequence", "sequence") + + if isinstance(dataset, datasets.IterableDataset): + dataset = datasets.distributed.split_dataset_by_node( + dataset, + rank=distributed_config.rank, + world_size=distributed_config.world_size, + ) + dataset = dataset.shuffle(seed=42, buffer_size=buffer_size) + + tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) + + def tokenize_with_windowing(examples): + """Tokenize nucleotide sequences with windowing (one-to-many mapping).""" + # Tokenize with windowing using return_overflowing_tokens + result = tokenizer( + examples["sequence"], + max_length=max_seq_length, + stride=stride, + truncation=True, + return_overflowing_tokens=True, + add_special_tokens=True, + ) + return result + + if isinstance(dataset, datasets.Dataset) and use_lazy_tokenization: + # Using dataset.map on a non-streaming dataset will automatically perform and cache the transform + tokenized_dataset = dataset.with_transform(tokenize_with_windowing) + else: + tokenized_dataset = dataset.map( + tokenize_with_windowing, + batched=True, + remove_columns=dataset.column_names, + ) + + return tokenized_dataset, tokenizer + + +def create_bshd_dataloader( + distributed_config: DistributedConfig, + tokenizer_path: str, + load_dataset_kwargs: dict, + micro_batch_size: int, + num_workers: int = 0, + max_seq_length: int = 8192, + stride: int = 200, + seed: int = 42, + buffer_size: int = 500_000, + use_lazy_tokenization: bool = True, +): + """Create a BSHD dataloader for genomic sequences using CLM (causal language modeling). + + Args: + distributed_config: The distributed configuration. + tokenizer_path: Path to the nucleotide tokenizer directory. + load_dataset_kwargs: Keyword arguments to pass to `load_dataset`. + micro_batch_size: The batch size per device. + num_workers: The number of workers to use for the dataloader. + max_seq_length: The maximum length of sequences (window size). + stride: The stride for windowing (overlap = stride tokens). + seed: The seed to use for the distributed sampler and data collator. + buffer_size: The buffer size for shuffle. + use_lazy_tokenization: Whether to use datasets.set_transform for tokenization. + + Returns: + A tuple of (dataloader, dataset_or_sampler). 
+ """ + tokenized_dataset, tokenizer = create_tokenized_dataset( + distributed_config=distributed_config, + tokenizer_path=tokenizer_path, + load_dataset_kwargs=load_dataset_kwargs, + max_seq_length=max_seq_length, + stride=stride, + buffer_size=buffer_size, + use_lazy_tokenization=use_lazy_tokenization, + ) + + if isinstance(tokenized_dataset, datasets.IterableDataset): + sampler = None + else: + sampler = DistributedSampler( + tokenized_dataset, + rank=distributed_config.rank, + num_replicas=distributed_config.world_size, + seed=seed, + ) + + # Use DataCollatorForLanguageModeling with mlm=False for CLM + data_collator = DataCollatorForLanguageModeling( + tokenizer=tokenizer, + mlm=False, # Causal language modeling (no masking) + ) + + train_dataloader = StatefulDataLoader( + tokenized_dataset, + sampler=sampler, + batch_size=micro_batch_size, + collate_fn=data_collator, + num_workers=num_workers, + pin_memory=True, + persistent_workers=num_workers > 0, + ) + + return train_dataloader, tokenized_dataset if sampler is None else sampler + diff --git a/bionemo-recipes/recipes/llama3/distributed_config.py b/bionemo-recipes/recipes/llama3/distributed_config.py new file mode 100644 index 000000000..09c1c267b --- /dev/null +++ b/bionemo-recipes/recipes/llama3/distributed_config.py @@ -0,0 +1,47 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-Apache2 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +from dataclasses import dataclass, field + + +logger = logging.getLogger(__name__) + + +@dataclass(frozen=True) +class DistributedConfig: + """Class to track distributed ranks and handle basic distributed training setup. + + If torch distributed environment variables are not set, we set them to default values for single-process training. + + Attributes: + rank: The rank of the process. + local_rank: The local rank of the process. + world_size: The total number of processes. + """ + + rank: int = field(default_factory=lambda: int(os.environ.setdefault("RANK", "0"))) + local_rank: int = field(default_factory=lambda: int(os.environ.setdefault("LOCAL_RANK", "0"))) + world_size: int = field(default_factory=lambda: int(os.environ.setdefault("WORLD_SIZE", "1"))) + _master_addr: str = field(default_factory=lambda: os.environ.setdefault("MASTER_ADDR", "localhost")) + _master_port: str = field(default_factory=lambda: os.environ.setdefault("MASTER_PORT", "12355")) + + def is_main_process(self) -> bool: + """This is the global rank 0 process, to be used for wandb logging, etc.""" + return self.rank == 0 + + + diff --git a/bionemo-recipes/recipes/llama3/tests/conftest.py b/bionemo-recipes/recipes/llama3/tests/conftest.py new file mode 100644 index 000000000..21a4f8b85 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/tests/conftest.py @@ -0,0 +1,107 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: LicenseRef-Apache2
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+from pathlib import Path
+from unittest import mock
+
+import pyarrow as pa
+import pyarrow.parquet as pq
+import pytest
+import torch
+from torch.distributed.device_mesh import _mesh_resources, init_device_mesh
+
+
+sys.path.append(Path(__file__).parent.parent.as_posix())
+sys.path.append(Path(__file__).parent.as_posix())
+
+from distributed_config import DistributedConfig
+
+
+@pytest.fixture
+def recipe_path() -> Path:
+    """Return the root directory of the recipe."""
+    return Path(__file__).parent.parent
+
+
+@pytest.fixture(scope="session")
+def mock_genomic_parquet(tmp_path_factory) -> Path:
+    """Create a mock genomic sequences parquet file for testing.
+
+    This fixture creates a small parquet file with synthetic genomic sequences
+    that can be used for training tests without relying on external data files.
+
+    Returns:
+        Path to the generated parquet file.
+    """
+    tmp_dir = tmp_path_factory.mktemp("data")
+    parquet_path = tmp_dir / "test_genomic_sequences.parquet"
+
+    # Create mock genomic sequences with simple repeating patterns.
+    # These are easy for the model to overfit to, which is perfect for sanity tests.
+    sequences = [
+        "ATCG" * 300,  # 1200 bp - simple ATCG repeat
+        "AAAA" * 250 + "TTTT" * 250,  # 2000 bp - an A block followed by a T block
+        "GCGC" * 200,  # 800 bp - GC repeat
+        "ACGT" * 400,  # 1600 bp - all 4 nucleotides
+        "TGCA" * 350,  # 1400 bp - reverse pattern
+    ]
+
+    # Create parquet table with 'sequence' column
+    table = pa.table({
+        "sequence": sequences,
+    })
+
+    pq.write_table(table, parquet_path)
+    return parquet_path
+
+
+@pytest.fixture(scope="session", autouse=True)
+def device_mesh():
+    """Create a re-usable device mesh for testing.
+
+    This is an "auto-use", session-scope fixture, so a single device mesh is created and shared across all tests.
+
+    Megatron-FSDP raises errors when the torch device mesh is re-created in the same process, starting with the
+    25.09 NGC PyTorch container release. To work around this, we create one re-usable device mesh that we use in
+    all single-process tests.
+    """
+    # Initialize the distributed configuration, including creating the distributed process group.
+    dist_config = DistributedConfig()
+    device = torch.device(f"cuda:{dist_config.local_rank}")
+    torch.distributed.init_process_group(backend="nccl", device_id=device)
+    torch.cuda.set_device(dist_config.local_rank)
+    device_mesh = init_device_mesh("cuda", mesh_shape=(1, 1), mesh_dim_names=("dp", "tp"))
+
+    # Mock these torch.distributed functions so that we re-use the same device mesh, and don't re-create or destroy the
+    # global process group.
+ with ( + mock.patch("torch.distributed.device_mesh.init_device_mesh", return_value=device_mesh), + mock.patch("torch.distributed.init_process_group", return_value=None), + mock.patch("torch.distributed.destroy_process_group", return_value=None), + ): + yield + + # At the end of all tests, destroy the process group and clear the device mesh resources. + torch.distributed.destroy_process_group() + _mesh_resources.mesh_stack.clear() + _mesh_resources.child_to_root_mapping.clear() + _mesh_resources.root_to_flatten_mapping.clear() + _mesh_resources.flatten_name_to_root_dims.clear() + _mesh_resources.mesh_dim_group_options.clear() + torch.cuda.empty_cache() + torch.cuda.synchronize() + + diff --git a/bionemo-recipes/recipes/llama3/tests/test_dataset.py b/bionemo-recipes/recipes/llama3/tests/test_dataset.py new file mode 100644 index 000000000..a8009f905 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/tests/test_dataset.py @@ -0,0 +1,295 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-Apache2 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import tempfile +from pathlib import Path + +import datasets +import pyarrow as pa +import pyarrow.parquet as pq +import pytest +import torch +from transformers import AutoTokenizer + +from dataset import create_bshd_dataloader, create_tokenized_dataset +from distributed_config import DistributedConfig + + +@pytest.fixture(scope="session") +def tokenizer_path(): + """Get the path to the nucleotide tokenizer.""" + return str(Path(__file__).parent.parent.parent.parent / "models" / "llama3" / "nucleotide_fast_tokenizer") + + +@pytest.fixture(scope="session") +def tokenizer(tokenizer_path): + """Load the nucleotide tokenizer.""" + return AutoTokenizer.from_pretrained(tokenizer_path) + + +@pytest.fixture +def simple_parquet(tmp_path): + """Create a simple Parquet file with one genomic sequence.""" + parquet_path = tmp_path / "genomic_sequences.parquet" + + # Create a minimal dataset with one 1000bp sequence + sequence = "A" * 1000 + + table = pa.table({ + "sequence": [sequence], + }) + + pq.write_table(table, parquet_path) + return str(parquet_path) + + +def test_tokenizer_loads(tokenizer_path): + """Test that the tokenizer loads correctly.""" + tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) + assert tokenizer is not None + assert tokenizer.vocab_size > 0 + + +def test_windowing_creates_multiple_windows_from_long_sequence(tokenizer_path): + """Test that windowing creates multiple samples from a long sequence using the tokenizer directly.""" + tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) + + # Create a 5kbp sequence + sequence = "A" * 5000 + + # Tokenize with windowing + result = tokenizer( + sequence, + max_length=1000, + stride=800, # 800 token overlap + truncation=True, + return_overflowing_tokens=True, + add_special_tokens=True, + ) + + # Should create multiple windows + num_windows = len(result["input_ids"]) + assert num_windows > 1, 
f"Expected multiple windows, got {num_windows}" + + # First and last windows may be shorter, but most should be max_length + assert len(result["input_ids"][0]) <= 1000 + + +def test_dataset_loads_and_tokenizes_sequence(tokenizer_path, tmp_path): + """Test that dataset loads and tokenizes a sequence correctly.""" + # Create a Parquet file with a single T sequence + parquet_path = tmp_path / "genomic_sequences.parquet" + sequence = "T" * 100 + table = pa.table({"sequence": [sequence]}) + pq.write_table(table, parquet_path) + + distributed_config = DistributedConfig(rank=0, world_size=1) + + load_dataset_kwargs = { + "path": "parquet", + "data_files": str(parquet_path), + "split": "train", # Explicitly request train split + } + + tokenized_dataset, tokenizer = create_tokenized_dataset( + distributed_config=distributed_config, + tokenizer_path=tokenizer_path, + load_dataset_kwargs=load_dataset_kwargs, + max_seq_length=100, + stride=50, + buffer_size=10_000, + ) + + # Access first sample + sample = tokenized_dataset[0] + assert "input_ids" in sample + assert isinstance(sample["input_ids"], list) + + # Check that it contains T tokens (ASCII 84) + # Remove BOS (2) and EOS (0) tokens + tokens = sample["input_ids"] + assert tokens[0] == 2 # BOS + assert tokens[-1] == 0 # EOS + # Middle should be all Ts (84) + assert all(t == 84 for t in tokens[1:-1]) + + +def test_dataloader_produces_correct_batch_structure(tokenizer_path, simple_parquet): + """Test that the dataloader produces batches with correct structure.""" + distributed_config = DistributedConfig(rank=0, world_size=1) + + load_dataset_kwargs = { + "path": "parquet", + "data_files": simple_parquet, + "split": "train", + } + + dataloader, _ = create_bshd_dataloader( + distributed_config=distributed_config, + tokenizer_path=tokenizer_path, + load_dataset_kwargs=load_dataset_kwargs, + micro_batch_size=2, + num_workers=0, + max_seq_length=500, + stride=100, + ) + + # Get a batch + batch = next(iter(dataloader)) + + # Check batch structure + assert "input_ids" in batch + assert "attention_mask" in batch + assert "labels" in batch + + # Check batch contains tensors + assert isinstance(batch["input_ids"], torch.Tensor) + assert isinstance(batch["attention_mask"], torch.Tensor) + assert isinstance(batch["labels"], torch.Tensor) + + # Check batch size (may vary with lazy tokenization and windowing) + assert batch["input_ids"].shape[0] >= 1, "Batch should have at least 1 sample" + + +def test_attention_mask_aligns_with_labels(tokenizer_path, simple_parquet): + """Test attention_mask correctly identifies real vs padded positions.""" + distributed_config = DistributedConfig(rank=0, world_size=1) + + load_dataset_kwargs = { + "path": "parquet", + "data_files": simple_parquet, + "split": "train", + } + + dataloader, _ = create_bshd_dataloader( + distributed_config=distributed_config, + tokenizer_path=tokenizer_path, + load_dataset_kwargs=load_dataset_kwargs, + micro_batch_size=2, + num_workers=0, + max_seq_length=500, + stride=100, + ) + + batch = next(iter(dataloader)) + + # Check that attention_mask is present and valid + attention_mask = batch["attention_mask"][0] + + # Should have some real tokens (attention_mask=1) + assert torch.sum(attention_mask) > 0, "Should have at least some real tokens" + + +def test_streaming_dataset_produces_batches(tokenizer_path, simple_parquet): + """Test that streaming mode works and produces valid batches.""" + distributed_config = DistributedConfig(rank=0, world_size=1) + + load_dataset_kwargs = { + "path": "parquet", + 
"data_files": simple_parquet, + "split": "train", + "streaming": True, + } + + dataloader, _ = create_bshd_dataloader( + distributed_config=distributed_config, + tokenizer_path=tokenizer_path, + load_dataset_kwargs=load_dataset_kwargs, + micro_batch_size=2, + num_workers=0, + max_seq_length=500, + stride=100, + buffer_size=10_000, + ) + + # Get a batch + batch = next(iter(dataloader)) + + # Check batch structure + assert "input_ids" in batch + assert "attention_mask" in batch + assert "labels" in batch + + # Check batch is tensors + assert isinstance(batch["input_ids"], torch.Tensor) + assert batch["input_ids"].shape[0] == 2 + + +def test_windowing_in_dataset_creates_multiple_samples(tokenizer_path, tmp_path): + """Test that the dataset's windowing creates expected number of samples.""" + # Create a 3kbp sequence + parquet_path = tmp_path / "genomic_sequences.parquet" + sequence = "A" * 3000 + table = pa.table({"sequence": [sequence]}) + pq.write_table(table, parquet_path) + + distributed_config = DistributedConfig(rank=0, world_size=1) + + load_dataset_kwargs = { + "path": "parquet", + "data_files": str(parquet_path), + "split": "train", + } + + tokenized_dataset, tokenizer = create_tokenized_dataset( + distributed_config=distributed_config, + tokenizer_path=tokenizer_path, + load_dataset_kwargs=load_dataset_kwargs, + max_seq_length=1000, + stride=800, # 800 token overlap, so 200 token step + buffer_size=10_000, + use_lazy_tokenization=False, # Use eager tokenization to expand windows + ) + + # Count samples + num_samples = len(tokenized_dataset) + + # With 3000bp sequence, max_length=1000, stride=800 (overlap) + # Should create multiple windows (at least 2) + assert num_samples >= 2, f"Expected at least 2 windows, got {num_samples}" + + +def test_lazy_tokenization_returns_batch(tokenizer_path, simple_parquet): + """Test that lazy tokenization works and returns valid batches.""" + distributed_config = DistributedConfig(rank=0, world_size=1) + + load_dataset_kwargs = { + "path": "parquet", + "data_files": simple_parquet, + "split": "train", + "streaming": False, + } + + dataloader, _ = create_bshd_dataloader( + distributed_config=distributed_config, + tokenizer_path=tokenizer_path, + load_dataset_kwargs=load_dataset_kwargs, + micro_batch_size=2, + num_workers=0, + max_seq_length=500, + stride=100, + use_lazy_tokenization=True, + ) + + # Get a batch + batch = next(iter(dataloader)) + + # Verify batch is not None and has correct structure + assert batch is not None + assert "input_ids" in batch + assert "attention_mask" in batch + assert "labels" in batch + assert isinstance(batch["input_ids"], torch.Tensor) + assert batch["input_ids"].shape[0] >= 1 # At least one sample in batch From cda848ad19cc1c9c830d0a22400de15f3f82b75d Mon Sep 17 00:00:00 2001 From: savitha-eng Date: Thu, 13 Nov 2025 19:34:40 +0000 Subject: [PATCH 02/11] Modified tests to more throughly test multi sequence and batching scenarios per the feedback Signed-off-by: savitha-eng --- .../recipes/llama3/tests/test_dataset.py | 349 ++++++++++++------ 1 file changed, 239 insertions(+), 110 deletions(-) diff --git a/bionemo-recipes/recipes/llama3/tests/test_dataset.py b/bionemo-recipes/recipes/llama3/tests/test_dataset.py index a8009f905..2b299a62a 100644 --- a/bionemo-recipes/recipes/llama3/tests/test_dataset.py +++ b/bionemo-recipes/recipes/llama3/tests/test_dataset.py @@ -41,57 +41,35 @@ def tokenizer(tokenizer_path): @pytest.fixture def simple_parquet(tmp_path): - """Create a simple Parquet file with one genomic sequence.""" + 
"""Create a simple Parquet file with multiple genomic sequences for testing batching.""" parquet_path = tmp_path / "genomic_sequences.parquet" - # Create a minimal dataset with one 1000bp sequence - sequence = "A" * 1000 + # Create multiple sequences of varying lengths for better batching tests + sequences = [ + "A" * 1000, + "T" * 1200, + "C" * 800, + "G" * 1500, + "ATCG" * 300, + ] table = pa.table({ - "sequence": [sequence], + "sequence": sequences, }) pq.write_table(table, parquet_path) return str(parquet_path) -def test_tokenizer_loads(tokenizer_path): - """Test that the tokenizer loads correctly.""" - tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) - assert tokenizer is not None - assert tokenizer.vocab_size > 0 - - -def test_windowing_creates_multiple_windows_from_long_sequence(tokenizer_path): - """Test that windowing creates multiple samples from a long sequence using the tokenizer directly.""" - tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) - - # Create a 5kbp sequence - sequence = "A" * 5000 - - # Tokenize with windowing - result = tokenizer( - sequence, - max_length=1000, - stride=800, # 800 token overlap - truncation=True, - return_overflowing_tokens=True, - add_special_tokens=True, - ) - - # Should create multiple windows - num_windows = len(result["input_ids"]) - assert num_windows > 1, f"Expected multiple windows, got {num_windows}" - - # First and last windows may be shorter, but most should be max_length - assert len(result["input_ids"][0]) <= 1000 - - def test_dataset_loads_and_tokenizes_sequence(tokenizer_path, tmp_path): - """Test that dataset loads and tokenizes a sequence correctly.""" - # Create a Parquet file with a single T sequence + """Test that dataset loads and tokenizes a sequence correctly with exact token verification. + + Uses single sequence so shuffling doesn't affect test (similar to SQLite test approach). 
+ Pattern: expected_sequence = [nucleotide_id] * seqlen + """ + # Create a Parquet file with a single T sequence of known length parquet_path = tmp_path / "genomic_sequences.parquet" - sequence = "T" * 100 + sequence = "T" * 10 # Small, predictable sequence table = pa.table({"sequence": [sequence]}) pq.write_table(table, parquet_path) @@ -100,39 +78,58 @@ def test_dataset_loads_and_tokenizes_sequence(tokenizer_path, tmp_path): load_dataset_kwargs = { "path": "parquet", "data_files": str(parquet_path), - "split": "train", # Explicitly request train split + "split": "train", } tokenized_dataset, tokenizer = create_tokenized_dataset( distributed_config=distributed_config, tokenizer_path=tokenizer_path, load_dataset_kwargs=load_dataset_kwargs, - max_seq_length=100, - stride=50, + max_seq_length=20, # Large enough to fit the sequence + stride=10, buffer_size=10_000, + use_lazy_tokenization=False, # Eager to get predictable dataset ) - # Access first sample + # Only 1 sequence → 1 window → dataset[0] is predictable regardless of shuffle sample = tokenized_dataset[0] assert "input_ids" in sample - assert isinstance(sample["input_ids"], list) - # Check that it contains T tokens (ASCII 84) - # Remove BOS (2) and EOS (0) tokens + # Get nucleotides (remove BOS and EOS) tokens = sample["input_ids"] - assert tokens[0] == 2 # BOS - assert tokens[-1] == 0 # EOS - # Middle should be all Ts (84) - assert all(t == 84 for t in tokens[1:-1]) + nucleotides = tokens[1:-1] + + # Verify exact expected sequence + BOS = 2 + EOS = 0 + T = 84 # ASCII value of 'T' + + expected_sequence = [T] * 10 # All Ts + received_sequence = nucleotides + + assert tokens[0] == BOS, f"First token should be BOS (2), got {tokens[0]}" + assert tokens[-1] == EOS, f"Last token should be EOS (0), got {tokens[-1]}" + assert received_sequence == expected_sequence, \ + f"Expected {expected_sequence}, got {received_sequence}" -def test_dataloader_produces_correct_batch_structure(tokenizer_path, simple_parquet): - """Test that the dataloader produces batches with correct structure.""" +def test_dataloader_returns_expected_batch(tokenizer_path, tmp_path): + """Test dataloader returns exact expected batch with known input. + + Creates minimal test data with exactly one sequence to get deterministic output. + Verifies exact token values match expected hardcoded batch. 
+ """ + # Create minimal test parquet with exactly 1 sequence + parquet_path = tmp_path / "single_sequence.parquet" + sequence = "A" * 5 # 5 As + table = pa.table({"sequence": [sequence]}) + pq.write_table(table, parquet_path) + distributed_config = DistributedConfig(rank=0, world_size=1) load_dataset_kwargs = { "path": "parquet", - "data_files": simple_parquet, + "data_files": str(parquet_path), "split": "train", } @@ -140,31 +137,42 @@ def test_dataloader_produces_correct_batch_structure(tokenizer_path, simple_parq distributed_config=distributed_config, tokenizer_path=tokenizer_path, load_dataset_kwargs=load_dataset_kwargs, - micro_batch_size=2, + micro_batch_size=1, # Just one sample per batch num_workers=0, - max_seq_length=500, - stride=100, + max_seq_length=10, # Large enough for 5bp sequence + stride=5, + use_lazy_tokenization=False, # Eager for deterministic behavior ) - # Get a batch - batch = next(iter(dataloader)) + returned_batch = next(iter(dataloader)) - # Check batch structure - assert "input_ids" in batch - assert "attention_mask" in batch - assert "labels" in batch + # Hardcode expected batch (1 sequence, deterministic output) + # seq: 5bp of As -> BOS + 5 As + EOS + BOS = 2 + EOS = 0 + A = 65 # ASCII value of 'A' - # Check batch contains tensors - assert isinstance(batch["input_ids"], torch.Tensor) - assert isinstance(batch["attention_mask"], torch.Tensor) - assert isinstance(batch["labels"], torch.Tensor) + expected_input_ids = torch.tensor([[BOS, A, A, A, A, A, EOS]], dtype=torch.long) + expected_labels = torch.tensor([[BOS, A, A, A, A, A, EOS]], dtype=torch.long) # CLM: labels = input_ids + expected_attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1]], dtype=torch.long) # All real tokens - # Check batch size (may vary with lazy tokenization and windowing) - assert batch["input_ids"].shape[0] >= 1, "Batch should have at least 1 sample" + assert torch.equal(returned_batch["input_ids"], expected_input_ids), \ + f"Expected input_ids {expected_input_ids}, got {returned_batch['input_ids']}" + assert torch.equal(returned_batch["labels"], expected_labels), \ + f"Expected labels {expected_labels}, got {returned_batch['labels']}" + assert torch.equal(returned_batch["attention_mask"], expected_attention_mask), \ + f"Expected attention_mask {expected_attention_mask}, got {returned_batch['attention_mask']}" def test_attention_mask_aligns_with_labels(tokenizer_path, simple_parquet): - """Test attention_mask correctly identifies real vs padded positions.""" + """Test attention_mask correctly identifies real vs padded positions in labels. 
+ + Where attention_mask=1: labels should contain real token IDs (matching input_ids) + Where attention_mask=0: labels should contain ignore_index value (-100) + """ + # HuggingFace's DataCollatorForLanguageModeling uses -100 as ignore_index by default + IGNORE_PAD_TOKEN = -100 + distributed_config = DistributedConfig(rank=0, world_size=1) load_dataset_kwargs = { @@ -173,6 +181,7 @@ def test_attention_mask_aligns_with_labels(tokenizer_path, simple_parquet): "split": "train", } + # Use a moderate window size to ensure we get padding in batches dataloader, _ = create_bshd_dataloader( distributed_config=distributed_config, tokenizer_path=tokenizer_path, @@ -185,46 +194,38 @@ def test_attention_mask_aligns_with_labels(tokenizer_path, simple_parquet): batch = next(iter(dataloader)) - # Check that attention_mask is present and valid + # Check first sequence in batch attention_mask = batch["attention_mask"][0] - - # Should have some real tokens (attention_mask=1) - assert torch.sum(attention_mask) > 0, "Should have at least some real tokens" + labels = batch["labels"][0] + input_ids = batch["input_ids"][0] + + # Where attention_mask=1, labels should equal input_ids (real tokens) + real_positions = attention_mask == 1 + real_labels = labels[real_positions] + real_input_ids = input_ids[real_positions] + + # For CLM (Causal Language Modeling), labels should match input_ids at real positions + assert torch.all(real_labels == real_input_ids), "Labels should match input_ids at real token positions" + + # Verify specific token positions contain expected values + assert real_labels[0].item() == 2, "First token should be BOS (2)" + assert real_labels[-1].item() == 0, "Last real token should be EOS (0)" + # Middle tokens should be nucleotides (A=65, T=84, C=67, G=71) + if len(real_labels) > 2: + middle_token = real_labels[1].item() + assert middle_token in [65, 84, 67, 71], f"Nucleotide tokens should be A/T/C/G, got {middle_token}" + + # Ensure NO real position has the ignore padding value + assert torch.all(real_labels != IGNORE_PAD_TOKEN), "Real tokens should not have IGNORE_PAD_TOKEN" + + # Where attention_mask=0, labels should be IGNORE_PAD_TOKEN (-100) + padded_positions = attention_mask == 0 + if padded_positions.any(): + padded_labels = labels[padded_positions] + assert torch.all(padded_labels == IGNORE_PAD_TOKEN), \ + f"Padded positions should have IGNORE_PAD_TOKEN (-100), got {padded_labels.unique()}" -def test_streaming_dataset_produces_batches(tokenizer_path, simple_parquet): - """Test that streaming mode works and produces valid batches.""" - distributed_config = DistributedConfig(rank=0, world_size=1) - - load_dataset_kwargs = { - "path": "parquet", - "data_files": simple_parquet, - "split": "train", - "streaming": True, - } - - dataloader, _ = create_bshd_dataloader( - distributed_config=distributed_config, - tokenizer_path=tokenizer_path, - load_dataset_kwargs=load_dataset_kwargs, - micro_batch_size=2, - num_workers=0, - max_seq_length=500, - stride=100, - buffer_size=10_000, - ) - - # Get a batch - batch = next(iter(dataloader)) - - # Check batch structure - assert "input_ids" in batch - assert "attention_mask" in batch - assert "labels" in batch - - # Check batch is tensors - assert isinstance(batch["input_ids"], torch.Tensor) - assert batch["input_ids"].shape[0] == 2 def test_windowing_in_dataset_creates_multiple_samples(tokenizer_path, tmp_path): @@ -256,9 +257,9 @@ def test_windowing_in_dataset_creates_multiple_samples(tokenizer_path, tmp_path) # Count samples num_samples = 
len(tokenized_dataset) - # With 3000bp sequence, max_length=1000, stride=800 (overlap) - # Should create multiple windows (at least 2) - assert num_samples >= 2, f"Expected at least 2 windows, got {num_samples}" + # With 3000bp sequence, max_length=1000, stride=800 (800 overlap, 200 step) + # Formula: ceil((3000+2 - 1000) / 200) + 1 = ceil(2002/200) + 1 = 11 + 1 = 12 windows + assert num_samples == 12, f"Expected exactly 12 windows, got {num_samples}" def test_lazy_tokenization_returns_batch(tokenizer_path, simple_parquet): @@ -292,4 +293,132 @@ def test_lazy_tokenization_returns_batch(tokenizer_path, simple_parquet): assert "attention_mask" in batch assert "labels" in batch assert isinstance(batch["input_ids"], torch.Tensor) - assert batch["input_ids"].shape[0] >= 1 # At least one sample in batch + + # With lazy tokenization and windowing, batch size can vary due to on-the-fly window expansion + # Just verify we get at least one sample (lazy tokenization + windowing makes exact count unpredictable) + assert batch["input_ids"].shape[0] >= 1, \ + f"Expected at least 1 sample in batch, got {batch['input_ids'].shape[0]}" + + +@pytest.mark.parametrize("streaming", [False, True]) +def test_multiple_sequences_batch_correctly(tokenizer_path, simple_parquet, streaming): + """Test that multiple sequences batch together correctly in both streaming and non-streaming modes. + + This test catches bugs that only appear with multi-row datasets vs single-row: + - Batching/collation works with multiple sequences + - Sequences in batch are different (not duplicated) + - Padding aligns correctly across multiple sequences + - All sequences are processed across batches + - Works in both streaming=True and streaming=False modes + """ + distributed_config = DistributedConfig(rank=0, world_size=1) + + load_dataset_kwargs = { + "path": "parquet", + "data_files": simple_parquet, + "split": "train", + "streaming": streaming, + } + + dataloader, _ = create_bshd_dataloader( + distributed_config=distributed_config, + tokenizer_path=tokenizer_path, + load_dataset_kwargs=load_dataset_kwargs, + micro_batch_size=2, + num_workers=0, + max_seq_length=500, + stride=100, + buffer_size=10_000, # Only used for streaming + use_lazy_tokenization=False, + ) + + # Get first batch + batch = next(iter(dataloader)) + + # KEY TEST 1: Verify batch contains MULTIPLE sequences (not just 1) + assert batch["input_ids"].shape[0] == 2, \ + f"Batch should contain 2 sequences, got {batch['input_ids'].shape[0]}" + + # KEY TEST 2: Verify sequences in batch are DIFFERENT (catch duplication bugs) + seq1 = batch["input_ids"][0] + seq2 = batch["input_ids"][1] + assert not torch.equal(seq1, seq2), \ + "Sequences in batch should be different, not duplicates" + + # KEY TEST 3: Verify padding aligns across all tensors in batch + batch_size, seq_length = batch["input_ids"].shape + assert batch["attention_mask"].shape == (batch_size, seq_length) + assert batch["labels"].shape == (batch_size, seq_length) + + # KEY TEST 4: Verify all sequences are processed (multiple batches produced) + # With 5 sequences from simple_parquet (800-1500bp) and max_seq_length=500, + # windowing will create ~11+ windows total. With batch_size=2, expect ~5-6 batches. + # We already consumed 1 batch, so should have at least 4 remaining batches. 
+ all_batches = list(dataloader) + total_batches = len(all_batches) + 1 # +1 for first batch already consumed + assert len(all_batches) >= 4, \ + f"Expected at least 4 remaining batches (5 total), got {len(all_batches)} remaining ({total_batches} total)" + + # KEY TEST 5: Verify subsequent batches also valid (not just first batch) + if len(all_batches) > 0: + second_batch = all_batches[0] + # Check structure is consistent across batches + assert "input_ids" in second_batch + assert "attention_mask" in second_batch + assert "labels" in second_batch + # Verify it also has multiple sequences (could be different count due to windowing) + assert second_batch["input_ids"].shape[0] >= 1, \ + f"Second batch should have at least 1 sequence, got {second_batch['input_ids'].shape[0]}" + # Verify tensors align + batch_size_2, seq_length_2 = second_batch["input_ids"].shape + assert second_batch["attention_mask"].shape == (batch_size_2, seq_length_2) + assert second_batch["labels"].shape == (batch_size_2, seq_length_2) + + +def test_batching_produces_correct_batch_size(tokenizer_path, tmp_path): + """Test that batching combines multiple sequences correctly with exact batch counts. + + Creates 5 short sequences (no windowing) with micro_batch_size=2. + Should produce exactly 3 batches with shapes: [2, 2, 1]. + """ + # Create 5 sequences that won't trigger windowing (all very short) + parquet_path = tmp_path / "five_sequences.parquet" + sequences = [ + "A" * 10, # Seq 1 + "T" * 15, # Seq 2 + "C" * 12, # Seq 3 + "G" * 8, # Seq 4 + "ATCG" * 3, # Seq 5 (12bp) + ] + table = pa.table({"sequence": sequences}) + pq.write_table(table, parquet_path) + + distributed_config = DistributedConfig(rank=0, world_size=1) + + load_dataset_kwargs = { + "path": "parquet", + "data_files": str(parquet_path), + "split": "train", + } + + dataloader, _ = create_bshd_dataloader( + distributed_config=distributed_config, + tokenizer_path=tokenizer_path, + load_dataset_kwargs=load_dataset_kwargs, + micro_batch_size=2, + num_workers=0, + max_seq_length=50, # Large enough - no windowing + stride=10, + use_lazy_tokenization=False, # Use eager to ensure predictable batching + ) + + # Collect all batches + batches = list(dataloader) + + # With 5 sequences and batch_size=2, expect exactly 3 batches: [2, 2, 1] + assert len(batches) == 3, f"Expected exactly 3 batches from 5 sequences, got {len(batches)}" + + # Check each batch has correct shape + assert batches[0]["input_ids"].shape[0] == 2, "Batch 0 should have 2 sequences" + assert batches[1]["input_ids"].shape[0] == 2, "Batch 1 should have 2 sequences" + assert batches[2]["input_ids"].shape[0] == 1, "Batch 2 should have 1 sequence (remainder)" From 0daae2a5c5bf38a24ad570b8ab07c9e1ee082b59 Mon Sep 17 00:00:00 2001 From: savitha-eng Date: Sat, 15 Nov 2025 00:55:49 +0000 Subject: [PATCH 03/11] Make StatefulDataLoader configurable to work around pin_memory issue - Added use_stateful_dataloader parameter (defaults to False) - Switch between StatefulDataLoader and regular DataLoader - Set pin_memory=False when using StatefulDataLoader (BIONEMO-3246 workaround) - Matches ESM2 implementation pattern - All tests pass (8/8 dataset tests, 14/14 tokenizer tests) Signed-off-by: savitha-eng --- bionemo-recipes/recipes/llama3/dataset.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/bionemo-recipes/recipes/llama3/dataset.py b/bionemo-recipes/recipes/llama3/dataset.py index 4e5fb2f9e..9e72d9cfc 100644 --- a/bionemo-recipes/recipes/llama3/dataset.py +++ 
b/bionemo-recipes/recipes/llama3/dataset.py
@@ -18,7 +18,7 @@
 
 import datasets
 import datasets.distributed
-from torch.utils.data import DistributedSampler
+from torch.utils.data import DataLoader, DistributedSampler
 from torchdata.stateful_dataloader import StatefulDataLoader
 from transformers import AutoTokenizer
 from transformers.data.data_collator import DataCollatorForLanguageModeling
@@ -118,6 +118,7 @@ def create_bshd_dataloader(
     seed: int = 42,
     buffer_size: int = 500_000,
     use_lazy_tokenization: bool = True,
+    use_stateful_dataloader: bool = False,
 ):
     """Create a BSHD dataloader for genomic sequences using CLM (causal language modeling).
 
@@ -132,6 +133,7 @@ def create_bshd_dataloader(
         seed: The seed to use for the distributed sampler.
         buffer_size: The buffer size for shuffle.
         use_lazy_tokenization: Whether to tokenize lazily via datasets.with_transform instead of an eager map.
+        use_stateful_dataloader: Whether to use the StatefulDataLoader to enable checkpointing the dataloader state.
 
     Returns:
         A tuple of (dataloader, dataset_or_sampler).
@@ -162,13 +164,15 @@ def create_bshd_dataloader(
         mlm=False,  # Causal language modeling (no masking)
     )
 
-    train_dataloader = StatefulDataLoader(
+    # TODO(BIONEMO-3246) - remove the pin_memory=False once StatefulDataLoader supports pin_memory again.
+    dataloader_class = StatefulDataLoader if use_stateful_dataloader else DataLoader
+    train_dataloader = dataloader_class(
         tokenized_dataset,
         sampler=sampler,
         batch_size=micro_batch_size,
         collate_fn=data_collator,
         num_workers=num_workers,
-        pin_memory=True,
+        pin_memory=not use_stateful_dataloader,
         persistent_workers=num_workers > 0,
     )
 

From 0293211e4a1fe41675c17691b48eac067fab2b1e Mon Sep 17 00:00:00 2001
From: savitha-eng
Date: Sat, 15 Nov 2025 02:17:51 +0000
Subject: [PATCH 04/11] Add configurable sequence_column parameter for dataset
 flexibility

- Added sequence_column parameter to create_tokenized_dataset and create_bshd_dataloader
- Defaults to 'sequence' for backwards compatibility
- Supports any column name (e.g., 'Text' for arcinstitute/opengenome2, 'nt_sequence' for SQLite data)
- Validates column exists with helpful error messages
- Removes hardcoded nt_sequence special case
- All existing tests pass (8/8) with default parameter

Signed-off-by: savitha-eng
---
 bionemo-recipes/recipes/llama3/dataset.py | 25 ++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/bionemo-recipes/recipes/llama3/dataset.py b/bionemo-recipes/recipes/llama3/dataset.py
index 9e72d9cfc..22f25fd69 100644
--- a/bionemo-recipes/recipes/llama3/dataset.py
+++ b/bionemo-recipes/recipes/llama3/dataset.py
@@ -37,6 +37,7 @@ def create_tokenized_dataset(
     stride: int = 200,
     buffer_size: int = 500_000,
     use_lazy_tokenization: bool = True,
+    sequence_column: str = "sequence",
 ):
     """Create a tokenized dataset with windowing.
 
@@ -48,6 +49,7 @@ def create_tokenized_dataset(
         stride: The stride for windowing (overlap = stride tokens).
         buffer_size: The buffer size for shuffle.
         use_lazy_tokenization: Whether to use datasets.set_transform for tokenization.
+        sequence_column: Name of the column containing genomic sequences (default: "sequence").
 
     Returns:
         Tuple of (tokenized_dataset, tokenizer).
@@ -64,12 +66,22 @@ def create_tokenized_dataset(
             raise ValueError(f"Dataset has splits {list(dataset.keys())} but no 'train' split found. 
" "Please specify split='train' in load_dataset_kwargs or ensure your dataset has a 'train' split.") - # Normalize column names - rename 'nt_sequence' to 'sequence' if present - # Only do this for non-streaming datasets (streaming datasets don't have column_names attribute) + # Normalize column name to "sequence" for consistent processing + # Only validate and rename for non-streaming datasets (streaming datasets don't have column_names attribute) if hasattr(dataset, "column_names") and dataset.column_names is not None: - if "nt_sequence" in dataset.column_names and "sequence" not in dataset.column_names: - logger.info("Renaming column 'nt_sequence' to 'sequence' for consistency") - dataset = dataset.rename_column("nt_sequence", "sequence") + if sequence_column != "sequence": + if sequence_column not in dataset.column_names: + raise ValueError( + f"Sequence column '{sequence_column}' not found in dataset. " + f"Available columns: {dataset.column_names}" + ) + logger.info(f"Renaming column '{sequence_column}' to 'sequence' for consistency") + dataset = dataset.rename_column(sequence_column, "sequence") + elif "sequence" not in dataset.column_names: + raise ValueError( + f"Column 'sequence' not found in dataset. Available columns: {dataset.column_names}. " + f"Use sequence_column parameter to specify the correct column name." + ) if isinstance(dataset, datasets.IterableDataset): dataset = datasets.distributed.split_dataset_by_node( @@ -119,6 +131,7 @@ def create_bshd_dataloader( buffer_size: int = 500_000, use_lazy_tokenization: bool = True, use_stateful_dataloader: bool = False, + sequence_column: str = "sequence", ): """Create a BSHD dataloader for genomic sequences using CLM (causal language modeling). @@ -134,6 +147,7 @@ def create_bshd_dataloader( buffer_size: The buffer size for shuffle. use_lazy_tokenization: Whether to use datasets.set_transform for tokenization. use_stateful_dataloader: Whether to use the StatefulDataLoader to enable checkpointing the dataloader state. + sequence_column: Name of the column containing genomic sequences (default: "sequence"). Returns: A tuple of (dataloader, dataset_or_sampler). 
@@ -146,6 +160,7 @@ def create_bshd_dataloader( stride=stride, buffer_size=buffer_size, use_lazy_tokenization=use_lazy_tokenization, + sequence_column=sequence_column, ) if isinstance(tokenized_dataset, datasets.IterableDataset): From fdfa77377f35fe1f20b22e4720a6d204d278fb49 Mon Sep 17 00:00:00 2001 From: savitha-eng Date: Wed, 12 Nov 2025 08:29:20 +0000 Subject: [PATCH 05/11] Training scripts, tests, and config for llama3; very similar to ESM2 native te Signed-off-by: savitha-eng --- bionemo-recipes/recipes/llama3/checkpoint.py | 566 ++++++++++++++++++ .../hydra_config/L0_convergence_ddp.yaml | 81 +++ .../hydra_config/L0_convergence_fsdp2.yaml | 81 +++ .../llama3/hydra_config/L0_sanity.yaml | 44 ++ .../recipes/llama3/hydra_config/defaults.yaml | 78 +++ bionemo-recipes/recipes/llama3/perf_logger.py | 136 +++++ bionemo-recipes/recipes/llama3/scheduler.py | 45 ++ .../recipes/llama3/tests/test_train.py | 194 ++++++ bionemo-recipes/recipes/llama3/train_ddp.py | 180 ++++++ bionemo-recipes/recipes/llama3/train_fsdp2.py | 194 ++++++ 10 files changed, 1599 insertions(+) create mode 100644 bionemo-recipes/recipes/llama3/checkpoint.py create mode 100644 bionemo-recipes/recipes/llama3/hydra_config/L0_convergence_ddp.yaml create mode 100644 bionemo-recipes/recipes/llama3/hydra_config/L0_convergence_fsdp2.yaml create mode 100644 bionemo-recipes/recipes/llama3/hydra_config/L0_sanity.yaml create mode 100644 bionemo-recipes/recipes/llama3/hydra_config/defaults.yaml create mode 100644 bionemo-recipes/recipes/llama3/perf_logger.py create mode 100644 bionemo-recipes/recipes/llama3/scheduler.py create mode 100644 bionemo-recipes/recipes/llama3/tests/test_train.py create mode 100644 bionemo-recipes/recipes/llama3/train_ddp.py create mode 100644 bionemo-recipes/recipes/llama3/train_fsdp2.py diff --git a/bionemo-recipes/recipes/llama3/checkpoint.py b/bionemo-recipes/recipes/llama3/checkpoint.py new file mode 100644 index 000000000..bc0906f7c --- /dev/null +++ b/bionemo-recipes/recipes/llama3/checkpoint.py @@ -0,0 +1,566 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-Apache2 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import os +from dataclasses import dataclass, field +from pathlib import Path +from typing import NamedTuple + +import torch +import torch.distributed.checkpoint as dcp +import transformers +from safetensors.torch import save_file +from torch.distributed.checkpoint.state_dict import ( + StateDictOptions, + get_model_state_dict, + get_state_dict, + set_state_dict, +) +from torch.distributed.checkpoint.stateful import Stateful +from torchdata.stateful_dataloader import StatefulDataLoader + +from distributed_config import DistributedConfig + + +logger = logging.getLogger(__name__) + + +class CheckpointOutput(NamedTuple): + """Output of checkpoint loading.""" + + model: torch.nn.Module + optimizer: torch.optim.Optimizer + scheduler: torch.optim.lr_scheduler.LRScheduler + dataloader: StatefulDataLoader | None + step: int + epoch: int + + +# ============================================================================ +# Helper functions +# ============================================================================ + + +def get_latest_checkpoint(ckpt_path: str | os.PathLike) -> tuple[Path | None, int]: + """Get the latest checkpoint path and step number. + + Returns: + Tuple of (checkpoint path, step number). + If no checkpoint files are found, returns (None, 0). + """ + ckpt_path = Path(ckpt_path) + if not ckpt_path.exists(): + return None, 0 + + checkpoints = [f for f in ckpt_path.iterdir() if f.name.startswith("step_")] + + if not checkpoints: + return None, 0 + + latest = max(checkpoints, key=lambda x: int(Path(x).stem.split("_")[1])) + step = int(Path(latest).stem.split("_")[1]) + return latest, step + + +def should_save_checkpoint(step: int, save_every_n_steps: int) -> bool: + """Determine if a checkpoint should be saved.""" + if save_every_n_steps > 0 and step % save_every_n_steps == 0 and step > 0: + return True + return False + + +# ============================================================================ +# DDP Checkpointing +# ============================================================================ + + +def load_checkpoint_ddp( + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + scheduler: torch.optim.lr_scheduler.LRScheduler, + ckpt_path: str | os.PathLike, + dist_config: DistributedConfig, + dataloader: StatefulDataLoader | None = None, +) -> CheckpointOutput: + """Load DDP checkpoint.""" + checkpoint_path, _ = get_latest_checkpoint(ckpt_path) + + if not checkpoint_path: + logger.info("No DDP checkpoint found, starting from scratch") + return CheckpointOutput(model, optimizer, scheduler, dataloader, 0, 0) + + checkpoint = torch.load( + checkpoint_path / "checkpoint.pt", + map_location=f"cuda:{dist_config.local_rank}", + weights_only=False, + ) + + model.load_state_dict(checkpoint["model"]) + optimizer.load_state_dict(checkpoint["optimizer"]) + scheduler.load_state_dict(checkpoint["scheduler"]) + dataloader = load_dataloader(dataloader, checkpoint_path, dist_config) + step = checkpoint["step"] + epoch = checkpoint["epoch"] + + if dist_config.is_main_process(): + logger.info(f"Loaded DDP checkpoint from step {step}") + + # Increment the step by one to avoid re-running the previous step. 
+    return CheckpointOutput(model, optimizer, scheduler, dataloader, step + 1, epoch)
+
+
+def save_checkpoint_ddp(
+    model: torch.nn.Module,
+    optimizer: torch.optim.Optimizer,
+    scheduler: torch.optim.lr_scheduler.LRScheduler,
+    ckpt_path: str | os.PathLike,
+    step: int,
+    epoch: int,
+    dist_config: DistributedConfig,
+    dataloader: StatefulDataLoader | None = None,
+) -> None:
+    """Save the dataloader state and the DDP checkpoint."""
+    ckpt_path = Path(ckpt_path)
+    checkpoint_path = ckpt_path / f"step_{step}"
+    checkpoint_path.mkdir(parents=True, exist_ok=True)
+
+    # Dataloader checkpointing needs to happen on all ranks, while DDP model checkpointing only needs to happen on the
+    # main process.
+    save_dataloader(dataloader, checkpoint_path, dist_config)
+
+    if not dist_config.is_main_process():
+        return
+
+    torch.save(
+        {
+            "model": model.state_dict(),
+            "optimizer": optimizer.state_dict(),
+            "scheduler": scheduler.state_dict(),
+            "step": step,
+            "epoch": epoch,
+        },
+        checkpoint_path / "checkpoint.pt",
+    )
+
+    logger.info(f"Saved DDP checkpoint to {checkpoint_path}")
+
+
+def save_final_model_ddp(
+    model: torch.nn.Module,
+    save_directory: str | os.PathLike,
+    dist_config: DistributedConfig,
+) -> None:
+    """Save final model for DDP - only on main process."""
+    if not dist_config.is_main_process():
+        return
+
+    # Unwrap model if wrapped
+    underlying_model: transformers.PreTrainedModel = model.module if hasattr(model, "module") else model  # type: ignore
+
+    os.makedirs(save_directory, exist_ok=True)
+    underlying_model.save_pretrained(save_directory, state_dict=underlying_model.state_dict(), safe_serialization=True)
+    logger.info(f"Saved final DDP model to {save_directory}")
+
+
+# ============================================================================
+# mFSDP Checkpointing
+# ============================================================================
+
+
+def load_checkpoint_mfsdp(
+    model: torch.nn.Module,
+    optimizer: torch.optim.Optimizer,
+    scheduler: torch.optim.lr_scheduler.LRScheduler,
+    ckpt_path: str | os.PathLike,
+    dist_config: DistributedConfig,
+    dataloader: StatefulDataLoader | None = None,
+) -> CheckpointOutput:
+    """Load mFSDP distributed checkpoint.
+
+    Args:
+        model: The model to load.
+        optimizer: The optimizer to load.
+        scheduler: The LR scheduler to load.
+        ckpt_path: The directory containing checkpoints.
+        dist_config: The distributed configuration.
+        dataloader: The dataloader to load.
+
+    Returns:
+        A CheckpointOutput with the loaded model, optimizer, scheduler, dataloader, step, and epoch.
+ """ + checkpoint_path, step = get_latest_checkpoint(ckpt_path) + if not checkpoint_path: + logger.info("No mFSDP checkpoint found, starting from scratch") + return CheckpointOutput(model, optimizer, scheduler, dataloader, 0, 0) + + ckpt_state_dict = { + "model": model.state_dict(), + "optimizer": optimizer.state_dict(), + "scheduler": scheduler.state_dict(), + "metadata": { + "step": step, # Initialize with current step from filename + "epoch": 0, # Initialize with default epoch + }, + } + torch.distributed.checkpoint.load(state_dict=ckpt_state_dict, checkpoint_id=checkpoint_path) + + model.load_state_dict(ckpt_state_dict["model"]) + optimizer.load_state_dict(ckpt_state_dict["optimizer"]) + scheduler.load_state_dict(ckpt_state_dict["scheduler"]) + dataloader = load_dataloader(dataloader, checkpoint_path, dist_config) + + step = ckpt_state_dict["metadata"]["step"] + epoch = ckpt_state_dict["metadata"]["epoch"] + + # Ensure all ranks have completed loading before proceeding + torch.distributed.barrier() + + logger.info(f"Loaded mFSDP checkpoint from step {step}") + + # Increment the step by one to avoid re-running the previous step. + return CheckpointOutput(model, optimizer, scheduler, dataloader, step + 1, epoch) + + +def save_checkpoint_mfsdp( + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + scheduler: torch.optim.lr_scheduler.LRScheduler, + ckpt_path: str | os.PathLike, + step: int, + dist_config: DistributedConfig, + dataloader: StatefulDataLoader | None = None, + epoch: int = 0, +) -> None: + """Save mFSDP distributed checkpoint. + + Args: + model: The model to save. + optimizer: The optimizer to save. + scheduler: The LR scheduler to save. + ckpt_path: The directory to save the checkpoint. + step: The step number to save the checkpoint. + dist_config: The distributed configuration. + dataloader: The dataloader to save. + epoch: The epoch number to save the checkpoint. + """ + ckpt_path = Path(ckpt_path) + checkpoint_path = ckpt_path / f"step_{step}" + checkpoint_path.mkdir(parents=True, exist_ok=True) + + # Save dataloader state, if provided. + save_dataloader(dataloader, checkpoint_path, dist_config) + + # Save model, optimizer, scheduler state, and metadata + state_dict = { + "model": model.state_dict(), + "optimizer": optimizer.state_dict(), + "scheduler": scheduler.state_dict(), + "metadata": { + "step": step, + "epoch": epoch, + }, + } + + torch.distributed.checkpoint.save(state_dict, checkpoint_id=checkpoint_path) + + if dist_config.is_main_process(): + logger.info(f"Saved mFSDP checkpoint to {checkpoint_path}") + + +def save_final_model_mfsdp( + model: torch.nn.Module, + save_directory: str | os.PathLike, + dist_config: DistributedConfig, +) -> None: + """Save final model for mFSDP - requires parameter gathering on all ranks.""" + from megatron_fsdp.uneven_dtensor import gather_uneven_dtensor_to_full_tensor + + if dist_config.is_main_process(): + logger.info("Starting mFSDP parameter gathering...") + + # Parameter gathering must happen on ALL processes + unsharded_state_dict = { + # Gather all parameters to CPU, and remove the "module." prefix from the Megatron-FSDP class wrapper. 
+ k.removeprefix("module."): gather_uneven_dtensor_to_full_tensor( + v, target_device=torch.device("cpu") + ).to_local() + if isinstance(v, torch.distributed.tensor.DTensor) + else v + for k, v in model.state_dict().items() + } + + # Only main process saves the model + if not dist_config.is_main_process(): + return + + os.makedirs(save_directory, exist_ok=True) + model.module.save_pretrained(save_directory, state_dict=unsharded_state_dict, safe_serialization=True) + logger.info(f"Saved final mFSDP model to {save_directory}") + + +# ============================================================================ +# FSDP2 Checkpointing +# ============================================================================ + + +@dataclass +class AppState(Stateful): + """AppState for FSDP2 checkpoint. + + Adapted from https://docs.pytorch.org/tutorials/recipes/distributed_checkpoint_recipe.html + """ + + model: torch.nn.Module + optimizer: torch.optim.Optimizer + scheduler: torch.optim.lr_scheduler.LRScheduler + step: int = 0 + epoch: int = 0 + state_dict_options: StateDictOptions = field( + default_factory=lambda: StateDictOptions( + full_state_dict=False, + cpu_offload=True, + ) + ) + + def state_dict(self): + """Get the state dict for the model, optimizer, scheduler, and step.""" + model_state_dict, optimizer_state_dict = get_state_dict( + self.model, self.optimizer, options=self.state_dict_options + ) + return { + "model": model_state_dict, + "optim": optimizer_state_dict, + "scheduler": self.scheduler.state_dict(), + "step": self.step, + "epoch": self.epoch, + } + + def load_state_dict(self, state_dict: dict): + """Load the state dict for the model, optimizer, scheduler, and step.""" + set_state_dict( + self.model, + self.optimizer, + model_state_dict=state_dict["model"], + optim_state_dict=state_dict["optim"], + options=self.state_dict_options, + ) + self.scheduler.load_state_dict(state_dict["scheduler"]) + self.step = state_dict["step"] + self.epoch = state_dict["epoch"] + + +def load_checkpoint_fsdp2( + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + scheduler: torch.optim.lr_scheduler.LRScheduler, + ckpt_path: str | os.PathLike, + dist_config: DistributedConfig, + dataloader: StatefulDataLoader | None = None, +) -> CheckpointOutput: + """Load FSDP2 checkpoint. + + Args: + model: The model to load. + optimizer: The optimizer to load. + scheduler: The LR scheduler to load. + ckpt_path: The directory containing checkpoints. + dist_config: The distributed configuration. + dataloader: The dataloader to load. + """ + checkpoint_path, _ = get_latest_checkpoint(ckpt_path) + if not checkpoint_path: + logger.info("No FSDP2 checkpoint found, starting from scratch") + return CheckpointOutput(model, optimizer, scheduler, dataloader, 0, 0) + + app_state = AppState( + model=model, + optimizer=optimizer, + scheduler=scheduler, + ) + + state_dict = {"app": app_state} + dcp.load(state_dict, checkpoint_id=checkpoint_path) + + if dataloader is not None: + load_dataloader( + dataloader=dataloader, + ckpt_path=checkpoint_path, + dist_config=dist_config, + ) + + logger.info(f"Loaded distributed FSDP2 checkpoint from step {app_state.step}") + + # Increment the step by one to avoid re-running the previous step. 
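+    # dcp.load restores the model and optimizer in place through AppState.load_state_dict, so the instances
+    # returned here are the same objects that were passed in, now carrying the checkpointed state.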
+ return CheckpointOutput(model, optimizer, scheduler, dataloader, app_state.step + 1, app_state.epoch) + + +def save_checkpoint_fsdp2( + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + scheduler: torch.optim.lr_scheduler.LRScheduler, + ckpt_path: str | os.PathLike, + step: int, + epoch: int, + dist_config: DistributedConfig, + dataloader: StatefulDataLoader | None = None, +) -> None: + """Save FSDP2 checkpoint. + + Args: + model: The model to save. + optimizer: The optimizer to save. + scheduler: The LR scheduler to save. + ckpt_path: The directory to save the checkpoint. + step: The step number to save the checkpoint. + epoch: The epoch number to save the checkpoint. + dist_config: The distributed configuration. + dataloader: The dataloader to save. + """ + ckpt_path = Path(ckpt_path) + checkpoint_path = ckpt_path / f"step_{step}" + checkpoint_path.mkdir(parents=True, exist_ok=True) + + if dataloader is not None: + save_dataloader( + dataloader=dataloader, + ckpt_path=checkpoint_path, + dist_config=dist_config, + ) + logger.info(f"Saved FSDP2 dataloader to {ckpt_path}") + + state_dict = { + "app": AppState( + model=model, + optimizer=optimizer, + scheduler=scheduler, + step=step, + epoch=epoch, + ) + } + dcp.save(state_dict=state_dict, checkpoint_id=checkpoint_path) + logger.info(f"Saved distributed FSDP2 checkpoint to {checkpoint_path}") + + +def save_final_model_fsdp2( + model: torch.nn.Module, + save_directory: str | os.PathLike, + dist_config: DistributedConfig, +) -> None: + """Save final model for FSDP2 - gather on all ranks, save on main.""" + # ALL ranks must participate in gathering + model_state_dict = get_model_state_dict( + model=model, + options=StateDictOptions( + full_state_dict=True, + cpu_offload=True, + ), + ) + + # Only main process saves + if not dist_config.is_main_process(): + return + + os.makedirs(save_directory, exist_ok=True) + + # Save just the weights using safetensors + save_file(model_state_dict, os.path.join(save_directory, "model.safetensors")) + + # Save the config + underlying_model = model.module if hasattr(model, "module") else model + if hasattr(underlying_model, "config"): + underlying_model.config.save_pretrained(save_directory) + + logger.info(f"Saved final FSDP2 model to {save_directory} (weights + config only)") + + +# ============================================================================ +# Dataloader Checkpointing +# ============================================================================ + + +def save_dataloader( + dataloader: StatefulDataLoader | None, + ckpt_path: str | os.PathLike, + dist_config: DistributedConfig, +): + """Save the dataloader state to a file. + + For resuming training with long epochs, we save the dataloader state as part of the checkpoint to allow for resuming + from the exact same step. Here we save the dataloader state based on global rank. Note, the total number of ranks + and dataloader num_workers should match for resuming training. + + Args: + dataloader: The dataloader to save the state of. + ckpt_path: The path to save the dataloader state to. + dist_config: The distributed configuration. 
+ """ + if dataloader is None: + return + + ckpt_path = Path(ckpt_path) + ckpt_path.mkdir(parents=True, exist_ok=True) + dataloader_path = ckpt_path / f"dataloader_rank_{dist_config.rank}.pt" + + dataloader_state = dataloader.state_dict() + dataloader_state["num_workers"] = dataloader.num_workers + dataloader_state["num_ranks"] = dist_config.world_size + torch.save(dataloader_state, dataloader_path) + if dist_config.is_main_process(): + logger.info(f"Saved dataloader state to {dataloader_path}") + + +def load_dataloader( + dataloader: StatefulDataLoader | None, + ckpt_path: str | os.PathLike, + dist_config: DistributedConfig, +) -> StatefulDataLoader | None: + """Load the dataloader state from a file. + + Here we load the dataloader state based on global rank. + + Args: + dataloader: The dataloader to load the state of. + ckpt_path: The path to load the dataloader state from. + dist_config: The distributed configuration. + """ + if dataloader is None: + return dataloader + + dataloader_path = Path(ckpt_path) / f"dataloader_rank_{dist_config.rank}.pt" + if not dataloader_path.exists(): + logger.warning( + f"No dataloader checkpoint found for rank {dist_config.rank}, starting dataloader from scratch." + ) + return dataloader + + dataloader_state = torch.load(dataloader_path) + + if ( + dataloader.num_workers != dataloader_state["num_workers"] + or dist_config.world_size != dataloader_state["num_ranks"] + ): + logger.warning( + f"Dataloader num_workers mismatch: {dataloader.num_workers} != {dataloader_state['num_workers']} or " + f"num_ranks mismatch: {dist_config.world_size} != {dataloader_state['num_ranks']}, " + "starting dataloader from scratch." + ) + return dataloader + + dataloader.load_state_dict(dataloader_state) + if dist_config.is_main_process(): + logger.info(f"Loaded dataloader state from {dataloader_path}") + + return dataloader diff --git a/bionemo-recipes/recipes/llama3/hydra_config/L0_convergence_ddp.yaml b/bionemo-recipes/recipes/llama3/hydra_config/L0_convergence_ddp.yaml new file mode 100644 index 000000000..c191af864 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/hydra_config/L0_convergence_ddp.yaml @@ -0,0 +1,81 @@ +# @package _global_ + +# Convergence test configuration for DDP with tiny Llama model (~10M params) +# Tests that the model can overfit on a small 200MB dataset +# Single GPU version + +defaults: + - defaults + - _self_ + +# Use tiny Llama config for fast convergence testing +model_tag: /workspaces/bionemo-framework/bionemo-recipes/recipes/llama3/tiny_llama_config + +# Training steps - enough to see convergence on small dataset +num_train_steps: 1000 + +# Dataset configuration - use 2MB subset +dataset: + tokenizer_path: /workspaces/bionemo-framework/bionemo-recipes/models/llama3/nucleotide_fast_tokenizer + micro_batch_size: 1 # Conservative for single GPU + num_workers: 2 + max_seq_length: 8192 # Full Llama3 context length + stride: 400 # 400bp overlap for 8K context + buffer_size: 10_000 # Smaller buffer for faster iteration + use_lazy_tokenization: true + load_dataset_kwargs: + path: "parquet" + data_files: "/workspaces/bionemo-framework/data/genomic_sequences_2mb.parquet" + split: "train" + streaming: true # Use streaming to avoid loading entire dataset into memory + +# Optimizer - higher LR for faster convergence on small model +adamw_kwargs: + lr: 5e-4 # Higher than default for faster convergence + fused: true + betas: [0.9, 0.98] + eps: 1e-8 + weight_decay: 0.01 + +# Learning rate scheduler +lr_scheduler_kwargs: + num_warmup_steps: 100 # Quick 
warmup (10% of training)
+  num_training_steps: 1000
+
+# Checkpoint configuration - disabled for fast convergence testing
+checkpoint:
+  ckpt_dir: null # No checkpoints
+  save_final_model: false # Don't save final model
+  resume_from_checkpoint: false # Start fresh for convergence test
+  save_every_n_steps: null # No intermediate checkpoints
+
+# Logging - frequent logging to track convergence
+logger:
+  frequency: 10 # Log every 10 steps
+
+# WandB configuration
+wandb_init_args:
+  project: "llama3-genomic-convergence"
+  name: "tiny-llama-ddp-convergence-test"
+  mode: "online" # Online mode for real-time dashboard
+  tags:
+    - convergence-test
+    - ddp
+    - tiny-model
+    - 10M-params
+    - single-gpu
+    - 8192-context
+
+# Meta device and torch compile
+use_meta_device: false
+use_torch_compile: false # Disable for debugging
+
+# FP8 configuration - disabled for convergence testing
+fp8_config:
+  enabled: false
+  fp8_recipe: transformer_engine.common.recipe.DelayedScaling
+  fp8_format: "HYBRID"
+  fp8_recipe_kwargs: {}
+  fp8_model_init_kwargs:
+    enabled: false
+
diff --git a/bionemo-recipes/recipes/llama3/hydra_config/L0_convergence_fsdp2.yaml b/bionemo-recipes/recipes/llama3/hydra_config/L0_convergence_fsdp2.yaml
new file mode 100644
index 000000000..a0a8d35b3
--- /dev/null
+++ b/bionemo-recipes/recipes/llama3/hydra_config/L0_convergence_fsdp2.yaml
@@ -0,0 +1,81 @@
+# @package _global_
+
+# Convergence test configuration for FSDP2 with tiny Llama model (~10M params)
+# Tests that the model can overfit on a small 2MB dataset
+# Works with single GPU (no sharding) or multi-GPU (sharded)
+
+defaults:
+  - defaults
+  - _self_
+
+# Use tiny Llama config for fast convergence testing
+model_tag: /workspaces/bionemo-framework/bionemo-recipes/recipes/llama3/tiny_llama_config
+
+# Training steps - enough to see convergence on small dataset
+num_train_steps: 1000
+
+# Dataset configuration - use 2MB subset
+dataset:
+  tokenizer_path: /workspaces/bionemo-framework/bionemo-recipes/models/llama3/nucleotide_fast_tokenizer
+  micro_batch_size: 1 # Conservative for single GPU
+  num_workers: 2
+  max_seq_length: 8192 # Full Llama3 context length
+  stride: 400 # 400bp overlap for 8K context
+  buffer_size: 10_000 # Smaller buffer for faster iteration
+  use_lazy_tokenization: true
+  load_dataset_kwargs:
+    path: "parquet"
+    data_files: "/workspaces/bionemo-framework/data/genomic_sequences_2mb.parquet"
+    split: "train"
+    streaming: true # Use streaming to avoid loading entire dataset into memory
+
+# Optimizer - higher LR for faster convergence on small model
+adamw_kwargs:
+  lr: 5e-4 # Higher than default for faster convergence
+  fused: true
+  betas: [0.9, 0.98]
+  eps: 1e-8
+  weight_decay: 0.01
+
+# Learning rate scheduler
+lr_scheduler_kwargs:
+  num_warmup_steps: 100 # Quick warmup (10% of training)
+  num_training_steps: 1000
+
+# Checkpoint configuration - disabled for fast convergence testing
+checkpoint:
+  ckpt_dir: null # No checkpoints
+  save_final_model: false # Don't save final model
+  resume_from_checkpoint: false # Start fresh for convergence test
+  save_every_n_steps: null # No intermediate checkpoints
+
+# Logging - frequent logging to track convergence
+logger:
+  frequency: 10 # Log every 10 steps
+
+# WandB configuration
+wandb_init_args:
+  project: "llama3-genomic-convergence"
+  name: "tiny-llama-fsdp2-convergence-test"
+  mode: "online" # Online mode for real-time dashboard
+  tags:
+    - convergence-test
+    - fsdp2
+    - tiny-model
+    - 10M-params
+    - single-node
+    - 8192-context
+
+# Meta device and torch
compile +use_meta_device: false +use_torch_compile: false # Disable for debugging + +# FP8 configuration - disabled for convergence testing +fp8_config: + enabled: false + fp8_recipe: transformer_engine.common.recipe.DelayedScaling + fp8_format: "HYBRID" + fp8_recipe_kwargs: {} + fp8_model_init_kwargs: + enabled: false + diff --git a/bionemo-recipes/recipes/llama3/hydra_config/L0_sanity.yaml b/bionemo-recipes/recipes/llama3/hydra_config/L0_sanity.yaml new file mode 100644 index 000000000..8927bd3a9 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/hydra_config/L0_sanity.yaml @@ -0,0 +1,44 @@ +defaults: + - defaults + - _self_ + +# Training config +model_tag: ./small_llama_config # Use small Llama config for testing (4 layers, 2048 hidden) +num_train_steps: 250 + +# We want this on in CI/CD to validate that the script runs successfully with torch.compile. +use_torch_compile: false # Disable for faster startup during testing + +dataset: + tokenizer_path: /workspaces/bionemo-framework/bionemo-recipes/models/llama3/nucleotide_fast_tokenizer + micro_batch_size: 1 # Small batch size for limited GPU memory + num_workers: 1 + max_seq_length: 1024 # Smaller window for testing + stride: 100 # Smaller stride for testing + buffer_size: 10_000 # Smaller buffer for testing + use_lazy_tokenization: true + load_dataset_kwargs: + path: "parquet" + split: "train" + data_files: "test_genomic_sequences.parquet" # Use local test file for now + + +# WandB config +wandb_init_args: + name: "llama3_8B_genomic_sanity" + mode: "offline" + +# Learning rate scheduler config +lr_scheduler_kwargs: + num_warmup_steps: 10 # Shorter warmup for quick testing + num_training_steps: 250 # Match num_train_steps + +checkpoint: + ckpt_dir: null + resume_from_checkpoint: true + save_every_n_steps: 50 + save_final_model: false + +logger: + frequency: 1 + diff --git a/bionemo-recipes/recipes/llama3/hydra_config/defaults.yaml b/bionemo-recipes/recipes/llama3/hydra_config/defaults.yaml new file mode 100644 index 000000000..f4e7deba1 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/hydra_config/defaults.yaml @@ -0,0 +1,78 @@ +# Training config +model_tag: ??? # E.g., meta-llama/Meta-Llama-3-8B or a local path +num_train_steps: ??? + +# TODO: Once BIONEMO-2583 and BIONEMO-2719 are fixed, enable this by default and simplify training scripts to remove the +# meta-device conditional. +use_meta_device: false + +# Whether to wrap the model in torch.compile. Note, this is currently not supported with mfsdp (BIONEMO-2977). +# We leave this off by default since we don't see much of a performance improvement with TE layers. +use_torch_compile: false + +dataset: + tokenizer_path: /workspaces/bionemo-framework/bionemo-recipes/models/llama3/nucleotide_fast_tokenizer + micro_batch_size: ??? + num_workers: 1 + max_seq_length: 8192 # Window size for genomic sequences + stride: 200 # Overlap for windowing + buffer_size: 500_000 # Shuffle buffer size + use_lazy_tokenization: true + load_dataset_kwargs: + path: "parquet" + split: "train" + streaming: True + +# WandB config +wandb_init_args: + name: ??? + +# mFSDP config +fully_shard_kwargs: + zero_dp_strategy: "optim_grads_params" + calculate_per_token_loss: false + init_model_with_meta_device: ${use_meta_device} + check_for_nan_in_grad: true + grad_reduce_in_fp32: false + preserve_fp32_weights: true + overlap_grad_reduce: true + overlap_param_gather: true + sync_model_each_microbatch: true + average_in_collective: false + +# TransformerEngine FP8 config. 
See +# https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/examples/fp8_primer.html for more information on +# supported formats. +fp8_config: + enabled: false + fp8_recipe: transformer_engine.common.recipe.DelayedScaling + fp8_format: "HYBRID" + fp8_recipe_kwargs: {} + fp8_model_init_kwargs: + enabled: false # If this is set to true, fp8_config.enabled must also be set to true. + +# Optimizer config +adamw_kwargs: + lr: 4e-4 + fused: true + betas: [0.9, 0.98] + eps: 1e-8 + weight_decay: 0.01 + +# Learning rate scheduler config +lr_scheduler_kwargs: + num_warmup_steps: 2_000 + num_training_steps: 500_000 + +# Checkpoint config +checkpoint: + ckpt_dir: ??? + save_final_model: true + resume_from_checkpoint: true + save_every_n_steps: 50 + +logger: + frequency: 100 + + + diff --git a/bionemo-recipes/recipes/llama3/perf_logger.py b/bionemo-recipes/recipes/llama3/perf_logger.py new file mode 100644 index 000000000..6f2f77b50 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/perf_logger.py @@ -0,0 +1,136 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-Apache2 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import time + +import torch +import torchmetrics +import torchmetrics.text +import wandb +from omegaconf import DictConfig, OmegaConf +from tqdm import tqdm +from transformers.modeling_outputs import CausalLMOutputWithPast + +from distributed_config import DistributedConfig + + +logger = logging.getLogger(__name__) + + +class PerfLogger: + """Class to log performance metrics to stdout and wandb, and print final averaged metrics at the end of training. + + Args: + dist_config: The distributed configuration. + args: The arguments. + + Attributes: + min_loss: The minimum loss seen so far. + """ + + def __init__(self, dist_config: DistributedConfig, args: DictConfig): + """Initialize the logger.""" + self._dist_config = dist_config + self._run_config = OmegaConf.to_container(args, resolve=True, throw_on_missing=True) + + self.min_loss = float("inf") + + self.logging_frequency = args.logger.frequency + # Track whether to collect memory stats (disabled by default for max performance) + + metrics_dict = { + "train/loss": torchmetrics.MeanMetric(), + "train/grad_norm": torchmetrics.MeanMetric(), + "train/learning_rate": torchmetrics.MeanMetric(), + "train/step_time": torchmetrics.MeanMetric(), + "train/tokens_per_second": torchmetrics.MeanMetric(), + "train/unpadded_tokens_per_second": torchmetrics.MeanMetric(), + "train/perplexity": torchmetrics.text.Perplexity(ignore_index=-100), + "train/gpu_memory_allocated_max_gb": torchmetrics.MaxMetric(), + "train/gpu_memory_allocated_mean_gb": torchmetrics.MeanMetric(), + } + + self.metrics = torchmetrics.MetricCollection(metrics_dict) + # We move metrics to a GPU device so we can use torch.distributed to aggregate them before logging. 
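+        # torchmetrics keeps metric state rank-local in .update() and only performs the cross-rank reduction
+        # when .compute() is called at each logging interval.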
+ self.metrics.to(torch.device(f"cuda:{dist_config.local_rank}")) + self.previous_step_time = time.perf_counter() + + if self._dist_config.is_main_process(): + # Log the entire args object to wandb for experiment tracking and reproducibility. + wandb.init(**args.wandb_init_args, config=self._run_config) + self._progress_bar = tqdm(total=args.num_train_steps, desc="Training") + + def log_step( + self, + step: int, + batch: dict[str, torch.Tensor], + outputs: CausalLMOutputWithPast, + grad_norm: float, + lr: float, + ): + """Log a step to the logger and wandb. + + Args: + step: The step number. + batch: The batch of data for the step. + outputs: The outputs of the step. + grad_norm: The gradient norm of the step. + lr: The learning rate of the step. + """ + num_tokens = batch["input_ids"].numel() + # 1 is the padding token for the nucleotide tokenizer (NeMo convention: EOS=0, PAD=1, BOS=2, UNK=3). + num_unpadded_tokens = batch["input_ids"][batch["input_ids"] != 1].numel() + + self.min_loss = min(self.min_loss, outputs.loss.item()) + step_time, self.previous_step_time = time.perf_counter() - self.previous_step_time, time.perf_counter() + + self.metrics["train/loss"].update(outputs.loss) + self.metrics["train/learning_rate"].update(lr) + self.metrics["train/grad_norm"].update(grad_norm) + self.metrics["train/step_time"].update(step_time) + self.metrics["train/tokens_per_second"].update(num_tokens / step_time) + self.metrics["train/unpadded_tokens_per_second"].update(num_unpadded_tokens / step_time) + + # Handle sequence packing for torchmetrics calculation. + if outputs.logits.dim() < 3: + outputs.logits = outputs.logits.unsqueeze(0) + + self.metrics["train/perplexity"].update(outputs.logits, batch["labels"]) + + if step % self.logging_frequency == 0 and step > 0: + memory_allocated = torch.cuda.memory_allocated() / (1024**3) + self.metrics["train/gpu_memory_allocated_max_gb"].update(memory_allocated) + self.metrics["train/gpu_memory_allocated_mean_gb"].update(memory_allocated) + + metrics = self.metrics.compute() + self.metrics.reset() + metrics["train/global_step"] = torch.tensor(step, dtype=torch.int64) + + if self._dist_config.is_main_process(): + wandb.log(metrics, step=step) + self._progress_bar.update(self.logging_frequency) + self._progress_bar.set_postfix({"loss": outputs.loss.item()}) + + if self._dist_config.local_rank == 0: + logger.info(", ".join([f"{k.split('/')[1]}: {v:.3g}" for k, v in metrics.items()])) + + def finish(self): + """Finish the logger and close the progress bar.""" + if not self._dist_config.is_main_process(): + return + + wandb.finish() + self._progress_bar.close() diff --git a/bionemo-recipes/recipes/llama3/scheduler.py b/bionemo-recipes/recipes/llama3/scheduler.py new file mode 100644 index 000000000..9f9da8da9 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/scheduler.py @@ -0,0 +1,45 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-Apache2 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from torch.optim.lr_scheduler import LambdaLR + + +def get_linear_schedule_with_warmup( + optimizer, + num_warmup_steps=2_000, + num_training_steps=500_000, + last_epoch=-1, +): + """Linear warmup and decay scheduler for ESM-2 pretraining. + + The description from Lin 2022 is: The learning rate is warmed up over the first 2,000 steps + to a peak value of 4e-4 (1.6e-4 for the 15B parameter model), and then linearly decayed to + one tenth of its peak value over the 90% of training duration. We've found internally that a + longer warmup helps convergence for larger models (3B+) with bf16 precision. + """ + decay_steps = int(num_training_steps * 0.9) + + def lr_lambda(current_step: int): + if current_step < num_warmup_steps: + # Warmup phase: linearly increase learning rate + return float(current_step) / float(max(1, num_warmup_steps)) + # Decay phase: linearly decay to one tenth of peak over 90% of training + elif current_step > decay_steps: + return 0.1 # one tenth of peak learning rate after decay period + else: + # Linear decay from 1.0 to 0.1 over decay_steps-num_warmup_steps + return 1.0 - 0.9 * (current_step - num_warmup_steps) / float(max(1, decay_steps - num_warmup_steps)) + + return LambdaLR(optimizer, lr_lambda, last_epoch) diff --git a/bionemo-recipes/recipes/llama3/tests/test_train.py b/bionemo-recipes/recipes/llama3/tests/test_train.py new file mode 100644 index 000000000..c23e04648 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/tests/test_train.py @@ -0,0 +1,194 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-Apache2 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random + +import pytest +import torch +from hydra import compose, initialize_config_dir + +from train_ddp import main as main_ddp +from train_fsdp2 import main as main_fsdp2 + + +@pytest.fixture(autouse=True) +def set_seed(): + """Set random seeds for reproducibility.""" + random.seed(42) + torch.manual_seed(42) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(42) + + +def test_sanity_convergence_ddp(tmp_path, recipe_path, mock_genomic_parquet): + """Test that DDP training converges on mock genomic data. 
+ + This test validates: + - The train_ddp.py script runs end-to-end without errors + - Model, optimizer, and dataloader integrate correctly + - Training converges to reasonable loss on small dataset + - Uses L0_sanity config with small model and few training steps + """ + # Run the training script with Hydra configuration overrides + with initialize_config_dir(config_dir=str(recipe_path / "hydra_config"), version_base="1.2"): + sanity_config = compose( + config_name="L0_sanity", + overrides=[ + f"+wandb_init_args.dir={tmp_path}", + f"checkpoint.ckpt_dir={tmp_path}", + f"dataset.load_dataset_kwargs.data_files={mock_genomic_parquet}", + ], + ) + + final_loss = main_ddp(sanity_config) + + # For genomic Causal LM, we expect convergence to < 5.0 on the small test dataset + # The model should learn to predict simple patterns in the mock data + assert final_loss < 5.0, f"Final loss {final_loss} is too high, expected < 5.0" + + +def test_sanity_convergence_fsdp2(tmp_path, recipe_path, mock_genomic_parquet): + """Test that FSDP2 training converges on mock genomic data. + + This test validates: + - The train_fsdp2.py script runs end-to-end without errors + - FSDP2 wrapping and sharding work correctly + - Training converges to reasonable loss on small dataset + - Uses L0_sanity config with small model and few training steps + """ + # Run the training script with Hydra configuration overrides + with initialize_config_dir(config_dir=str(recipe_path / "hydra_config"), version_base="1.2"): + sanity_config = compose( + config_name="L0_sanity", + overrides=[ + f"+wandb_init_args.dir={tmp_path}", + f"checkpoint.ckpt_dir={tmp_path}", + f"dataset.load_dataset_kwargs.data_files={mock_genomic_parquet}", + ], + ) + + final_loss = main_fsdp2(sanity_config) + + # FSDP2 should achieve similar convergence to DDP + assert final_loss < 5.0, f"Final loss {final_loss} is too high, expected < 5.0" + + +def test_sanity_convergence_ddp_non_streaming_dataset(tmp_path, recipe_path, mock_genomic_parquet): + """Test that DDP training works with non-streaming dataset. + + This test validates: + - The dataloader works correctly with streaming=False + - Map-style dataset integration works + - Training converges similarly to streaming mode + """ + # Run the training script with Hydra configuration overrides + with initialize_config_dir(config_dir=str(recipe_path / "hydra_config"), version_base="1.2"): + sanity_config = compose( + config_name="L0_sanity", + overrides=[ + f"+wandb_init_args.dir={tmp_path}", + f"checkpoint.ckpt_dir={tmp_path}", + f"dataset.load_dataset_kwargs.data_files={mock_genomic_parquet}", + "dataset.load_dataset_kwargs.streaming=False", + ], + ) + + final_loss = main_ddp(sanity_config) + + # Non-streaming mode should converge just as well as streaming + assert final_loss < 5.0, f"Final loss {final_loss} is too high, expected < 5.0" + + +def test_sanity_convergence_fsdp2_non_streaming_dataset(tmp_path, recipe_path, mock_genomic_parquet): + """Test that FSDP2 training works with non-streaming dataset. 
+ + This test validates: + - FSDP2 works correctly with map-style datasets + - Non-streaming mode doesn't break FSDP2 sharding + - Training converges similarly to streaming mode + """ + # Run the training script with Hydra configuration overrides + with initialize_config_dir(config_dir=str(recipe_path / "hydra_config"), version_base="1.2"): + sanity_config = compose( + config_name="L0_sanity", + overrides=[ + f"+wandb_init_args.dir={tmp_path}", + f"checkpoint.ckpt_dir={tmp_path}", + f"dataset.load_dataset_kwargs.data_files={mock_genomic_parquet}", + "dataset.load_dataset_kwargs.streaming=False", + ], + ) + + final_loss = main_fsdp2(sanity_config) + + # Non-streaming mode should converge just as well as streaming + assert final_loss < 5.0, f"Final loss {final_loss} is too high, expected < 5.0" + + +def test_sanity_ddp_with_lazy_tokenization(tmp_path, recipe_path, mock_genomic_parquet): + """Test that DDP training works with lazy tokenization enabled. + + This test validates: + - Lazy tokenization (one-to-one mapping) works correctly + - Training can run with lazy tokenization + - No errors occur during forward/backward passes + """ + # Run the training script with Hydra configuration overrides + with initialize_config_dir(config_dir=str(recipe_path / "hydra_config"), version_base="1.2"): + sanity_config = compose( + config_name="L0_sanity", + overrides=[ + f"+wandb_init_args.dir={tmp_path}", + f"checkpoint.ckpt_dir={tmp_path}", + f"dataset.load_dataset_kwargs.data_files={mock_genomic_parquet}", + "dataset.use_lazy_tokenization=True", + "num_train_steps=10", # Just verify it runs, don't test convergence + ], + ) + + final_loss = main_ddp(sanity_config) + + # Just check that training runs without errors + # We don't check convergence because lazy tokenization produces different windowing + assert final_loss is not None, "Training should complete and return a loss value" + + +def test_sanity_fsdp2_with_lazy_tokenization(tmp_path, recipe_path, mock_genomic_parquet): + """Test that FSDP2 training works with lazy tokenization enabled. + + This test validates: + - Lazy tokenization works with FSDP2 + - FSDP2 sharding doesn't break with lazy tokenization + - No errors occur during forward/backward passes + """ + # Run the training script with Hydra configuration overrides + with initialize_config_dir(config_dir=str(recipe_path / "hydra_config"), version_base="1.2"): + sanity_config = compose( + config_name="L0_sanity", + overrides=[ + f"+wandb_init_args.dir={tmp_path}", + f"checkpoint.ckpt_dir={tmp_path}", + f"dataset.load_dataset_kwargs.data_files={mock_genomic_parquet}", + "dataset.use_lazy_tokenization=True", + "num_train_steps=10", # Just verify it runs, don't test convergence + ], + ) + + final_loss = main_fsdp2(sanity_config) + + # Just check that training runs without errors + assert final_loss is not None, "Training should complete and return a loss value" + + diff --git a/bionemo-recipes/recipes/llama3/train_ddp.py b/bionemo-recipes/recipes/llama3/train_ddp.py new file mode 100644 index 000000000..d7a4f43b7 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/train_ddp.py @@ -0,0 +1,180 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-Apache2 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from pathlib import Path + +import hydra +import torch +import transformer_engine.pytorch +from omegaconf import DictConfig +from torch.distributed.device_mesh import init_device_mesh +from torch.optim import AdamW +from transformer_engine.common.recipe import Format +from transformers import AutoConfig, AutoModelForCausalLM + +from checkpoint import load_checkpoint_ddp, save_checkpoint_ddp, save_final_model_ddp, should_save_checkpoint +from dataset import create_bshd_dataloader +from distributed_config import DistributedConfig +from perf_logger import PerfLogger +from scheduler import get_linear_schedule_with_warmup + + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + + +@hydra.main(config_path="hydra_config", config_name="L0_sanity", version_base="1.2") +def main(args: DictConfig) -> float | None: + """Train Llama3 with TE layers using DDP for genomic sequences. + + Returns: + float: The loss value for the final batch. + """ + # Initialize the distributed configuration, including creating the distributed process group. + dist_config = DistributedConfig() + logger.info("Initializing distributed training: %s", dist_config) + device = torch.device(f"cuda:{dist_config.local_rank}") + torch.distributed.init_process_group(backend="nccl", device_id=device) + torch.cuda.set_device(dist_config.local_rank) + + # Create a device mesh for DDP. While this isn't strictly necessary, it mirrors the device mesh we create for FSDP2 + # and MFSDP. + device_mesh = init_device_mesh("cuda", mesh_shape=(dist_config.world_size,), mesh_dim_names=("ddp",)) + + # Create an FP8 recipe -- this is only used if FP8 is enabled in the config. + fp8_recipe = hydra.utils.get_class(args.fp8_config.fp8_recipe)( + fp8_format=Format[args.fp8_config.fp8_format], **args.fp8_config.fp8_recipe_kwargs + ) + + # Create an empty Llama3 model with a causal language model head, e.g. "meta-llama/Meta-Llama-3-8B". + config = AutoConfig.from_pretrained(args.model_tag, trust_remote_code=True, dtype=torch.bfloat16) + # Use SDPA (Scaled Dot-Product Attention) to avoid materializing large causal masks + # config.attn_implementation = "sdpa" + + # Optionally use transformer engine to initialize only fp8 versions of weights by setting + # `fp8_config.fp8_model_init_kwargs.enabled` to `True`, as opposed to using the default where both bfloat16 and fp8 + # versions of weights are kept. + with transformer_engine.pytorch.fp8_model_init(recipe=fp8_recipe, **args.fp8_config.fp8_model_init_kwargs): + model = AutoModelForCausalLM.from_config(config, trust_remote_code=True) + + logger.info("Initialized Model:\n%s", model) + + # Create optimizer. + optimizer = AdamW(model.parameters(), **args.adamw_kwargs) + scheduler = get_linear_schedule_with_warmup(optimizer, **args.lr_scheduler_kwargs) + + model = model.to(device=device) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[dist_config.local_rank], + output_device=dist_config.local_rank, + device_mesh=device_mesh["ddp"], + ) + + # Create BSHD dataloader for genomic sequences. 
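+    # "BSHD" refers to the unpacked (batch, seqlen, heads, head_dim) attention layout, i.e. regular padded
+    # batches rather than THD-style sequence packing.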
+ train_dataloader, dataset_or_sampler = create_bshd_dataloader(dist_config, **args.dataset) + + if args.use_torch_compile: + # If we're using torch.compile, we need to do this before loading the checkpoint to ensure key consistency. + model = torch.compile(model) + + # If we're resuming from a checkpoint, load it and set the start step. Otherwise, start from step 0. + ckpt_path = Path(args.checkpoint.ckpt_dir) / "train_ddp" if args.checkpoint.ckpt_dir else None + if args.checkpoint.resume_from_checkpoint and ckpt_path: + model, optimizer, scheduler, train_dataloader, start_step, epoch = load_checkpoint_ddp( + model=model, + optimizer=optimizer, + scheduler=scheduler, + ckpt_path=ckpt_path, + dist_config=dist_config, + dataloader=train_dataloader, + ) + else: + start_step = 0 + epoch = 0 + + perf_logger = PerfLogger(dist_config, args) + + # Training loop + step = start_step + while step < args.num_train_steps: + for batch in train_dataloader: + batch = {k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in batch.items()} # noqa PLW2901 + + # Forward pass with mixed precision. + with transformer_engine.pytorch.fp8_autocast(enabled=args.fp8_config.enabled, fp8_recipe=fp8_recipe): + outputs = model(**batch) + + # Backward pass. + loss = outputs.loss + loss.backward() + + # Compute and clip gradient norms. + total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0).item() + + # Step optimizer. + optimizer.step() + scheduler.step() + optimizer.zero_grad() + + perf_logger.log_step( + step=step, + batch=batch, + outputs=outputs, + grad_norm=total_norm, + lr=optimizer.param_groups[0]["lr"], + ) + + if ckpt_path and should_save_checkpoint(step, args.checkpoint.save_every_n_steps): + save_checkpoint_ddp( + model=model, + optimizer=optimizer, + scheduler=scheduler, + ckpt_path=ckpt_path, + step=step, + dist_config=dist_config, + dataloader=train_dataloader, + epoch=epoch, + ) + + step += 1 + if step >= args.num_train_steps: + break + + # Dataloader exhausted, incrementing epoch + epoch += 1 + dataset_or_sampler.set_epoch(epoch) + + # Save final model to a .safetensors file. + if args.checkpoint.save_final_model and ckpt_path: + save_final_model_ddp( + model=model, + save_directory=ckpt_path / "final_model", + dist_config=dist_config, + ) + + # Clean up distributed training + perf_logger.finish() + torch.distributed.destroy_process_group() + + return perf_logger.min_loss + + +if __name__ == "__main__": + main() + + + diff --git a/bionemo-recipes/recipes/llama3/train_fsdp2.py b/bionemo-recipes/recipes/llama3/train_fsdp2.py new file mode 100644 index 000000000..bfa4a59f8 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/train_fsdp2.py @@ -0,0 +1,194 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-Apache2 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +from pathlib import Path + +import hydra +import torch +import transformer_engine.pytorch +from omegaconf import DictConfig, OmegaConf +from torch.distributed.device_mesh import init_device_mesh +from torch.distributed.fsdp import fully_shard +from torch.optim import AdamW +from transformer_engine.common.recipe import Format +from transformers import AutoConfig, AutoModelForCausalLM + +# This import seems to be needed with meta device init and AutoModel.from_config +from transformers.models.llama.modeling_llama import LlamaForCausalLM # noqa: F401 + +from checkpoint import load_checkpoint_fsdp2, save_checkpoint_fsdp2, save_final_model_fsdp2, should_save_checkpoint +from dataset import create_bshd_dataloader +from distributed_config import DistributedConfig +from perf_logger import PerfLogger +from scheduler import get_linear_schedule_with_warmup + + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + + +@hydra.main(config_path="hydra_config", config_name="L0_sanity", version_base="1.2") +def main(args: DictConfig) -> float | None: # noqa: C901 + """Train Llama3 with TE layers using FSDP2 for genomic sequences. + + Returns: + float: The loss value for the final batch. + """ + # Initialize the distributed configuration, including creating the distributed process group. + dist_config = DistributedConfig() + logger.info("Initializing distributed training: %s", dist_config) + device = torch.device(f"cuda:{dist_config.local_rank}") + torch.distributed.init_process_group(backend="nccl", device_id=device) + torch.cuda.set_device(dist_config.local_rank) + + # Create a device mesh for FSDP. + device_mesh = init_device_mesh( + "cuda", + mesh_shape=(dist_config.world_size,), + mesh_dim_names=("dp",), + ) + + # Create an FP8 recipe -- this is only used if FP8 is enabled in the config. + fp8_recipe = hydra.utils.get_class(args.fp8_config.fp8_recipe)( + fp8_format=Format[args.fp8_config.fp8_format], **args.fp8_config.fp8_recipe_kwargs + ) + + # Create an empty Llama3 model with a causal language model head, e.g. "meta-llama/Meta-Llama-3-8B". + config = AutoConfig.from_pretrained(args.model_tag, trust_remote_code=True, dtype=torch.bfloat16) + # Use SDPA (Scaled Dot-Product Attention) to avoid materializing large causal masks + config.attn_implementation = "sdpa" + + # Optionally use transformer engine to initialize only fp8 versions of weights by setting + # `fp8_config.fp8_model_init_kwargs.enabled` to `True`, as opposed to using the default where both bfloat16 and fp8 + # versions of weights are kept. + with transformer_engine.pytorch.fp8_model_init(recipe=fp8_recipe, **args.fp8_config.fp8_model_init_kwargs): + model = AutoModelForCausalLM.from_config(config, trust_remote_code=True) + + logger.info("Initialized Model:\n%s", model) + + # Shard the transformer layers with FSDP. For Llama3, the transformer stack is in model.model.layers. + # Each decoder layer should be individually sharded before sharding the full model. + transformer_stack = model.model.layers + for layer in transformer_stack: + fully_shard(layer, mesh=device_mesh["dp"]) + fully_shard(model, mesh=device_mesh["dp"]) + + # Disable KV cache to avoid complications during training. + model.config.use_cache = False + model.model.config.use_cache = False + + # Create optimizer. Convert OmegaConf to regular dict to avoid serialization issues (BIONEMO-2873). 
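+    # fully_shard has already swapped the module parameters for DTensors above, so the optimizer created here
+    # holds sharded optimizer state on each rank.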
+ optimizer = AdamW(model.parameters(), **OmegaConf.to_container(args.adamw_kwargs, resolve=True)) # type: ignore + scheduler = get_linear_schedule_with_warmup(optimizer, **args.lr_scheduler_kwargs) + + if args.use_meta_device: + model.to_empty(device=device) + for module in model.modules(): + if hasattr(module, "reset_parameters"): + module.reset_parameters() + + # Create BSHD dataloader for genomic sequences. + train_dataloader, dataset_or_sampler = create_bshd_dataloader(dist_config, **args.dataset) + + if args.use_torch_compile: + # If we're using torch.compile, we need to do this before loading the checkpoint to ensure key consistency. + model = torch.compile(model) + + # If we're resuming from a checkpoint, load it and set the start step. Otherwise, start from step 0. + ckpt_path = Path(args.checkpoint.ckpt_dir) / "train_fsdp2" if args.checkpoint.ckpt_dir else None + if args.checkpoint.resume_from_checkpoint and ckpt_path: + model, optimizer, scheduler, train_dataloader, start_step, epoch = load_checkpoint_fsdp2( + model=model, + optimizer=optimizer, + scheduler=scheduler, + ckpt_path=ckpt_path, + dist_config=dist_config, + dataloader=train_dataloader, + ) + else: + start_step = 0 + epoch = 0 + + perf_logger = PerfLogger(dist_config, args) + + # Training loop + step = start_step + while step < args.num_train_steps: + for batch in train_dataloader: + batch = {k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in batch.items()} # noqa: PLW2901 + + # Forward pass with mixed precision. + with transformer_engine.pytorch.fp8_autocast(enabled=args.fp8_config.enabled, fp8_recipe=fp8_recipe): + outputs = model(**batch) + + # Backward pass. + loss = outputs.loss + loss.backward() + + # Compute and clip gradient norms. + total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0).item() + + # Step optimizer. + optimizer.step() + scheduler.step() + optimizer.zero_grad() + + perf_logger.log_step( + step=step, + batch=batch, + outputs=outputs, + grad_norm=total_norm, + lr=optimizer.param_groups[0]["lr"], + ) + + if ckpt_path and should_save_checkpoint(step, args.checkpoint.save_every_n_steps): + save_checkpoint_fsdp2( + model=model, + optimizer=optimizer, + scheduler=scheduler, + ckpt_path=ckpt_path, + step=step, + epoch=epoch, + dist_config=dist_config, + dataloader=train_dataloader, + ) + + step += 1 + if step >= args.num_train_steps: + break + + # Dataloader exhausted, incrementing epoch + epoch += 1 + dataset_or_sampler.set_epoch(epoch) + + # Save final model to a .safetensors file. 
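+    # All ranks participate in the full-state-dict gather inside save_final_model_fsdp2; only the main
+    # process writes the gathered weights to disk.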
+ if args.checkpoint.save_final_model and ckpt_path: + save_final_model_fsdp2( + model=model, + save_directory=ckpt_path / "final_model", + dist_config=dist_config, + ) + + # Clean up distributed training + perf_logger.finish() + torch.distributed.destroy_process_group() + + return perf_logger.min_loss + + +if __name__ == "__main__": + main() + From 21b4469e0e368356b7418fa513292bb152acc7f9 Mon Sep 17 00:00:00 2001 From: savitha-eng Date: Fri, 14 Nov 2025 01:09:06 +0000 Subject: [PATCH 06/11] address refactoring feedback Signed-off-by: savitha-eng --- .../example_small_llama_checkpoint/README.md | 35 ++ .../config.json | 26 ++ .../special_tokens_map.json | 6 + .../tokenizer.json | 396 ++++++++++++++++++ .../tokenizer_config.json | 44 ++ .../example_tiny_llama_checkpoint/README.md | 37 ++ .../example_tiny_llama_checkpoint/config.json | 26 ++ .../special_tokens_map.json | 6 + .../tokenizer.json | 396 ++++++++++++++++++ .../tokenizer_config.json | 44 ++ .../llama3/genomic_sequences_2mb.parquet | Bin 0 -> 4956020 bytes .../llama3/hydra_config/L0_convergence.yaml | 79 ++++ .../llama3/hydra_config/L0_sanity.yaml | 6 +- .../recipes/llama3/hydra_config/defaults.yaml | 4 +- .../llama3/test_genomic_sequences.parquet | Bin 0 -> 7063 bytes bionemo-recipes/recipes/llama3/train_fsdp2.py | 3 - 16 files changed, 1100 insertions(+), 8 deletions(-) create mode 100644 bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/README.md create mode 100644 bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/config.json create mode 100644 bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/special_tokens_map.json create mode 100644 bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/tokenizer.json create mode 100644 bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/tokenizer_config.json create mode 100644 bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/README.md create mode 100644 bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/config.json create mode 100644 bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/special_tokens_map.json create mode 100644 bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/tokenizer.json create mode 100644 bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/tokenizer_config.json create mode 100644 bionemo-recipes/recipes/llama3/genomic_sequences_2mb.parquet create mode 100644 bionemo-recipes/recipes/llama3/hydra_config/L0_convergence.yaml create mode 100644 bionemo-recipes/recipes/llama3/test_genomic_sequences.parquet diff --git a/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/README.md b/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/README.md new file mode 100644 index 000000000..75065e156 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/README.md @@ -0,0 +1,35 @@ +# Example Small Llama3 Checkpoint + +This directory contains the model and tokenizer configuration for a small Llama3 model (~10M parameters) optimized for genomic sequences. This checkpoint is designed for testing and development purposes, allowing unit tests to run without requiring external paths or complex configuration. 
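+
+As a quick smoke test, the bundled config and tokenizer can be loaded with the standard `transformers` APIs. The snippet below is illustrative; it assumes `transformers` is installed and that this recipe directory is the working directory:
+
+```python
+from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
+
+# Build an untrained model from the bundled config (no weight file ships with this checkpoint).
+config = AutoConfig.from_pretrained("./example_small_llama_checkpoint")
+model = AutoModelForCausalLM.from_config(config)
+
+# The character-level tokenizer maps one nucleotide to one token.
+tokenizer = AutoTokenizer.from_pretrained("./example_small_llama_checkpoint")
+batch = tokenizer("ACGTACGT", return_tensors="pt")
+
+# A causal LM forward pass with labels returns the next-token prediction loss.
+outputs = model(input_ids=batch["input_ids"], labels=batch["input_ids"])
+print(outputs.loss)
+```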
+ +## Contents + +- **config.json**: Model configuration for a small Llama3 model (4 layers, 2048 hidden size) +- **tokenizer.json**: Fast tokenizer for nucleotide sequences (256 vocab size) +- **tokenizer_config.json**: Tokenizer configuration +- **special_tokens_map.json**: Special tokens mapping (EOS=0, PAD=1, BOS=2, UNK=3) + +## Usage + +Use this directory as the `model_tag` in your training configurations: + +```yaml +# In your hydra config +model_tag: ./example_small_llama_checkpoint + +dataset: + tokenizer_path: ./example_small_llama_checkpoint # Same directory for tokenizer +``` + +This eliminates the need for absolute paths and makes configurations portable across different environments. + +## Model Parameters + +- Layers: 4 +- Hidden size: 2048 +- Attention heads: 16 +- Intermediate size: 8192 +- Vocabulary size: 256 (nucleotide tokenizer) +- Max position embeddings: 8192 + + diff --git a/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/config.json b/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/config.json new file mode 100644 index 000000000..c23211f62 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/config.json @@ -0,0 +1,26 @@ +{ + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 2, + "eos_token_id": 0, + "head_dim": 128, + "hidden_act": "silu", + "hidden_size": 2048, + "initializer_range": 0.02, + "intermediate_size": 8192, + "max_position_embeddings": 8192, + "mlp_bias": false, + "model_type": "llama", + "num_attention_heads": 16, + "num_hidden_layers": 4, + "num_key_value_heads": 16, + "pad_token_id": 1, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_scaling": null, + "rope_theta": 500000.0, + "tie_word_embeddings": false, + "transformers_version": "4.57.1", + "use_cache": true, + "vocab_size": 256 +} diff --git a/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/special_tokens_map.json b/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/special_tokens_map.json new file mode 100644 index 000000000..a1e19488e --- /dev/null +++ b/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/special_tokens_map.json @@ -0,0 +1,6 @@ +{ + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "" +} diff --git a/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/tokenizer.json b/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/tokenizer.json new file mode 100644 index 000000000..57bd6c3f1 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/tokenizer.json @@ -0,0 +1,396 @@ +{ + "version": "1.0", + "truncation": null, + "padding": null, + "added_tokens": [ + { + "id": 0, + "content": "", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + }, + { + "id": 1, + "content": "", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + }, + { + "id": 2, + "content": "", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + }, + { + "id": 3, + "content": "", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + } + ], + "normalizer": null, + "pre_tokenizer": { + "type": "Split", + "pattern": { + "String": "" + }, + "behavior": "Isolated", + "invert": false + }, + "post_processor": { + "type": "TemplateProcessing", + "single": [ + { + "SpecialToken": { + "id": "", + "type_id": 0 + } + }, + { + "Sequence": { + 
"id": "A", + "type_id": 0 + } + }, + { + "SpecialToken": { + "id": "", + "type_id": 0 + } + } + ], + "pair": [ + { + "SpecialToken": { + "id": "", + "type_id": 0 + } + }, + { + "Sequence": { + "id": "A", + "type_id": 0 + } + }, + { + "SpecialToken": { + "id": "", + "type_id": 0 + } + }, + { + "SpecialToken": { + "id": "", + "type_id": 0 + } + }, + { + "Sequence": { + "id": "B", + "type_id": 0 + } + }, + { + "SpecialToken": { + "id": "", + "type_id": 0 + } + } + ], + "special_tokens": { + "": { + "id": "", + "ids": [ + 2 + ], + "tokens": [ + "" + ] + }, + "": { + "id": "", + "ids": [ + 0 + ], + "tokens": [ + "" + ] + } + } + }, + "decoder": null, + "model": { + "type": "WordLevel", + "vocab": { + "": 0, + "": 1, + "": 2, + "": 3, + "\u0004": 4, + "\u0005": 5, + "\u0006": 6, + "\u0007": 7, + "\b": 8, + "\t": 9, + "\n": 10, + "\u000b": 11, + "\f": 12, + "\r": 13, + "\u000e": 14, + "\u000f": 15, + "\u0010": 16, + "\u0011": 17, + "\u0012": 18, + "\u0013": 19, + "\u0014": 20, + "\u0015": 21, + "\u0016": 22, + "\u0017": 23, + "\u0018": 24, + "\u0019": 25, + "\u001a": 26, + "\u001b": 27, + "\u001c": 28, + "\u001d": 29, + "\u001e": 30, + "\u001f": 31, + " ": 32, + "!": 33, + "\"": 34, + "#": 35, + "$": 36, + "%": 37, + "&": 38, + "'": 39, + "(": 40, + ")": 41, + "*": 42, + "+": 43, + ",": 44, + "-": 45, + ".": 46, + "/": 47, + "0": 48, + "1": 49, + "2": 50, + "3": 51, + "4": 52, + "5": 53, + "6": 54, + "7": 55, + "8": 56, + "9": 57, + ":": 58, + ";": 59, + "<": 60, + "=": 61, + ">": 62, + "?": 63, + "@": 64, + "A": 65, + "B": 66, + "C": 67, + "D": 68, + "E": 69, + "F": 70, + "G": 71, + "H": 72, + "I": 73, + "J": 74, + "K": 75, + "L": 76, + "M": 77, + "N": 78, + "O": 79, + "P": 80, + "Q": 81, + "R": 82, + "S": 83, + "T": 84, + "U": 85, + "V": 86, + "W": 87, + "X": 88, + "Y": 89, + "Z": 90, + "[": 91, + "\\": 92, + "]": 93, + "^": 94, + "_": 95, + "`": 96, + "a": 97, + "b": 98, + "c": 99, + "d": 100, + "e": 101, + "f": 102, + "g": 103, + "h": 104, + "i": 105, + "j": 106, + "k": 107, + "l": 108, + "m": 109, + "n": 110, + "o": 111, + "p": 112, + "q": 113, + "r": 114, + "s": 115, + "t": 116, + "u": 117, + "v": 118, + "w": 119, + "x": 120, + "y": 121, + "z": 122, + "{": 123, + "|": 124, + "}": 125, + "~": 126, + "": 127, + "€": 128, + "": 129, + "‚": 130, + "ƒ": 131, + "„": 132, + "…": 133, + "†": 134, + "‡": 135, + "ˆ": 136, + "‰": 137, + "Š": 138, + "‹": 139, + "Œ": 140, + "": 141, + "Ž": 142, + "": 143, + "": 144, + "‘": 145, + "’": 146, + "“": 147, + "”": 148, + "•": 149, + "–": 150, + "—": 151, + "˜": 152, + "™": 153, + "š": 154, + "›": 155, + "œ": 156, + "": 157, + "ž": 158, + "Ÿ": 159, + " ": 160, + "¡": 161, + "¢": 162, + "£": 163, + "¤": 164, + "¥": 165, + "¦": 166, + "§": 167, + "¨": 168, + "©": 169, + "ª": 170, + "«": 171, + "¬": 172, + "­": 173, + "®": 174, + "¯": 175, + "°": 176, + "±": 177, + "²": 178, + "³": 179, + "´": 180, + "µ": 181, + "¶": 182, + "·": 183, + "¸": 184, + "¹": 185, + "º": 186, + "»": 187, + "¼": 188, + "½": 189, + "¾": 190, + "¿": 191, + "À": 192, + "Á": 193, + "Â": 194, + "Ã": 195, + "Ä": 196, + "Å": 197, + "Æ": 198, + "Ç": 199, + "È": 200, + "É": 201, + "Ê": 202, + "Ë": 203, + "Ì": 204, + "Í": 205, + "Î": 206, + "Ï": 207, + "Ð": 208, + "Ñ": 209, + "Ò": 210, + "Ó": 211, + "Ô": 212, + "Õ": 213, + "Ö": 214, + "×": 215, + "Ø": 216, + "Ù": 217, + "Ú": 218, + "Û": 219, + "Ü": 220, + "Ý": 221, + "Þ": 222, + "ß": 223, + "à": 224, + "á": 225, + "â": 226, + "ã": 227, + "ä": 228, + "å": 229, + "æ": 230, + "ç": 231, + "è": 232, + "é": 233, + "ê": 234, + "ë": 235, + "ì": 236, 
+ "í": 237, + "î": 238, + "ï": 239, + "ð": 240, + "ñ": 241, + "ò": 242, + "ó": 243, + "ô": 244, + "õ": 245, + "ö": 246, + "÷": 247, + "ø": 248, + "ù": 249, + "ú": 250, + "û": 251, + "ü": 252, + "ý": 253, + "þ": 254, + "ÿ": 255 + }, + "unk_token": "" + } +} \ No newline at end of file diff --git a/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/tokenizer_config.json b/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/tokenizer_config.json new file mode 100644 index 000000000..5e189bec3 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/example_small_llama_checkpoint/tokenizer_config.json @@ -0,0 +1,44 @@ +{ + "added_tokens_decoder": { + "0": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "1": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "2": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "3": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "bos_token": "", + "clean_up_tokenization_spaces": false, + "eos_token": "", + "extra_special_tokens": {}, + "model_max_length": 1000000000000000019884624838656, + "pad_token": "", + "tokenizer_class": "PreTrainedTokenizerFast", + "unk_token": "" +} diff --git a/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/README.md b/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/README.md new file mode 100644 index 000000000..54f35a6da --- /dev/null +++ b/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/README.md @@ -0,0 +1,37 @@ +# Example Tiny Llama3 Checkpoint + +This directory contains the model and tokenizer configuration for a tiny Llama3 model (~1M parameters) optimized for fast convergence testing on genomic sequences. This checkpoint is designed for quick sanity checks and convergence tests. + +## Contents + +- **config.json**: Model configuration for a tiny Llama3 model (4 layers, 384 hidden size) +- **tokenizer.json**: Fast tokenizer for nucleotide sequences (256 vocab size) +- **tokenizer_config.json**: Tokenizer configuration +- **special_tokens_map.json**: Special tokens mapping (EOS=0, PAD=1, BOS=2, UNK=3) + +## Usage + +Use this directory as the `model_tag` in your training configurations: + +```yaml +# In your hydra config (e.g., L0_convergence configs) +model_tag: ./example_tiny_llama_checkpoint + +dataset: + tokenizer_path: ./example_tiny_llama_checkpoint # Same directory for tokenizer +``` + +This eliminates the need for absolute paths and makes configurations portable across different environments. + +## Model Parameters + +- Layers: 4 +- Hidden size: 384 +- Attention heads: 6 +- Intermediate size: 1536 +- Vocabulary size: 256 (nucleotide tokenizer) +- Max position embeddings: 8192 + +Perfect for fast convergence testing where you want to verify the model can overfit on small datasets. 
+ + diff --git a/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/config.json b/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/config.json new file mode 100644 index 000000000..4f5f1c9b4 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/config.json @@ -0,0 +1,26 @@ +{ + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 2, + "eos_token_id": 0, + "head_dim": 64, + "hidden_act": "silu", + "hidden_size": 384, + "initializer_range": 0.02, + "intermediate_size": 1536, + "max_position_embeddings": 8192, + "mlp_bias": false, + "model_type": "llama", + "num_attention_heads": 6, + "num_hidden_layers": 4, + "num_key_value_heads": 6, + "pad_token_id": 1, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_scaling": null, + "rope_theta": 500000.0, + "tie_word_embeddings": false, + "transformers_version": "4.57.1", + "use_cache": true, + "vocab_size": 256 +} diff --git a/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/special_tokens_map.json b/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/special_tokens_map.json new file mode 100644 index 000000000..a1e19488e --- /dev/null +++ b/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/special_tokens_map.json @@ -0,0 +1,6 @@ +{ + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "" +} diff --git a/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/tokenizer.json b/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/tokenizer.json new file mode 100644 index 000000000..57bd6c3f1 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/tokenizer.json @@ -0,0 +1,396 @@ +{ + "version": "1.0", + "truncation": null, + "padding": null, + "added_tokens": [ + { + "id": 0, + "content": "", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + }, + { + "id": 1, + "content": "", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + }, + { + "id": 2, + "content": "", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + }, + { + "id": 3, + "content": "", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + } + ], + "normalizer": null, + "pre_tokenizer": { + "type": "Split", + "pattern": { + "String": "" + }, + "behavior": "Isolated", + "invert": false + }, + "post_processor": { + "type": "TemplateProcessing", + "single": [ + { + "SpecialToken": { + "id": "", + "type_id": 0 + } + }, + { + "Sequence": { + "id": "A", + "type_id": 0 + } + }, + { + "SpecialToken": { + "id": "", + "type_id": 0 + } + } + ], + "pair": [ + { + "SpecialToken": { + "id": "", + "type_id": 0 + } + }, + { + "Sequence": { + "id": "A", + "type_id": 0 + } + }, + { + "SpecialToken": { + "id": "", + "type_id": 0 + } + }, + { + "SpecialToken": { + "id": "", + "type_id": 0 + } + }, + { + "Sequence": { + "id": "B", + "type_id": 0 + } + }, + { + "SpecialToken": { + "id": "", + "type_id": 0 + } + } + ], + "special_tokens": { + "": { + "id": "", + "ids": [ + 2 + ], + "tokens": [ + "" + ] + }, + "": { + "id": "", + "ids": [ + 0 + ], + "tokens": [ + "" + ] + } + } + }, + "decoder": null, + "model": { + "type": "WordLevel", + "vocab": { + "": 0, + "": 1, + "": 2, + "": 3, + "\u0004": 4, + "\u0005": 5, + "\u0006": 6, + "\u0007": 7, + "\b": 8, + "\t": 9, + "\n": 10, + "\u000b": 11, + "\f": 12, + "\r": 13, + "\u000e": 14, + "\u000f": 15, + 
"\u0010": 16, + "\u0011": 17, + "\u0012": 18, + "\u0013": 19, + "\u0014": 20, + "\u0015": 21, + "\u0016": 22, + "\u0017": 23, + "\u0018": 24, + "\u0019": 25, + "\u001a": 26, + "\u001b": 27, + "\u001c": 28, + "\u001d": 29, + "\u001e": 30, + "\u001f": 31, + " ": 32, + "!": 33, + "\"": 34, + "#": 35, + "$": 36, + "%": 37, + "&": 38, + "'": 39, + "(": 40, + ")": 41, + "*": 42, + "+": 43, + ",": 44, + "-": 45, + ".": 46, + "/": 47, + "0": 48, + "1": 49, + "2": 50, + "3": 51, + "4": 52, + "5": 53, + "6": 54, + "7": 55, + "8": 56, + "9": 57, + ":": 58, + ";": 59, + "<": 60, + "=": 61, + ">": 62, + "?": 63, + "@": 64, + "A": 65, + "B": 66, + "C": 67, + "D": 68, + "E": 69, + "F": 70, + "G": 71, + "H": 72, + "I": 73, + "J": 74, + "K": 75, + "L": 76, + "M": 77, + "N": 78, + "O": 79, + "P": 80, + "Q": 81, + "R": 82, + "S": 83, + "T": 84, + "U": 85, + "V": 86, + "W": 87, + "X": 88, + "Y": 89, + "Z": 90, + "[": 91, + "\\": 92, + "]": 93, + "^": 94, + "_": 95, + "`": 96, + "a": 97, + "b": 98, + "c": 99, + "d": 100, + "e": 101, + "f": 102, + "g": 103, + "h": 104, + "i": 105, + "j": 106, + "k": 107, + "l": 108, + "m": 109, + "n": 110, + "o": 111, + "p": 112, + "q": 113, + "r": 114, + "s": 115, + "t": 116, + "u": 117, + "v": 118, + "w": 119, + "x": 120, + "y": 121, + "z": 122, + "{": 123, + "|": 124, + "}": 125, + "~": 126, + "": 127, + "€": 128, + "": 129, + "‚": 130, + "ƒ": 131, + "„": 132, + "…": 133, + "†": 134, + "‡": 135, + "ˆ": 136, + "‰": 137, + "Š": 138, + "‹": 139, + "Œ": 140, + "": 141, + "Ž": 142, + "": 143, + "": 144, + "‘": 145, + "’": 146, + "“": 147, + "”": 148, + "•": 149, + "–": 150, + "—": 151, + "˜": 152, + "™": 153, + "š": 154, + "›": 155, + "œ": 156, + "": 157, + "ž": 158, + "Ÿ": 159, + " ": 160, + "¡": 161, + "¢": 162, + "£": 163, + "¤": 164, + "¥": 165, + "¦": 166, + "§": 167, + "¨": 168, + "©": 169, + "ª": 170, + "«": 171, + "¬": 172, + "­": 173, + "®": 174, + "¯": 175, + "°": 176, + "±": 177, + "²": 178, + "³": 179, + "´": 180, + "µ": 181, + "¶": 182, + "·": 183, + "¸": 184, + "¹": 185, + "º": 186, + "»": 187, + "¼": 188, + "½": 189, + "¾": 190, + "¿": 191, + "À": 192, + "Á": 193, + "Â": 194, + "Ã": 195, + "Ä": 196, + "Å": 197, + "Æ": 198, + "Ç": 199, + "È": 200, + "É": 201, + "Ê": 202, + "Ë": 203, + "Ì": 204, + "Í": 205, + "Î": 206, + "Ï": 207, + "Ð": 208, + "Ñ": 209, + "Ò": 210, + "Ó": 211, + "Ô": 212, + "Õ": 213, + "Ö": 214, + "×": 215, + "Ø": 216, + "Ù": 217, + "Ú": 218, + "Û": 219, + "Ü": 220, + "Ý": 221, + "Þ": 222, + "ß": 223, + "à": 224, + "á": 225, + "â": 226, + "ã": 227, + "ä": 228, + "å": 229, + "æ": 230, + "ç": 231, + "è": 232, + "é": 233, + "ê": 234, + "ë": 235, + "ì": 236, + "í": 237, + "î": 238, + "ï": 239, + "ð": 240, + "ñ": 241, + "ò": 242, + "ó": 243, + "ô": 244, + "õ": 245, + "ö": 246, + "÷": 247, + "ø": 248, + "ù": 249, + "ú": 250, + "û": 251, + "ü": 252, + "ý": 253, + "þ": 254, + "ÿ": 255 + }, + "unk_token": "" + } +} \ No newline at end of file diff --git a/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/tokenizer_config.json b/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/tokenizer_config.json new file mode 100644 index 000000000..5e189bec3 --- /dev/null +++ b/bionemo-recipes/recipes/llama3/example_tiny_llama_checkpoint/tokenizer_config.json @@ -0,0 +1,44 @@ +{ + "added_tokens_decoder": { + "0": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "1": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false,
+      "special": true
+    },
+    "2": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "",
+  "extra_special_tokens": {},
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "",
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "unk_token": ""
+}
diff --git a/bionemo-recipes/recipes/llama3/genomic_sequences_2mb.parquet b/bionemo-recipes/recipes/llama3/genomic_sequences_2mb.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..087541e15ce232089208af465a6d8faaec33b054
GIT binary patch
literal 4956020

[~4.9 MB of base85-encoded binary data omitted]
zzC176U^Lg*8M_A$akm(bOpe<-WT*;a5PjhPRGhKq$h)YFx^}DDL)AV!$y>;ReX7v_ zH8@w6Qhqf>wg`SgP0d-6*)djAR`*3^30Xyd&lG(TduVI+yzO_z-&VGI7f&vC~q{!LgQ$TKlOKpvQ??4RAp488H`HF z)X3^R{|q`hy%4MWh+Sf>Rn*Dhay^9-PKLQU3YmJf`GX+^Rc6v}p?;Jp=Di4N>x{Nv zo0TGbZyUY_9rY))x07b$ujMY)|rLzpSkv0t^;t;8E$v55b=6wUlFoVX$yjC7MV$H+=b;1~uSr@GF_1&NsHFMTbmB1|TdMf8;Q4=$v z7{=qW^FkI;U(q&X;JDnufbx&r$zsLCM}bPBp3#dQn4P^cgjGn@_0^Ab#;oO|5itB%X6`XL*vQP55wj90zgpL7+f#pzbi zDMZuk(`=&pW4^T;xoSKriX)6cM`;$Nq`pvlQ9^EE-pzJwkzr?;y$#g`2Yq&gC`o%o zwEWOa@KSd>oZ2D4*1N59m%jETMfeLbIxWIv1ywccd(jAeoi4hJ<;mMmoy?UnTvFrp z34SCN82v<7cc5ryo{rRu=!7bmI8D6A*yE>VW>F7@yHC+X&Gc3B&>eGZl-V|t?ZIxm z`gkjjv zPrn2!Pzev=!8b|$R5#3*ix7fC!m(sM!*O==(N&@Z-guP!sITVJ0Bq78)MLgVQD3a3 z3XBb>-1_Q$^$CX1Z70F$LuVNNdgzXB6&x08-5(K7{i!axP+#SxL-suJzVR3zGSdGX zQ0V9D<^j?Qqm&PZ%Eo=U#0kSBT^ft?zN0W&ou*ViH5kvRRqi@!$Otyucf#XjimRBC z=B0nok#<{aWx6N}8z*202PM5rgSm8q2BA~nC+}eVhL-LcCY8Q=raDb!bOSnQC&6$_ z$hYCR=O`@r!Wb!asuE=>OLwAp(Nwjg!_)?`_6aOt&>CvYRAuo@|3)wH+`g)x(K)hp zO$N_!bxy5dx;lq4PHXd!NA$CY+FLm)U*inIl@EpUgjLabhWhD~{p-wfkq8$>@TWa4 z_@y+_MnupIF;i8rF8CE+$qeMfL$0b27EYofU6hTflIrW*9jyw;1?nk= ziFS4&*(Ii_A$3pLR18$3b)ud{H)Ibs_~j96_K8rHtVETiG-f)(>5UVgqROgEDoalf zX6nmUK{TN(f2R5f*Nh9!nJqs3KHcWDe@W456*W}p{wu7-)9sPxOEfp26nyPo0VTMRbT_$Rp_akVqzlL=a zr`!~3q)MSK2FO+PC$dz3l^(b{;wJUYpI61p{hWi3RZ$TSv6w}NT%pB!#2&~F0lFgU8{4pg#-)uG&rEo+Tou7W zq~ks=;U~RJTo2As@5nYfA6vMIWH1|dX3ULsTN;Ku;S^J=vC%RxRxK9~%%45NU~1Tl z@)XS$;3cKodNn#~E7G>PcFy#fS1|9bu^oqYxGj~&By!(K91WL=D>H}JgYqj4M7t`B_~l86rHGq@0rK{kap{8`nL7A^$AswO9GQ1aY;3BHmMXn zJIxePY+csNXf%qth4GM!|94dx|!P}EgDV4TKk@8C~WJxh?j+t%ndo)@W}#J zj9N=uaDf8MNFLnt7cs6w=BCrP{!$t!O2s%)uNcks2%41ncJN>N);cA6P&+z?p{j=3 zDZf^N(({U9zWu2Vh|g%A^#NTJ>%?dpk9)Y3euyg5HC7dU*-_Uzn3QCFtwi_RM7Hu|U`!8WNau|rH1X;dWmwg0rshx~h z#7J~Nj#kMz?YO>28EvVz4C=PQXcdMXDqEijmQe5NVvPS5Q^iPlta;8ZDw5O3&QMEH z4cgyO4JC4h(x$Xkx|{W!WA=$kK`%ylfsuw`4cD*5P`%b%>Y;euoVr@Cu-3{a7s%em zBvf=g>@+$WM&=;C;dIe1*bL`_S9J?jj2j2@R0==;uPA1W!l2-6RfG+AD>R@dbU}Zh z4q&O;D$2{Z$o8Kz^0P>tz+D~I<;rJVPzm@!v=Q%!QNa_Ukm?wiO&(cB`i%sYBNrH- z8pS!uHak1TKKE_?oo=IZ^dT8-nuajVT3%Y_ltEMR2Uq7G`WW3kT~{11CmBj6Hl|j3 zNx+MVnIq*S|L4BxrgPUmCu_=D)K)3{sRrOYJN~&C&Y?YzBRr0Kz7dh`JMf_;RpB{) z>Yb;**XNLHbXPy)s(Q!%-KkD>gVp`r1>(5+cKbzk>#caYrc)0} zarcV*v{#>0-GV@t|y4+{HqU*^>JH0&h*r3YLrrH8ZGl}G3cn98~lT_=D1PV z8ICjfOm=b`&?fy_Hdj${DB8JKsEr7qjemvigwb&wJ+h*jhlbEWo~+k+8JZ?P!giXYc9{L6 z@tD4%eleTzGb@hC_z-(B-nf`&P#4`0ICWkWI)^YzD9O6uF&|Zne1f#T= z=-(#}P=4g%mgs3maRaoIs-#wNcW9V>*=%l@+tltJh^fexK3d{rQb~NK_M$jV#2DivaSaEpVu>5| z9Cy1p-Xqd6&phez4e*Ev@soHbu`WktChFr3Hyic5|H})@sTNCuAGu5#`7`upZtiVl zCMf9^7yYqVcNVp&6+S{)xh^lw&BL;rI{Dg~#^3CXH&PL5Mny52 zB2k%cn%^7L)anzw4xvhj-l&5JXNr03jY|I*O%cZ(#{My=Yqs<_o6yM`BV*N1=D7&r z6N7M1&9aLze-xn|G@F;B!G_F!q5%{Fp}E-VVF z1LBu0x5W{)83RNDDtObWOR$N4CGHwi%p4Cg@E@+JxhTOsCxzCsla(q8QE|4_|Il*L zjU{1S>TW)YrmNJ@`bOC&Q9hW8W)HURYN@6Hf{?Yl|Xq)fJHyZV>pOZrlo$kNM7d$8tBB@ z`|JzOw>n8h>LD~0b;K7`QoTnRfnx4^)mf zB2=Vuge3(PGZHb-Oo}p!V!Qqa{j6TJS^ekMk>#j} zZp>4C5ZU^P9wg3*US^XhG;-gfaJK=A39?VBVfu+U5PE?6Vm{8R4fdbt8+evIQ1wHO zl^&>}-=Z|9y44sp^&Tvsp!=Tcj=CyV{H1GfG$g{=h<0G-8lyU@k%<#TJ$xbl;ad(@ z$Gt-^U7z8pyMqx_AFt#L9io|3iNY}1AFT?Kbf#bz)uO%p_bVJye^QbfoLW)1zF}TG z5N&j6Tt>+J#lx!%D_H35mX+lvs73Fn0a###IiH}qU0wWcd`T<@Fp$8^QUU9k_&11` zB1AiM3H?l!^$tKAEWvZ(QXJ0VCKfU!j?HOm4;MwbES4M5PC+>i?HG#sAFwto9U^F|?+e{sT7GWJ7u`8ph^C23m=ei!1qhd0@F$m|(fhG!zBFOKq5cR1d z43;1XIPHYPYptb)v;qm%1@6c0b2G&xYU(R0 z?tCU<@h#?ZbY{8__kGUjT z9nymXLjr=~|4$~8Ih@h)ERsv8Si2Q?2aec+$L36r7_H~B?E2i@t2SYq)7<(*ydOL- z;=Iw^rH+fhyCNaG1J;_q8o?G;6;+Jak2R|MT0}F+Wf!j;g%?zuK_frjWlc25zajM# z>?5x@N#)hA^a#SOC&sE<>VDcHbra8!Bnt%#JA5oKP9H^6yS-Yjhf;HMT2yMqz+v=2 
z6Q`s(FGBX#g7P!-SvX49k)t+qvJF$({+mwHz`!mHz!$k6@Hs`%&*4O-%;0aqPSg=Y zG1}VTv^jsq+erLHwd89Y#m7brr{6B4mC8h>8p8`lT|!O#Q#jm1vy#kH9@;EsbDV#o zi>!lk(;Cs-)H`ZFRR}c4By^xF6sLYSZs45P#3r|rQ%==aM@1d;rI*5(m5T{#&pzY)VP6yJY%5BVK4X?fpu18W>*l~CDKg;!`@4rRPvK-|I+O?4;tFO`KM zw8UzNnB4r%7OwJ&&S5-3ThYmT*sWp>SAYgo4arxKV7YX*BcFR_C~(nGO_Iytc@ zOCRfJ;*nTRHBxJ-y}p7TYAeW^C8ijZEc&WWDpiAeRPkr#tsoB`BI7o3hB zRS|kj+i?m#RSAxWS4?tG$st(4ovLzlQtbXShUb=o+_aTIsJKbMC~Qi=_F=#VY%*@r`&JBkhCwJ(0(x za~@69CY9uta0dE!lAk`tKMXqFi1}d-u}%EX86gs!`81Jcik|-E@?ZJHKbx-2pPfCO zyv_@wC7!DYJ%#i0wA!88%il{qVY$Cm+|#Y7jrv@d5o4&9aoptP2?tt2poXP>j<@>|_A0%w%3lTwwb+*qQ6Sq3+`(?Q#65 zuL9x>MjJy>MobaIXr=nyAMIVG7O1I|$(27?B`F5UP%r(AIxy2L9PX1Z5I zsqUIo9b#`#`dk?uU)HS4JjYC=dEvD08Mxj#7#Lt7R^nLras7QTP z4f@h8t0tO*BSlxv!&>wiSAH-5c)ONrm&353Z&5z^75L)Ch4t9a7Wp??i1*bnUu7fbcWj`+`k~!QRSU&91*}=fWOdCQbsQJHu$i_q@D0Nd z%}oaFljYQhv;=E1~kJF^bfyfy}+ubn9>UQCl^pt=wbYqrXFU-6+1jVGNb; z@PRcd*7?!yI{&(}(|U?QNHD^g<`&ZRw5m+o6Hp&f_*5-a-{SyE>YnIN8^u0Zh5;i9 z?c_*zF+SJ%-L9C0+p4!3%r@;)=P1v_PpE)C;#F_}&Ba0P>Ijvl?z?Z+73cAijCzR% zh}BI{6kV|~l^gn^qg~MY6lA^?ff~9ieSwaFRqPn1s*cjj4cSzDr0*MNnJA0kG{xW( zJfk1|*K_h=jJ#)EL82~?bXqEJpb`VtYRp#O$wPXajtZ_Z>M+!Bl?pZG7p#$w(w;zNro?+#ma0M_mVC|4e9`z0A5js+gaplS zqNxYQ=;^uDutF6lHodZ-n2{H6PV~?unJ$O2X_&xc&vvh-9igxAXW%rgv?>JK_F%oa_t zh`NY4(F#sXf2%%xZpqZ;BEb+wb^61+>Oog(;((rb4Re!Mox>D6PX2}GNF)P^?iI6Q zlqlv7wM&{?d5@kb?%d|?{{gknoZ`tbRcFS_IF;YMjDaG_N#>_Y7t?Sw$3!7fp2D4R zP*g+i$l59^a3Ae4Mo~?%i@pcgRthmLI27snJG!Fk@rJ*E9E8qll3v4Ge+TI2Xsy4; zRkaKM6Mu00tEmoXqBrP?SmX?2{v`2_Y^bX_ABk@E1b?N#`J74S8LybgF66G-P3LJU zrCI+%aZ_kDJ;3`;6?!`qX^k>(8?5vM?FgjmS%USeu@&99MjFe~_Fj}?vE0ee3BZbb z94~rFOW`z1Sk3HJbXrwnGTuc!sgS&neR>YHqQ1hQCDh7!Oh34_RkG8X;}IhzlIcr~ zbcWai)o!uZNkg3Y0CBWY4{%1}U%cijUdhm2ik0D2XQJY5KJ1*Eu`KDCvqH2G-JA)O z4@IoHMg&M*j`5&@xhIMuMXY#?N7Ru@sf5sMS;iTGri`3lqHRuB9AWVK2W7>(MAkRh zV=OU#CeP^u3|qTg8_%hlXr#{)RD=~Ss;g>S2F8syFv``?28&j{$d`0ad_o~gbZ;30 z8FI%F=z({kE^HJ^T4hd(keRG4gnS`4%JoipxgM`Y9ZE$yTBDp>2J_Sk{W}ahUOjid zqpr-x<;?^mltumN3@sDyiLFj1^_Bm_A$oxytp2(X^^B_$DUx(OMCi@Vr;gyV@GuUR z;5D2gYzBYS189|)!g}T)Mq!$`DOZb=D1^7Ukh-}4;RD{vmB4LSlHuZenr@w>0{B7x zg8U-W{M!qS_vjNwneFI?1x`VNdDufEQIej!S(G87n2r>h>jX^21$adQmZFDQ&1+VP zP^HZ3k#@BFddoh#K(%O)AW<@K+Ib5lP>APhKp+yan4;ebEk`!>wmXU#=bU$nt_WN1 zW<}isZ>t#nN(~ZCs9xIJ@>OV}oopWSsJeJttfp03Q~B&Dwwt}h1YH78M3V=FeC<7{ z^Dxc&+@Gfs(L>kw4Je|9+WA>S52jz}dmY2%^8_WWDL7{M+{P}U2%W$~CypgqJ$2hm z@leCut$I48bGzx4-lorDgQym|Vx|;wqo@Pj%knuMvsz@R4~DZ4`yjOyJyhFuIBlW* zR(o!=Y|TD`x(ud0i1cd?cPQ9gmK48R>veOSWP|AZ7D&c6Iyi;Dc}U)S8pQTIcF**<2?)uwg|42!$vg0$EZnX#6+L; zQFZZ)$Z!7Pff2(?VPcA|6$)}WH0EF*;rVZ5v{0+`6zUf2EZ<2SL6`jvncT{`(YhwR z>-V5__88IHt)+kD)cQtDL<4I-v(kF|WBS5uU>HxBTh5`QyMQ8{gZ>t3Jid_&safDN zt}H*w>u<0H#poZk8=cKhJ@B~Sp_eF1)!6{Wi_)}OztZ`*Wbd&m%hG0ME%lk$Ef3L; zB1I*IHoIP`p@vf;49r)Z`MJN!DG%}{SUtH({g_&+G(=Hb@S2_dcHTVsjM3*)_n};= zCb%xn%5QL7&7*o;yY&R&l-`Zxys`Q*o#(;5qqH%tNaOKmDp)(UkFL1cTBt zq|+hQhudV8by1qaH2RFwb{t%8y9`EI-PNR17e8wgBG>t#PV z*WbtbNi5U<`c8Z3krdWEKE1N{wLKj!jaIX0G0I5g9K=Lf7(<;*cLQ$NXSZ+2c}umS za9Wj@&ZSs~n)uQT=LocDnDiZbn;%Dro^%e2R5Lf*x|m*!s;Qp7-X1Fs^+bJZ9Co^I zaS}UB8Rzgqg?PjrBvVyADn)+gY*+7OaWd589l7Um%zlb*bW2{VX@t)Bf??+U z%n15|UUFWpp^0pdF0s6BE;pi)nO?{kW+oY_S)u1f2VapgoQkDUgxg>pd}$HtveQv? 
ziTxJun`0y0p<+HG(JkypJ8Ftz{;A&Q;vPql^QpfXYnZ)xQw>@e`>prIBJ~|t-!8_7 zt#mlh3IXi!*Vk{W)*OT#)_c}n?ruD${ghy|!;k7Frc&3;24>$F;|lJiUJ-vM_Mzrt z3KptH>JneRgeSQB)R!4Ztv4o)9b@l9Q7B` zpJD+0%-P<^e)E>pXJ*LDWWLu|H(EeX*S*#1v?e(X@FBu=U*{dF^^?5u{YXT;I5eAO3R;g`oF-LniYt~VvLmPEn&J}LO$+yU>{hTMSg5}&#cv{=h zAl=bTP-BrTE>SKQY6mptZpTI%VIOfXLs(_3t=z+IuAXBb4@mnyLrVdis?6yE8oMrW{W;2kB3yWTHehF+B`h zR6*F>HOW-hj949yr}PSC&010N9u3X8X59!x>j4<2C%J{x$8v74xR{0v8BTrClXvh| zF@DEJuIM;w%*x(XZ?=jyqDi!Y4q~vgnA&sOyd^jFwzAc!NME!1Xr$^glog{I*yu{c z^ECED6A3+5Ofi!@Y;}66Bj}5DB4GS3zVJ`csay}?=8i~A%^4&9RtB}!8>yX2po+Yw z`|F(Bfm`V5{SE)98Gwq`{}W$0AJEvGMDej2CKfT%^e`BeFKCawj||p(zlrI-g3&Ze zMv@=%P*U__Y1<(88K+lI{iO}6h?jNjR~cR-uqQI*KAkh+vPIaYCN5pT<>ZK5<9@O2mB7?!Z0 z{+W6>w=jpL!gN&xRkT6d-7F{1@6bTvl%o5{R(+gjX3dD4r>M;QQ;J-)_Gbo*(-5_f zYO6GHUC$3h89IXyEmLMsA$Z{Mh8!j9MY+^M90 zq=E?Wm5YjHn3cn*sG4`4ojaf30-b;{qCa-1)%4m;L}Tj+gT^cWG7i;Ooce{_D)PGc zjE4E=2Y<*rB~6+q*2u}~g=+1*qSk?TDcL^@FHo2AX_a~w_Yv*b*^@)(>`eDx>krgL z6LSnZJAWUsgu;wfR)+PltEr7Bs)}1JQu~RrMvSj&8RgYwsXj{Dqs{3aYJ%ayXKe^} zQa{lr*c$kgfvUAsfe%rUMx%>ZYMl4_ooMb=)#C0$UNKMLnpJH{r&`=fN znY%|x*h%YBQv#zLuw8m-T}LH!k=@u}p2r$p<-ZkZa7h_dp*H94C@r!<_yW_uL zEf&!A>3j6Y_cDRvbbq@z6~Rnilb%bN)@`)*9up%(K|bDvis;G0uy_eR^rjzVsIvZJ z@F7wyaSHG>uR#R+fy0>Vj75ydruG&#Vl-kj4EN0Am8!OO47F8G6 zE;*qQe5{mjO%$f;6qGXlWrEJ7Rk-dv!gd@B(;rDxE9bQ-vE71p^ zpk?3|6@_EoFM_A|TusfJhGO;&R5Oa8HfB>#J5~8a7D_Ny|KWQS;nc;yfvs$aFWM>2 zZ4Q`UPYT@#w#S^z1iTQV@QN=p7je+iqdOVpv`4HBCQ}$l&NojPv=iTu7cioK3(9Zw80^k$L?ukiBUc1UU@_CcL$YWX{^Ory zqOE<>z2J1Ef7RQZ!NpL}?CH^4WI>dox6m-}v3e5tTCHVrIqe=ds~D)vaCOC>Ol7Hn z@3u$H3zm0gt4a2p;O5k#h*06i5_A)3vO9Iae2UTIum>}!p|=(Fc515SBv6ES)f=U- z*!dGv_=);ZJac6m=cd{(!?T9bBTOcmVL0_tkRD}gp5#k=CbDuZr>yG*otFlVWn2Gc`d(z_O50UZ##fKtt-@IjWK;OsI zp!D^OP&@PzIwyA1>FiTkY*9<8N@{LBHXBmSkf zyvbl_gg;Bnar)6!rsg`BY^P9yJf$aNW$p?bc9t`8#ZYnWx1;D|EW(+bu8DUISb6xK z&ah;trE)kn={arF8u=af2@AQ#Rz7~NcUUC7LM44OIKt}XuZ+LM47)M2%V%gxD~zAS zR-#~dU^dfi1N08grc!~c`XGMQ#jsB`aId;c>7kTJp+vkei$t#2`CDSq_#$7b|*KF}v~>sqa_oq=VnzNKcX)2b!CM=>gkBt;s3WsXk0L5F02bqy=A zUdO8y;_uvi6w5xhIPM1%(MpuF?{dLkqC7RB_AmBk)>h+OUSd@KxGz1zj~bazQINhx zRZfeoZY{KQlB`Xr;(UZVhzos7L$M9K^M%FuKTb`NL?uHPT?1d)o%8-saZCU$SYf{4 z5}!|H?C#Dnxs`9+S&!8PbLTJ-ER@Zdpo`K|-s@jX`8ZQPqU|iv3NwXl()}24lhk80 zfS}*SY_&}nVrADnwG3^-LDd@dw4mwqOgC_&QRw0yENjNH-@YcJCoP4+#1Q$pzXR|+!UE^A{%GjiSN*qY7G10UPrgjdz)tRW0k5)){*Sb#iu zsg9WIpA$zOtYMrmWmEM9N;;7|_G;<^C&^coWxda+#T8<=Ma{(gnBZ`)PH)CLw1l3@ zQ>q)Pvuo=odtkoXD_GntVPL3=P~Xy+yhx)xy=FOBQMSZh8o;9L64l{=S3o!J8PwHL z^6{2#@&~n8^O7y`c4(2#3VOKOuAb`3xi`3xW{O;tlM!5#55fB(lAds9V!D%L&M|P8 z-ctAUG!{?!oMZ&x;x3D@9c#RujvJP9r=`_ViRd>+R<&AiOG?Z`(2_HtQMrK`u4r7I^ zk7^hY9InghIrNrpqps6$Y63g@sf}%WD(Rn1;Ub z1m*bGKO>;C)iG5R-Dw=M>4ba5_iI5k=A9YsXq-4eBT$PUF?(@c(jpoB9wq+9BXfkI zi;MYm+VQGS>;r0nIve^&h7sVS2EhZ=k&V$597Q~norg2YK#-QH?Mz)>&cv49{@xkE zhwOho=1t%GRdtccz*88rsX{0U|EQxVYEC!tOqIj}b4-LvgEZ%OT$iS!Iyq7xF||}E zqzBWs#hx|6xf5cROtx@A?S2b$kJ&AIL5$S<9vY_b9rgNU@INaMwgS-D?1T}1qw|J_3k zW<>lP%Tp`3H)IDZdQ@K7GSB-z*-CvN{$p#rlB(O?#9u049*S_+%ir8> zR)FiagnbfMsQ}Btpt;-=S|OP`)DGtbFWK5LFBNb67yR}12pXEL(q} zYW(di>{grbkSA*=zv%FXW7Ax04Ys5p-E}oPzdF99IJ@F@M zLP0T^E@2F_d7*VozhJnBiY4zO-vnq?j(Y@h=1xiKyUSM(j$WqC7zn zbR?!c4p<#zDkb|4@OCk01{xUaDW5Z+PamoI4GMa*)4615wsRjp8oS&LwA25d>Vo!| zZmx|`?J&q%Enn)Y+@DXyarXmffnmApwUZWms z@UVLC7Z0oV-X=V~*GPzrST#zNDA9&2(L)jhk-P*^fff25sikPD#F^3Zk&p40F6Da`w8pxdb@t(sJ>#BkUdmX%lI%5XmY+ z(5eXx3cWy4Gc{*gs@1{mt9nS#+=VxDZe9#2)8rsOO;5w1@Kmt9jPhC^IAc&%pL52l zTIS!e);fHSCV|`rI#E8cR1uYEk-JPcOwX!U(_TKS+RzKMu^zBbf~NKqgSmQZC3fP(zHE*N$7*gCo}knIF5q0p@B$gp?6D?xaY3 za7rrVKT6-_d|}kbo=aP(m6eSj2OA4B>}9Q|m&Tu}m^WGPjgECri!k2GYL`%(qz4FA 
z@Jf=@OjaUU4x(hNlB@^u<+mdlM*Zap3; zo;GMqLUXmyO}EDL5%aBh>q2A_S0tN9p@4rqT5>&=*V=`fQnt0-XOq+# zxyaVp`H*C-oTg5gfVqO=Ysm~2HQV|`z3xkY@Jr5n|5IJZ?Lsr@DW1_^)Qqe1LQ29| zw`1@QKBL~k@+*7qm<@a?#ja!ZRq0kvt1$JA_Mk%1Qkcr0S$*ZQX@lC~mC)y%3%Dm| z`+w*(bUhk+8?f9ej_&jU6~|!fmOjQuxGSzwURssj+6vSV~vwT=oo z9Vxe3Zd8)k=INs}eF3#_mTFK_e1O7{fR`Ur?H~97u1NV@8(M3%bac3xoPe|Xh@M3w z>Baret!L4Ym!U537TLkN0&`vHXRM~~>YC_h3m|!U0_o#K2IzzmP7W^Ye8e4uhkT^U ztMNufYqaE~d%6l*>TbqsP8O^%oS)obx+fyWw-VoNRNaldlG7JCcdn<9mio-<3oN9& zC_@XWXNJYIxg1`F|Iy9#2TbZVgqB#<%ti^QKtuQ^b(oG(N$z58!T-#1J{&_y3EY>a z^(47BL8-*9VDDe=o`@KRor_$|$LH=0Nwe%N0zt@~?G(lV9m^l`0V>Cqq)S%_-r!@p zESEwQ&zuIb>!}PNu(A^ez=e5y(x3b=)eG z@e9pB5nZ09JBGxe=fog$>)&Xou`T=~|AL3kMYQ80+E2~Q4yH4~O!VQ1B!(oiADhwJ zO;^V;GFYA_Sxr?oS|C?Om9QL1E>Si}ZxO4f`Y|ml{jLWJv#N`B=+5sewi5Cs2}>M` zd;bj(qnKF<}znljJiNkKMFHll70wc1&3b;D5eO02xJu^6h7 z^%BoA4+`rMgAPlSw1MOiMW!lXK9__&b&d02oAZQA3qu(u?L!*-%(Es-MLKn+H5f!s z#U4NP=HoOjdL77!JaaYx)SdErDO4%aL|#)VilYZsM1gux9EQ0$P+0w>>*0;0dY*DJ z&{!}vKUL)a&>(o#il>}tZk5APzK7+Mra#5@@LVe^()3h+MKVPV?63NBK8ke~MNj&7 zVo~rNLGVGrpQQWJr_NCKgP@0v$fJK%SCGa-tt*k(N3)~vQx7#8l|w6>E~+n8#{sTv zjxf}GYKS#wvpS}?Ys%LQ~B{Xb;TGOPW8DJua%|V5cfPqk(t9F zxVOTSa7NZqjPKGcJvDq7tM7lwAIRw}po%*Oq9r4VxPkGQ!`XvX-JIcX?a5AlIgukJ zo80x8<72T$ZPMlJRRW>nnQKr{Rl$rm)Iz!({K-B;nT1*@+*qWe!)lv*lYZffsF43B zpE~aiajw%*RZBqpf>DLf;%=lc%Ii&3%DC$luzt7yPy^_{FwuWl?&ji}0#n14!9UBd zlgRJvq7H5@DnfJ26rb}(`N*IrSYRDiAFKZE5W%=!I7BP-N5W2C!-stLvv^OaB5vrf zF^vB~8C0gvbXPj*%|Q=J4lc+0+M)5mR60rB|viDF%k_REEWM;o2;s=i#dA{Fn55A)N_@GKM)%Wh#^l&Xtoh+vcc8;a*!^`r|- zQM(S>JNc2K^I{yo!e2TB(nXhbw$~UF)%%>A{QNd%X6)pgmZfe=?R%``6^cLEr>Bd8 zz5%k~sv(!uf9M(dQQ9E6Y&v1|L1J_>zpIMLi;3Y`PBHkCOIo9GE}=FzaMO9e9PyUQ zur||fs%RI)DZc48v3}qkZg1?M{6=BbOm*TT_-S6&NKUBG0{blh`_(^q*W&b^V#+R3uezqG>iS?Ih|E7(acS&}4PI*psOR#@I}{U}{3JiHPr zGmtnb(QFgZm%0j`O})PgK{Q8Cy-FoYy~bIQhO0sOmth6X;YRq~@`2pWe8DQLa-g}WvLN;Y_!mukt31>Gm}!epQu{6h27S@ zNH)%bCDck{#N*LF?Fe-qdO6ZT0;bYD&H2k|Y%QlGqlhsTp5@X;n7Rm=)-9bG+988b00ZW%kghgLYOs>udkVNKCJRZ~TwF8!Ap&Sfg_ZM9jo?YlByt zpKlK z)A8n3AHJY}oep#-wIseW#v+j)nq%X^)D1rg7piV`b@P*>+9o~si3%=sV++%3p5({cw#V&dU8~I^Vnp$PZR_yV{f4M z1g^r)5cx^FX%a1_sfeew)X@TzYTythRVx64kCbUmJt*(0PS*I6@bxQ$iw>-QJRlyU1GPwyK7U z+TS>SBQJ8OS4ic9G=gSR2&?@G@=#6bhWna6vwo$#D1rHOiWeZ&JM28Rp5lnQtg=x- zCka)puicjFWyT~*aTd7g5?~JIkyJK42j+1nd_?Q8H2eXIIDL4o@f3SPOQcDPwUIMx zm&@x$I*kreJ6xka)(z^bdhvXDnO{n7%Wr<3nDJfov7N(i>eNsr(BJx>(9u@r5Fb8B z>k%oB@-#*Q_reH?mz?Nqq{m(=wGJ-y8j>%3!Ri&g%qxV(?Q*|I@w98mf!cDIZp!_U zI1&`bWyT0O@7RpK#K z7b~eUUr^1=)$w|)o1xzISK$)UcA}F?qz0-OCUbLj1c%Igu}&yG9mT1I@m$T0ZpCwZ zitJyWz{*(kn)d^h_m9Cf+>Rv6+f2=jacZHa2vN0Zl4kvu++m)UtsFqN)C&9;>>wPZ zf_f_6psQX=UG;HHag$m4LZSf|LP+Hl9@K(L;iupzHP<@B*Qf&>mMv@`I^G(pWc`Z$ z2vQf}%!QqT)JzwUJiFoHx14fHtM)@hF`+zWDZ}^+*{PV+49pj?-$^GkFxV&kk*I`P zjnR4;nmC{0Lh_a5oLccflgiuA`72c5rn0Q|GzVoRAXDb| zvB*bz)BaNRQCsMJRi310a$?3pi9#5|sXM-n_En?gT@8_JE_r<$!yEW7Oo={27umr? zq(}kND1OQLEt6JJdp9-mkI|L=+A%T{qiq@_hO|`Y#O;iN=0~xfK_%&3e5WRC%TKk` zl4t>u02d^Trb2ENYaFi7YGE2r<@5YF!g;Lz&Uzyd?dEK2FGbjnNN(xI0;4^Q~CGfUimIWsO+Y~XKH=HY1VZc=yg#=R^;);1q@ zKq3zQF|v?1TZ{EQZb3D9oR~^0r zO0MeU%%cC)8f!XblHaJ#Ei{mkLRBR zmx9&hlZL5}X_j+{UmL^q5$jvz=8|*}FV%F{FG%uOE>ZrnH_%IK3-_eXcpNFH>tZp@ zlE7dOHAe$#$p_pt+NpX7x0%kzL@*eWfP?NI_?34l=?Z~F>vXsgceW+%Oq_?GUQ#T4 z`VAGe-qP)1p+R_v(4ekDxZl+IKIELLgIfYS)L??!3`xOQaUdrBfq``2 z%P)-r@=$JstgKX#eC|8eNSaSC+~wR!$#diyvr==m_AzF5UjhzJ<&ifpij`; zIF7r{3_7k1ZyNv4O4muen?y9`1p?`3^d_7P3RU4CuJ9$gO5-e*+@2%2g9R|``Vz5! 
zw`g9pI*#Et_Eb(aoSG%)gK$!_d@S`uJ?BFtAtrdqj;H*zoPR?FtAilGI{;TQ55!}G zju*1CikBMmy<4`9)E6}Fhe|@@XB!`D3?MDOHu^+YI=t3NCB@7{m)v3CU}KX=3GAtJJ}(`n2L$I9!=g%%e+5v zTEYW=#&oxcS{e{D-Wh4){bBT#N@sj;pXWMUT{1sf8sT=RPxCPghDxxj@p(HNP2wWa zBTlQpk2(0VQA9VBS9A!=sFqN*Yy{5u4AVU1!&XI9QOZT% z(yRPMu+J@}c1J6rBfm>q&``9vz4i{Og}dTo{!%w_0=-?SS66dgk802{yo*+7w5<_6 zF9tIYu)% zjjrl6>oY8fc0pyav&Z~46&0MSC7oWb(irC>tid@-=Ze%@=T^PFT|vK_G;%PNRxNSG z@ypMP*DJ9It~^v$HIyGe}Y0c$8G5%@Of5Q~c3uwO6*gI+|KvVI=m# zh#nz3d|lS^l`drUyMM>I?$k{$>#opq59u>gTg!PHiU2wUT@aT(f;yC z1@X$RDuYpODFOe_ z1QXZ#^ScT3E`5)e;iANH_n2t=ygbsErw$IA)qP?>t{}lTrcW$|Cki)ugNYmkAq|`S z_pvTo#Vr&p;zi|1G_*g#Znvn>#Mx@i!dJWq1qBeES&6DR)eV$NLPxQN!E#2+>5){` zZISV*(+I1*|LIBUv^`s0wJT70orGkRz&87sc$Z(fNwhT|2;EY-@Pf0;-;c94$-`bi znwy_jqdzL!zi>bAd(=jLQ9vwp7vDu{!9;y^Uuf*#EPh+6N4r77rUB6gQfN6$tSLkn1^i+ts9SIk9OJV1)@)%ouhbH+K1=Mg z=#?sJHa2j|z82mfO0A24b5^*|e&-u~KG4F4;`A<8cQbHH7oxi6xH#FTui-j5l~RjU zoLf~5;*t0s=VkdnBHdf;EYoWh2@EYEGPn690v;&`dU@nO=NX<;j8RFo#$AjGH1p%N z{@a*=`Dmq5_#r+;dydC9l$`ptB-C0@3)1N!NWlD^5DI&9~yz-&96a3Ndjqu%5Q_6=5GqD2n^~H~po1Nt|&4 z-BD*@21j8fHu2}~ZuRl~`qWtOp(LvTYC8kT)Rj~nBOBd94)<#`p)Ptx(g$&BWu$eq zDqR*XFaTcUMdYHmmpM8oeCBkYW$G`&?<2n`yKQNM1TTqt7otMfs~e?7zxbI-V53zE z1?dx-tINoZSJ=C*6NST0H0nuExCNi;q(yy|pgsnPbbUlErBd8c=RiW}Q_2vZl}B1| zkktO+f`Dxe(IW!79`{pyoQ~$gSUKEBP!gY7FPt|Lk3aNo@=fz?Kjp`KQNw>hAL|Hu zgl7s_stCi)k5ZUsw)3H|eM#+QA0Oj}coIBH?;_}c^6>!dY<=ZkDQDsn_hx6bq_3!^?GH9H7sg{CDjMs!2la+UUJQpjES^WXB8i|2Y1kk#vuQ>CRB~9x<{-9sHyK*qPI}0oPl^C ztsZ3y)-VwlPPRVt0*FNtJ%A_PVIY2&8{B{Ner|#}#$BpQKSa;dWaQy8sBi6(+qP=7 z9P_AagwUPp>+*uq13k-*cZ*u_?r2pAd^|qXdiZ>SB0XpXKYpA#^mYkb43nBuzS-s;tJPeW=gCnfC~{ zoxk~4Ht7rb&KZ}6+gI-Ysb8_>UB^x|b&g=2bA&n%@;@e7kjQ{omS-o)8hOSsLi&lPDsVb!QCb(yfv1a2uv=e zD(Wm9bIz*-JV2WD0ZP+i)l?5rMYt>7<|2xDvRaDfvPY+B26D=RCi6PaCKjMPzlqheW!^?ZRA^fk>!0zR>((nrz)rBn2VC{CrV7s4*OhFVfy$?Z%L zG4WU0D!ibu98DvhC=pFvbY#-~);~(Wv1ksQd zqM9{y?2@U##KlR5Q>y9wj)tu7`5MJrZygOZZ1gEoefS z4X^v^+E|BVj>jbO(6|Q7Xg9YO2%L|a=t-LHpuhJyRN6rMoFyG71l**BpRieU zQ618o`;S>xV3whd>!DOyynDb%lb<<8Nan9<3H^&1*rvLPbTkWPX)NUs3V%djb6i?a z|Dc45p$=AY)mnhFCF=M$3MfjmHDj8QAwQ!uN#Tb?Wg}4;@0n+$0k?4EdN`~7s;J$b zPdQ3D=aliGDY@uO3#mc4h=d9kL`{-Fl^;ds7|)G(oj|Kl6~(9;y{A1q_Nux6LkZdT zpp#P<4Anq3#5k+Hnd)8L)A)l1(dP6`#OC98ZmK>bLqAhXkff@sX{rxqu_T|+f@mIz zF#4l_{+_NItr4_}%5|?9ERJ!;IhurjoRv6dZMKWr^)ZgyC`om7ipVQ&i}ff)UF?lk z5tJh`lH4y$Hx>cuyZS$rRbA0I)Ic}WAvD}Jgr;B)hmj+V&{T*rtr}H{Y60xcd*3b>KmMk-$H&ySOcY^B% zR+(&3B{ME3(&+)Y(fa8%)sD<;BZO~Ajf$L?mN;a~d{!uem!_j9UW(rfBs{?07b(N=!RA^#-p8ojY&tb2}Qhtc2dBt}#g`>d-1Vm(zY zOeUqyVV7Knf2UXb*C_L#Z8x2JXNrQSQref- zHY6VEfex_vZR;%*L0SA~_^1tLnY9EdLZj3gzCeMbqKT+N3q$~r&c!krqmlOmsyQ{I z(#a6dRdrsrt%w@UIcT<$0SK~IOU=L{=N-;xCL7*JV<37XMvPR7PMzmsJYxkmmq-p% zQB2nj@s72G%JX$^pp!?Hq5xJ4g`Xxym0)UV0b>7%3z+O(w<=0vPb~h(Xd%q4ArHb1 z85)y}>?lutxhhXaEjvvYrFP-<64XtW0<7x6e^oZ4f$Mi!_2nYSEAJ^mo$#d66}@#$ z=Y`RW7jZ7+L1C_n0eV_$Y3|h~CznxO)z8wB=LbBnyQ-6_E(!(;#wPqHzOpY`>s_>% z+c+<9o2!fb5-%^VvvViyBV0RoHn1jE0C77Vb&EJ%RkAan(lB(B3LK-o>M!DoK{e1f z+L>=tZhv~PfYn4id0t~$P!2}2YaL+7uz|sHeJVsP7s~K@d}WVQXRK4yR!;XDs&1C| z`HQ1^bU21lkkYYO9l{;E3fc%4s3J6_pV}e|+$WImEt=bXRU>>DIa8@Yy0nEr92(3U zEtI*pb4q6@=4UQLZ0LQhPS$v&sWxvjL;TDj~sl26lLeVI_kDK6KTw&3!8>d33q zDV>VMq-W;5@yO3(tZH&nzqV>pJGFk>Myo8vn5T293DkhrEixzxr0yA4j4lZJYoRL_ zm%%($e32x%NXhDbgwgt}Xqo55K{Szor2^Il#s;XocgiR(9ORa)qOlsnRW++l{$1Fw z|FHj+!%^64dagJor+#ObkktpA()f?Yxkv0LNaA+Fyi?T^F~v2y0_E0==>Xd3?s7z9 zLkIjHTT?Q=L%@w($zOU-gV#pw{Xj>~_R?cDeFw_*7w1Fp5 zTY>UKG@==*zWWy~w6jL~p@?@M)tFEm<;+|LO~p5Qik_H%e0-o)A$%s7Ig;km1^g1+ zp)wWG*l=mx5jOXwv**4;H;HafTMO_V2N2-*=|@^)HK#sqA+cVvci7 z|BSfX5&{VsjNB4 
zXQX4KZlia~k(@x)tX}#{)tm38zsNr_KK*BWXU(TO@;p1$mvoA+i5EMrZ1aF0M zX!>bPce^-+jPq`FJq1@(Z8Z*KaZMb{@le5VS;SLcTA%hb@Ux+caTQgF7a*@%Xf`f@ zSTi{`;{bN>S7J!lbU&^bHlt1WjvCI{=_6Vpk8=QfluJgqnRg@lKkkD|x)$$uCZm^J1!JO-2>99^H75gh~H7<5W(xS20*)uJHw9XpJl*kNqAU zQpI%=I)}gJ4a#(Cra!}QNctI9^x8Xx=rVQ1R(wedyj;|j_R$n<)`Xa77S#rGwB(kl z>UxFuYv5;7x6_pnh|H9`fT7}X%Azh+)k@gH8)H(jld=4-t|qRgIL+ca^sZbJ#L6Yd zQwelWsWT=5Zg(2)mNAzXb84jD5H?pBlSL@#NXMhqRa3oGT<{eAiT#rrrM9RzQOQ=( z4;&VU5)U^p+K?`%zBij4uSTu+QHt`%BlrMTeqnJV(C4*cbLVk8>kvi z)${3@_lCwp3WxQjs6wLT4;~R5$VaBGY;?wbCohL+Mp&-S)hzEX{7oJ3ktda~T%K16 zLOny8I*y0)c5&Fst!}{-3`ljm8dLd!3fP(ZCJTIVSVHsh43|f9szR+BStB$Rbu@ zT~Z#OodeC(1uIN3!DafQE-x2>?J;>-5Be!1pL-^`u=PrHuwx_%w6w6~QMf_J$wxQr z&G=i;bP#D&!A3IqrqvSbxQ9wqd#(LZr~;T|cFpDb)eZ^3&f*PC36`{9;1AVDl#Zxy z(e6f)s3)08gF_&dWL^gvj2=9}smFb}Gw$IxYaE}C7U!Py29-i{sf=o%>${&~m+GSO zQ`jG)df1;hD~&z$rPDz|oc{<97AV>)cj0XlWVc$0^fTC4gzmglub`snMjvXQc%Sk3(1@{)VV|=3 zIo+U}ir}}#3xM|Kl2pZ6AeFDPjnp6X$Z(P!z)WSjLFM7Lq18yPqAN;E{&Sg$>Yc4y z664pQy#CV0>!jT|oUQ5|x^0ygtA(+FDk7Fgcj!!s^j~^*yNR_ATO~ndA)CTkmr7_J z5WL&LHK{rj-KHf}(Y;Co!r3DiBdbL?_#J7ylr|$29>D9}1Od5q;au~ca16vN%TBFO zB~UWlGNYG~Di@^hsk^jXzT-=Kw>pE}Xs)k#jY&qHh1W_erUkKBf{(1Z)Y8qPhHopV zOL*@(6UDn;#&F&b+n<}>!_Wf{oU>L!up=J-LKU=-5$MF zGQvg~bhwu3rl{vPiP4G!V5)WjM0LXbS8m*|^3AlY!o|usVwbs+#n%BfXWlrr>g4L&$xWoS3Dr zIUnLj<6`)S^fJ7`%WO2pC=mQe4&w=#4)vU>)s(TiG9p*z@c#eM-jo?Kcvpt+W=@~Y z{QJydzM1oLIYkEd$~4~;nX(JXhRXc%6!{zZI{9Mxa}y&dBWszB1Ydg@-P;r?nS*jm zp+wFmDe{HrlqE%FL&x|g^3|Z2CNjYn)w3|_#H6qUa22zpz>3eD5sV%(Pj^h_yxAJ^ zo6K>s^L+BjMiu$uK3Ur1km`rY^8X7sno}N<+21#YSq(G_VW6ax^+LWZOg{9NZ!|I= zA#;u|>cqdfFLMmB%+aN3SSo=wsc~7FSQhPzNohhS?a4$L-ZInlI3o4=vFqd9zqm593!=f#%4u?gF2^35?@YXNAISmzch?QCJprnX{_n+z@N{Dptwy>8pJ-!O9U@PG{tWTHRy| zR6Uw0_`MA8%KxXUw)CY;^SY;g#vJjk(vVGWq^>-dD--ZQOJOq|HhObD86~Vc>Pvtu z@GbcRA4DT=iW-7*+w>@XSU&qa0>1X##9AfJBOexpYvK}@ln^mib#aRE3Hf~fT-U_| zeiB|r1>|;fy!gMzh>>sYtzD{$iujtPESg)fVa`K7J(}CO zwRj*N30wOnONu3R?qXI<$Lyu3!@v3$32M%8r1G8?<06<^sY2ETR0-!Iwq|nO&&3>Zh{^MV zu*<6>3lK}W@T2g7cv>szgfjbpq!}8Hi$GP}MT#d5}jp#~mWl3S} zIVzEJQz2YX9x~^<-JpoIAt4cUgno=v9qtbc7pIG%f2bl=NNz%V>{23d)K)k29X$p2 z?NAnWZ84WNJb@dbt?n#$uYZepTuv=jnpef7&qr{wcD zM@=%}*d1}soD<9YM04%zZ=-8eDj8KeQMJ{*B@9vY77iMZDR;P`BTJV@sDV;#@~LZl z&G|Ig$0%XEYwh!7=LKL3Q*LKKa^>)Ex~1Gk{D~Kr>gWy$HzT7GMp9;{Cr_t2ZB&Fr zS=EJ=pX%a!AqZF1MRB)GXaek3n7p z*rya958epJsDA^$OCBe`qfX-+svmAHGJ6^d@F3aW7uFNhqBzw8L#)sA6#F;fJWZ0r zXlV&m^e0lj`+M1OW;$Ukib{WBDvxURkJL@&LmJLF5A-6*Ww1h`3kwEb z(2V*vQGTk)f2&tkkL0VELs6`hjwSU4#@1P1OM$|K%smxu((Gw* zy+#(d$nK2D(6PaJ!F@W9x~PNXhALqvm!})4Z~LF>rx@i9S9NIy62wefQ(bb=U&)jOLC)CzzBOQyX`>kEVd*=wo#d9UGiQ3%XxlZXu(?|8rm67^}6}Tm5 zR@q()734BJEc_|0k&|^?y-SsN4VFmn+_3@`YTkE&7YTGeVS`uQs*C#cfnF6XhvB5D zl_XlA3opbu6~z@YXbXoiz!;m^S%0E<;}S>@ZWkvpjo&c>YLcl;B|(OBLQZvGE02_V zwQRGQMg{(ib>v!E+<(WwDZ@yxKlGsq)eZv%a*3Lw9vD?N{W!e;G?skBbAFE280COJQnS6~tO$ z%A> zX4;!^@yIPsEH6&rziqv!hy*U9+|^#lDokqwsXp$C%GL{t!+o7yuM~OV`$$*o6U@bS zz1#RV{V>T)S(vz|KTZ1w>**?A&Ir;pE34WfN4LK7IX^;%BiWC3j7a41R(oudo)atV zmhuRB@R+k=9R_n=^M0IYj#30gt@R+>@=q4;vefol|6wU*4zS$An_2J~8s@6*il(@mQ<3rcPCxkZ25{nur*G%7T8kSEH6RwEIG&ucB@tZox%m9;^7o zL^zz^^x{GZA_?R|4~dhT!=zMmc}_J%eM2J=q4vf!d_oD@&vV^`q^7y-Z8*S3h0>Se z7$?zun$z4Ji@wqV<%Rmh_=j^!C0P#9d=&MuCaKptZ|HN>jqK-pb~W^|Uf@3ZIyWhe z#*0T;A_-C}_%*)u4v{4f^U9dUmv{ua&^6`2Gs>sJ<3$?XWY+ge&jQ=#M19bmGu5~lIt)A-^v{L#lBD}|5ky@ z>Z&4S^dn`)phDo^CLz)6ue&d|9I&0vyKt2phTh4yu;m<`pdMx8> zg+mlj*HBj(?gBg)dT@*qjIrtr-^YGoM0M5sbU}Y8X4DoBjU_tWNLRJc7q`^!NT*WX zSrrhmB>}&qi6mU?h{{A_pZ=3?%OIwhM>%v7c17NIzr*mf5;A%2iK-JlEnh$-F-iE< zpBSeytla+Y_za6B=H9^*=_K-*-}&UUw!vUYeci4aB9AVkS_dal433-meddxJPJa33 
zn)HVi;4_@SpGge8lRI0n<`HoOSgfliEsUoW<3IVKO4Piayi_ePHUtXB2fx?*GU725 zgYZx`-A->{Fl*_O4DcI6!o3cB3vl=b|_7z4ytwDX5*mnk#y3*7&Vyx#sMlK za(pUHOYbWezRCBmBPYJZNM~$di%-`QTpAeNg|}5aUsbcsOS$Ei;C$$?^m`r_uTO?5 zQ&VMyimF-`N2keNc8+$Xwwy)wwVi6Jp1b9^RrIw4<^${#)+7Pbje3wI(#SnC*Wmxe zc!nXBLTIYyP(KWC15{d$V1gW(|Im_7Wi&&;?S}47NZ#Z}x{o@V+6H$8-V5jhb~kkn zjS|L&vRT)}1e?nzrZRa_zV1JRyyq@aQ%tP;Zd zr3UV-@Kf=q9#OPw=n~r7S?Pqh;1yOYy~*yBo<(vw3Q*fWMO9bw^(MSq;-4ptWv*hjJDDr)q8G4rg=!RQsqA#-?|K12eM25-gK!92s%s zk4?Kr%P~Y(^#bOeb~jFZl{v^}k~mPsWN#Li3pgDtl_eMqVFA)mBL z8?D`=UVHti|62*}V#0sBU!sEh(6nPwoj&CT>aehp1_J7Pv{dFgF}xjRu|BOC6~}(e zRZ=4ENrz9>6xHYD;^~ksBSw9>@t%cF7{!MA3v;OoHKcNKUTpsayPfE|n~{N9{F5`$ zp6gcDEkuHQ7D?-&(!@}LnkFZYiUZZ`r2 zJQ*ZN)#!bh3^rem-Zwl-Y^aI%Iaw5<+UCu;d3_;6!{qUPwGU8j*@%i(Yi`a94F?Ob zj|T=_X-K{Y8=kf@ri&YeX;1;t(95x92{XoC+6EuiC z)aPl7`Ie8SQVI84LGW)do+HV-o#WhG80Aavv_L5JqbrZnM9rI^U-(z5nbu*IZ0)vk zIIS_wOE&kCfZ(af74+s|?kS9wm;DIy(r4+fX>JDL7oCE0;>Z2sp$EuT{AQ&(e^W~> zZSzrxD`Jc8r++~|%qO4FiW2lhXwZ)~p>8;v>MqA{5K7S{f%iQ;S-)J^ShHjy>@>iUQp+i=2c8b{4hT&L+CDnWPUmFSJ7n8>}BEp&>CJ=L<} zvD|rJ7LMmwx@^6@m1(&RgKGAi@6z>P5amSfIsJ@C_^Vy8w3Q2dq6a2%Aed8 z|BAv@J^5cOR?66^8kmja%#*p(p3>Ze!mQLv-4V%_1jT{h;wM-!k z#?xi9j}Nm+=CYT~4ZG%cw%fb!Ap0 zVyKEEm!bGI{iP%#?Kd#TDI)gNTbBvU@~J3ar6g$<{-G1)O-OTBs0Q?bn}Zr7DY>#! zRTV%4f#JRn4BwA7iW<7Eb1J5_hej z5%ed=c>Q!k%A!}f8S07sh;r$Z-buMRT8pOCLA1d+$%ll%_7^xUf_sj`jW8nI74_j) z^Qjx9prO|VL(HbJ0>Q6nHi}Y9@qY2q2QaJ;yl>SN@rp8r-s?eG>0G#t%8!b{Ndj#3 z?XLo(<0ZN8Anmgc(ok$t?|NtGfi+1c9aNA)>sWp}{3T44DmQ&a1PkX;1J%O_(mwIi z-?;xOX&a1Q*o$9;^~>NP+Ti@bCsjZD3#W(pr&!*L1xj+a9alXMh%}Iou30H`fn)rOMFQ+0rWAB~Nup$5T|xk=tD8I8fpg)$ z0DgUXja6!qN)5D@sq}vHb0Z3jv4N8 zhs(+kyQ!Ef< zj*4h62=ZE7LqBeAKKB_D5HWk_rFHn)%x6$h{Ao@x`Jj;ZLw2XsY*;FhY9S*`m=V|z z5ElI$(i7IM==~;sxUo1)(q$~kau7FT;A@mYAG(Men28usWQM9leL_M7sl>Ex5l@cO zaa<2t;qfAWy;dctJD+xbR$WD)oMrvUzZpZZgigwa50G%P8NM-UQP5Z@EfGRq8Cw5; z99?&SOxO4K5tn(pJC7Z9vFgFaVD;5jaPcr$y|cI&ELLyP9|lXbh+ZF7?}8`^gC$Dz z?i$yD`Mq^0g* zvMPoo8cLOk1C<=I$?ZSG-?JysC&m{c{#J30CEBuLf1qwSANU};Q3c&Wbg}D-Vj`>Z zsS>`qG!V(wd{s>*vh0k--LxRsP4VUnt#mF^edMHORvj_LxuH84?Uc>Z6{};|^!cc^ zn1Wl_Dm#gld@R2@8!18ak{YZ=Cnuh}IK~m72?q0}wh^uLL275`^z#N|b5yulAcAhu zT-MK9a_7c7Vsvs>4y)ASDzC!i&@J2TA0Ve9v*7L(*mN@ZLNw-sTw#^QI1leM*&;l? z2BNhyk=bESj%K)nH!y%@>W0o`RmNa@t#Jc8b#@rE1tIET7jz8$Wi~AjWHp}{@_;*$ z_REL(A$6?Et^1&am?>AJ7of^!l}gA<3&i4Jie1C#CAfy%?B&v3sSMlYS-LwzRb#&F zzp04b*jrZxtaxO{H7ZUml}T50OJ5=Rw%ucx(0OG|?{VgN>F=jXqbxp(3$^PN8M{1eWjrig$ETO65U+3Ofl3Cc46y!@VZkQhMKAVN19@E zV_M+Jd4lcxGp zkXhCYJ=K3PXw}ynba7gP=3*v1svf^t7qye}^XcbhSz5^79H(D1+3rgXDF=n8E){rT9Z`{)YA?uJ+9r^WrGK{bF1$<_gXAW|1?1CjYj%4p1XOTZ6 z>XHXrm_6TicBWpG^;9c=7IpSZHA(^W7w!BP0;^S79j9{z5;%S~3l*unx}xq=PaFYZ z6N*P(QRRFkDOqF^+>;2dnU~4pl{a4RM%LITI8KFBchQ56usT;`gVGSGkkRv0_H~$8rYhTglU_O_@Y;WY@3^?iU{tm@qXj)?Ly_M&#i-T* z8}N4;xr?K>D!>v!0b-*PW%rczIkbD-J}3rrGN6^1CCXu)XyiXgjWANK$1XamC0?MA z732Q~HB<~r>9b7PKlu7dZ_uY^iwUYc4NgMU6s<+ejHKR{Z;Mw z*rSlK3&kRK2?hjj;<-*0)%9V~Q8cBMPF|cr54wr`>MzmJo~Y`eDASrRv)wBB+9C@F zbPu2n$5&b~&{h?%{Nt%OOZ60*Ad}er&^2K&LRThW1P{fj%fB4P0icNF&Gyf_PfQ?! 
zo-r;|XDW3FJ*g&&=*KC7wXiSGMa*p1VR1d~oJVm^FV<9f)D%5Kf5rFm4<@L=%s>VG zWuG=XhwIy@=M*L7ITLOOrmv0U1baPg^z+CwRT=Fu8w*5d4pR)#zhWukjY=Hq7-Xk- zU+EtBnm)nH1N2MlcFtCNKX&3SpJq9^@Y4h(rG3513*(veP75ha3EG`B?-8@iCScgH;8pf(OZEvD6tyn?w~n^?#)z z`BL)WkUh;@%{oe*oK`rkm$TA65d9d*=ewhn9}QD88%;HVSGm)6rqUT%%3L@f?c)+d zaU9vW)18OB;x}i!_yHA=8_je=>~4mlBJ2$9Wh8OQzkhu$yCg&*S$-K(l)%tk8uir) zXE|Uu)8q73yQ$x!G6gcL5={2%xo=~Pyn)H6q%MWN=Gb+Jid$lZY^zq_M&u~bht{g!$qIgHIpg3d(jjN&qD6tuEbcKn1%h@tLMMd+-r4+l zt(p3M7yQZL@mzWupV4KCWkslx+8k>`6M|vp@^Fk)UGP0__;cvKW|K%BjojKuTS=WT zh_X>XByw})4CJv#iLw-Le=W|^IjZHX6McfehQj0?c1FIn_TwySCzUtnhcWp*&(d4Y zTZlOw@BN|p&|7;kMT=GPe`wA2QR5Yo`PNRL1aqzuOp1-v%{0?|7b%-kUHgD%hBDbMG-7wIE0-U*5fF{+?^UA8!1y^!@G^kFD&Jfq$~^62lG{>$=G*FRhnUz^ zaQy13P7j`x&8fR?Z`4qd(*eQLEZDz`2Ld__AFQnmHQOR9Qe!cdH0xMf(ZgQIKkG}| z$w33pcP`OQRAu|z-R3!W4C+6N9Kmd&7EbwV`}_H7AjCSnn|kBR5E@=eCG)Gf#v(m7 zu*6r!Y~vE=^;Wu~rlC4gRVQ3VUtPs%$}Cu_?Q8*DvaTG(5RqTx!AvfXEY64h9TUIr zng2}WN}9s-WyX!kIi~6d8s;K68CY!UljzJ4+ro}x$LhHv9ov*U5PB^<;Gnt$>rvuc1cy|k*rD0Sz&$~Dc zcMm@#Pt{VcmQ9^Ptif)oX+*@{LVqlFpLLF_jO3#cGxN|jf1*`Lj!zj!89AyEOAToZ z7EoIb09M0wT%-Z2F>gx!ht7sZ$Y1Hm%ztT@I4=*YM-)wmi6fGVO}T8$Lfkme$3D&{ z(3Hxk;?#|iZh<@l7*30=~Wa>hGmA#yO$0va;zwNK0$};h$d7jX^MS}}SeU9Vq{phx++xxu+GB*i zPU<5bME@D)TxFDeOk3Hl`Na;v5gVh83siqu}MHFFv8 zqm_M9k3%c9h3#8gU4R2?agx)neCLKf7VPF6#>*k}kR6q(#!fLu z_t%GMZQz`zeVFiyq5KkgS>$@#Bn+2a#ksCGF>w zXw_oG&%~MKOdMt$Zr3ATK`e&adG%4MM$?&+3{dY78GKEW@%ROb_?x4uD#7R#$Je?d z=_u-iK3XT4YQ(B!Y%vRlka0v)W;rOfI~{P*xbA)txu>1Ape)@ub}AaKZteQ?T4J+(=yIT6#20YYuI6bW9AFv z5TXBKJExZOM+{Gf7{fXF57s7J!)Y{;J!l1{E8ayKSxih)kwKR|$ImX6tZ7@+*gh_w zGo@RlC3}X2>UmtTg~hT)&|!`PswqqO}=wO6fKck)LbRL_u`ewW*T;8fS_?>0809~D&&41Vf?;vV(oS~b7uDViWF@nSe?Z8e5h{_QWO zx-m-BrzyTZ>Kl6|mcj4I>W+MU4)oRQS+Ljqie#BT|%w3PE&s2=c zW1$!je1xN7Co*HEU6Ac#VJpo!C$Ad?c*85L!ZF8S44O#^b}gn6saV1H9}AawPi<(F zx@^=A-Npm@NE3y_s@X~O6Gw1WtmirG(-Dt{Y$cP;rv^vabLi__U7AW3#yMf;<1l?g zb+B?V7r!I!(*o1&qHX5vY#RsoTHEDNkf9)Cj&ZF4HehGRft0P4t^yINHI-XtV8HiB>rJyhQc7Y$AmGs72So_o@ z?=7nvjexR2WB^vSU-+4|2iox*cC7e6=%~eIM)~Y-`U|*%XdK~mKovi9) zMCB*7IJmx%N99ySXUfEs(QW-Bo};AvUUhTsa`JkI{+p6wi+bO2(3G&4lM+2c&)`C` z`Uan@38GqP23sen4Jay(q6)RwrO?mX@6SkgXo0~))#;SV9$KWf;xYCH*3d=tLq4l) zXrsR4l%k%3HIf^tEXbQQo6e(d za2D2Jniaqg3E3#GDyMJalU>;^=v4B)5!byx(m{I&Zqgj9lWtCX^iUR(W_TU9KsBYd zR8x*r5jwyQ*HiQet;Z4Cqzw6oyg+R*-{vH@GYSJvEf};`&BFuv9!u47^!H8{Ch^8A zTFI}Tf=h^itpPn%fL?3wDeAMJz&QN)(HA8+n#fEQipYnma$L+aXQ7 z_Fb_-!j6){}>j=${XRMN;pqx5^70dY8J@1VlwjWDW+?zoS`I#G>_%_g#Aq}d~ay~W9a zX}BzBdzIQu+niWW4;P)|^jkry#({v_x+HrTtP*T8ss`(!4SBSeR_A{nrn1@F^;*>C zC|uN+UPzwJjGuHWD$!=N=7FST)kIa2MIr))IiFrh-FMz4)szW{7jsDXG)Fs}*xjro zkJ-5_gG;Qd8#&vwFlkIMyPRUz$6fm^v$Cm7SQ@ehj1U!gc5@BdI0Onwk7z_WoI8SdfZ$uz2kXUw)VeUOhUR@&Y(cYGxPHJjCz*`hJQZsk6;s*h zd)=RfK8b3BXaRdLsjL=}&TP3)@73Rn541ekl%?2kFSCm3D|ld6w1&!ZslC0WLzNh} zcc%}tx9SXB$D@h{XLA^v!ygnCIWFyMDlIk!voKtI7l^Qj(rh`!*-Be!ZR}Wk5XN&) zL85=3S=$gj{3T7e0uA&o;$3uX@J?%?cps9epsHgh?N*~zSr&ZmQ4hSw0?V)#U;<8{ zSK2BSMjQv>`QKJDeQ!OYN2+xo4sp~*u3*RL0bNERcFV`oPO(CaMGPHs1pj@Q_defp zk%ThXLNDZNR_)T@sbx%dFDWT%iBe3cyU2E0a=TH0)&~cm2u{eXX032LimlJDEJj`M zEyo4A(QAB}B|n0jw+aNF*}S;O^hc|c6k{)e!_=mqvD|8avcYZ%cf}NHzzLY?9FiHq zj>Qn~FWO2ygyVVx|91;+bSTRfr5`?vZq_4jUtCrXkV(u}1LSz;9-fK`GzkaAWa9lW z_RkOVc#mM1o@vcx=Cu>u#7@ycFAX;5e{UtC85-l%4Svrtq3)>bWUzm-enpTuNh5Jp zFAMGCe^#I)sunFpTdCZO<+m6tg5|2PS5;k9!02E*y2*685sg*aA^kT%`w2C&Ac8 zw}N%)XcC8VP|Xeo7qS<2i|yGn*#2c?APnbdy}?s0MNLX@rc-0|q(x$YZmwR~UW}nq z!(v5#-H<)YvNo5Bql~wLCrbp1(>L}NdpFla@S<`vU$~uB=dp$dGFS~&ehvh+#eP|e z51o4vH-^OsORpJp!@&Jr)u$L~; zazBS(QW2zrZWDi>Y;kaLDLG+{E=%@SD7iitGmjku{>hI^Cpb)vPC0=ykD!a_81x!m_z4HCnBw- 
z?m-zF>rUklN@E~kP^C;HG2UIPn?OLJj?D(FPB~@|z0H4nA&=XXku%NK_ z+7=p$e27;aFoL=;M|pM|devTqgVeNHWn<#dd9HJO1ixF)Mu zr>SeIbUt_$14MVKXPlH}t(;hHzB0_OTxt=!*`4tV3|dSm}Ju?qtT49DwO;AX0~B2^{_uG<~Z73qOf$V|(eW#~j9qqDtE zPj@dY|MfApJ}vHOl?yA*+Mm8S};2B6`C@3J;r1b;({oFkM=M!Sqg`%l1v^( z>y5s{{vTC8!9&A+Js5ebQ$adNe|vl`x@#or_o6)3h&I8MXeVq8WlCQ?G>O2!T9=i& zYZyD>Jy4TPLO(jk*6b<{(=#!_o!Khq|17ppBh?n|J+_-MYMj2p$PLkqCRi2JXq=k4 z#F}YO5Q1`33DuN`Cma_iJ#IrR!x`2+X8XqPIKXe-Tz9r(;111DnL}IXPbP*H=(V*% zme+3gJT(dtp`{o_e{k<9!$T!*u1ONT?0j51Ji#x;Ph2X_?X6JbvD`U#T*RrS7Ez%AKOA*eO<{CE4li0rkYD@FMblk)R#iJzN-1+ z8KNwTc>hF4(H(cZ85zbVTSrja7>xe(RL?ff(-{W0t*QjYhI*`@ve)-s;>^NHc9 zj&WOU3#EDcnf;<;JJRgb43SMRGQ>-|^$xbWQFICs)X=$W-xpRwXB4s5Xm0D%MLEF~ zX)knm=7TemKYx{70Hbs-8b$TwC``6`$iIC{WG|{s4m}EFL?iQX7)w=h$U;9^7A0^y zFr3jhS#Rc-Xlth-0<+Y5wzKuI!aE`@C)3O(+?UeIh4D0>kA1XkDn_%177js=Sn5Zc z$6dBp-^EQw+#Q7uK7EuouPZkgp0v1pI7FR#lu~@S5ksF#}aYl`)(xT`f_)dA-mY;`(f4 z(uq_AwXHpD10?05*(wJW)K*{@#-f7CMuWJ>ZkOFom~3-<(s6Glj@9>2cdc4#V6ud}=TBN3;6n;i8 zno7U13XPotnk*U?C|OT=;h zyozY*M5)O*Lyw%Q!F$+-)xyAbyBQ2Cs~D-4qaqsMtscfGRwyVP9^NmTV>_B?p~o<} zuE^ZrseZzuR7>?2JZy$}fSc8{wrYvy^j6dq+u4P%`DY7gI#c5aS(HVuv?`;=;vyst zShM)%%j=rvOGCyog=nm{{D0nLA}zEy7mo|9#b=n!S2yp5af$9RaVRiX2Ll&EQ$)Jp zMr6!Fd)&n9U~YX+9>I5RH%{2+SZa2tQ_-~yRYz5|8w7JJ_8I+G$zrwHCOCabUzyKM zOuUC33ibikm8Rr*vJu}H%<^iH!)X#Y?{IXg?&J-aFGaD$U zc~3WG#u2qPaL7^#o9t*kjaml&;u`0PG}>(Fj-1JmvqvYPm26CG3|S(c3|dW_by<6c zYD`UW$xaNgC8rpjBqY~0JVZOYlU)~WxcFx^GD9jKeQ|xC&?n~;`#q@)EThTIcNZCX z%ULPX0+G=XhN_F|YAzCGNn@j`!)G;<;+Y=})(2D=x~kFi5$()>49r&t{7HC=KWGOj zsuA0T&N3bQMNd_^^Z|AV3+Q@uk^j$6vJ`jsaT5F+y!CceiW<{yoJ1REzep7K{0m5> zUBN;1NcSe*cB^;=mVP9&1#`23E1G0fr6B%pZn)B@rsfU#9rBz)zOIBk`V>P8nV254 zw^BxNMzx|XI*Vs{1U5$&qq*o#-&3TXX~$9>OhXOzrM4$BR6-?lp&oaBE3Tp9q7W|G zljWK7OqE$x2b>j0)NOTM3{F_&jYMOVx?f`dRC}3 zbXNx`4`&Hm2CpZT@YbZ_svE~NQp|+mVvswR3P%f(4MXieQ6;?|7p8^ehd_$@jq>Zl z_{haL2wCzg+rh{3$*gJIse(S=-S zXtZbb>_+$fK|9T?6@jfx0R^K$L~2`3cb;c9SBaWAW8HbgYO3tLOc731W_)YZK1{Lq zQ9;Dpha5j5k(64H;d8IbPq~9FMFMk%W9TX(Q~||t%-|G<3&h+zcw}|bZFW1?;WzNy z7<892X_&Rv9?jSk&F1G4_666FWW4Sz^XM7$4OKOUsnzyy`bqu#kllB+jZJM_s5jqi zi=aib@t?o3+c4H*wEEGmMOV~W^*^JbsvexIzv9qqAsKI9M9vh;5+cpgkmYj?Srjf@!*_t%Y4f6=ON7%7M8N zDq1Cp&ODN98kU3-X(K9g!uf&kFDfn#?@kWhRq?-eT06s?63Vv~fy!!#_m&zWzUH1# zhRF#?84H*G~h7UN0DFNkcUJSq^o4TSVrTcjA4s7yW9l(IJV-1%7qkm^)Dks+=9WV*H|UVDNboD z$E&~u4)9f_1v1enh&ALBBgJ$v0O=f_+KVTctzM;{7d+|zdmisBj2i`pAxlo&_bxioAiia3sn6HV1VqmuET_*v9IILgvK`v~jNW3-U7_`|PS z8L?IUY}98QoFn)Mi1U4E(`k-c#c`$j+#cOlR}wK4$39~fbW+n8hW5}~kjl(Q6Vw}G z|Dd_SyLgF*l;9tQ;&xM~4IUaN)2oSsRKs{v* zio<$KQJQ(tKvwG6x(f!IGhMXXzgLZ509xY7Z%}_)hsq+=oE1R}^-S#Zm(&v(bBf>x zd=DSB5PR&77|0j+EBDc^(ffS{=CIwGrt9!&ypW05%wipx($ag9i$20wspHWcf0BsI zCg!V4R$m3(0q)>5{3V2Mz4J^>5IgC);&E0kv}HS*k=Bxh#?&5l#VYnRN8zA97fqy* zvX1ENZh^D(T(tN6XDCFs4AZ9>*zDjN)mQw*CaSkxQ|yzUsk@ozQkgJ1G=-shIwQ`1 zh;-izB+Xm-IgtcAESd==?D5FDIA$aId5UENW=}c1{2u~jbS;QgZ9y0my?Xe zfwgqYzn-S@DNLa5dYnGbw^5Wfh}+;MO65=-_Qyst4S6qeiLo3Jd&NQRV<@4v%Y3LE zxadEMM8-is^Z5fZnl_nnS*aH?1urn`OMJ+qC8@PJGmLntllV6XvS}?UD*nrOlTb6* z5q~lF%IzeJH^{6f2O6UjQq3({S@Bz;3r(O4{0H8Fw^vXO{DR|ZkH~Fybs>`aqZ$>tr#H(s9kuDGR_4tPR*5j*s)4M3tdJpGSebq&?@K45?~%+ z6FOr@QW@v9+K|*yKB1rRihuEcR2r3WM&(axsoolebtPKr|DO3uZLw5&nX?NjqwiB4 zW|P}y*5#n#Gz^c8r2wYeLotr6dox`M|6s6vgYsgmIAW!xl|>_27ft0$Ha)e?N?C&^ zokGSOl~3Pd7_5V97U@r^i+Cg0VE1?w7QIDtF&U*;VuMEWgb3V9E~p2XQ^GJIIDy5t zt&XQFj6=kXk}LG-B6qKEODRcF=s}M)?;*1CV4rU~>dM@G>c931}!E{>Lb3!8Wj zntQrYlDJAty=3_4qyF-|cJusRWMcbN$;!dzWk%!zytRf?Ing<-4JSt?;7(FrdV_;V z@O3~(oz?mpC7e0ISVYSc;;=Zw_OXRiPc&EOQ|pUw^h`B?g)EV|$1UDpH&4x@w(OKm zrflGH=*U?6JSF1~qu>pav)pQmuzuakTy 
z^g;6{6PtY8$!}-k6i|OO^=H@p*$hToZ@JEnU0MHCwj2Q}yCB=5t)OK__vO7HclU(=A0Y zRF6vH7R$j_Q#D4^)GfJMDkrCtat4NBn6X%A!$ek|3f_XA`EF4X-2WLkNf-12nrB93 zwHq+f=a&!6n=U-D&g<{31JuBLV~9LxZe>-!qN8`Wb3*=OgjC*ML~O6QApyhtL{w1?hsUEQ5sX}PgKK0L{nGf;783tFaP<7ujz!p zmmW_k*8k9mUh9-#R%@C$+r=blzB5oga&}8wtgtJa8F(ix=c(^{e`i%6nUbP*T8pR= zHnh7OqM?{hEkZ7LX?Xxw z(-rcG{M5Y0KVQj^DyX`t0qP;zi|IT{q9zSPR`0*4WL5}+hkY8ZVqv?xkZsS2$dTO3 zX;TMQzh>$e#;4E5{o>{f@12H++NsRW_qbAr(zW)!KdKkeKdG!UFCn}J@ z;bB8ijKiJ1)DAYGyYxT4wazC|(^IsV{TntSh|(%qc@r}UFIEitM?|pJoM%D(EY>J) z9J6joQ+M?D&TBlBi%$kq#6jrAT|^m5%T}T;iz=@{h*K% z9D!J-f0N;i#;dsgx+s2v&1sfk$YRX+${I!YRAKBgw&~o#JH!6srH;?T*pY9Akw%0& z3;JOQUb@}Xm_s9Bob8S0o!EgFG)FE`12~#KU2w4=%|bGHWVl`;w}(ojquQ&cQ36%; z#*rgT^2Z)TB2S^Fs&;2<66QKyxTq}00NdIH*nlNd4u<+pT%_TqN@@x%M^)Jt*NJ-! z#Vwsh4G=@|Pw*T6Fn!6+rmJ~5J%W}Psj>zB*8c>*3)Xg4&Rki(kARJ0nF$tt3Z{S}(10;i@0yQ=WC5=hpU zs2OFIrx7D_CS~%+r>5$c^wBvl?s*15304N*+~g40}x z_ki`uSvOWEJ0H`!M~_cq~fWS9NWaV4yCjSE3gp=ucs24kS5a)VNfE zcGh?pc&GcQ*^1Lh=2FAzp(?3?sx23e4A&V^O!X2q-E;Kg&`(rHm!_Mdo_{(kaW}3$ z$RiTeMarD=P&L8>Kkv`eRgDU4Ri2xU87yz@eta2}G}zN1EL4_Nxf}*FUY?-cfgoy$ zR`{$-nXAGv&Cd(Pa9LH<(V_wER29S;d6Wf=*{_&v52o?fH$0rSiF>;WqxK+YAWo)y zht=%xoKkNvp3k$4Zp`SFi%JD&abWL=RWUWuyO)CvG?T7jK8ssHq+%y8)12Yv7M-1F zp<|h~F0?n`&xGuF&$Q|tnyc)=wYW_ORZsLz+8diy-=j9VB4$z<1rcji^Gij%roMCp zyB2&=MsTF9beNSaFI8T9VdR*uO z?+7@<^}6-t|UT%S5|xUW(rg!R0zwRc}Q{Fl+jz%92-Wj zQ5k-YwDBTA@dNXe%jzq7WoM+w^fkf~rRX)~j2#M4NtKB&pdepn9X{HSy<2SK18&W) zdJx~l95+Iv@D!yy3nJtQc~#d!1&XrP1XI`puVp5>)yUv|T{~u#3-eUbBsWcu{6TL{ z8Y6qi00v`j;Lnsey$Z$jgro=JfIP?5{JGeV-|l=*RrLo3uEn|nYs;0i8X+Ul-G1n_ zRRu-!;2pzoICi5;>Sm5`6ng3*axpGohuuyW44kE+@KGP<2uh^hwel(+#)#3zMr1bY z<;F9b#v8h~xl}L8!@Qsy8tb3*!=!%5Y-!cn%A(qHB7Gf;-Y?Wz9ZQ{0>lulwiM42^ zwsW=162AKa{N(>(Hx@AcZN*!q|D!mSglFyneh&}T^&?St=S@uwJZa%}KY3F&qU-** zbW0=~lf)mP?-*={FD5%-Q1t3qO3&SHNk5(3d0;4s+tM6zG~&yqr^(p9TVxCJa4idq@@v661AP{ zRF5QnJH`F<=>s0vUvJ8)?%sm8I_Jb z#cc0CT87s2%xqNz_rUYAF_N)rCx2OPx@vSs6MH+W{~Wc(F0O|1cMb4dcUdo}uXs&S zI)h#%qEG~rNs2f$PpZ!@ZB1^sFD{^`3O;Sa!@k`wBD3dLmsg0)fi?C(yP&h33E(F= znFb`Bau$dPRa2}K)4}@#c~2otGPt5XcsG!TRWX!w(EicgU5&BJi>o*^p*@#2`~jlIlmG}Y75#x(k}WiFi= zL<9blw^S{?%6OG}1%d-@ygtazf=t%6$UYluu%|u9%o@hFzcZfUrly}EV?DEj1(Wpg&}msZjROrb>rk?4#8BQ{%|`#L;6 zzisuQk8+lyi73B8V zd1)z^VsyYoievw)Lh!7IJIO+{!aqRRtd`X%5+{QDusLl#%_WZCQ#0`zmj0It2m1L3 z8B=&fKKpF*RetJ?CdP24xw~n)BJn|Wq)O<|gf$LjQ5T1mTTkaxZl>R|2fo|=H%7`m zI3`Z$>KtG2dJn?Uh56Yssj;Y!<2pBW;V6f>CW;qFV(8{t(%;0pw zGPa`Su}A+bflbtuJ?weZkFJXRD2X+G!f~c-eHg@o_6FZ)%!5b!#6j^Jng%AM&Qtl& zje~(!JP*rQk@_>Ld$NRctwOx`2~9}Suc{^!!;ff)8c3m^`0xXwzWonMz_hPnnJ!10 z{e|5XtnuoGtbzPiJ|4BMQedl=_(-ufz>fF=pd1*3=7X7LMtzbdqw5YIaWgisGIe@^@x4F@ja3 zpZ|BsmX;fRYxCvKQ3o+e9h6_OVAyu+KqIFS-F2FYy%@#;iLz!DmvsnLFj`L+!$Ps9 z%t|$64y4-^#eG!Nm#zMcip|BY*z3k{YGLODFTH2q;x~U*Sb#ULZ0l(@YhSlp}b7 zv%{cottep_bL~c|r_L>7{1>?6;H-RsAIw5wG&QN4U5vv;u}K%HBJ*y;NXn6aX9P;o zx0I|j%}{lXe>9JfqpF_gZWi$CSj1=A(my^mf!>REbd6f#3;Vl`!o~o~h&Ucj)6op^ z^j5XrdXCDnjWFmlLGKa|Lu))QUCJlp86SsHPsW#3lEVV(G;-Qub_|DmBBTB8&;Yut z(!iA>c1xqSQAWq$1oNYj!8UAAZkt)daaPC4|8U5TX0@y-cKV824Rq$zT+G1YB+o1{ zhr3YO|1~W(OBiAh8}na->6C@_t`H36qnr%)f;KSO=qT1Ov#yEuj8xCqHS4dFnWwhq zs!YlFbU9cM(ISXCPF8AUkB6To*%ufx-v<|{1@fx3#CwX;eDl<6_Dm01wd`rsm7d6c zd|Snw`>YU4QH$~=N6V*zKS%c#USwt~A!J-|s5-Cu@>}PNooU393qI|qn_DZ)yu_?G zCrj>2#?m{+A^nb;Ij>?C-p7)Pb|=#GKEP#ptObD*VXQZ{Fv>c#VQUr;@4!x>a}>bf7&Y1IQ= z)M^C%6H!oDX~$896XuhskMbFf%|;G#gbZby~tFTsdEHp(a%(q-K2NM zDn8Klv`;N({ojT492XviL7@!z9?Rt?&8s9(M$TjyS?evX{8UM)QB5$yXb=jsyU;9uGioX=uIWwZIE|xL&`s3##p>C3jSXKS zL$oTGhO_YNhK4jNx~P)Qq*kG_{Xw*0PcoAj$EVcXF3LpZFoA0GFz&ACo;FeV7~WlM z!7RnQahspQV)mL1x}O>;_R$RY9cF5S**Ko2$+`w5Vjgc%MP9u#WiJZu4lxs&-EI 
zYR;VCYFw3(dFSX(UBZa80RVulsdCR4X;1fMyT6E0r!DX|-r8q2xV3zvU zJdmFPvKf7Le)VmocEq(ZG7@tsABxH3kWVz-IDrabfIF4vfjkNf<8G%w4qr(sp07C` z(itW^JH=Xm6`fc8t^07)^(3E-PxMcUKz8xcPQYqaE>O~Ro3WAXbu9?G#c*?Y7*(Np zXsmM6T&%z`r=b%k9a<4uFZ$9$+*c!6W*>^v_H304)8Mu*qowI7FRJ-_p-S3p+@p$s zY|lk&pknT-_HlIPZptUxc2a1&^Gro!1oGn#^+=CGD|N?7MLAVcyiZ?93EmfaZc=-> z)87a~^*nzCvu&6RC$8rfdsQEH7*{KU_Q_rT@iSA|z<%TY?5~47a)6pbcruKdEQqI6;CiQFa!EBdRkp0vX=JIek=QtWJ zvYMm9Cy79X%1N6rnUTMUlTCcaTDvdG z**(c(yn2Bgyf7vrLeHk0G&6d=TNcD9;&vT%4hQHdQ;jk_e!98~-K`DYy}B;*sWsMU zgrT-9V`hmEhMix>f$J-rYPyX|)l>Plt0BT%7GZzZW&MKPtkOxf?e8!Ie>)k(V4nL7 zG(`z-JG62to1ddFA$1$&bqbr)c+L8km6uDb+)Rf;P7i8D4bXw^r)MzDZ06Q5Z$l@D zoK!1V1=ZPa{1*=pjTi7y6KjPk8Z3u7p<$r|d?>t=hjOXo?0I*^GWw5KWLq~yX`frH zR@u}@dheOw#!mYpT3XWU(NUPH*K-^#r|#-$9EKD2g2+Y`PlYjoT8q|J%TO9~;^(Gt z;ZL)H8$Th3`r68(-|9Qe>|WYSaoXr1lBkN>f-ShOtFx%jKuuhca}YvL_C%7b0>Xo` zXi0JYa8yJd9D`5x6&;Ms;tYMy-gvyKUGAaR#jmqFbWP1w9pngqHtwd%YaWfzAx5Fx z)FIR)^?T|PI=Hcn_Y!+r$8>YbCMF}FKfh{+^Yoj|;cPy!8mN#oDs?&uO7PT*U>?l5 z4iU{+-bUom+Mvmv^LJ_p4T?!{sWqyhwi$J5iP7zAQ3XoWGn}W)C_AELpcWM}ud*Fy zQy9b8Kalm@!AJ&zM2<-s_EGCD$GE5Pr?&UJaA`BRCr z=ra9;ax4=4@KIG4yJQzX7ndVP(h>hk8XNqd_>T_YL&yV%)_E4Y_JkqH%2mWa4%Y*``_ zMQ)i&mPDb||C!&vUVXncF`v{*qUV`-!HCyWOPftM&Sq%u(3GjhM-% zk8q5BKs`JRxx6Pc}$KiOeEp6pF zsHWEP=HRbN#wzJI{)hv57`=yB6$;(Ne0-BxROeG)iOB!MHO0aob+x_Erw3bW#lH!JGOmPB(>!my)A5jT&1xP-#kjW5h-Eb?y>m>fh3W{MWQ zfKNky>KptS<;@)ok&?Gh0M-)Ns)(%X37Qh&L6|A7wl!Cz*1_Vm+xvxk;R|}C-gnE> zm&mUi7*m9g;_YAthVXhSjb@2LyDAR0i79?n=$_QoF z9x(?!qcBa(S+9C%ytgcgNZ6GuwX75W zo_&VON@6GYHkMLDTb9gxCecJ;(~p$iPH8-qcH)2PPhR4T@MnvMY?U*eo)?pq$Go;E zZ55WPju1o3?a86j2E0jaS5cHSMZ7(ymQXo?-!CIx|1AaBUd}RVs?JHNt+~j~t(-bk z8!$iM1U|*V$S}mK)oE9-(;Xj`@{2&V=xnt{F8x?fLSt$~i&5Wv4kmf9x^uRexk1RCJIbFCYs;B`nzUs!JAx)A#p}HhJKvqru%beSO zmIf#jok(}-H6CTUh1TG{8svSf!}<^V8=ozEd$il${S?d9EmeTq1|Qi8u^8&6>K#(i zIqlQSt;TL;eU4*wWv?2Zs0>$yA4;rLQZI7 z-|muBt>6^PF36vVBOfcc-~*PQ=v@NS=JP88AqSv&G)^S%7k*pf%Bd)Bo)>x7fnu=~ z@U}_TDe#<6j%FWZHFB*3BIb3uvZ#4!`>3s6UC{Zlc{&3 zt@;N|^h`ziC_bY?I4L>mh?(VWqDe{b#v)dqcXwj|r>J>C1X0FJ<}dUy=cGKqAl*s= zv$blTJ=lVu-gJI}V0_&`H76aIP{{qr>6^0z)2NE{f;RR!g;fG=z)Mbhq(SzPV{zUBwu2ic{88-#R-59!~UX}d~ zt1R<7j_VibcvgGK*XFm!#8DkDRrSFQ0V;`-d~JHckjM@uM0p=*ejZ?O?1uFwR{p`RO|RraN6tkXaB}2D_7%$hUv0x|qu7 zLYrk$J&aeQW7Hy5PyMBr$PpH@#<~gSH8J>iq(+fGRVDHrS#n6#RTXL~Rpx1tSx667 zldSJavru)Sv!Nv@8>%By=h|&1|Q`oHuS-9+%~HJjF#+L?4-D;OE>P?kIG$lj4Qr#Uq)LWQ(q- zR;ecip1&h`6b+gFGsXB%${W?63Z`^-F7h_0okvF+sy>yzh zXucW7S)vYyjFukux@rW+psH7v>PoE8NyXCNTv|_42UMw?Z6Ippo-iqZ%8a~gE^t#2 zI*3gPn(DM2#4AL63X9izgz4%F9?$#LAUsezL&@~28HuJMVwsU|ovQZFK8Qi+GxdG+^nOqlJv2A1vc}47>dd9>7d2ZfHp)P@O7C1~n~fGQWnirn5lyglysO z!bs7)ByqUGtYe|S5)Vdq&=+bIcfnJKy|HOef}SXC^1I{s^_D52=YK^LU}D zf!VQi(K+5-XeY_FTBs5U;qq*hJHnv}aPkP}->&UZDQODSe$2xq#_{rTQqnY|c_Y zk&S<7Ft^9&Tr@y&bVn}?6%uFtvLNXEIL=8Dsz%gb?kmVnyi5-xX{x4uE8Z-1v&Fdd z4PNqE(f{m^E$14BoBGZSQ`@w1UeI0i13}$iQ(FZ1Ct65vi&q-L_c0iTUPNnkAGVli zoww-jU%o;yp~=Y6{RNq|$r;Iupp(sjW zsPs@zy}gSOG7!@wv`VHG(Oo zRoYkXOO&5Fn_{|@T7{eR5$aHi*w8;k)ki?lXjvn%*Igk<*X6(6Sf1qe4~9t`v>Aqb zQlhFH%M;U@;zz9Izp1J@g{eH(+}G93RJm=Rv=0qeqdc2`Fyr{Ld)B;7e%gYf+4E$l zQd+$J4xsWFWd>6McgE{sKZVRT`Na#XmQ5w%1LqBORLX1XCh(HT+zi}lI9=L@Fe zG;KywR;qbd;M87Z;8S-YP15t|9DY(AcvW~VCs8RuC-0cBaL?SaF*_Vj55)4NCU1*1 zeZ_Bo;I1oZ^DD9Z|1o0QFD%hxw-t6bDextN{TKW7?Bj*4EIJwZfqWS z7QLn&U0rxf&SN7zAD)GB=6jXSPoQuJuX`7`TiOW(a1M8^VpgDwUriCwXy&~~1EWh& z$$aA{FM}#_1}eeN9yfQU(r;2bSV%pcaY~wT%p%IhYH2;i4i|* z_G2A3bF#ZXyhgvLCJSh&%PG`|iBe1r{6Vw1OSpn6XLh*pZY9%5ZQ;FMJl&Ik@`%%1 z8TU(d%*_zk^gmJgKAaF)g&Rn~%W6DQoGY@&^`+=6cfa_W_i(78(A)kW`>pWYa=X_2 zf^gf){|VJ?-cN-Fw;lf<6Rs*&SboopVyd`LevZOE`GiWXEWZ^pT={V8c6cscKGF`A 
zW4eopEa4M|Tr?5cu?pug>9)377jDOL5!FmMY~}K(kJd%yQt~&(=9fz^!cbvO#RAnK zkA^jnUoMrdP*i>|pTaBgdCa_Of?PZyKev{LeNnkS`JK9q@-gkywrawQE4M3mDHOTt zCfdfd6J37}z0|Y3KVMnWveSgz7JsVM>TpbV$|Kj^j9YT6m10|Qa54)RhMf7#Sgrczcu5WT1)l+KjH3X)sK3q zrZKI>fxe>(Neh9cztxLHu8jmC>s6XqkRCD2$=1!7)?Am4%XzjI;@EavQe+^AXTqRM z$pXIKpp`TSCn-lBd4P2c#pRciW0=;+KdGo9&)ivG%ioUaIrS9NsA5bz8WYo6)sd9p zi&B=Z7{g-V9oia`fbN`sG_bN(=J#=zF;ixq9Lj~W}&}a zz5*{sQ{H9WJulrC+OyfEv@vEje#bPjj6x^(us#Y=GO?|0CMpb}0vm$<`k#v}f zIO}j*olMBv``(H&sIb1Kj_V<4Z1qtop_dSmodB{lU6pYuP54^5mWfdlV5LnU;rEdf z+KlnkGp3#HCwGDvb~>6f@-8qk7hWnpqN@^yV|u_sE2*-$F2BWb&ikQM=RXv83e#h-RBd~( zD7~7fig5M(-CL_f)NvQ7H_S{rVlKz9*V)~ODm<2&%kibT_c_0NLZHY+Ihrv7n#YUf z{Ryk7NvNxrCg!$}Qx^Z}x2$BO1ox^iR!~57*IU)g5>J$-=>m>=;#F^-u8R#kNYzCV zd?D3|)e^Zd6_!UHD&l8iIMPq0(0}0->afX#ybfoo?#Qjviefrko)zh(yO}IyoCyMF zYB~R^3jTnz#+{4~UUMph!qI$m+v#eBRh(WPl4TSmTPW346fmV>bWKxA2&1Ve7JEZc zmqy5goW-5gKPex|@MqlK=|W}c2XWMnTL}j(=|J!rVx8cOy6TkE1#3}SZrU;H1P^ya zck|48RRVXL>!~92In~e&X=U0nLDiZr=|6(GR7O5gL{$imSH~#Ob&OEGBU3IAu zv6|I)7)@2MQx6oA-j0{xw5g57TvCmuMyQRq#gQGi_xP-LBNcQFIlZre?L*x)ypP65 z{2Y?Ll}M6!#!4B(;76RSk4vEi)J~0oE2;a7n82^vmwo)AyW6eG0}wP*P&|?!w>b&B zf^TG(R0B-0XkA)Ed$XZ$nQ3Mb4sxpdC0*Ck1@n9n+=AU=at7(PfpHe+(8AP9q8)MA zk497$h28z4|2s*tMpQyvwg71Rocmh?IM0kuu3T1SX`##Q4 zegxy``bj`NyfLUsW>2ZRgRv~ZtjIr*>@1efLh6EL()N{(3DiRpP;D`LdvV&Uwz&E5P4E|WM2bN}B!#fg zJ4OYFC9r9L>7wh`q@`;+PvveDgS)|~{!1)VZ-Ei@VD6Qszxa~(p}EY5B9_?ZAuOD5 zJjGwl_F~aNU!*#bBJ@Gz4>6kK1(( zK%I)T9?Q(S?QIelpplzQ#Zf^BPbd1)>Qj~QrL13#PnME|@VaPi``|0I8_($)Wv@=)YJ^;z3y>+-~QUC62ypBrMy)>O=*m@ z@u*6nL6QOuQ+bw!+RhM3UHVNb(rG+xpyuvQcp0a-i(;B(3b|9bBH(?1n%Vj23`bcC zE?SsLYc&%4&|98u4-dgCUf|~FdR&&Pb1#yi4)skS%|Z=tvwUxgI*rw0Zq};Oe4Fm@ z>CiD1@QAm0DQM4shznJ`O`YWzu$OvK1>H@KW*7Iu3nD#{DZ%_6aR)1?nLUh=zj2ssjYgNlADwE{X>k@ zJ(-G=lw?}1%ubkXSCeGERfYP=OKMLQ#ayP!^r8A6Uv<913To$QlODd#`JAd=%JQ3vT~GeI2_`~0aGa@(G0k#zr~z26m*3hj$_2&Uj(c75*!To(gZGV-Ts zi6wS(VLre=-Z4_V4*wewjv&d(7c@WPWTdztapISp zPp}C)Bor_dOunW?Y8qDiKLo~);n4ACIdw3X*? zjtS06olk;;FA%{V$r7JdPVnW9Ki~_AyBD` zDPv@zzw)~c#DshU@NqLLdPSe2ztcXG?`VXlxsB*kK8K?c@x@aNVsR%p*}aePZXr6s zXXr3ag?rIKG!XB*O!rf}FpfJ!(@3GPR{#y{eDRb?HN27T{O#*$6-IcQRI>e-FS6J! 
zY3m}VPPluifV}T()*aQD9_uZlF;QuEIF?#d13H!Zb((J z&T2tx*V48rSUcz$PozhB8QRtBO>NMD2GKXc=Yw&$j=yC3bA|faPWMI5xIJ;qJRhn} zV+9cYj&=0dT#5XSuXQyv5lO0!2{y=6c1 zdp>tLf36{a^%kpZnc{St)|wE?qD5}^u#)6RE@pZ)Ri>_q$#`|dyJjI>LHm$YN4``I$mG?QEb*@uI#-GW#5aJG-FG3(G3zznj6E7 z#T%r`>3o~^D$1DtvL2|f58Y1n)O9h8;&C}r*lA+w;R@g7(s%>E;#+StbzvLNN58~M z_^`!|HDgRMZ?}CakLT@O{@t!-*<*Z|Fs6$=FQBG~wGTvxIaN>~VUk_QmaWiP#nm!| zxwqSr=0&ze<089}hVy~>vAPtt&;nh8eicW(jFxdM{VF)M0afPrP}>`e|Is*lghTFj ziMRjFWQ^v9{EWAXSogqfneH9y1gxv>wX6;JnC4o&>>`EL6!Qyax!uu>2GDEh!)G!# z(_~tN3#K`KcZ(t0T4!EUrOBev`ZIx=e`2oH6LHQ-_c`?sR}}Np#;qiIj9Eye;rblc z;ql^RYSVTxvx)eNJBadcbL$HQ-_59{t1G5hc{Tmrw)#cm7p+Lj>{qKRzA)|S4QD;d z)Aw}JX-yCL2Yt^inE5@%;v@n|jpNNqyf6NGO73|9MkBZ%*A-=4lKmGQ<=cTr7C96z z8XJQJXb>5U`ta#ycr`3Mhap)jsdnULS3YM44w7)#n?sj5uh125meD%Wyiv?hJ!gls zw-2G;&^qS>tl`T!A1fg&h8-%2dpy)MKlRKlK>N)_F&8(@5-yor7oBJWHNt(#6t9x& zzVH4>!VQd{)q6UgFPRQ9m7Iw}?r`rh)#cKvF14f&Wai~gsGP{}AJL^MN}Zj3IAf*q z*I1ufgH~g*lh^99mCNf{UI{Mhg5E-v&>k+Ps${+6bfaacPiW^z5BKqhbe(q zVs&|T5s(D@zdY2jX`c5s{s%|Rqz+UN+p?2%``}zFPVm-Oswm0gJ||0MifikD53{$a zA(+N#;`JYhj+WI!kd2YQdIO-^9{3N~2M@f}R5ATH`pi4c-iS`l3AZYZ3&G64d@v zcmi(Ai)lz>a>Jnkl4d(k({WQzp?(%&8DQohQ_iIe_Nn=F6@5boy%DAll@1>htMr9D z!6N-L_2HMewcwHzEWu0E--BvGOR!f?H@{Q{%x9RxMQ9jGxZSuZH=!nc0?PEj9JIHO zS~;tn>hz)4%j8XWSf(?sX8%inkGN~fh{r93j(TJ;32}M@R(M^h6Rptq%n{u4&hgV) zab8L18NDpw;Sw)z0yb53a@y#k=;G}QuA~gRpO1E-2t6Mxmh+kSBE65zQWeav`|<&# zx`c1&I5iODJWiZ=4l?*AeZf=2ZOx}(c2CY1jFs%iDnZ8Y;Q{*6EXn7$!9r8WtziN* z2zNL$GDvTsnJIhHexTBlHS^IyR2PW$8+~Cq((K$Ok(t>I)KIDzo*}O-V0CxTsM<=( zh9X>#(kiPJcncG`fh0;c5$TsHMeyk#(mDBwo-6LOo4&>$Q%J2wn$_QL%rFW_&M6U{ zcr%82t+^<57AxS^(rDqya63uv_MFb=u9Y`vDVb{#kADR1 zs9zdX5~aB{X{TDiiD=Gm2dBv+wc@r`cd)b8dx@J-IuzG14*d*YYH#J^1%Uy65wk9C zSBl|r^ggvxJ-wUWK6jc*)l=2qk^62p`Veo)DDyUai;t4V#L^l;s!XmfstVvY*F|r> ztoxyn6Oy-iT5slYTojA2PF5)EmjUaUsL+p~kH?#$$3IOhGNel^J^!|CN-GM zj>Z=!2J0ncBH2;W^P1f1U6P(T1V*R7}>Y z&$K!T9R36oDT=lNq&PTUziD!X-FJzf8mmUw>mC)6$g2$ap>T~}(c4gmN=m|ars>S* z-0Hz@u+2CLe=Mb(XX*>`xV~-sQo~$|j=H;e<-M}hgubJfoeFr09Gd3$+dX_(7v3)} z^_I6H^G&*o>z)v*(Gu}X)u{%4qhp~oiGDlfu2Xj+^#dWF7?-4QzCgsv7#_Uqt)pr9 zm7b&GNzeHtoEoT#VLkn(cAFAZNY$0%*%Y50^$7rc6+OJ)r4BzTSbO_oNgG4~y;4_0 zfahVNGv3>Tk5!u5ly;u}M{!vV++aqMAfRuucAx>>HqFgC+QQjLa2w-P>O0|)xUO9_ z+o~dE&sa&1#L+6<2=7n$T;1VVxn`2CgS`DzO-yejW)49TuRB$mQmp1~FxbQ^M6Nn$ zkBy^xfyuVBT;HcO$#k64K02X}QFU<)B~)3i%Uk_r^*97qD`sW2=~kPPt>uQkf`| z4i;o6#ka9hLT6bAC(?15WnxlH%>uMfE2tjoHj&!SOWZZBS+K2Yo}FP8aC*>j!CSW^ z6R|_gR=VvkB>kv^ykGDppHg#jYK99_%dAw}9~)kRmij3LaZYq{iUiY5#bCHpJJOGB zK9pGtH8@pmcmMQG*=J(C4X~moB5U~zu}T|FUx2E(RXxcSpdM|p$HpVk9#9B#RXlw~ zWmS&-p%sd%lDKR?w4{-|1Qd1klIRhy}>DrTWR|OU3^wcG;>4ttyncfGNJ>~Heqh`snx;#T%2SXE{-jBdSNOPU4=5< zuHXPPWDB>M*nhAJ$pwj3jcTjD_DyHfQ?jC(loyTj`lfSl;;`jw5(Pb z@$Xkf%mZRVz(LQNML|<3g$_mshtV zC18sQ`-P6tLL|FgvELm__ta*c2`4&UTzeDB6#V7#8MnB<+?Y-7JY11TZh#o=!Kg*O z)dO+N2gKV=_jXccH$WYD3=Se4WvyCtF}OE)5LFP>U&2UO#Q8)-AK3c~(+pmvCy8Tk zgR@8xtQDnVlD`|r1){w(N-FYRid0Zls5)wzhN1olCnfmMlhVvkbCRX?j0>q|YFQ{* zP=@GZQck=Wo;o?bs3!(Q-@#S)Jna&RY)0+GdA&vl+>(NMZz-W(h@RAwNrwv2I>}s? zbgR1U{o~9>)E{GyxG zqrFOel=kvCbPlgJX9V;ndR5J1siKvm0!|-2RJw^BBX+AcW}&ylUX6n@@gg3Xi{2x& z6V<507eX)bCqbiojoD|-a@*^(lr7QL3;{*gX_{LHvg`^%t!(OJtg`_r>LawIqtTN* zR@b0r`V&;5r6zy40SZQ&W|bd3~MOaD@_k%qvK6v z^BGlE+EM$`LQ0|TxZt$aE1eL^przBeItVnPurQ6!{cWaq? 
zCc%u6g8VgJsD|^4_?uo(b#MS>+*o=JuX81pVg_SEmqX4YZWZc7(vFYT_r}PT-;r#2 zE&G6v(p3{R!pl%aOgp_PG*llD2hbJIJfz0ZY>LH*NH2j^HF1y{h7LP7P{{tLkle#9 zu0<{Jo!U+N)FQOXJR$GA2*I6!QgHl#BFZnwhOj?n(%Fi` zSechzo3}-|iN6xMQpgA?gY%?B3%bTnWz4$(<>NW14Zr$JQk#FP_xw&MA1%gR+CrTW zp(-@jU8{%6w;78!IrH&o?^c9Y+sw5Vb@Ym4LFZ`7P&Z;9nml zXBJmgkjUkzg=^Cbx}$p$`FRds(49~b>%i(B@1s5FFXi((?kn`KS)O2-_EZL+i1{nU znRtP_!7nYu+|hc1cEQG6#@VNmRR@_+9G=;olR4JAkH?fRat41)+*lZhdHW| zvsgXW*%*^HOw6;=gHS45Nw7gWst12ax@DibrA~22>@gd}@=^9xEN~lICAT*JsDKsJ!xE2j5)Bj`I~eJ#5+zF5u_%Z{DjEHUmq}m43K}ggw89Tgr)3Zm`aer{FMU6NyiB)&bvjO;pV)c$R@%MS1}@s9n?EcKZ#dbVOySw zmp6JZ`U+nSwDqZtV)f43CE~nV*e}SrMXjP-)tiMWvX6d4S}et2XL7)&w&fhwJ!pwE z=YJb?rC>AxU2vKHC&`~%IrG#(Nrf&Im1#?Tb5{%K?#6FBKd7$*e^}NHC427B0Rx0H zGV)E@>nIYwOJit;dPJMqN7ceLvfHbYJcFxZDVl@vu4yP&JrdlClH$|el$`S=x#dCn zwz5PZQdRXa2feoPcH8qyBHwjWFW?_8VdunBOzn|6-@yYm7Zp5gFm>VdLM4cMH^EE2UR)yltusX zUy!f~9R)(oBMaS?iw41ofo4TiWBrHZ4rh5UtEJeMDVz;xC|Qb+3rP+)G1!}~)2nJx zXoBnrK@i;o9h2#7&Uw@ce;O4sUo|Sns8NDDU!xdZp3YG--np!IXtmf_qIz5;shUNf znLYfTG03%6;HZ7kLJ>Le5#f5?37xlny`<5UCZofc+LiT0G<>;cwAAY^lyFm1Li-|FZXn zej96@Y0B~LqPT279?D;t=RtfZ(%U^MH#de>;2OvY+Tt|QWs)V~W zbc$L;KG!{UYS30)Wr7#`3`>pnTYIJyLh()72@c|2-hdZ*$*CO5La`Bs%dcm!OQfXoMY1V09;6 zNjs6ZH_tLfX@xqC4pwt&hP<848uv9JL6tX$)HqcjH#MQVDrx_ggg2}{UYpR#=!>d0 z7EuHzsIB`I|Ac3}R+mUSrY0zv9>wkOQZ-X-`v6ro(7~sE@m{5_H0SmtdMZZjmY~b= z{4#Y!b-JEWLG{R%HS#=(&SEgWaf)G>dl38eakC8V)l5-08E&C4PtzfZ88*3dR8?NC z4sjy;a)0(7V6=S6e9x#?bw1Q`m!Y-1($xu)013Y*DUGt&=9UqgFKTO^&{p#|s_J4? zB)ee3aAgU$Zh#tYfkm6~SbS(Z9Q5x-T@=Dlt0)RmKC$s#R96JmX#SpxqKx?%)y#1E zPK>c7DYE^Xto}1gaGC4#E#ZL*ndc~PG+}g7DIaZ;KXjLpBLA5Fct!Q0>1sM9OS|+K z%T~weuo#P?X1R{a%0ml1#Uj3s#?D^+sPEAlzMuJlv)shuC@kjy_fQMvHg8aI_Xa|O zE?#sC;)kpkO>N!Ixh<+)Bogtma8dw#PgLTY;(EG^={?}4xIfZ&Y7VB_>DKl|^eY~s zSfoPE)zG&phhFBm?DFDskQF`@or7Yc|08$+*1PL;9K~m+pX#9dVxqq=o8WB0q~QnkWyUnT%W9@($-u%jF884@+6p1U%holkDmBM;JXAdbW8-i#v!*Coe{?tR3ZgOUD2_yf zswY~=$FhS#-JV8r1IW5`t|JRN;t>~mEN5Mk)7-Trkc&6U4GvM4AT(C+vm~c6n~%Gr zQO`@3*+o;E-wAJ57ef^(RV}0*g3yH!6ced&q&-TZsOrwUs0t^B@Dup5sgudJk`V; z=gQU-c4;9uw_3|Brl~2kCgXixq-VN5xnw#ISAVLps=O#>-yQ4uDk`}d(7Y$g0*UrV zin&*#M^IedG-ti;{3TAhoyB}LS4X|0I1XEI-D~!EKMlubt{~uL6;gPmnG%awnpB%l;Qry5Rtpq zI7}rdP5tIoH2csU>vUC_FMNi=W|kMC3b=v)g=X-#dMt|AXDvGB38GLL+`=1+BWNuK zzXUflWmE}C4S#};bXa27DrS@X`_IaU(&k0njb;k;))M{2%ed=ycRhs&uxPnkfTKDl zT#lZgDXkB_f!5TF>bjMj=~?O2f%4ids#~w&3J%Z@>P?hEV+nh%^PtcJ2ze0gw1jh3 zW~zp&hTatzDng|)3yX6dY4)3~W&<7f%E-Bw*UPAsTTe~)?jRxjRNg>SEHzSomEwC$ zxDqCbh53${!f6FqwwT{*n==T_oszQJ;X9hE6L>@5jxXGXXPCy~RG(LM=yeLxE-K&; zB`q!QmWAQYG8B3RBgOYWl@#MYYP_<|33UQr=t5Y?qs)CIdv|Dv-tE?~%lKr_thC^w zA0-y4M2iH(_Ob89aaGID-I8Aj@TgF%v&wWg8*#`@M!(d@>H}`Xjq$nrjT;cMp@Ycq zwu}6KrfO!(7&ZD(j$7TW7n&^oqg+lhA6J#+rfhnH)<~YNTI8a%Ma5%Q=6mL#5IGm7 zqb31mvB`ajdb%a(eF^=xptyMAozjGORDDeY_%&tAu|DP)VzhA*yqfmG!uIPGgZ*TO zuZ}kbt!juhwe$|^uA5>Dt)kC39e-fG{*$-jFt&R+l$3gy$0ClSDovfTVg*eMrn!;t zz0RtsC~|Lp%iToZlcS1zYrg@b)3dbb3=8f zi`%m~O50TimC5MFoxESg?fu6|m_RbgX7!f5{0aBFU=;xh4bl>jLT{PLJXrS$R_1xm z95YHStuc(YnpO&ihJgR0&&||Be*IOqm6)GHF#uMbg4v4}WQ&U(&7>zJh=h8^KU~FJ`tV#-{ z(v6_3nBi7(Y=Wf(x}O%G{f1POC*m1KsJ--AsEcj+^jg=ChT=;b*w=kjPD*AaP)>_q zw|44Ip;7kBu}O7&%0;{FGE>UAw%_7z_Vzg5k==%FMOK)n`VGNviRvoyNUM&@Kc&rl z32MtbpLj!@_jtOSZi=Yd)+Am)*^zk&b4~aZ@I6gLRrL^GJG=crIbJDFIlt9dEpX$| z0}tJwsiWGD4=ISZba^bs@9GzE>mQ>D|LWDpA!m$gplYf$D1dIknDq(8824P0`w�L;h&DAmW8pgQ!sJJd__Dei+0!eh;`GL2i0x#!I)SX@f=wWJe znnep$Tiy_D8d)3)VrF&=yKuZU4}Unr_yuu3`DIw8IyPq4q6OHcHsfWs>06A#Q0nI9 zQmsYt-iJC>ZH!FhV(d3#)kyw`Uezll#C|c`kM|(W$4Tvc@>WAC&1>kbx2yVSHPx4w z-;geg+I9p9LDB=msNyY1>R1G>sff>HOubzk&T|V|A|XU^_k4Jc`kl*2RJ%f^j3Vi> 
z%Y7#8qX;cVU)zr5E%XEGb8_B`aK2;U4Hcqag3}e@BUMV`ohv@rW)?Ajf#UQN=IV2@ zk)6Z(iMW94sFHIg?=WBsCK~Lv$9S42XiJ;_SqrrvFG@7~3)wM1JrPxBBm?k- zy5hy$e`y_Bs3a)2tpASqd{hPSYHCT82oIF~ z>WyoSNJ{&Lo6$>psFI;g^s$cW%J|VLmRd$2^%8HAnCf{7ky6>;$oC&rjf0)T8FuSf z)da`fsg$34qqWmcD2}sfCMt8Y(9KX!s*e}ZgKqFm9m*bW&$g(N)H(QNt}V(wTrz8# z_XVEDt9LO4%o}B2_o8@QkpHo!@jGrb@?R+4O4pZoS!gtR(^ftRf3%8fBj>hS9EkQ7 zM`Yz>Kz71ympuTh)DHSwZvXAnf|wdCAsmrYZIm~|DWDR(w@oqp&s?CgSg6aVeI$f| zLjv>{F63;|&tON!>4SEkLY!X>R{OlqJaJN(Pv48JP1i~EqR9L`qzP_c6g(jco0=_j zDE5n!JJ8#gdX~Rq&ZyI01t?&f3_3S$sAfK7;{Yx{fOY*)N&^5_=pOM%h zhZma|axq)v^c_K1q3qN83Aa>Nu)^fA^qoaUq>`G!lgNt9VrXHSbGrmeSzK0uZ_y{Z zE?orx_2dnYGOV^8P4y1!A*EPHtSi~>BQ0n zyQnRCT$t9W*7UOE;#LJN#7RN^O0c7DXr?EIf|XPOyR(hy#+YJa64oP2j=d)=F3gW} zpU<8r2b$zxr5;J?oEF+Y1ybwz%i<5WF;y`Skw9+=oIWA;wRbpQxGM^wbSR%G8+@G# zJ3)1x`w3dur9Y?RXurKHrABq=p`g3H=;?gQUFjFKg<^$P0Q=?EI#G(*W(^mhEA-dg zKKkG7H?S2Od3Yp5u7vdoblUokN?RqlS5j9W6~}bmhbmCcJx&e}b#1kP>RPe&rU4E&eh8 z<1MCy=?lddU0ZMR>Z+^eH>;^2p0e%-k#~9jh@S&lak`=qis71Tkw1GK#$%~o%lTYc z%7vvQ!8;cSy@nfh1s}DiUXl%IiMP!wbdt8}_2x;a9NiaCy*+z}RfFc@MQ?+6+48yp zy^UpTobL4Uh<|x;=xpkgH1WxT9O_UdPK59qq)pKj-7l(DQ-pmQEp_*1Hm6&7PmQOe zsC?R(3oml&bAL*!-TS>(20=>>HkZo3}_>e6x zeKSoG+W?&|Flz;U&h?N+{j;Z9v0e&=GK=7cP=Ytf-^crtUzbd96|)jrILe)te1m3E z8`m!;W{Ff{C!hk=aJH0nN%bVg|M>V%I{)a>88hn6kROJlHDn^{>+1s*+C z4NZIJuo(?U@Iz%&*=dc9;w74JeAWc27_CYkvLvp!p{l41P7TXC8FLlGk?vnK<^J+2nmARwvv{gU zDB&v0k`2)mC&OkhQD;40)@?esDBX?1C&6MrDbjo7>&b{*SES7JF$d%@T?1iMkNZ80U`F%)dwgNB;lE(RBybbiV(5DzDy? z|N`{MbzG#gIkT-dvnx96t$^RE{z&(?NMr6ZEDokQvDjg zC%-?0rZ>6gzT@+Jp3k$WmHiBls3QHsojB*%_ZcrvI~!3}CZGfEP!l#P<3${G&-|a+ zz_6P$2-QYuIYoYoJZAf-xRs0_Q}t=l%g(`i@h<(8AuZjz7H^zAvW?`niuge%o0}uB zNEMY|2cO^!bD0AAf;lqA*WO82JW>bK@er<*Ijr<=Ko2&gwM8raL@JdEi+wnY5%iXD zqoJ=p{s8h2{pQZMq)Dt)9BiOiPG(js-i_Yq` zex9qP_xR$}Ja=}xwz6`YIxUYg_~sWc#7oK+6#hhAbx72Z?WCIzaZQ=3`*3~PZ@8k{ z&_o)GtgP|zxp`O$Ew$VDex&c|w3Wd=Cv)YLWuli#)cx7%tceq-OF1bQzN916)cNv3 ze$-c;?FdcO)fq%L*#QW^q*|E3-OW!0Z}G1_hDm%%TFUu#hq}x3EsSv5}ZVLbql&pDaJ2>R!sAJzO_>P%NMaPA7rha+V zEme>=tQLMlH*W!>BJw(Q{iDS?=2q!;g0S%dD}-a_j&e#F;W&j$*d~wiVrH7HBJ6_F zXLXe}ANQc|E)%Lvw15l@q92$+)ey(*dZHO_h#d?pVf-GWk;x>ft1#6rRf)8++Ezh?{EzK|lsND^^D`Gl=~R&* zHz*HnrorZgXdJ|G_ZMg{3+W=VJNG`FnA@xv!F+CxbI~Xy*CCp3OKEjcPGZRV6%#ZM zUJ%n+U9AiD{B(x+A1tX;4kumUz#mkW(D36l)nU=+R8lR}mA_I3t1?Nn5cinm%~NH} z6)wJELp^;>44_x`8I(&uYvfX#M}b3>7te6ly2z)n3g=LPd$)K#%HyG@}Pt}3i~)1 zk((;vB9d$_KakZ5#Zgggp*(s!Jwvn9gE-+?9;GWfkyK9q$$cLO8aiD3rh40rvc}Uh z^?|-aF?G`yuBy`wxhMTB{*seeu-Qm|;kqv-v5*z7(y6WWPz;xA0vFJM-HC!?gEc`% zIiKli!c8sh(dMBj8XH)ol2Ah>@Guj6<{gB4@+#&M+vmVr)<+KM&3NE^iv(Ry9JVBu zC`sv9ZZub1S}RJawqmxbCF{cQ|HIa%5saXLcF6LABbQQExOVuTM6^ai&@cw31bqa_2V^L_1 z>Mf_}R`A-HC#DMcNz^WhUpSEWQ= z+($>OL}yQnD0PfDf8YEvf^EVL`!{`viwtsVTZdB>C)OuqRi;4CRBon#7kT^l`wKG; zW)s3z>V0B$-i=>^J*Am_Y&udBe~S8;E#9fA>I~`{OW1`hh0W-sr!v`4f72WpXNjb1e=88ZmVAus_n{&ExFr(?7m*PA~i;>QieKMdOhPm%Y-eN7KXx%rN#~ z8Vxjl6PrY)>QAipp}gA7{(N2Jq&NOP!qQOVBcqK)0L?_HA7b&1n`TGJ9`tMnobv7H3{iIYfk9OxF-37o$@dUBX|K zQ|xx53UiPx4DwOFuZ{SWx8i@QH#MbhYDsW23(YC0U8RFCl?>$Ji*AUCbd8C(x1sLK zIKPU@gj$#%a?wL>ic{L&<87q3;=Rz;Ib*t5fEk#HRlGh`r72?l_w`V5M>h+)^fz=n zwjJ?Y8rF558fp+%I3!bP`9|DkDY}BX>Aa>9d;;&CW6FOg*d2l*w57(r;bscS$(8+*=QJyQ*YRb zNR*3I8}P^-@ry19q84Bw*shJ*p_aj=*|wL)d&BtCw55Hf8O5e#w_ZV6GzZoAcC1m# zsOwv)deD8lq{?Ccu0Nuayhh>d%DV9_X0RvoOR%@98L}7M3*<4Jn5xi42;-lLo9L~ivn93da6C`|Vzip^>L#9*}_(V_e5JqP5PM3k!zoW(TofN!Mhsy^=WWz?{T2HR0X z^||z@N+!p$R*6X_A}D~psu*w19_q%;+Ve$VDU8G^SmJ(#{8lBwyJ|(K;#lTg$r1NF zNTVj4Su!`HD~G__`>*1i(G-c6Tip#V^lsyu+faKF`yiKn2)pbeyov>7drIXkEF^AN zb#z-=CLW5esL1ej(D|0H+XtEtH(#a{ehMqQ>x9pnRw(MOyJXN_G{v&pkwi=cH*DW@iOi7~pj 
z6{Lm~ZJ!4R&QV%bqP&3k^8t6Tl`&ot_rOJ8- zBd?6HI?`+y&P8lB?-{zZdaY|AFD%=OzSIz@Y%ZEQW9((joW^_Kxa)C(N;Aq~4vS@m z`jEY(|KS)~^0bjv;G?1N>CioxIn!^>@cVN-5hGxh|^ z)4y=(ml?xU_s~$>r{b!d^A>IF{;Dahv4<+oUE~gC0ZGMh+nibOqlG8g(Lgg)UtUL-op`6ToOexO{8MrZ6f7#});s{Z2aFAy8AQ@S{Z z2ntXn)j%FRlX+;j?#LQmTTz|+Bjs^&>j2J(1bZ!24P3`Ld#>*iL;dwoNUXDZD4vxW zKiNfvxBU+-)Mx4>2TwlZJ$@LQTYS%H4R8ZRcsa>5cWsIfxJH2+Z53j@}Z{gED~w1@~DC8T1vod7lDnmk+;|9 z`^lUWNi)40C3~(oP1WtWv{ZfS?SQ7DH%|IERoKc!WrRW9F^Gd`g+0-n+r()q*KkPp zXJCuB8{-o1{xnRbBi2+(7c)@QDW}F^k@JZhz*191E>f8zrkKs$Mt}J=@q8HTlAWY- z&}x3x44h@fqdXp=f653okn+jHq2VZ?o7rzsoqDR|)O2?fezvEvqbZz{%#;Ycrcv^1 zZl262$%iJZFqKzxq#MosH;pQ&>*I=FQP$T*x1pfCPcPX2ABQ*8C76t$_d4V1IIs>+ zqe6eHu=HP1&llBpt(+#MXKv zTdk2;6aR-xe@CxF%kVwzbkgkEaDn{K@jLb4&`o=reCyRX(V3#@D||)otXP>c%3>%~m59@*R2z!Xa2M#(CteVKKJ?J606B8l4C6V<0os6Tio z1UsR8Vm$`tNCu&^PV2NPm@N9TrO(Uca3PC!dAycd?HIlo^r86JPJB%&iC?U|#O<|$ zE2yDwqY6WYDyQcrO~stU}?cCh2S0;T!)B~n$V78T@5o+(BIqhv01A*DV& z7tWczwU$_zkO;t18riZGMs*0+xLVMM(h*GgCfkvYrqxE4~L3CD4F+;}Um@3L) z%QbQ04jOA7+krSbiV1Yd9;5uuL4KeLv_M~`o7hEDxZB2TtV3Sv|mjE+lcKtse%d=DYsiqU9|ljK1?9-3}>E8+qQ(M9)9R7FGvDznJuvX;vB z$c19=+Hx{g=7n11`N0r3M167Gy$~OK1DOXE<_|PZ&Q!(RrPa02IH#$VN=?LQ-Bv7c zH{sQ&LA!XU6eknCWs9+lG_8@HkUw>=J%m2a7zDpq>ok?s81fRyqo%8HDyP3SKXPLf z&4y%qwoa81;l3-VDK;yg`WzL4gHb`1)4Aj!xd!8X<&5(}(s2Cd%+UF%DralOQ8E1= zulrs!RMFxs`#qoQ1Qm}$b~ERjU<3Ps+@qpxuIr$FIOJY0Mu|jlJ~of$spA%Ff)mil zyc#6}zU;AW3+?*&$9G6>ciJJrno!*K|LX0JHJ`e%k}};*MJIR#PcXt7+R7S8JLD98wIlsoPMVxSJ9S;%4BzT-TvlaJ zUgSl6RASreV;tj}Z}k=J;{zO`yw<-OG}3O$ivMa6DI>U_gGnv8@6s^uAFPv<^gia1 z1N99gGwMFV*V>Z5yYKLSHN<+oT}dj9c4`*O1DnNq`*VH3?uu0Ayv5MdoL}$5ef@eS}%qP`iQ;?V{dxXidsmg6aH#J}Sa8#y?gx@ib++N5|-|P^QtI zzLcrKSC~)jMLZQ}e$pwvT!gAA7a~#T7C&JUZDVBaCp)4IRkLPN9duBfKg^)sj6dl( zvp}R+i%z&uiX>?cSRUP?FT8aTGC+15p_VcOhBzb;}`JbVtqd42jIC`h+ zQv{9i?^ScN3fWuGIIAsjzLsuB8Pth;z6EK5?&!&9h!JLMgRg_5zVy70^R?j1eoI-* z-Op0Lz$w0`SMeKrej}KJbqaW>FxoOLDMp2v*$lSf~qYgCK7;H=~| zjiWB&02auWVv(3ad$f)1_E=d9=e+rNNC?_-6LBq9sw?VeJ{>3JY3iu{lqE12w=td8 z(?DlOp!=yc*q^AZhE$8ja{o=>8}OtW^VcZ-*zRdO6t!{DYCsXbEo|ne8@uImK3@Ou zV%Nb{S`z%v$}N`RCC&2mb=iuh*g4aNuvI+9IYbp?7yOU+fDerJve`b%CY>-vO>?%A zkII-YId|CF9P*hjBb>hwuZQT`be|`ee#P_beT(pq-AQ!7M3&{q`Ihtce$bp)F7l(c zEFvCmOA_tvgDhG$X2W^gx`eO1O?(b{?IKJDpOb~&&KbQ<9YAxsssFXg1NIf23pM<= z5<8-`8b>AdAB;eEEvUKd>Cgn#-(4Tkx|V(-+$q^}MNWz592%*uel!+v{^X|>giD!VVJ7(KCbso&9*8mPIxyclg?mr9M2 z-`Z7}rdG%(u3DmtOmJ$k2Qiv*<2BRiF&Gle#Rg)&djRSpfu_?H^q}p|PFb3!s3TxO zp1RQ^-PqoQ@z!bIDA80`k_~Y!J=w@>UN>Yvy+f3cOGpLduvBpxzB+{%(cZYhA0`s{ z)Ckp@E~qkknEFv~vK~?wJn{|Uqghwfa_7_!X$7kBY1h)FdW*)Q59?L^njAv>o|CM|R5e-lEsO}Gux^hO z{g`J_ymF}#GD^*oD@8i>r_HK-%9^AS4i`Ph`JxsAR9ZbyYwhx^tvyrg#D3XRlWe0( zAv`z<7c9mA2LF;c<(#L9iT}u@cwro)d`5R`vc<)%a+B553G#2a!inj_UE&Yw7F>a; z!H-pAJB!h;qS|EV4>tEd<9j$0zo3x)8%l`o{`-0njm0#_G^=^=FUqAY#&^0mQyose z;A`7O{7m_1x|~WaLI?F#wz6J+c|UA2@`-M`tkXsr>Koh>B2d9NX+QPvf#5RkpCkQ2 zu@OJ;3LZ1b&}FP4f;o!)n4@N#oK_s&g`gXBD|>|08{ZWg$p&|e*CItfAwxC~a@#i1 zAsEj_=oL=eoj8A^v)oTgHnd{sOBxs)2%gS`=JtB_>*`vc(~qc$=k`5*t{%=)on=nr zMis#ep?Y>XBn#opSJ%MCM^0suYEWO%l&<&^#2b4Mit-?x9ENWW%Zq*N7#pm2O%I-q zn;uFM)pQYkSQOB069xLKVyLFC(@VU;A}Xeu(lmFR{eq#bl>RxbZ&q8;xYZDvo3cv^ zR3M-8wHV0XVg~LA7 zizcasDC~YuJLsw!#lGYO>yrG4uF`Gt>6gK(?io5#M58JvN^&`#;K_2BpmVBb!r)(8 z5s$r}i;}SQ#?S{=S(<7;Gd3P9j{}rK$3(z;){Ylvb(vsaofvvdN0R zwmHzielFnhg|R(pJ?(lYnyQFui1Hq>qT;<5fvbK=;HMA`0%b{33~O?WNdp6wqzKB8j>q_Uoz)W*<|? 
z_W&OTq1&p18gKUvRb?+A zyNeo!7_iV+OI*Y&b;2sH9?DPHR56F6ZPWq%v6mFOvWY4vuh4MJaDG5(nNM_K$F3I| zW1p@nxS5UIL$gCy+2o}KrqfEQ&cbGA*@Kc*VdEjw&u`7~Zgod}Y~{fTUD`X%;VG{w zOg9!~R2)rVhMMNI@EsB3oiXA)2Ex#bIG}XNGVz6YO{u}oOmLPjxc&0asgBT?v5l}2s*&Y48#1Q}^tEyt4H;nkE;tKNQZx_Yz9Aj6a znd%;M#NSu}IwTkKXWaxJ^MqUADRg>9u!O9DmKY$r(+^Tj2gBJayAnoMCc*Da}uVuv%@Py9W29(UYzE@{TW&+M^H7 z$OwvO0KBh;b7(3`_2loRhaRxgGJq4U5@|j-2i%ppNc|>S;~`$_vB9z^)~Zha-y)R5 zeI|X)sGa;1 zVle}g)oiw;^-)#yL}6!$e&^f6HuWQs&pz%SjiRtvL+Qw*Y8_2-+8`W5$noXOhJC6h z+iQm!&>s50$9pHAjz2x^BRp25bAXTQUfGcS=2EI=>`ZGD?E<$5CzpESE9^+KqCss$ zTlJT7mbgq#&PRD@8lehc4?p!=UW>a3=jG3*DpI&;n%<9(SG-(jl*3dfz(!(|zn>2A zFW-qwx_~#Hhmq(LXr-gnbE60%co$}~bGyWTOxtk~-7whZPAF=XZ-N~r{uS|3G3(h6 zH|-lbOh)MIs*c!gaI369E~FrrxXC}YigJpxh-Y-3E1%1aGT#13OmMoJ^$hEPXeT&> zi2F_Kh(I;^15eOK-4bzV$8A)bhyD~8HC(4nC&XzI-+N2=)^d%Ua_PsF48Jp*7rR%@GkTuMmv*bK3~vhAfI?GEWSM7 zhrUD$+UJyKvT}tF=?qF|CI=1^u4ngSh_?r?z*fDS_NdaL3AUTlIagF2rT>JTy3cFL z5oW-MXRPmLETQ+oqk0CatNOM_)NpdCbxx|dkA=<}aST%^0q=Zma4vh`$8{r}Yayq) z^iFFJMY_wumK~^{YJ}FfF7ko74L6vQj^F9&85N1eh*cW{m$)Ip4+!$n8V2q+M2qnN ziK4dqJN+MGkfhIu!E&LyfSpLc*b}HPQe+X{LFavU@(edv#OVtKU%$W7QmV)Th~9Y^T1qzsGKz6XkfWo0J(gt3Oc~rbm#$ zAHNWL7vK8^G4RGwZ=K1a-x8ya6*9*gjNk%cIIa-q^I?z$`r4d!j&*@M~HyfZ_TwU zsy$+WeT@(O7VBR+XYMhu%c+db)>HK(2M703x*F|7vbBljC@WV>%d31^Z+baS<-cVN zObLTjhP6Ywg5KaVjI)}tNjOg}5hqs$FS~{8#GP(uBM;@L22eIPcm(I8*e~%+eL+># zCz?DHP3tGs&O1cCb=Oec#7o6$67!E5qC9@Z1%Bgg`fTC`o{IIGDoH=5uk|W(NF-++ zY-Gw-D{FzBp_;MVa9h9QzrVym=LvOpYJ~P9hU;Q}H=6S>!Ij*>O`sb97-O>J;}0?{SXn|Drv{VbrGa&Iju+Wnhedl~s%i zQWf@!exw!Us-ODHzY(@`k}GYNsLgg6++r7K zg_VY@`j+9QyMYY!rhZIAIy2$E&gyYz`-Gm1L`*Y(jChbrl`)cy%!HJD#9kv?=60BA zf5S;xoG;4Jhaaq!iu(m1!O4g0P{&eCw{y_m!aI!lQlF#(LJ7|GR*S?IlGbYUQ&$5YBO|Lk9n|w^)~iurm?B0YgNN|S-VD1x@^z1XpB7dzEsXuV@aEr! zQ7ydB+Mx=hmsPt^ohe(jP**la#We@+^pTL?Gujm>pgz!UHh6V(ouskUPZXu&-uL)a z|BX6&z7d|DT~ZnoJWJP93X<&QR(@%rt9peJtizSS=-_?dDCZ6LZak*=t9qLn#%G~D zlaOlkh3cm2sOMsZK5u;&_#L+?0u7n8b-;aW7Cbe zVV0G-2na{;m$Aq03zigEAF0Wye{rI)St-W;PF6=<4GgeGxVut4a1J37{9fGgW)sba zj5wR^$Ks-$OZL^L=_j_M%tgd9Z!PtiZmRd$r(}RG`~JWhm4}k#4|;s!1N#DgLjuKl zD|yEVWcKrJVV&=PjH^ejHEB;!ED&JLtP5$Y3VVSs?Y~okW^@!EpL@=Er!VH?sC?k= z8d}K4x~f`0T|H?oGSrIfE+j79#-5t_J(9geMGaID+eN$JT2`gjp{D1kOEl%+wT8`sf||Tkt7L{OZKu)?+)kzgpU-saq`#n}EW92K+GfcJTCAQ@vtSeS zHr9v+I+`XJ3&nX|!r%rnXdrW_(W)-9j_K~YmW6KATK`8$dKQ*bs=92|4rZt^C=oI+ zhfhnlfhqWdEkkeo6&mE`9T!XO@35b)tL3PrU+{*UWjj5>hzh*NZN{3i*rqm$4D~I3 zr&6Lhiy9m}u=~nZ;y-mCeRN*WeU~V}R-mIb9jlp8zGKO4kg*RZjd6MJ)=u#N2 zKd_(sh4CiyxLrN?Exw}idY9prt?9h%%UGB%I2CJYo&8E?h!fuR@+nbZzNfe;s19Ma+9MYtz%PB1wcLF<_z`(A165Mu^bn_-hx^dI z;(J?#enRCepHoyRG*A4oTB&E~J9YUtrP7rhx09;be-TcVZ2)38)C zsxH5mO;ALY6iaE6oQW&W2HyH2lvn?!en5X~uWuC-@Ee@a94S}I0x0M-#3xjlUJ1@4 z;@vm!ftf^Mt|eHEHXNg~y;IZosN!<48J^4NqwC7@dZ;qQOy=j!?PZ)KS4x$@7rGNO z`CC~)2R0^knGPRymQ#+n=Bje=2Gflf?4`7nnWB~E`SYB))5=YktRrRG%#49umD<2HH+d>Zaza z>o|d0c$73Wf1i9RlX*atZti`gOIp*ogE$t8k>Z)Sp##+Z!ARLjpKzyABVM=|a_jeq zv%4?@=;7iEm`VlgUP$-+ZkRnH6(@#qv+ai_4p8}wYAC}JROKKyj=_6A$ea1^4;V|G zWyMfqr;L{!B)gxj)O+m5vQQf=wD*X1tmBMjr}!CWTd%36{jWF?9Li%z8t5@TTa0*c z6+?|tsxm`>%Rl5udRe^d*DIeq`ae5GigQEq3shOtf3DYVj@d$qwdu zLriDZ`cA!5YgHcg69(&Em>>EbQ~7o!S--G>nn8&;X0*`veC@flZUxyybWbat*29ie zYiUxj7={Lmv+elQ7%LLF3ust&1E;g;C9#{@B;KSNp7FVj?-_xHFpAGMhN3d^-~yY< z_G}mrkdOY%JkFfH7doiEdYC$nN8)&BC0f#cb}Q=A0#sJNXO2_Zp5kY;!#fK@&DU-m zk-hZDa*%g<`t1%8g3$Cc5jq zjJ65#3p-7;RIPpCNI)CMM0eHEo5ws5iO2MbCBY4HC0C`ANs~}A>mi4U`_c}Xo6B3G zu}LJ^?L+0@4(3N)Sz2rgz4Hwdxw)V42DGqeiPB<~xSn;(h!%CRh}N*~+EvV;i=M%T zD#)ovHQ_T_^V<|tMS#%fjMb1`G8084_YXh95kBd{a(4Zf&``$XCxH^`wknLvu`iuG zyq_n<_ZULG`T5%0)9D#oChPg@0DNcJ`lMv3~m 
z4-KQ9`W3=ylW&HIKpwS}!~H)|PW+71dOdr|&F~Y0#Q@wwIjU}-kzG_WlgWjFd??RW z=NolP-E(uCQGG)L`Hl`jxavaP5XBzY*WPf7k(Y#F9^|+;C|?B>@Ns)#^su8Dx+_pY zcO}sn|BAJuaG^BRF#Zm;(AyF1yQV6;S79yp6NzA^bWpYq{?3fOgIXc(v(r#RES7G$ zG5A3~r!{CsF&OP|UlBXpSgY#OhzFmj2z}OCs}52o?c|n=`=j~OM2WoCY+fL)tVC~K z{t;FkOjPB}fiYaLaW(M{#p(XO)na>g4)9lare7&917$;=t8VU!6nWKLRL4afMlRjT z$&ZaHS9%@wp95bc4e&%7_8dDWf@%jw`ljo4%u0sI3CQU?;tcdOi{SHm4rgVuUPUqL zJ7KAgd@epkM;atcQUYJseuhV1XJ?JdizO|NkZBI`e2m#p@9gI;WYKTvhl_RffH5?sNKm*VDQs4lJr zH#$%!{ev)D&SYe=#YFoR|L8PYBBL16nJlvmWzZG9TRb+diG@}DLz>JaY?nH zf5ix#pk?B=IE`v}sh(T!@wxhnx%zDkQ-2{m@0<)ZeGp;nTEK zVR`3MB6ZZ|FcBTGPA?T-q*SCXXh*fFBO0qS_=P2m1r%;JH!MzKFao87M}J|Ju`Ut$ zMj@x&OP*puqZ%^76PfUubDo)6!0lwWs_JQ7;^H#h8)?4n)WOG%Gw?`7SbxhK6tB0j zig}KJ5iN_WO9zO%*jR20`g`KHo! zwGZ_&lXW+d-TI=UIkt!|h5wn3zcHPHh@<5y2LA*X6GU!2(ihczy}|kio%p*o`NF(m z;yo9I@Rf)~GCQ;DbSoqgdtOY!G6YeKxXM2JLc0{ZDb;kk(Mfl8=CSPH9$3`!Sw(|$ z#dCIK%Vu|FhPMDv{dMIcr{ZBYSpGly>m}oU5*>hk{!s(r-8#=#P_( zE~Vi@302DTlcDaR5{~?wgT{IE{}*i^Q`_*Htiy(`t_n{&h}lZ2_GWvR^AH>theSW`PAkkPPBrNXctkF2=O%U0KkzxJC7$uu zcsfZ_Gxwq`vso|s@h`}pkbfIPkzYD0{;Oy-LKmII#oyn=$knpa?2(wG_Sy+fHD%ff z>Ltym;@B&0xDUc&12ZctRU|TeZVB+zE78>4VIth*(9lnKY9&+&*Waod@-;J~zD`b@ zw2y*^C5o5&dtvM8>ZQ3pw-Cd(m6Vts#{S{eTdBJCIt%zZf>KFaDk)015Ppx?0 zkEonx|7g^RN5lUAv|0YjBWv4c54q*BwAmwLdDJaD9ozEU zTYgn?_Dov-q3pr9{6!fFs9P|5HZD)YZOe3^j!P+pC>}lANHh*>go?tQJDGpB5s#}i z`XPdc&ANMFoG2OINb!JFg-?uP{B>^L|DUs)+z4Iy7c|1Suw>rofas5j;!C!!%~2$K z;%!7&Tcc{25{bqs%oL?v{NVVD58eF@)dLxtq09hRS7-zUBgJ=2D3-XBsnn@wfh^$O6 zS@EifQ>hzS3@t_5uvwj!7IG??xLZlON&n(nShCTbZlWN^HkXAplAq9S+JN>rto}%u ztxM9Gu(sZU{H84N8c#7EDZG}TW2!mI@^^=^as8Lpg|(HH`9lt)dnlytgf;Tj7b{e5 zUv|r>|Ku6IE(OrX#b%|F95;yT! z4Hi&;Qv?;KA6N+GOb#AW+ZIh#W$wr_gRyp@mO{E4*U925xy_p{;=FO}R6kZ1{ZoRDsIL~{2JOIc#*P@gLaLRlx?+`Gz_~5{ zPzl;=4@Or;$nV7%(UGB{lYN_Gdsi?^)M8isYgkS_qYq1OA<+y{#nL$0B`FeZdH+AC z(fC`v<+HaAx2!$%QOZBF2lHui=vTX?{*-3vKafW2sVEzu8nBHh(TPJf2@ZJGTZt98 zM%_gv&10FJ=6V(`tNxr!7;VS%YuBE|eBH-=L!tv#~n6Uyz*&YH}Y%t|uLx?nDJ zQ3>yL`4PIRYHXT)#yG5&b?FK?1g6_M;m!h|2Sx1X#4%m;m51>Rb&vucYL7$CNcx*D zQw+A!_mt`^Gm_MHZ+6AlT{T8@Ws8<7N*kwB@`?O{atmx9I zX|f48dl!34a56ycs7mI?>J5HGI&JnE;IYL_(5t8I6ICB?Pn@jA zV6A$YeNOq7Win4Mg-y1p=VB65D;`Lo?@CRJh^On-9KUzQpyPunUVZ+#t88K@KdLT9Le zy3UD;GCCSL`G7&`7n91e>WBs*9?CWz<3MwMxm| zCO=avwms9}R%?wlR-|*B{EU4E`Td64?|3hWs`K*hqU|ahc;Bf_9JH8;5jv6T&?EPs zD9WU3zc<43nSpLNiAovYam3ovAIaUKP~a}JDu)Izs4RjPpQ`RE$=d=+&O7~FEJtx_ zAjY7IcdrGuqPT0*{*>$Hed&pcR-cj#enc5;Zqc0F~8v$qF($9SjlnQbfv zsEJ~Qh^HisMJim>k_l@|wLrX-WkplmanE*^dh?6D_W$@uo>1*^Q9Y(iKF90DP5u+E ze%f137c;op-B~1Nz#W<;x>+^Y3@{Wk(0*Woy~S6ExM_V_eeYRzn0Lzmi9+HJIbLMT zf9>&^T}cOZl$mJI^k5yOKD9xZd42?lLuBR2s9b+?RC za)DlliJnT4)(!ecmtt%gk3Y;|h8~2G@~rBC!eUiY9&3d;F4E{DC^$h~_0MGIZM53! zKN6oayJ^w3NOe=1PD`BD=Q2#y#n_6RP98bKC~W48!EZ=cZMY@X02i(zzdmC`$yKT< zwZ`|Ny2>BSe^G=QFB^Ry?KPexL*f*!ge` zdDLhz-~S_qSuKN}j1hk2?-u{9VaB@`%^0A*g~Q%PxUq^k>TGe} z`rS@MRdCxL2)&fuujXcdLymG%%k}$+ig?fa%Xo8$CDwd-oyx`TSKLRR30xQI3J-fw zF_f;tDL-n_Kj3$Qp$*>#agT(v+>RAH(b%cO==;E#Yd&^y4?wK5|DoObh`GuTFV#@? z_GaqZ>I+%e^W4A)wvE>q2UanraS@8_kUj$=#C~TMKUZICs5)-fFgD{SD(9&fX;1cl zVQ$NAYf(zvQJ;%WPA~6Ha_et=iS{r$d@3QVQHmjpWIa|ZS*h8jCZYogdIVU-j1h7p z)iKvb*p2Nu)?1pS4LZlhCKJNf#(sctPA+z-yPHXT%f$#rNUloJ57kh+FFmoc#B;?} ztxgd+!Y-#9$R1(|j-<8G|JdKDwam}iXNtp5SV50PYepy!B^f=r5PCCe$^gg+1(%6Q zL7r2C<8m4mq0>5mKKwT4&4Y$@$a1Ud)JfhJO=KtkZ0`Z8pa$y~yv|FgGYxkZU}kU$ z=hm-tQ5No@J-uS!xQ`evNbMuL8|Uo^E4g_(oLk}y0>`e{aK(}l9GZCyvw{`971RPu^EC{P;vYWD+DimD zOAQ_LRlB!7{~#0n*&ZxmVQHz>dN;oGchMWv81;kNC3|O{#~03Tfxc=adiV~(@Og!) 
zzLg7nJ^C>Dx|M9gejgwiKst_Ks z?X0Z+qOmB;&Sg9MHHN7oR8x@nAS{(n-C*%|2?7}J9HpX&vJZ;es)+bQoyrX3Gcy98 z>RR-cVYa2&Eyk*AM_SVZD>KtEz`Mp<+Jv6~?7rWUXees?M~1CcW_7k7Z{p8CTFBrP-F&qU&gwwn=cY6f$TP z8xAsR%pT6CsO&%v)7uMzpCYfyZU{IDFQtzuZCmb1$>rL5K`@M#nhCkIM=%$o0o>tr zA(hkB(3|0ocpc}VgL-XkRVliVPKU?(odUjTEDO}sh2#Y!SPn(oWt}iIC(HB3B=TCP zxET^%!vkw|s0BX(P$=`7l<^lL?9KKlyDTmtyR*hKzJVEft~CZZ_38(E83Pj8mb;h2-^HGdFhWeOBJA-RQRXgC_h(7L(f+y+X z60M|-vITSQIpQWv>wA6N-3>DkXPpG8Sz;Rw>j-P8?_eXYG~4?J~G1 zDx`f*Tj&peK6^QxVNZ~g$|;wxSuhcGRG4+av)M(XrQ`+cf>QQCeZX0c5H88WJAZSc z)GpsO(VkJIwS9w$Y6Yi3v$&)7r%Ee~$HpZXaiP{~CR3?ab{4JR z?JZ8V?UlHrUwCVa5(L>#RmBkcTfBs}I~%>kVVT6__CNK&Hw0~-chM_3Kr+(mi+Unn zzM|^Z*Y3P(Y~Y?&)YTrqNfEF4x->^|xI($qXf6~hU~Y)8ev=chhZ*HM)t)}5yXYL; zDJs(h8ZMeJ){Zq#xb#NmO0g)cTG2cAK7X`+cB+u>AzI?TGu`M&&%{kt-d9v#*5U3( zw18SbQXyEPKku|#c-(cFsQ1vyD1=JNW_X%FZB>>3w2ErPUDjU%LWV)jUhbeoA#{lUQg!RMo_I`X9|gdn1X?>c4_P7M-^2vTob8 z)EE>q!^5d8>M3EKD(JhxKjBS19CP`Tb90^7ZVmz#;CD4uB=SQME6i4>J~h8BTtyxDoe>?^pogl4WY|u?Egj`^_Qo|;-qK4NndmRljmZT@ee^AY=y~& z_S?{Vs-GF=<7OD~mwinwH@Ag38Ej)3X}5mj*;klqiF#@PdfGz~^Rl?C&K4St~tzN>8=_sb6F8AKnzz4+hujN54Hn&sRxm01paMkJTu8wG90Jq&mqA?d~ z%=Wx9>;<}lHI2u7e6Aeb*5;9QZFiytcTaStPR!k0Qj=&W6}8IJe71f~oh8(oh5Ifq zWcUmzb`50eVY-D~iNa-HwUmwoOEOS=XJmaMAHt_5+uu-UTF$`ks4QU@Ce6>c4V)t3 zOMNU3*&TV0YBT&ON{Y9h3N8+9tKy2m)q9$^7Z`>yO!Y%GHaR8|5UWgb;}+eM5t!(y z<-!|EqXy!soNTvXxOO{v5BXI%Pkwl9$k}3oI>DSs6Rb>66#kBttm+KT-=L;gjGD5tEjJMMzYightXtd5`(jWR**lq(+%<0<%T3 z_>3L7((!as0Tt<%-9VQA4YOor=NQA_+`5P8lU@x!YPXO3{*v?B(aebsX3url zg_hPt@hRqW;a)d~INFX!S^8Ojz>UyIw)%e|iS}TZPKKLpWN|d`cVLfG4l8g&^|d|d z#tj1Io@k6jEw$Z#8Cqywi5BPSNA;2vKEZ(4W+NnPblm4FkbKX3Ka@I8U(Eb!7WK*^+JsDTzW{f?Sl7l() zF?)_4i`yJ&H1Y1x?1flH)A?#!;54-QJakSB#MZTY83dNrmR0Gr!kX5kt*4Bo2|eFB_ae)$dVuoBqbufp`B#-$N z>siY3(OG{5+O)Yi?o)L{rvHo6vN(3~udwN7(>JCkLg|py#FAQI*>!*_I4KCPHLd@ zf(5ywPEsYv?;Mby3GVdJW92M5NE7j!ETx{8u4jm%#zGWkz|?bYH@i;OKsq8wKE*CI zDt*3Fg&EBah_iF}^Pp(vzn-No)m*gXEZ#vZN_$O5!TBTU?B3R4w2hXyb z>8f}gBhT{>M$%`DLXRnu7mU$1mt$~q(LfgK6me7y3iLyym?=KflM#stC`va`Q!V!# zcQG8*q;(0oRP~@%zU#(zwjE34BC8}#R*NasUImx(Vf400HQvc@&T%n(h^ImLn2u2= zbj#xUB5{U_Vmh11@|2+?XsT=`$EfV3tka~I62)4Xh8yC0y`KK@r?YCd&`0G2G)%us zP0&X(y}*FQl=WjLBN1!(?o#bi>?J$fuc@Hqk+w!GUBz9=>!nauv9e;G^SgM)inxi_ z(vEXcVaeHKSr3CqhVGm3E>C$ES6KE@f2fgkhX!Lh>)ZL@KSn!ht_m_t?kP&zRc(b6 zI!#a1&qE{B8daO`Z4{Vp<}m-FpD9tS@!qkRzI7h6UhgE{JBk3~uvif@cPkfdeHEr& zs{+;)NsNufnTVX4v7t_0M#>&>N=t%@=$#ljf!?< zUYE8_ZlGiM4u{pl(D2YIMhAE8=N1#9#tL>byJ;Z$&?+%YmE`njU*u8asEIm@<)R&0 zp)lpgbafTc!8AnUkQL2EP2W%oeqx!gWabK|Zq!P8%?gID9n4|Bq@`9%dp1mP{k*fl zaf|-WJ9hy&Qw^sW#YV^roAl%6mu^-ADNC9!v%6x)n=`Ugq2C}PcoUaJ2D2W`r5(n@(#)_Vl-}uehLUl!wilSC(svaWNpaZ3$i}klvNWRAAU`tucXh4(j zR^1SV+6Jg;DATNOIKSy|SxZf_MzUqz$FEi>yr~=#3T?kR)qTKk7w8)#ql1{47xo>2&Z8*5ePmA(rA5n>J;gvu|)m zOkD7Ss7-OAEP9$-47G_>pqWTY9)VHfg(`qiG*0bdEOQv6sh}Ow<=_=9BB%A#xe^VaSKck46&mcR6Tny1ujAVU^%AhiS^?#r-_}=h}c)XEUMIPb} z)U-!-@E22ES=|M)w`pYW@|qZX)Z>p}=^p2P!tnH@SY=-0odwoNKBr}>58U1r>KL!o zdZg$MbfyvYX2-($WkOSYBoaMx+Wp6(L6;Y_ygZBZrI$KOkoQMou6oM`rmvALD-3r|SD)DKSe%;e}d7_YhEd)y?2Z#-2BMoX2Ut@C0Lu z^MIYiI_?-v4bBybM(ZFKSFw%!1NT`aXVN{|f>Mghz0W>m6WSWR%)CZ=CnPzY`65ct zQ&=jyFC9E7psIn7)dFjRC?N-m8BSij35~Qii0L8{x4Eq3t(tBqvm(#Gw(^SZyB^e> zgSBET_99E=^Ig=PC>IS%`zrP~Xa+AXbDJx+5M?nQzN=DXeIEas*QMHFua#YhI1~+) zkPRt<@AHFio@}Su==Q3RF+=uXiMcPTSnfazUZm6-i^N-5$H-3!=IO9NWs%6og%8>@y$@YbhPk>ol*&MT77B%; z{~sOA^7m44fp%D*kuI<4G*K7Nlb4Dlh7xyhC%Fbpx-3(~AgqZQXJC*DVutFVPNAfl z7+6c$eI7Z)I;Jyt!hhf+r6{K;ApfFisQg_wlwf3YAF~0;h#pS*rvSWGCdf@s#=fVxD|?e{&k7YPDRzr z??RF~rW0aY=pgxYHS>y#?Z+2fezsK&!g*zwD+}GG8 zq83J53ADvF;8y8+0Q<}udOSaf$G@2Ssd{57daza9;Y?+OC#WlB=%acDwdU4=8F=ll 
zX63^NaX9K4=GtX~EjSrk8b^b}gPYC%hWl#hsu8VPgvuG9J2*sMxi2`K25<;D+W)iD zQk3Jt6>DwYLN50EBdNQ(q&G@esE*s9FL5$`F+XJP3~;(alTuS<8IDie`PXr z4~Pi<$pBy@R;oNm)76BJp~FzxuG+J)Dd~9}Mt=peXDOa$k*$WRv`D3?uFRQ_3S$R- zUa!}^DGw>u^>`uCL~gV{$oyR1Q=Vz+$L1pLch;-b0=-rpw|R`NeSx)nippgb7Bkfd zF+hIg&p}_~jMYhS`L{D(PZ908sUW}ACs1TG5aC9X^$H z#6P;0(Oj-p$HYJKy*V)q577;u_|}Ra7(^G+JZN1vb;9VlzT;nF{ ziCtEjQ3L}qLz^mGtn|nEE9*6)q8t<2h8G%s4t7+2G*=U-jMGGXi~#Cj1RN_2&u~hl zv%9UvMt>#G%$d#qPH+x!#$aAd7d|^)drPmVr(ES9NQW`p=;6#zm5_)V{BV6$FC{qc zS)n+p^|O*Y8)yQa>TkRk^bel)Tw&+0Ac&0NJYvb;I(asuh^t~KKB7`_7mQY%h#mq% zCCb4f%+53x+qu+uRZsmy%ft;|RrI32Pz;&uZ7XN}P5m&RCQFNx$-G`kROY|sz7jeu z7NIC(wgog&4y7pj0e-=3Wv7o3VW?|X46}K%nN@^`?GD2|If%Yv?a!-n(rrdqIe8%y zeMh^UW*8C7$3BmY#s1Z1M1)mL@RC~Ug=e8~ohml6FA6ZY80TNjQvo&>u-sxZ2Qy)e zI#NV0u`6~4Zys#q62xZk9zLkE#sKF(DvQ;8qKtd&9A1HXsw{ui5$r^F#{5y}MmsTq zF}4;pXh1NL?gj4|RicVg8Xb`_*cx0pSL2*_I{`dtobT!*d`ls-zME3$ZTfzO3%mVV zC^X%(-xOp^+U1@F|p;y}DtQ73h9fF@5?>WY}j~}TfeU|>2-k-L^Y*0)#!&q#_ z9Bqk7=3Ey&r3Y3Q9nS?j*&LLi;D8*7GzRaDo60> z8JK`?eZ2LHRnW2aWY;+|oioG`74;tTZH#@9@!Ch61!yNO^KTAf=iHqSW3v)ysB=;` zPKO-iLU)?aes3Ckvel>)8i77?Jnf(s)Ymg8oT?@t(j~CoUy)sIELmzbz3|>ut%Grl zk!n~;tn~kg*Hk&Z0}O6h--)ugiX>}4)_LPFM1H9|C|*6wZ$FIbmM8q+K`v7}j%i|` zx|dbMevKW}%xI35!KrL2B1CiXP7Ni=tMBT|$M~JE`mtyvE!l<2_=l@;VyYe>?@&2x z)zRMXB=0B-<)sa32VGK4s0zxf&HOjJbu0a;;!S!K#(AFE%sRE>Xu?o+l&xj9Vr_cgcQA;DP>k1EOh)=WjrHW<1aLyM289`RONXO#4b`Y^re_)-7PR~Q48 z!5eu{!rv|A7VB8{%Tg;0MiHHbidIwmC&yNCqOmN;Mtd0t;4zqtD7ivi^7&ZnI`I|U zV_E-GRInBrW$Cd#5}b!$wU?6ESl6V6Rzb|7LZLx&>4kSp3``Af(k^Er2HCwB%V|%m zQZ!Fj#SL00S~DJ;Pgm4Qvu3#Y*g$99!oQb?RK4K}+np|EqWd{xR5ouo%5Fwy81@|U z-=mLFf+|X05MbXz3VJxX5KVv42{>3mwbeFMV9Y*|sv(31h8Iax%Z{^lQ*9XO1~uiw zd?J2TxgDDp;*ipYPp(y^xsYva);DS*P2&^UhWY6=sDTAXa)5Ib%V1gM z?R;pb?y*}hK*Kl{5v%^A8a%-03|%yup>!}zzrtvox4Tklvw>lM;K+ZW^3x%@D`s=C zO+~vYI^!@N(*XOkNAa*rWQ>BYFU~$YPsV%i>-F+b~kJ^9?s=6=L}K!Y<2Sn1QiW zP~B5KeP5`l)+o<#mt*KwqK2x4C75mVwOXaQriOCz2kllf?V3FBO`@)f!#jNlQ)meD zTOF;<_$GLo{hYyT?h@JKuaGTi9%?ap(#Xya5AJKyTeM_iy^x%(W{T3zJ#-gc)a&4S zycCP+AE%Jchh%F!EWsT^?58{|TaEeis`?M}+_K^RhsIty>nlL7X(;9AC~ScGn^sXC zG-Gc4Eyb(d>Xtr@57gD43mv_)_|7UBE#(?-OR+~k78Q`nX3=9u(8W`2lDZ>J+^ zfjVyfudv=ly}ix(aozMNrn;f#%2UXuXe*-(hLK<=(*WPIS^gF&{ueX?UC&m+SJat{ zCvP$qD=up3mMY%rV$~Iq{`F?gu;3-Z!`_0ivIp-Q4R>~tMd7~&C^Fm&YN$IVJnthnS!bJH5?q~5Bf$R~54ybd_c=#l4r z0UQ>Cxa9YEvKy=Ac|NsfFq5O@0jiEG7^Q1jE6~q>Pdx!CWMYbk)b5?mvm8q85I)cTz*DqYL@Bs%pc}sHSvGK5&Anj&FBVnb_jI1-xpM zoak&}Ke5$Z6h_Z=Bb#?DLlDUtdZoI^=*T7>o~0&dBp4wTO%JJ-QjU*ys!}SCd>4GL z|DaV^oHhf$VI8fqW}_MJD06dZ1MfJ}PH*(ojoUUbXBs?rcs@Q+X{za;?!Sh9`Y~ILT2#uZN0U6Q3=Vw2H?cbOGs|dwD$7pu za;R^xjZKWOQJpx{{qYEQcZ0M}fMO8`9E=1TG9K?35A$`uvd5%*_Ym2Ur z7HX92Xe_V|bWl~0LdEgGEa+B?X_~lW#F(!PR@~Vv;t%m7zJ_2$8)LuLyUm$As+C6@ zTI17!6upV6&_CdMDVpyzQ^hfzLz>Q5&L&|b+_H(j!_KfYs@hYj4`S3ScFd!LJ2694 zqE>WMPoX80L$}k*Xoil|ZKy9bQr8g2&pOCn8?1&X;xQBWOHBxbs0?*w8E?WCdN+q~ zhN_DQ|6LiA6)?`Q6|Rnru>8B_U|ynk+dPn!)+06ESC$qA^O{8rhQ2HH6Y4HE zINQ<4aWI@8<(7XrzBQVpeW6*XF!t;v5sdu?^Jybaio;aohDVq~SEGumG&WD6}Kq%gyva0^A}umGp&6L;oc- z8qh4XvlgkLoUM2$*Rp1vSGnDPqK~r~dxCY{>VJ%dONcuxO|xQ-WiPgStUeJDG)5G4*3ntC_8((ztf)Awf3U_6>D)?re$(e;QTc@2+Q^V#@t6#&D=zgjz7_eZI9AoGR6g-L`?8`|r13boS6tzklxv3*!PBbP}mOGuWKo_(V zLnbse<6C`ARdFsDP&v8hce|_}?4w4|brmC;P$7zdNo`fSor`1Ox86k-Z%bEMy1wkg zc=0u}6T_W~!43Y)Y_E^2C+;oOLM>wi?G)id=!q_}GQ$C1TzMh60P>8db92g;>hGA)0G zW>Oydl2;lIwk9~M?4t~Xi_#T2(0E`!p>Y&chb6mcV?_E{Wy+!WmHS%;TC=!h;A<+w zBWC8&k8IiB=$jPFb}Db$ud|7WG_|52mVFPBYO#rIMC+ST^%;PP8d*t37P=BORYTPPWGul(W~;5B?o$ntDw~ zR1y38^!7AB54Q57h}{{mx%ODLr;Y3!o0&_Z#U=r>TlVzEOTj}Vu~2o}%mY9Wo}Pm( zQ#Ba+2Nb9FSZ|fZD)SRVmZDu8bBE(Gx;r~*3mqliX~ve~I<-=p_=*xSRuyN+K9XkQ 
zu74C-Q30o@IO^+;lPIKn(LDM{H8dKB)^fvp9%?MMs+`P$Oyzw(EnSLR#O&vYW@TNC zC6rR&R(d|?1TB+gaYcUxg97P(Jfue$$Iu}jUxD;9Ybd(9gVYvF>1&3ols zsHBR)YoB(0a$m$7_Vt4VFN6@oEY5$}dkx-}i+A>qp>K2!4(ysx11iG5HCp#Zv|7)y zR*pTy#`NL7Q+5>2suN~C7xh(p@IMq#Jvf%&0uu~Nug!Mk1eG)t|Ln0Si)!2@bpn%< zrxoC}3aI4oYChsc*;a2|NjB$MpY@fS^>t0~N1YSX{b!X=Z1(=GK2cSel`6%Ui_DsHvc>v0oMVKk6CMb`%- zI2G~ArB-6CQNL%aHi=~^aIB84b4(tVk^BA-&>;p7-6S|7QTdT1QP_z|6?L{}#li{?P{fm)dF@+KnGuNPV zDui~vet3&^Y|N644t9>TgHy1bA89RWQcdxNx~5CQ+}p~@og{#ys%Quz;@Lfb16*!s2a#uqOBU`A7XF@flG+X%Yuv~m`varoTa)Vd6O42 z(VpUeZ2su7CmOXpZnq%5?b5uVkoFlv@dE?a5$HgioS`?EsE+#2@q+}+pWV1bN*BOl z>d!)1U0mVWD+4TEO@~z&z-qeKUPxm?c}0KWu>}2%*N+H^916DqMYHIfUVkm_q+=E!zS@L5d zka2g`KDtiDc>2u4@K8tDDs!{5klNBzY;vZsKJC@<=wtOkN4*4@jKIh`h%2lN1=zO7 zYc5*hu=EEcP|dlyRWB>aiBeCDWY*n+stY!;jV+B>_e6EkY;Vxt!QW)(xMU8J*Q+1p z6Ev`AG2oHGPQ1}J51&drD|iwj^j1FhwB)}LM-9|LI)HDmkQ?(0;dxpwDy1glU-O~C zpPZsH$x#0?np(^eQ)PqM3~JAb@PdqgXHreJ-G|jUY@#Bls6WCX(a=0#pcn5wTJ4Xe z8Y;`qE^*Ec9jI(`#9@EzSzrjBh>P9o8aIW=O1guZtBUBY=u0=)Jy*2nWGzB`+i)tT z9aU81W=F;i5e$!RW>_=@y>&gN_U0S7!$U4s%LEFi1ZGN3>cS#Sjj~QrPIhf^7$K+2 z13JNTER4cv16rdmyTxSH6A!Fk_0+Zk{$SDIZkh+TeSkTLheit{UOwZ1wGF?_MUkW$ z;va^5VTfUsJdAJbZ@itqjB2=xkaDogKSwyK8CLuHV-YG-3wtS6IUccr0fomt zDwZY3=|ACPjLCC>gQ*-E^pzt#2TEbSYK%Z^E*g$Viorf#V_wf*&d~+PNy6CJ)}n97 zrp+uY|B|q;prhpCStKT3K`ARQ@~iBULLWcEHyj+M>$mEn9z;)A#s7uh`w7Q5nN}Q? z2zgl+pWp)kgfM5*g3N*`o}9 zXb6rV2JO+@o+0wF^vtz(2j8=}wbaMC^mv_TUpCO2ZBGuJM?xqEBi0CUhDuwX$=vEQ z<+0|YGp+Fc$$Lg78+b?i?6%fwdo){xR@(QdCDPUWgmC(j=IWyH0tE6w5dZ|;8A1#EUh~PnX+HuZS{AtHz)WTK){e_Oh zmsFDZ%}VxcIT{tc6KN+-;i7$-$5_*Jl|!~+oU@08@V(WPU0F#=r8lAMW`Y6skJfg^ z<>BUbL!BcMZ7hXl)Pc?31qLut-s>C;x4~v>xxrA5B26w79)SodtLAW|sFB+`#HB(F z4UDBrv{ty-FCIiNPrrzk*rEEoIf_9(7soG-C`ogGM<^*k9&m`D)63 zX?1LKPYIXVjT?NEGeYglCn4b`Yha=Q3bLGW%xJ(ICZRf-fhZAVqbU}Vx97*QK(v3bpfdiaVyqrR~<&_%rD zOPTGQ*L_tL>i}(4SD0Q%bgy)}au-Kdv|UM7LIrbY_M}rkd(YUw8@(K?kPMj)sw_ma z`#o<+Jd@9GUF7%GpgGP~2KbhWvMy%aMinP0O4-@lXBVR;!{SwVw%KOJ@jrt;p@!m> zzQ7K{3jV~qL8loTP+p`su|8Yf;9UY;Js7Hb=v4hwxnt(U=qfBs3pqg{tMmKk(FG}| zRfH~D*X`d}<QTOtz2oQdj8 zpn$5yXs8;KlBr^}`crtV+DNDT)Cq29gxz1<;(1Gb+YVC$L|JOWa~K{v;Ej&UL_1to zPgFI;Q)gVpL8^!csyi9z7hFnN&T*!f24JEtV9g0kM?5!e*}Hl6!%iokT}wp-K1Vmc z&25ZATyzWX?OEJ{;TE8{bKZ6dsWWLr|96RZF|@Gxg-_$gPK33sK1v zW{B)TEwwnbou^y7`>2v)FAc$48lvlpBkVzr;`gkVOa*slMf#QD<_M$;!~YZAP-#d* zW$eckkx!OX3#v@XK~*Tq9F$MAG*1{*U(dDM=^j|AuHub70upq5=fp&01?Lo28HCM)QksF@$BTBp1|wPR-S5)xzdMObpgb zrtk8&!bPTjh$R13v6uZoMQy7XSsPu`1YPr{0d& z9HmC+5Sqlya2l(7U5>~83KJbfJu!|od?i|jieLzL4&N{+361S*s0*Z*7auc9FQbmB zVYH2#(buzQx*Dv2X==GD&+*~!_{M(^ZGy*~r|Kh7OTC~^nN6!rNltuL(X^`^jVK=Q z=ER!`;VQcxFbb0e7*>?9Oq;8yu{mpxNTi42Z_f~yH5|)T$IuPN7c=QQF$!0bxkki4 zK-Jv514$I+K1W@cv~1^W5nq$RFVGNW)JQBvIlj$ZqLu#=yW4-95^OCyvd?HKq7df0 z?Nm}sp6W=>UEUtvL?r1j8pQT3LSzT8@J|zE`S+(|4wr2X!j_ochTLqljJi0yGVMbI zC#bkC5)tkj*s0TyJsadpl|yN&&dHjU+NOdKY86}lF~L*DC9&S!1P!r{E@%&xW0?u+ zHaw8D5kFRhsA1ppPPPxxZFx(isa@VI`k?OnpPxNIKl8bCQ@hh`ny4nCx2lb{Y+Uc7 zD|@Mi1!kyXq4^nY#WZgnqaw3B6|-sFxis4R!6lw!AdMGy(Mw+Ab^z0Riam2-bAJR< z^-832{n`j7Tb|P^^6C(WHcb&ilG=|2^rKuRE{iNW$Cs7B1})yR-(_@lHw|VwiSBro zkT6`Ps+_wnYxOXivIHy>pZdAGn!e|wI?S`bD;R;LY;zwv^Qo$R#%vevuWMVssz;NjdZawsS|tIt;Sh+=Fvg<+taesk~;lVY&E0UrwIL z#UUfbCT^`Ctd5~eV3Dj$?Q{&wYqGJA*`TjQ4UXzAV1Ya#PGL(JQetUWoo&L$GYz@ww+9i!|Di1GZ)tB$ZmjiwZug%bEU+xLiQ zYAx5WMAlMQsZmB}ZxYi$CppP~nx>n@TrtH=U9l@K-sS4_{p_we0>#g26wO6D6|$nd zM`&5b1x^typc8Vwai5fNB<)lGYc+woA(m=qG`82U7)G&yIHL;jkc%V!9I6q0tqKBg z*s1gl+(7|tu+q1uW!Pm^rr*sUP4tvL{+jn{5IfT(eUEy;?MVrvj!tKt5Bt?eOec3y zKgmIMylR2&<^h*eUGsbD{`@iA)aq~Mltx804BM~*u}&sFXT-OTt@a42$9CZ(v}Zx9 
zre5Mr&_};Bj}~FKUeR?Zk17z8p|Y^Q>4&R%g(@5LpD7Xz`oDTj^bW&=IhD*VfAPy~C>_Wp1^9;Uu+@su|EK5b+? zkxbk?Ydv(fW0;VbZ#1G~if1IN-Rd0_=E!nvFt0G{IfFy2MYzGsMt8XIwV7(LI6cQW zUllP@7v(H`33%{7Cqhq3|57dUAC+~yrO=gj$@wVlY~y#1$41dYbP075Jmpz7wGr%R zHZvFtmawmL>%xR^u5k&uOHdVEG}PUDNn8}s?n78XJLRe5qAJxG7MzVPbelnC8R|Lr zJKki#WZ1|wvRd$)*5`)q2tgu}4QVt#<~ZEs0AVnj-FW)Xd&-;@7JO}RRkkQPd&lPp)C*4Gxz(eei=fqmsO2-*1ydSvm z=$%JK4}WWPkzc8;WXf(>Ans>$z&;eh6fQ$3kL}hqe5(%y`Jr@2JDjP5(ww7Ar~W7a z?wz!`6V~&4B+pezP^Y~`tq^6Y2582!BWsE!Yzl&SsdsVfWKVTX6a}vnN%y6f$4z8< zYf_9pgAS-gz4RYc6;aBhQ*s5Zr=A$&kf?&!VkHf-iyDXRbBsUg>PY=POUF1f)g}6f z-*qxPV$SB;x(iyOu6I0>1nJ@efAt7f-lAf0@~`-wW_rsrXXy@84@E)Eh}b*AQ#2eS zjWAgZz0_#d+oa^TG!g&m*=mufr59rwyTdEU-mTS?u}OrfIr*mxF&6Zpka}m-jcIOX zL|_3EjuTZ^D?h(frHoyC$7ZNB%Ihcv{9%{VUsCQ^1x|$c5QxSERUG}*e-S02e|fkNH~x***{Z+R}OCY^#u55(rSuaTdcp)n6%PZb%uBZ>x6 z=zzH_lIH9B)Qbx+CLKVXjigi6C)w9n`(mfqM|f zlvn>l6$j`mv)kdCDl4GnfO)h(-GwjYN#B%$vB{rd4$rGPdOo14f|;m1Tk1 z<(ccUp3$7)g^UQwo?AGbT}j8Fpf~gZL-B{IZhY_mBvgq}Ln@ZiYwJ^Oux2H(g4SZZ zb(T`t(nk4fArB3w#!fLl@H8w(J^Vs5R7Hl~BfupAXyLArIjR-rz7$bi7 z>~OSwx9s_Cx=unR2KYsW#&T?pvj4gME&kxLb-B^p%x~NOe!HV$QZ3 zEuwtXk+mrX(*>-DEb{+YSL{#h^=b;MW;4dp>VpE-m*-pT1MSs%4RD%ey{;h5fQ3os-6(T&RNy0{LFhS?G{aZ`09%%j zyHrhKK!3AvBm;s-eMw2H6G8njY1UUQlt?-m8Had%D$EW`O*e zyb?_pql2mmHejv#Ml8{1j3pS#b#(vG9Q?$#wTw>302sC#KL>wMedHJwZuZMw&O@F9 zZu4X~W{9`?ui;gZkIiv{DlfO#;kvZhD7*9`OuVxis-r9my~HXJrTg(acA*m9hhl;* zP7SFwU1uKjE*=AhOKs&@Si`B1efsbXK$LZcu_&Y(Jo;MI12<^%b?w4nb`W=&;qc;? z?2Sk04He4Mbrut&DCEZDsk@eqxYoe zzzR{`-^^+lBV6)DsJcglqXIp5>M-q-+dbRprXqs7=pix!kEn%w%5}O+{3-IhGl7+4 zk2yZd*1~OnN-jETRKjt10{u`|EkjXnExJa3q9L7Ax%m3}imQ5oIl&bw%Gll51^*t~5sBV#^K&=gkmFU4bv|zt zJn7rXrEA+|EG~wJTXn}V#j`4?ESC*7<7(lbXasC5G@lsC2~-Lc=5*w2DiWHi=HO?# z%mc={g<%OD#5d|Qj+`f{kwn=(!zEE$BOKsE#=8FQ+0n@R!us zxM92OS8_6QUFSKegN)=btFy&@ltwMctPBB89W?y&3yPSWqzHWh? zX8A~amj8kGgD4o=3lZ4G;05#&7paQ6jK0CU!ScGdaWLkp$&D`wT&3QO##;N7#hY~$ zL&QJO#5#&wlp7oE9xMT+#3@Fy_f;uM3no||dB>f|nj9r!QCk-Ae?%8(m1jQRAgz)q z=*C!j6@8%=v16LZMeRSD`CO`$;vOSvXjXPPw?!uJ@U0_)SzXyFZ=pQeOXsYgF}%o$ zF!e1vg4}wN7=V8*KRxj+p%Egx4I+1NA^JOGh2o&RGv?SEts->Dd(Go@iO2r`F?u=? 
z&cE3wn5AAh`}G%elM*Pmc_$oK9p3SdqrUtZrTv>Jjg-hh6O}j6iM*JnCoN-LdsWXG=*|If4ra;L+XL9W1*kr|8=JB0Vp=9H zh=b}8HKJW~m)Gp*O`P8=W>`wa8Cl4yx~4zn2CgkQND1s(H=z@ar0q^w+Mb!>KdfTZ zNqZ%&q9WE=YoWP2j2@_m9Het+GlSp7h*!y?K>A(|)}!%>ouZ!OYbM;H8S&Q;>s4<2 z?i^J$WjCi0bwZTdBu5B$+!$k|^%Wo|VSze=$;LAZs&F+}JX5{B3C3MK73VzlT{6jj z(Y9q!>d%$P%8N_#0qrM`{0@oYXPTzUARWim5pzd4_gChpin^dUOPP4>b5kk)p$}nIk;Z9;NL~G@Cc3tt=>p8LRC^tn;ri!^*Ffa^aP2)5lhv{F7})yxE=I zIo_%RVh!hsE0IgAqJc(xT0+rOj|%Cc%&|PN%CgJp?pzY5olVGIDmw^!y~3lnjl7U_?At2^O|IPD)qJ|=qO)RyF9MR?-VD#R0rnMFUz@D z;5#MaITL@8=5r9|=~yjz$=%vXV%XVD#L54x40bNO@IJk=7|hRE%dT8O{-aEn7*haVYF9kY^9Sn&M%3xp^wM4M`pq!M) zKZ27!ey&98ph~#!iX_3+t12ArkVVHR%70LumE{>_oa8dCRHGD{A;|BzpBk_?+{^-; z!lG9lztJ{Kw=!ihO3<;)yg)W$SXqi!8Q(WV6h1*uiVQ{}gM;}_lnMm)OQJ3QC>0EO zc_ilmRZ{OmXLGAT_qhm&E%ZaILO)rT;mTcSk{GJ?vhY4v#r0oyw)%tq_BBfX9W4UG z0<~z_=7S9Nhl#Fqk_A}NbJTVQ(^$5pxiMW8q%ZV5#;6mT$U>6VXv0 z944L7(eN6hV;HKsk(w2$tMDza;(5e5YJ~uKmg?Xk zZQiN87A%iTxaeh4w4A{LA{znq=Q@8*Ps;YM~B&Y3|IA4Qe@m z?J=s&haj{KgX0LEo1}6iZ;dU=$1@SbXoB~Vs4Jc@LwlD+qEg^XW@QU9=X{z=?d&jn zAJw3~bir<}qN!LgDtL}f{yfoud;KSKZtfU9(M{E0juI@VqP~v=Dg`&MYxtaAMvdV6 z$etA$OM|(U@}Njy`%w^+Fhb=)oPA17#&-LRs^nDX&V(o0kFrKHr>H3+)&Eg+5c|#*PmDdpzFrJcVk?T;o*KkbyP~#-*w<29DbqHVmfCm4 zC|Y!(O4nce`{s||=SMm+@7;UO`F_v-TK7@CjStA{S?Us3L^G1X z3Pv6&d`5_)tsXppmK)=_8nJ%Y^D2U#sT|#clI1K8Ir#y~UyT|Ww}EB-)B*lk*7U7* zSG514MHJ0lXpSsL7P}kLLP?Y`p z7jDhiP#SHLOP6ym(WF20uU1D8>mBO%=Y2($%n1=N)y1K7aq6+I9!$XXyl;25Yl>Lb zI#vYK3wsh(^?sv-EG$>3iFDDvX5Mg_FI`khrKy^nV1K~_wDK3&_9pjjYn)vhaiJdp zp)af+BHjBq`xGTvhv-we$d2ANLPcAN$febRC!6M+unc&&snE5_V!f`xX9I(+uCfO( zYNX}x0%I4i;Zw>AmZpO=hf4BJ8cq-GSZAxb(8ayPQCed5@#y<7q5^2pL3E^33cIV- z6?TONa6MI-VKlAZ?K-^I>EfT3TTT{*3o}*Er#q0^mCiNgG1FXN!0{?mbrQQl>~H2z zqN~birBVT^B6e8Ms2)^ti$;%uXINokr!!rqmF87L#HgXvPpqO&C2n$@xjiyvvhGRc zL1S(J_&177yyiZCE*BCjxCNDm%m1hTP`sd@{B^jO>ZksqL*VF9Y7L|C5y2Bf)TTt$ zTRZ>{X-3)9!p=;IRAuy+@*BQP^Iwmro4UWeL1py{9^-v3?(47GjtqSv^X#uZOC#J* z!9BWWO`vMtsx*T4`sbTNF(`yaJBs$=_}!tdvV>m8N3d@vu`qz=w*FZ8X`3}zJfdF} z27_@9ozoeN?Z>(v_wqN>tLU!Xh(1YqEuK+98o(dHDjud(=WR;#$BPIWrh5y6YC4DL zrbx!&6-7F0yZ%S*QJ>9TY%_Sb{QM`*+2Y-W(^wkx`e4b?2bvA(*mD7kit9WFG zmQ$9mEFBMe#2++<%faRhb9s+a9eaRTA(ooy1bms&X(Em~ddu;@>Q<2o@+{}Fp~$I)?5KP(_o>nUKDb+1Dj z!AlkGp1cKbUyb{5S#wkrqQ?W(O==dLM0@Ce(74}xi)a=(@WB7ziPj@X!Drk9M`Kwp zRW{WS<(+2s{`^mfuIfu}#p~cI)$o?{f5m%5YuahBPST4JIyw;?tJ=WuHVN#f!8Ahc zq(my^e9MWkU(rn!hCU*A-@*SxV@kK)Hp8O1asZT(t{}EsUUs7YoHzVhB=K=ko;Lf! z{SCPuJTx~i;|h8qou%W{P8Fs(B3VsHnSnK%R4wqx-mX^BH5lL05RWGO4URH7FqKx? 
zQ~k4?59DH%X05Q?p>Vo`FhqctYT=~Od@2D((v@!5$-E^0hSe$Rt>WpHe+liAPiU!k z6i&7~SF}_7z5RE^d*IK_%|(XuK&^oAH&QWF$-kB29fg{mTxv}SrcfHqwhca{BJiur zP%nOJza=AmKT^-wL&i9}h~7)f&Bq42)IL@7=_qrW$c9En}S@bs5SPN2nI>=PH4%^aSr# zksnB$2`B0(YDh=nOMirin=dMg&Q7?shu%@MR5)j64eYEgoIrGE4s21TsMgCQ9+>_U9RRyesV>EZL+bS=qE1Kkbr5z(eCI#G)%;&Pt|Pt` z*q$vDgMB#sY@9xi&g=uY #IN`_9+XjR?Unyd4ARZw_vTyLesrFWs})=l7d=TAFqqq` zqY`JPF7j)b?w#9)Avp5|b*4{r4^$!5mRp^A+)Y##Tb;i+hN{_>xjWsbgDO@XvK9k| zO#T36{9UYlRFDn66uQY7MjWX8WjPkA{0D#G_?*YN9gH#l1JpZs7ESjt`ZXEBtjCp= zhPyr5KbJR>SJbzLVSHHEc$yZfsJo&%3!%)#vWl8XovnYfPFrEPn;zy}!~PA~*<2U( z7bCbV%?WM7+yI07AT%;VU7|NMm-^wXf1@VB489JpV}Kt`MO-U*nkUd0|1xSIYj`6= z1x11?$G13%>IoO3HUl|E{+Ie`>ZY@2MF+~?eR&9t5tn(3C)}W;mRqM$UCQ7FYPQ)d zf)?^jH5_zv5~OpE+C__GqF zk82>38Y>p(7O+26tV*gjc>j77;~vDr=pw=a{ZhW6vT6!CeD85@nx8u{IG#pQirzzu z)M%@}d`6XmF@$C$Dj)1lZK$@{EsCdvTJZ;T(Q-O}wN;Vi%{nPYGjW{1iP4bH8Bb`OGmWw+Tx_MT@Ne2FDb{hwzCkTjUMy;@ zC|s>p19=Q8UWJ;ZTdIJAZQxm%+RfSi9eHiI9<5Zl68LBL<#@eXlnm5Vz5GkKG8r;e z%;N89DIFIxXsdq2J^c%eYG#J1I;zcJkxi*?+n?1{V=S$99$P|Ov!8b~Jz(KSIieiT!H_kTcN#KcWOvNyGEZOC3=1O)WbqmTc zouA0#nqhE%v6q{-qk?--Li#CxCI0bO;6hY_*Xq(h*-a@IX(M-MS5Ag*uP{l1+tPae zoZ|96(_h9fi}&UL`KJgobi}>XeFpK$!A7$Etz8XImujW*^WX~8cW{0l6N5r%s_~uNFB*#@yp0?2AKty5Si@Tzg`WjH^ zNip3jtG24C+ERpqs1Xt3u>O#17}NMLo_mVgVYPQ|bF`i;r*lylCtskSl?7AM&8}@} zb;<6=+pQX$qKcV=BT#G7+}jTz`ZtYIZTJYCwvW*=6$^Xo($$U8c4}^^&~*Iw(?%1k z8frzFAvE_yETCz$hB8z!J4X6MV=I>4!|R>*RijndrC1?8B?$A>6s)@g{|^F#C1?

_9NG2;P03v-zC@_1oAECd zqT$w1E0ae$=~S2FoGS2U)19LJC)VferfBOfdgV@uG&RCk!K~+E$uAJ$=-?Ssgxun( z-4=vz4JQK<>l&{@qliGI)&pL4s&7^%bc&wm^;2ubKa?b@z;}H+ySQ_X?&y-b9nX|WAm-s&S0yIfi6U^TKY$w+<+tYDe46K`&-g)ozNS;F0Rsl{xk3+KZId9 zN>veeNr`*%cnY;A)Q(Y~;5m)Dw5(ackREWz)~R%}n#&zDTe(CpOx8Lk&?A|3nL7G? zT-^SIcJL6WUxarTHPM9-JMT3a2ETKP5wIilJ{)+IWl(Qw3~ITPR&YgXk-acbQ9bZ1 zGa#7DgIiTis{}7l`x&vK>;Sbjw?tbX2qvzgr&*sxwWn%wVZ^pPfv|~TD=a)& ztErdXAhz1EauUy%wlGEh>gJj>Abyk^e(l=aIywPKqLBHWNlG0va&ck}pp1`&O&)cQ zI*Vp%xe?*awSO^7MpEItSscR&^fiAanJTGu$OuK5S)ejvBE<(9ySt=pfnVJs^qbyn zW|lzG!=f|333LHYn}<`V1%1qA)G6@8qKMRm)AXzr-bt|bX(~DApseUl*2g#!;rxyC zAul!&zUWUIXn?M&F4!Yrmp=va#c&PZZ#~rTHKV=DsZMr62uEZFLQ~*yt#q zP<8KjSs(Kb@gI$)i>M&IOJC4Ee5NmLoqs|sOU*JIbEZqjnt>R%RyMcQ|4~;p*1ja) z3e|=$@q@%%I`J=c4?WHOSq&v0FUV<6vlts=Qc9~&#UnF4f_M40Q73f(^!FFh8JqIv zXv|VWRr$AhURlemY0yNwjDF5=QMl)W?9=<_!R99mk z2kcZ%qzP6Jx+)exSZB*u@aXpHdR&7q2jYxDT$j8Uy7G$0#~uZzuZ2q=Ee8IS90(8t zwAYh?(4P#sjX~Bf`V6PNK?S)9G|}R|NYf2b-9V12`1WuoI{7%83&Zs8Rjb5!y^30g zj)9DEhTlqd0gbLRkWyt+iHwP zR_rn*)XzI#kjp&gqT6&S?=2cg@lNMpb7~~KsvI1vE6!Z9y*GG)xh2BCk|wa?I-Z9p zbrnB`MyMj{6ERY?5IN?~2zwtW@=S3@V$2n!DhMNnh&heX#r1YR$`#a1qaPWbdL{f9X%3dK`8RG9^r73E+@ zkCN^2xty2cAJ(ErY_5N}EY7`hPpK&~lIQU$ z?!v4(b5}i?VyGg|rmZxIE^`xJq3$!nV0@VV5W$jUaTG;^j88=}dpxfextiG)ep+F484VGCYVL%~_cUfixilZ`3eP z*U0#*E-L81$zgmxh_GnrjXa1X`W*uf&q{R*9~}&PKg?_sCk%N;EEETz154cd^s3+s zN?0^h%;&fCIZ9N|z2kU<=qBrO7jFl-O|+%uqY{j)*uLs7(o&DQkG#}cRwBi+y8}FA{Ri$!o7X)tyjcsnE`kzEgh<&-*ZtL1OO#Wqyzz)e#se@$+B8vd4<|FiV`{ki%7Ig_G_<1hG+Y3ZV141SiD zj^@ck{5TVT#LnGJD%u=>%HOe@zjzlrXYs#utQJk9R71vOVkK|>KHDl$_=ut!G5KGX zM%aZL+ayd;ajIP;e`9aDg0=Th3f9}=>qv>IIAqLejLLsHI)r}W>bRj%VfpZtc6}>NxM&T*3=nBWr?j`Q^Fhqai8+Kv+F5K{GVd>OV+ze9)mJXn9 zkirZq6_yT^>y38nS)v%8cPt3_1Zu-WsW1MPjg7kLvK{vqZ?GxX{e^d3SQ^6K&FE2> z;tiq;DvH-|FFYXLwh6_`qErLl@ruWY;v$KkQj@SucSRKwj@NIZ3Kpx4Um~7|WkLq` z;)?!+v4CEp}SRQWvSQ#$*)qB*|{*fIUKEU@N{L0wvQrotEGne^j01Tz1eD zoMGJ&-(kASC*o^s9L#l-ur$?%N@RVAmZAuLTMnTf)P+9@%S6I_5Z?()(+`A&7oSBn z|9^Cbx{D78%q zdI)GVCf`E%GpG-oAVl^!TAKPAjm+BZ%#o35X?VKg@pL{c(_X;4$b}sn&uJbnMhPeG zA}k$C>e7QDyjT9RWMrLatHPNNx%Mfe7nW`kD4)^)Z= z(OS+HF%-{hwIfc{?m;7k+nrUA_S);Htmv*z#9huy4@=MCl&?`mzQfnu-T;5T zD3@gyx3$}HML1mTWhd_`II`{hS&FC;gn5$V*zbgAs$kD;Zn~g`0k_bj;d9Gx>99Rf zJ%k(=RfX&-{@+O(CAks+y#}@Rx2Arwr!_|j_z&f+hf33CNcI$-j*#jzTzd(`Df2rv zUV=VWrQNC;CE8ahk>kZ^@4Lztt7xGr4uTbuYpI!27X%M7Z+D_1!8zU>V7JeTkp;LF z&xDW}w%`h?k2Z`&YL1O-Q`qo;5R(lpKaX_U$KW(^7Isq78hq0k%29(-=5aHshvA*G zfs1My4-t?B{?|-#$xIqWk1%C_Dh4NreRPu)B;+W+1rp*i+q=X8ZzOT>G!O92<{v}R zxUE9GoVRl?D#o>BX#n+e)l*D{Q7cJL^=iUUC{;%o0tG5?NWPWR#3|V1o2;@%+eH9o znH*0aJIko1s23b0zsuPKk75ILu;Uo}C&_@KY=y2Wd$O*_RrtL4ilfyqBS2ZXFU39o zZh?v|M%;jA&_&8$z`Kcz@bom@Q+*B#KR9^Hd|>Fdd^{}EGtq@xUPir~%hXPMmXfZf zss(nO@w3QK^{I&dgr&bF4GN}!t^XvRLR9aoF!dN*Y?JN?ws_c}d1`K$qJrX9Fq{ml zi@7h#-BxwCs$*yJN>QA9V7&4UL^cr9pe$&wuugcC;hwFuQI~&GaWo%H>`rViT4-bv zRu?1W)JML;z+LO9=m}c2(X3;Lan#K*v(AvFX!|EI%W0t_M6*Dms>;1AVGXArgB$2; z1c)Yc2XzYcW);khkJmaI=n!H*3N!aRnkP&g;RC-L>bQd+P%*181^v@#nSMZzd>v?r zce|=Z)nN{|I3GRk1V5yN`h)sGTt1~%yShkau64=o;-u)|YsI6~W;iUSHyvx^7V0b1 z#w?c~xUVdq8sq3)9!p)VJ*XGiCdR2vh#p>mNN&%Y<^)KKZn%skTu2TKv=m+%()J1b z@$%ZSMj9)z9{MvFfDV`q5Ty#`9MUmlt9PjsJ;!WX(EC}IhK=uR*RWQK`M&Ge`rd-S zku%H%F`SvNgz^sc=KFM5{}MPWb}P0k>j%N!Y9OJZ!LG^`bOYG3cz>pyKbkRQj}m)D zc4CqnPE~iq5HIY3>U|z7!DOs%>TSMlT|mjyRaIISvx|bVV=EnxIo?*^-v zsBG`&Exg_SQhldJaY5Bq8CI-s3|Hq@U<4i&%P;gQn1ivoyXh~%G?ol!1fi5dbT^N? 
zjrT-t{}7sjfvrQ_Tp({p=rAq8+LTxNx&M}WqCT+qy8oj~^t$aDRZ;zDpQTECM17>| z!{ch8w>i_;raszD-NXzSfCNgk_U}3*QpES_ANn3dK8hak&qfqAwF~Ly@KxIJ5w1or z^kCFeol41Y@8U!eyJcpeT#>x)}C4>O*FpLv6i%LDjZy-Mp|E~ zALJmk%w&iSHh3AAPzAY=n(Sop5=T-e)fuSQmOd8))FTk#$LHJdKwh9@tSYo&R}3lt zHq{29q;Oi~4Ntu!Mq1xk-#ZJq2H4&>)-azDV3SDLPLwvrJJs4vQV~A-QcaeQX6<$v^)s#<#C-lU4Xl`}| zXIN|0TmoQ`uqy{ndv9~++}V7aF38_#xr+BSqr3bU;AtC)uyox}VEQJ)B;U|4_8dD+ zkIDKXaG9cY5;vil>Mx-<82p#A`42aXTys{0PF5#$2Hb=p{HL{%dSTl~Z#`8s5ra~q zjY4L*NIF17m*jS%APuEocRf?nl;*xr&L|j&CSL7NQLXq1HKv^k`|tR7UY$1wnD)Ni zpWHH=H`>qe@8a|V1lY?sZ@e?v%2`IQjFFUMe-t=_YVZVWqE$y1rgrvs+9T$>M?eKW zwGZm0^fJgsHDKj7YL>sgZ@rFkt|A}O&zeAUaR7U$sxC+mxGZGZZ>aw;Ey_8w4`xHqo90G_ZXtnmeEA}5G+ zYd19{Y#78FW(EqY=du*l_osWljS!y)Z9eYr9NQbFdaIb?DG-6_hiJ?kN~giSZm3AU zB?eJCRSO<*LKv&5|D`%qj)wd0sIh$A938DUnGX%bIX>WtW|e4b5+&(hXf3T$U2;AQ z{;QvY&|-c&EA0=h1(0U>1~=`OCIgzU)i^83f1QHJi|NYxUs+(2LDKU8Z@Ra@z} zs!q@73H=SO%>$?@t+AWHu3w9=47C9g{v2QM z7&FtOU9dXSX$M55J%v+#N!s_6rN5x1x+MKWgUKtdQy0|`GCP|h{3UrP&f%{5pOdbS z83&E;sf@jg-i;p=Z5IWVnkX9ZcQnU;K#mFi1=s!GV09wd2mzZOHDP4%SwB@($ z5cNS-=iyL)v{}$7&l7`YDM{Pm2pQHYjx>^GX;93$*5nk^%yv@(JoOdEU{#5V&>C)K zr;*`rt6S06PHt=*p94`p54pL;l}T9>yocP^LTQvgy?r5gTBI1qX@)5(Qm$%4)hU^` z8!-~=_du=SE?HFA@=<>oOFMkOYJ#2k zL-etRX6H+IH=3=!qbV}W{MlvK!@RTvb%%~BpQw@7o6m3+)j~JpyRwD*U~C>|s44V0 zwF-8_o%KS5yMp_G=pZn}N~EX}u!Dyv4xIBjRfgl_p;cVL!X!=V#ZPiSMkb@NZf;ca z9F0`RMQfB0);7@Sh`4DQpeuX4!lyoFXqr497NEZ86oOOn}(|z z#u0m}x~34&5~E)OqINtv1{f;z;u?^j{^bv)(XKOzTs_}yjS%II^naI3s(p&J30fb{rv6D-2yswYbQOEH@)d25ym{sr&wMCGmn6A&M;Fm7i!yQEy zC-Q!w;ZE1g{aIbqKk!)fwck;Ou{*fLDIlkc{3U^%QYyO}QGdRRPrK0ce-hg8P~M_@ zx;v%PZ~6&MvbT!id|2ecSmy-8Xg*D)ICYwSb^23+e^~Ac(ZSgart+?N*o9z2N17*F zJJGa`?s`VMVHT>vq;~|IBK9pzcphcxVj#`CG}wW*>MS~~+d;{jaBFf=JS`74p?3U% zH#4`9GeBq%jrP1w?Z7)E>K0;+{T%+&HSxa4^1t$Mq*af$+E@=wCaUiN-qt&u1nL z0TIcc&|`firv{5EFa1s#@VP2riq9379Z*3aD-sYaSkVS=t3|h*)d0*`8O3ePVt$#4drK0wc|OzP$8X0KhOIFx;c}&_(K`Gei&m z1~sMIXp)s|E;bBP_7juE1->D|olUsH)Bc)VNTCehY9=DghzNRtEL@BgtA(z`n^Y(D znl95B&xcV6@+8q5J(?!k4}t?!jNo8(5iiH62I7`FND@v+EC1(!%i7`+e{cMbQJr2Y z$dAQ?urw-&fhjTEmHYbtRAW3hBcM8O(_Q1T9x5(!mR*s)u^t3d_2!)0^aD@R=k!|2 z^ZhP|dHcX7dg((|B5(laB$b*5*YOk>)X5w$##1{gDZl0wdZ_VC4^xAA4$VxND8r~G zf*)luFTZJ&GsUbO=}9ouM|>N_5x-E7zt275XdMN2wlYmrK00F+vQEede}5w)cO3*% z@Cn&Y-{euYOGUyGb`vhU7k@}4Ma8p2!H?hKoX z&*vJ!AKhJyC~qRaGFBkQ+r?duuILf=yZlC+6K|+BK(MIsHvEr~l%z{~tJ-B@tGbAK zzVnFIB5NI}WtMhPa-bW%pf15dyjwjqi$oA+OQJC9idEMrsD85#(W=~ex~h06)0{2B zH>$4L!f=AP^paqQSwQ~^DojUu=hJZ>4a_MUe4p9E*zC&h*^2;N=AAgCmMU_neC&rf}o{K?KR9f*a>CT zv4_z?6_wKlh)|CEQXTc4NaunW{-U0_ZPC;D-HeO$_OT)K-5$M6%+X@n)dtOqek9+-rclT%nJTzo_Cpl zM^HKYYpbe-8VlY?eWAxCWD(LNyqdEYm0M8_;pF~scKAa?qpmyMFd9~dvie&X&=IIEu^v&AYKmL3O*D| zv#)_VOyMrnj|HOByOI2E$lb`pF8<8CWr$U1g%-7geesi*;OrB)jGfD0@D-gZ{%8Ja zs_E2F?iGVnYxUIlTpU4+DMf7L)d&IDsjo#}aezi@6j0L1;1RkdZ;GzQCWzQVu1lFt zh1irVjHh+pWSxi2FQyvvE%SoQ%!mNHn`(W|YmN1)FxTXr{7SUeOZ9Z;V|4~5Yq`2f zJ)DgQ-S&lhH3D&i=N!#`(UrNq?5HE^nDw#QW7quirVt zQDB;akh}O9BcMl#ViUwM z7hfs(is*{TUKN9AEizX|=7R`i_^2vl?~i@ppG)JcC7djpv$RWsOLU-cejCmI&JukD zI)?=|W(&7$#bK(ceNbsA4a)ldp|+UpivmtUC;qwF%~`6&8|+aN=6 zTmUzl_i+eIqv9AqCs5Q`w(b@;h%CS>`*fKooKN?SQ)h9u;`8&rwG}TYKfM#LbXvf zIh|a5j8XuSt5F7+Kv!7hNr4_3@y7mGgt$9kwuW_*Dxw}Pe|TzJ!{Q`e8+1|9^U&L& zN=7Wq$sKdEN1*D1%0q~T*g2F2Djia7>|~XO7idpQxfcBRf2?H4^ijenG~Hhw=0oZm ziq~KJcT*|M)<_odx!uiAU21_nA0f$F+%!;L|Ba(wV18y=FO*wNRy`qYJ5nA15W3Py z%B4P_4R@V)LJyp~?qBU0{x$wjf(|!HOmy)hwKCLIKhRC|eEd>6&v5{WmAZ|rEMh}% zQIot*>&-DRpA=`0c!+YvBJ4C&gH)C8*LP6$5LRqYnyBXJS*M%&?(V%@~V!7Dj4;pr~ML4V5=^mX4R_c?z(RLY>( zw2H_!q|ykLd`WN~VZp*gl&=jXh-A8{SBetmMw7~l@ww{|?#V=`%6AqFZ7i?`+R|i8?d3hi! 
zcP|YWrR-O*?3F0o{u?~S9!MWjxPEEJIJ@&&I(2v=#pPmSBTZ3zxd7bZaGfO&Q8%?v zOq6A5T<&H-ODAM(UV&Q*si?Zdk~lEGJ0gVxfCdj0b6ivxKqHpD{x3R;F)%c>Q{QVC~--^HXoDQzLZ_)N!EHpc(ZJK+r83oj zc&h)XxV#6RR7h2(M%;)S(=Ii}nFkrj@OIQ|`6LwE|(Q@qZs3`L=v=R7$x&yr&nI4@~N$=BCVpooJsB& zb^?XmOQCswlOj1C#=zR6T30e=+U1|Aki*#Kgw*tXp}d^;An}%}uZ< zu%t_7=N*8LkzoG`8$c<)(_#Gy^|q!$-_F~cv|BXG`I_^0SKje16UFE%f?4~qZR7a4*u*cf(qh92;)x+OGWOH)tI{P7xE*Pv7tl@vC3Q)VOsi8s?4Z~$w zmV61E=?6MEjuwh{L05MQHzhmS!8ZI2O#gDYo<~I^nC{Yy)rC|U-fUZHC2Y7D9r+!Q znm%Z z8@#PnA;#;5oWCFxe#$UTqEGEzT#TAfeu8Ta*Al6`N-t2Q$)H-E7Lmv-p=Qj;qt(^~ zfL~jCGZz8|xpM3CC7x&tJxWidZ-NVTqV0UdC#BH;Bq}m|jkkK3p+1S7ti-PhBp7D1vf!lKwtyA?to#yg!TzgHC?u)=+ z!+VAC*lXxL?@A1Gr?;WIMREN}^8U7cR2+}mRAtFUdXrqnQ}g?AlDWJSM)`ZYQm8yL zzBkK^FeuJ#5)ve4+p&nup4Nlo<9d`ef+sMk+_r+##K5cu|GLBZ3i5sB>_7Ri+D~P~j{+n1 zslE7})~g5Pu|7jCWTz33z<9?IL0%Bn{Ya-}xvpZ&HN z#U)ab`B!|rv8cmV%HMeMdekwimj4jL3lV!r=@Gs%`F^P$FXkKfDGvhoArh_Y_N=N_DC_qisCG@LL{G7_kzEwFKD1O(^ERymqXR#Uq-6tbQhyz zf>G>zXulIMsWd-P%izmDuW^W zTMQn?DfF|N2GM<&PUkH~K6D=)m81PjXrY`Gh@cwwNN!DIQHAtmUyj;^lwnn)BSlcj zQH{iOJrVY*hqx_L;SpUm>qc`2@(J5qo)1Gm$}#ynX-wLr!|AzhVz=iRBE`J{(daBn zHBS_gC%I4PI$X*meB(C#wTelN!*J5*Vu_`(uNc+RS7E)~)Kyf7pC3iVd8fFiF2flA z5VVl@JgmFGYds0`{h_;$s%0id(MBq+j`Jmd{?5x;ltB}6i?mHsEvUH|t!>19U*HTq zT$19whXk4fg`kuFIJkj#Q6D==O?R5o66y)p+MsKJUje9YJn;<(djskP$;;qE=M~_&G^KgB z@GPAT|FxX<(jK#bNxi7F&ej$vYY3Uv<#Y)pr~|-`I;aj{^4&l|6YPHQb*hJoAT-e= zYX<)aOWRzHr`B9w^ubpO&-W4bk9xf-Y22pkc2Ul+x{Sd?TPoSgXj_cB8260@pjG*y z_j)o_y=@!>)O6PovbMc}$*7T94K}N=_h+ESdq@|*C)@a&*+*cg>x-VTUVE(WMnCE? zYB2Xif(`i+TUl5p40mE1CE@}vppCf+h3JkfO^Hs6T&$&|9!iUrsu~wng15sY5R|3| zJjsS;jR^FE!B6s@(=*8u7yuXSN!8>FK+XYqonNX`o_}08y#?wUjFzb#+go7Zqcuf- zBrY2-;3EGIIn5e|$?+If`5qOrY~5cR=%Zz&Dd$Uq*T3c-HC zeQCl6tx}Y(hKUVe5F_a);gJ(`Jhh?192CnjU8foq;Pbs;Zh<5J8>bTMDUKI9x9C&j^K$5>^BK z8Q1U)5(#1k-J|-#BN}kJZ>+vXE5$MErRwZep5|s;XDZ=-LoVYVij|3oZfUC!SHrN* z#gU{Cn>!-5sBQWEUvKzNpy|U1zn9W^sCA6eXg!XnmikEUS3ZiP65I^9WaNk#<9lV9x&@NsAu23f!YkDI^F|>DAGnC{4!K%DCv3N-_ z+*)H}Gz(P(7nx*Vlx=ART89!;j1jGu=qlcK!Au9KAP~H7;3bu#%esfZr17P(+1?9g z7f!c)znR@(@x?MdM*0xRdCKRwDIL)-jXqRLG`CCRk#ESw;OyN&l~#+o_7G6x8t{Hb z*;ka8UgxbhTSURP+pVJX`VzEC~jGj!xmcDl+i&qX+e05`qO_0jIqs=pp2 zS_a&--#ed+=TBeqslV7~o{00ep|ZZoe1{HLw5_P~nikUqmE>Q9EBZ*A`b#mAzxPbY zuK>W>SF>RnDFXEY^Y2LH!ct`mT9M(U)wGH4^B7eVVWfVd zoysr8IR`*a;F?Ym`!2QcV*`(Wpr>dweZgDlG_a$F-uL(rfF=Tz&Q8%Km2l46C1|KR zqJJ^YnU~9geq;gyZ@bs}Q$#0Pqpqsv^0nE|C128bFzxTbTFOytHHmvMb9EqgHr+F? 
zyFlCk8Y!QPX}Sun^KDWE^>V9%N)o^W5;WYr7ztqT=``7&#?eudY&n1cEvIR^t?WS<5g8oK<4}>-Q5Q6Y0plal{B#%zc0*V^R=uSs zsXW9xuWGjpP>>gha_WlzoPZ`enE5C1ov-P zjYQ{caG*C|n)6{P2P=18^-4XGE>beRKkY{sJc70F`e*AxDGh^T)PChkc_7&b)WIFg z%iL2**?nA@%do~L#j6okGp@zm2TyTpo8@#_j?oQDlwAs`3+3sdi1Z{j5bG#nj*f6!s=L>3}S+c*hjOOu1A40f1|}jGk&%$P6jXLd}Bn5DT~^!&GjXL0j~EIwnMIBGtW4nb{$inBLB7 z@HlI19Sc04OqIC+&5()25};=g`uen)Vi22|td_Ey8sw{?T6#16!+wMW{0TGYPPDe2 z8pqxBM<^mn;SvC*|LKL=30%piy6|gQQE8drPMXeblD|`Ne2Jf>{p>yCqp7FiZ(`(x zRI%&Dq63m<=oAhj&2u)2D*W(5DunbCrJ>4VHbkkU{lccV`3z*BC_R#8oNjKTvm_ty zu~RMc2iDI`R~4*$JXK+YLke0|tjYFXzf*zHK?fh(E!InNce#Fw=bD*|O!F3d)Db!d zKK&5C6Q}7eld7}9jeONIxoiMVR zN5H=^Vc0nhhqu0^?(@*(BzV6SW<%gfeCS)%+}#aEMmu%sq$f7r_Zm_`^ME$Eo%|*F zGL}vKLb&svfG(g>lf0K z?g8{Cs*_f3exV9Ef2tjnlUiE6px2i}Xyoz_gL?L}o@Z@0I0KS+*qbf~crooy8BA4V zzgxzxVNrP#qxN~9@G~4$4L)xB17L@Bs}`pd<~yDe>A)xWO;D>s(Eh`a-Xm&*l|N}m z1wj3N^qA(FR~UKFp32c-UT%s|!|=yu0kUv?WQu!{PRUGeWIo5-uGfK;%7s3le#kcb zf~eFBwJn;BSHt>)C|Uu^o{4UGRXH#&o{Or2`i>O~<4`)|2dZiA0836oQ_&mr50R;B{!!X~98aa(Hq=+`b?XO}zT~1NGV~qKH`g=T=@qmS z4EwG(PL1KKu=LpVCuj;q`QNV+s}Ip6u1oa@BSb@KTo%f(z$@x3qxa-@uFGEJm0oTN z!n~ewEv96oct0!#_b=7AuyZsKeHkEYMG$=htbYrs*ZWi;7Xco`))~R@vqB0`m zR5wiDka_YTvz99B^^f#W^W2i5?`MA0;+0t|TCmGLZIvM6H#52Wbn|$I?5>Mqc zOL10cUdf>FAj`43A#Q{bUa)#-yQRTYm-3rKCK%eM%h3yd&gFSuBzriOunXP&i$)S= zG4U7F%^uo-kBSbO=6t0|(N?WJDDoODA+*a~hUQqESXOQ%!;?{X`aZxK6 zWESVrN1dH}z--(8hjCPYsi%_Bh>#Dyx`wk@8p%c)f!WqrKJFFglWGkW=A5#bEq^9C zvJfaBEHljODnQ4)O4LUec5bT!a-AQ^aav46{M&$K7CD7MTQ9iNd6I;wjrq#Gz}rigH$~c?T7y(mXS= z!(5KKp`xY?m*#kTzAt(^_?1=9Ym#cCdYSe@qT$Ny1=h@*;WUY(qw>2~FpxJkyaMw5 z2XCXZCTnijDM|txGejNS z6{@Txo44E?uuxZ=VN}4bmXiy(#q})yKz_H={ca-&p2h#etM(BAkyx0rhe?KpHuA7Y z)4_jJPqmq*09cH6=cpfHP8Wx!Q|>?;pMAlis7c}pR3e%Xod)sHlz_zV3((^zt~@|h zxdJVa<(Yll4CfobKBBc5yS%A1LJFyY(YIwX++S-u97l8LSNa|WBAbZH@V6y=v%o15H}4<+?=j4nhL^i*u7C{;-01wfFEns3H8z}_2RJeEbK#_ z`8^0KLT~(@7f|0e7;tLR>#mL!ox1abT4fRC; z(NcJh*=^o+tJ96p9*i*^2lZ^IP$NhGn2J0~iYq0EEXX=O$HRdWuH(7>p{0Bb9$_d| zg^BBm>{SsOp_hBFaeXcXq&AC=={j_YoAYP90igVIa`|tngm1D{1MetEd?}f37qPal zh^%-203ECgG6fXf-}W&Ah=hi7KfcZ4S`nz-=GYFyJ)jmeQSOoy%IBbXXmw z=JGDYv2Yw&@_SMbB;w3f+7sDn);Z7kG}U&N0%}fF^JSUt>-nXzx}(KE6ppoWxMQRe z_TIn8`Diudp&)A62U2Xs?);iA$v>n;(b4>Y+KaqhaSQQ9Ow`ijxth11ggauFuey?w zQ+1Vn$mT@wiuue|ZpYau9!GhAdO1gce=BoW?>Y3%-h#4NN_1c98C)8NlG7W_@27>G z?&_hM7k+|3cEz;!VRTNYu~5s22IJoOwi^p8Sk118Iq8%?`a&g!ib+wofLx@!Qk^RT zSmmL1k_fDJ6SDup`WofkHS|LIR95poFBAHdVDc)~_4U0|rOg0J6`vX`KhYNI?UfI- z_i~CcEJ#6I^;lfj7J4)7w(i6Qd9nMS zT1-|-18$Ho#49(nh}*=(VR-+_^g~kDJn{a`z!VE-Ih{l>zB-tNOHXN9DAxJ{XFi`x zTXpo~j9c`;eh_QAAjK7=&&=?YipHTgBF9t%RU#Dcyo%0MABn&^_t%|iy{rW897BJE zKjrq=YjYXsr>C<{S_8*Vw1P4Xv9@3Bw_?G*YiD4v93p@b`+!fb(mKw8wQU!DYCQ!^ zrv0x)@$L*@hR;G>=l(+J=Cpp_ze!D}Mou_1Mux-wp<pFHvpQ<5$nZ|O7N;^5FpPm<4t4d2zHJ&@sDz`a!R6XsVO;O5Wa^Pb>4 zFT5o3ISte4e1!XPvFInTTg=_KqLh+Hrmm?V1Ki=X502-aRgAmvLT?GZ1Hl^ad_`9{ zo1H5T(0qC1PRdE8VU;$}o=|r6sT*>O0(vylnW(nCtqW5mZM}TpZIG;yf78_PjEbgG z_>^~te$6<}ac)cW*w>TyK^?QICX=AkQL>>f-c%y6&>rVQrg;)y;vT#PdwiSlea^-==mY+ewPHcywy!WM_Cv7$aLdS=7$j z;9fOEOoBwm^9HgXLW8@>kLN$Z^Uq4D2Go? 
z@C%1vzVg#THxE67K`TRh%??cSqLR@Yp}}gIl^xTG=Eg05uewdfTlDW-D$;6uGleZmDzM|X?&y~!c#qG$92 zh~hr@|ASr|n4Ia`%i(?w(I8i;H))8$T4oyL9pWmGho-pEF93;Oi`=jV@mbwdZwo55 zCz3*oLAZ`P@8akCOJTKG*78@h#Q--W{u)cOs3z~Bk7yb-S1(Zc^qJ}JU6fmD2C7;= zlH1(Hx~!0U2tT(oEHj?E^9LBLVZ#OXqp!g+Z}AaSSM8)+=3|sDcjUq}S8tW=sZBy( zQ6eufe%cu7$+PGQpV4R-;b3@&nJPUxwBb)_WN2u{IhB)dhdNR|L>5j`GfCnO6ykw& zo$8oN@DY`eZCss|&$~$#5qPWXtmdr-E0U?PIf7#P{Sdf&(lS(6POCC>K>TV@($btX z-<$zAh*9mmAFce>%WxT5;N;ii^i;g;f6goPBzH>CAK|=?q7UUB7DpubWeR_S1!4ru%zBXnHmIrMFCVnqi!9T=GbFY}e_8DKOqJNw203-dJ z_G2JzZp(SO{j56QCbvunMGnw5%)DHwEAqdL2F;mkuy;JwEsa+@rYg$S@N|U!4v`~y zIdA$&NM%;x$ztjqw$)+!7!G?yD%KitA6?Go1ga%v+!nf>3dk$dEo%;V10dnmpb}0r z-WpG}LnG{p`FNg*LVt@w@W;_6&P#KQ-~G(Ipgd{IFtQ;xePK$lhqA;!X|yS>T138} zK;XO|wgnA#G=~p_Gr1mP9S8eJQ+q&$61oL<OPJMvS?G1