From 1232ea21288c24a8d08eada84479cc7b6253fbf4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Wed, 10 Apr 2024 17:07:30 +0200 Subject: [PATCH 01/29] Add base class for WSIs & `openslide` implementation (#365) --- src/eva/vision/data/wsi/__init__.py | 5 +++ src/eva/vision/data/wsi/backend.py | 19 ++++++++++ src/eva/vision/data/wsi/base.py | 53 ++++++++++++++++++++++++++++ src/eva/vision/data/wsi/openslide.py | 46 ++++++++++++++++++++++++ 4 files changed, 123 insertions(+) create mode 100644 src/eva/vision/data/wsi/__init__.py create mode 100644 src/eva/vision/data/wsi/backend.py create mode 100644 src/eva/vision/data/wsi/base.py create mode 100644 src/eva/vision/data/wsi/openslide.py diff --git a/src/eva/vision/data/wsi/__init__.py b/src/eva/vision/data/wsi/__init__.py new file mode 100644 index 00000000..e9ccd558 --- /dev/null +++ b/src/eva/vision/data/wsi/__init__.py @@ -0,0 +1,5 @@ +from eva.vision.data.wsi.backend import WsiBackend, get_wsi_class +from eva.vision.data.wsi.base import Wsi +from eva.vision.data.wsi.openslide import WsiOpenslide + +__all__ = ["Wsi", "WsiOpenslide", "WsiBackend", "get_wsi_class"] diff --git a/src/eva/vision/data/wsi/backend.py b/src/eva/vision/data/wsi/backend.py new file mode 100644 index 00000000..cecceb47 --- /dev/null +++ b/src/eva/vision/data/wsi/backend.py @@ -0,0 +1,19 @@ +import enum + +from eva.vision.data.wsi.base import Wsi +from eva.vision.data.wsi.openslide import WsiOpenslide + + +class WsiBackend(enum.Enum): + OPENSLIDE = 0 + AUTO = 1 + + +def get_wsi_class(backend: WsiBackend) -> Wsi: + match backend: + case WsiBackend.OPENSLIDE: + return WsiOpenslide + case WsiBackend.AUTO: + raise NotImplementedError + case _: + raise ValueError(f"Unknown WSI backend: {backend}") diff --git a/src/eva/vision/data/wsi/base.py b/src/eva/vision/data/wsi/base.py new file mode 100644 index 00000000..2110bf38 --- /dev/null +++ b/src/eva/vision/data/wsi/base.py @@ -0,0 +1,53 @@ +import abc +from typing import Any, List, Tuple + +import numpy as np + + +class Wsi(abc.ABC): + """Base class for loading data from WSI (whole slide image) files.""" + + def __init__(self, file_path: str): + """Initializes a new class instance. + + Args: + file_path: The path to the whole slide image file. + """ + self._file_path = file_path + self._wsi = None + + @property + @abc.abstractmethod + def level_dimensions(self) -> List[tuple[int, int]]: + """A list of (width, height) tuples for each level, from highest to lowest resolution.""" + + @property + @abc.abstractmethod + def level_downsamples(self) -> List[float]: + """A list of downsampling factors for each level, relative to the highest resolution.""" + + @property + @abc.abstractmethod + def mpp(self) -> float: + """Microns per pixel at the highest resolution.""" + + @abc.abstractmethod + def read_region( + self, location: Tuple[int, int], size: Tuple[int, int], level: int + ) -> np.ndarray: + """Reads and returns image data for a specified region and zoom level. + + Args: + location: Top-left corner (x, y) to start reading. + size: Region size as (width, height), relative to . + level: Zoom level, with 0 being the highest resolution. + """ + + @abc.abstractmethod + def open_slide(self) -> Any: + """Opens the WSI file. + + Note: This shouldn't be called in the constructor as wsi backends usually contain + C types or pointers, which the standard Python pickler cannot serialize, leading to + issues with torch.DataLoader in multiprocessing settings. 
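A minimal usage sketch of the API this patch introduces, assuming openslide-python is installed; the slide path is illustrative:

from eva.vision.data.wsi import WsiBackend, get_wsi_class

wsi_class = get_wsi_class(WsiBackend.OPENSLIDE)   # resolves to WsiOpenslide
slide = wsi_class("example.svs")                  # hypothetical slide path
slide.open_slide()                                # deferred open keeps the object picklable
region = slide.read_region(location=(0, 0), size=(224, 224), level=0)
print(slide.mpp, slide.level_dimensions[0], region.shape)  # region is an RGB numpy array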
+ """ diff --git a/src/eva/vision/data/wsi/openslide.py b/src/eva/vision/data/wsi/openslide.py new file mode 100644 index 00000000..0f37d617 --- /dev/null +++ b/src/eva/vision/data/wsi/openslide.py @@ -0,0 +1,46 @@ +from typing import List, Tuple + +import numpy as np +import openslide +from typing_extensions import override + +from eva.vision.data.wsi import base + + +class WsiOpenslide(base.Wsi): + """Class for loading data from WSI files using the OpenSlide library.""" + + _wsi: openslide.OpenSlide + + @override + @property + def level_dimensions(self) -> List[Tuple[int, int]]: + return self._wsi.level_dimensions + + @override + @property + def level_downsamples(self) -> List[float]: + return self._wsi.level_downsamples + + @override + @property + def mpp(self) -> float: + try: + x_mpp = float(self._wsi.properties["openslide.mpp-x"]) + y_mpp = float(self._wsi.properties["openslide.mpp-y"]) + return (x_mpp + y_mpp) / 2.0 + except KeyError: + # TODO: add overwrite_mpp class attribute to allow setting a default value + raise ValueError("Microns per pixel (mpp) value is not available for this slide.") + + @override + def read_region( + self, location: Tuple[int, int], size: Tuple[int, int], level: int + ) -> np.ndarray: + data = self._wsi.read_region(location, level, size) + + return np.array(data.convert("RGB")) + + @override + def open_slide(self) -> openslide.OpenSlide: + self._wsi = openslide.open_slide(self._file_path) From d71cb3c04fd88d661434b592ec61510bb481ab6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Wed, 10 Apr 2024 17:10:53 +0200 Subject: [PATCH 02/29] Add WSI dataset classes (#368) --- src/eva/core/callbacks/writers/embeddings.py | 6 +- src/eva/core/models/modules/head.py | 16 +- src/eva/core/models/modules/inference.py | 6 +- src/eva/core/models/modules/module.py | 8 +- src/eva/core/models/modules/typings.py | 2 +- src/eva/vision/data/datasets/__init__.py | 5 + .../data/datasets/classification/__init__.py | 2 + .../data/datasets/classification/wsi.py | 25 +++ src/eva/vision/data/datasets/wsi.py | 158 ++++++++++++++++++ 9 files changed, 209 insertions(+), 19 deletions(-) create mode 100644 src/eva/vision/data/datasets/classification/wsi.py create mode 100644 src/eva/vision/data/datasets/wsi.py diff --git a/src/eva/core/callbacks/writers/embeddings.py b/src/eva/core/callbacks/writers/embeddings.py index 4b3cceec..f6fb1c70 100644 --- a/src/eva/core/callbacks/writers/embeddings.py +++ b/src/eva/core/callbacks/writers/embeddings.py @@ -13,7 +13,7 @@ from typing_extensions import override from eva.core.callbacks.writers.typings import QUEUE_ITEM -from eva.core.models.modules.typings import INPUT_BATCH +from eva.core.models.modules.typings import DATA_SAMPLE from eva.core.utils import multiprocessing as eva_multiprocessing @@ -72,12 +72,12 @@ def write_on_batch_end( pl_module: pl.LightningModule, prediction: Any, batch_indices: Sequence[int], - batch: INPUT_BATCH, + batch: DATA_SAMPLE, batch_idx: int, dataloader_idx: int, ) -> None: dataset = trainer.predict_dataloaders[dataloader_idx].dataset # type: ignore - _, targets, metadata = INPUT_BATCH(*batch) + _, targets, metadata = DATA_SAMPLE(*batch) split = self._dataloader_idx_map.get(dataloader_idx) embeddings = self._get_embeddings(prediction) diff --git a/src/eva/core/models/modules/head.py b/src/eva/core/models/modules/head.py index 0976e8f2..543b5006 100644 --- a/src/eva/core/models/modules/head.py +++ b/src/eva/core/models/modules/head.py @@ -11,7 +11,7 @@ 
from eva.core.metrics import structs as metrics_lib from eva.core.models.modules import module -from eva.core.models.modules.typings import INPUT_BATCH, MODEL_TYPE +from eva.core.models.modules.typings import DATA_SAMPLE, MODEL_TYPE from eva.core.models.modules.utils import batch_postprocess, grad @@ -72,20 +72,20 @@ def on_fit_start(self) -> None: grad.deactivate_requires_grad(self.backbone) @override - def training_step(self, batch: INPUT_BATCH, *args: Any, **kwargs: Any) -> STEP_OUTPUT: + def training_step(self, batch: DATA_SAMPLE, *args: Any, **kwargs: Any) -> STEP_OUTPUT: return self._batch_step(batch) @override - def validation_step(self, batch: INPUT_BATCH, *args: Any, **kwargs: Any) -> STEP_OUTPUT: + def validation_step(self, batch: DATA_SAMPLE, *args: Any, **kwargs: Any) -> STEP_OUTPUT: return self._batch_step(batch) @override - def test_step(self, batch: INPUT_BATCH, *args: Any, **kwargs: Any) -> STEP_OUTPUT: + def test_step(self, batch: DATA_SAMPLE, *args: Any, **kwargs: Any) -> STEP_OUTPUT: return self._batch_step(batch) @override - def predict_step(self, batch: INPUT_BATCH, *args: Any, **kwargs: Any) -> torch.Tensor: - tensor = INPUT_BATCH(*batch).data + def predict_step(self, batch: DATA_SAMPLE, *args: Any, **kwargs: Any) -> torch.Tensor: + tensor = DATA_SAMPLE(*batch).data return tensor if self.backbone is None else self.backbone(tensor) @override @@ -93,7 +93,7 @@ def on_fit_end(self) -> None: if self.backbone is not None: grad.activate_requires_grad(self.backbone) - def _batch_step(self, batch: INPUT_BATCH) -> STEP_OUTPUT: + def _batch_step(self, batch: DATA_SAMPLE) -> STEP_OUTPUT: """Performs a model forward step and calculates the loss. Args: @@ -102,7 +102,7 @@ def _batch_step(self, batch: INPUT_BATCH) -> STEP_OUTPUT: Returns: The batch step output. 
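The `DATA_SAMPLE` NamedTuple gives batch elements stable, named positions; a small sketch of the unpacking pattern used in `_batch_step`, with illustrative shapes:

import torch
from eva.core.models.modules.typings import DATA_SAMPLE

batch = (torch.rand(8, 3, 224, 224), torch.randint(0, 6, (8,)), {"slide_id": ["s1"] * 8})
data, targets, metadata = DATA_SAMPLE(*batch)  # accepts any plain tuple of matching arity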
""" - data, targets, metadata = INPUT_BATCH(*batch) + data, targets, metadata = DATA_SAMPLE(*batch) predictions = self(data) loss = self.criterion(predictions, targets) return { diff --git a/src/eva/core/models/modules/inference.py b/src/eva/core/models/modules/inference.py index 2d0d9a3d..10f092d0 100644 --- a/src/eva/core/models/modules/inference.py +++ b/src/eva/core/models/modules/inference.py @@ -5,7 +5,7 @@ from typing_extensions import override from eva.core.models.modules import module -from eva.core.models.modules.typings import INPUT_BATCH, MODEL_TYPE +from eva.core.models.modules.typings import DATA_SAMPLE, MODEL_TYPE class InferenceModule(module.ModelModule): @@ -28,10 +28,10 @@ def forward(self, tensor: torch.Tensor) -> torch.Tensor: @override def predict_step( self, - batch: INPUT_BATCH, + batch: DATA_SAMPLE, batch_idx: int, dataloader_idx: int = 0, ) -> STEP_OUTPUT: - data, *_ = INPUT_BATCH(*batch) + data, *_ = DATA_SAMPLE(*batch) predictions = self(data) return predictions diff --git a/src/eva/core/models/modules/module.py b/src/eva/core/models/modules/module.py index cb5e222a..b1b6ddd9 100644 --- a/src/eva/core/models/modules/module.py +++ b/src/eva/core/models/modules/module.py @@ -9,7 +9,7 @@ from typing_extensions import override from eva.core.metrics import structs as metrics_lib -from eva.core.models.modules.typings import INPUT_BATCH +from eva.core.models.modules.typings import DATA_SAMPLE from eva.core.models.modules.utils import batch_postprocess @@ -50,7 +50,7 @@ def default_postprocess(self) -> batch_postprocess.BatchPostProcess: def on_train_batch_end( self, outputs: STEP_OUTPUT, - batch: INPUT_BATCH, + batch: DATA_SAMPLE, batch_idx: int, ) -> None: outputs = self._common_batch_end(outputs) @@ -63,7 +63,7 @@ def on_train_batch_end( def on_validation_batch_end( self, outputs: STEP_OUTPUT, - batch: INPUT_BATCH, + batch: DATA_SAMPLE, batch_idx: int, dataloader_idx: int = 0, ) -> None: @@ -82,7 +82,7 @@ def on_validation_epoch_end(self) -> None: def on_test_batch_end( self, outputs: STEP_OUTPUT, - batch: INPUT_BATCH, + batch: DATA_SAMPLE, batch_idx: int, dataloader_idx: int = 0, ) -> None: diff --git a/src/eva/core/models/modules/typings.py b/src/eva/core/models/modules/typings.py index fa476bd1..67c79fb6 100644 --- a/src/eva/core/models/modules/typings.py +++ b/src/eva/core/models/modules/typings.py @@ -10,7 +10,7 @@ """The expected model type.""" -class INPUT_BATCH(NamedTuple): +class DATA_SAMPLE(NamedTuple): """The default input batch data scheme.""" data: torch.Tensor diff --git a/src/eva/vision/data/datasets/__init__.py b/src/eva/vision/data/datasets/__init__.py index 608ec65c..557fdcba 100644 --- a/src/eva/vision/data/datasets/__init__.py +++ b/src/eva/vision/data/datasets/__init__.py @@ -4,11 +4,13 @@ BACH, CRC, MHIST, + MultiWsiClassificationDataset, PatchCamelyon, TotalSegmentatorClassification, ) from eva.vision.data.datasets.segmentation import ImageSegmentation, TotalSegmentator2D from eva.vision.data.datasets.vision import VisionDataset +from eva.vision.data.datasets.wsi import MultiWsiDataset, WsiDataset __all__ = [ "BACH", @@ -19,4 +21,7 @@ "TotalSegmentatorClassification", "TotalSegmentator2D", "VisionDataset", + "WsiDataset", + "MultiWsiDataset", + "MultiWsiClassificationDataset", ] diff --git a/src/eva/vision/data/datasets/classification/__init__.py b/src/eva/vision/data/datasets/classification/__init__.py index 5942b313..55198b9e 100644 --- a/src/eva/vision/data/datasets/classification/__init__.py +++ 
b/src/eva/vision/data/datasets/classification/__init__.py @@ -5,6 +5,7 @@ from eva.vision.data.datasets.classification.mhist import MHIST from eva.vision.data.datasets.classification.patch_camelyon import PatchCamelyon from eva.vision.data.datasets.classification.total_segmentator import TotalSegmentatorClassification +from eva.vision.data.datasets.classification.wsi import MultiWsiClassificationDataset __all__ = [ "BACH", @@ -12,4 +13,5 @@ "MHIST", "PatchCamelyon", "TotalSegmentatorClassification", + "MultiWsiClassificationDataset", ] diff --git a/src/eva/vision/data/datasets/classification/wsi.py b/src/eva/vision/data/datasets/classification/wsi.py new file mode 100644 index 00000000..97661712 --- /dev/null +++ b/src/eva/vision/data/datasets/classification/wsi.py @@ -0,0 +1,25 @@ +import bisect +import random +from typing import Any, Dict + +import numpy as np + +from eva.core.models.modules.typings import DATA_SAMPLE +from eva.vision.data.datasets.wsi import MultiWsiDataset + + +class MultiWsiClassificationDataset(MultiWsiDataset): + def __getitem__(self, index: int) -> DATA_SAMPLE: + data = super().__getitem__(index) + target = self._load_target(index) + metadata = self._load_metadata(index) + + return DATA_SAMPLE(data, target, metadata) + + def _load_target(self, index: int) -> np.ndarray: + dataset_idx = bisect.bisect_right(self.cumulative_sizes, index) + return self._manifest.at[dataset_idx, self._column_mapping["target"]] + + def _load_metadata(self, index: int) -> Dict[str, Any]: + # TODO: Implement metadata loading + return {"slide_id": random.randint(0, 100)} diff --git a/src/eva/vision/data/datasets/wsi.py b/src/eva/vision/data/datasets/wsi.py new file mode 100644 index 00000000..c257a2d5 --- /dev/null +++ b/src/eva/vision/data/datasets/wsi.py @@ -0,0 +1,158 @@ +import os +import random +from functools import cached_property +from typing import Callable, Dict + +import numpy as np +import pandas as pd +import torch +from torch.utils.data import dataset as torch_datasets +from typing_extensions import override + +from eva.vision.data import wsi +from eva.vision.data.datasets import vision + + +class WsiDataset(vision.VisionDataset): + def __init__( + self, + file_path: str, + n_samples: int, + width: int, + height: int, + target_mpp: float, + backend: wsi.WsiBackend = wsi.WsiBackend.OPENSLIDE, + transforms: Callable | None = None, + ): + """Args: + file_path: Path to the whole-slide image file. + n_samples: Number of patches to sample from each slide. + width: Width of the patches to be extracted, in pixels. + height: Height of the patches to be extracted, in pixels. + target_mpp: Target microns per pixel (mpp) for the patches. + backend: The backend to use for reading the whole-slide images. + transforms: A function that takes in an image and returns a transformed version. 
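As a sketch, this first iteration of the dataset can be instantiated as follows; the file path is hypothetical:

dataset = WsiDataset(
    file_path="slides/slide_0.svs",  # hypothetical path
    n_samples=100,
    width=224,
    height=224,
    target_mpp=0.5,
)
patch = dataset[0]  # one randomly sampled patch per index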
+ """ + self._file_path = file_path + self._n_samples = n_samples + self._width = width + self._height = height + self._target_mpp = target_mpp + self._backend = backend + self._transforms = transforms + + def __len__(self): + return self._n_samples + + @override + @property + def filename(self, index: int) -> str: + return f"{self._file_path}_{index}" + + @cached_property + def _wsi(self) -> wsi.Wsi: + wsi_obj = wsi.get_wsi_class(self._backend)(self._file_path) + wsi_obj.open_slide() + return wsi_obj + + def __getitem__(self, index: int) -> torch.Tensor: + # Calculate the desired zoom level based on target_mpp + level_idx, width, height = self._get_closest_level(self._wsi, self._target_mpp) + + # Random Sampling + # TODO: make sampling method configurable + # TODO: add support for masking of unwanted regions + x_max, y_max = self._wsi.level_dimensions[level_idx] + x = random.randint(0, x_max - width) + y = random.randint(0, y_max - height) + + patch = self._wsi.read_region((x, y), (width, height), level_idx) + patch = self._apply_transforms(patch) + return patch + + def _get_closest_level(self, slide: wsi.Wsi, target_mpp: float): + """Calculate the slide level closest to the target mpp.""" + # Calculate the mpp for each level + level_mpps = slide.mpp * np.array(slide.level_downsamples) + + # Ignore levels with higher mpp + level_mpps_filtered = level_mpps.copy() + level_mpps_filtered[level_mpps_filtered > target_mpp] = 0 + + if level_mpps_filtered.max() == 0: + # When all levels have higher mpp than target_mpp return the level with lowest mpp + level_idx = np.argmin(level_mpps) + else: + level_idx = np.argmax(level_mpps_filtered) + + # Calculate the width & height in pixels scaled to the selected level + mpp_ratio = slide.mpp / level_mpps[level_idx] + width, height = int(mpp_ratio * self._width), int(mpp_ratio * self._height) + + return level_idx, width, height + + def _apply_transforms(self, tensor: torch.Tensor) -> torch.Tensor: + if self._transforms: + tensor = self._transforms(tensor) + return tensor + + +class MultiWsiDataset(torch_datasets.ConcatDataset): + default_column_mapping: Dict[str, str] = { + "path": "path", + "target": "target", + } + + def __init__( + self, + root: str, + manifest_file: str, + n_samples: int, + width: int, + height: int, + target_mpp: float, + backend: wsi.WsiBackend = wsi.WsiBackend.OPENSLIDE, + transforms: Callable | None = None, + column_mapping: Dict[str, str] = default_column_mapping, + ): + self._root = root + self._manifest_file = manifest_file + self._n_samples = n_samples + self._width = width + self._height = height + self._target_mpp = target_mpp + self._backend = backend + self._transforms = transforms + self._column_mapping = column_mapping + + self._manifest = self._load_manifest(os.path.join(self._root, self._manifest_file)) + super().__init__(self._load_datasets()) + + def _load_datasets(self) -> list[WsiDataset]: + wsi_datasets = [] + for index, row in self._manifest.iterrows(): + file_path = os.path.join(self._root, row[self._column_mapping["path"]]) + if not os.path.exists(file_path): + raise FileNotFoundError(f"File not found: {file_path}") + + wsi_datasets.append( + WsiDataset( + file_path=file_path, + n_samples=self._n_samples, + width=self._width, + height=self._height, + target_mpp=self._target_mpp, + backend=self._backend, + transforms=self._transforms, + ) + ) + return wsi_datasets + + def _load_manifest(self, manifest_path: str) -> pd.DataFrame: + df = pd.read_csv(manifest_path) + + missing_columns = 
set(self._column_mapping.values()) - set(df.columns) + if missing_columns: + raise ValueError(f"Missing columns in the manifest file: {missing_columns}") + + return df From fef6819aac7c62b3f53a54f1b9941736baf65a3a Mon Sep 17 00:00:00 2001 From: roman807 Date: Tue, 16 Apr 2024 17:58:53 +0200 Subject: [PATCH 03/29] Add baseline `panda` workflow (#373) * add panda config * adjust batch size * addressed comments * addressed comments * addressed comments --- configs/vision/dino_vit/offline/panda.yaml | 127 ++++++++++++++++++ src/eva/core/callbacks/writers/embeddings.py | 14 +- src/eva/core/callbacks/writers/typings.py | 3 + .../classification/multi_embeddings.py | 8 +- .../data/datasets/classification/wsi.py | 11 +- 5 files changed, 152 insertions(+), 11 deletions(-) create mode 100644 configs/vision/dino_vit/offline/panda.yaml diff --git a/configs/vision/dino_vit/offline/panda.yaml b/configs/vision/dino_vit/offline/panda.yaml new file mode 100644 index 00000000..29df522e --- /dev/null +++ b/configs/vision/dino_vit/offline/panda.yaml @@ -0,0 +1,127 @@ +--- +trainer: + class_path: eva.Trainer + init_args: + n_runs: &N_RUNS ${oc.env:N_RUNS, 1} + default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/panda} + max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} + callbacks: + - class_path: lightning.pytorch.callbacks.LearningRateMonitor + init_args: + logging_interval: epoch + - class_path: lightning.pytorch.callbacks.ModelCheckpoint + init_args: + filename: best + save_last: true + save_top_k: 1 + monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/MulticlassAccuracy} + mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + min_delta: 0 + patience: 13 + monitor: *MONITOR_METRIC + mode: *MONITOR_METRIC_MODE + - class_path: eva.callbacks.EmbeddingsWriter + init_args: + group_key: slide_id + output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/panda + dataloader_idx_map: + 0: train + 1: val + 2: test + backbone: + class_path: eva.models.ModelFromFunction + init_args: + path: torch.hub.load + arguments: + repo_or_dir: ${oc.env:REPO_OR_DIR, facebookresearch/dino:main} + model: ${oc.env:DINO_BACKBONE, dino_vits16} + pretrained: ${oc.env:PRETRAINED, true} + force_reload: ${oc.env:FORCE_RELOAD, false} + checkpoint_path: ${oc.env:CHECKPOINT_PATH, null} + logger: + - class_path: lightning.pytorch.loggers.TensorBoardLogger + init_args: + save_dir: *OUTPUT_ROOT + name: "" +model: + class_path: eva.HeadModule + init_args: + head: + class_path: eva.vision.models.networks.ABMIL + init_args: + input_size: ${oc.env:IN_FEATURES, 384} + output_size: &NUM_CLASSES 6 + criterion: torch.nn.CrossEntropyLoss + optimizer: + class_path: torch.optim.SGD + init_args: + lr: &LR_VALUE ${oc.env:LR_VALUE, 0.00004} + momentum: 0.9 + weight_decay: 0.0 + lr_scheduler: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: *MAX_STEPS + eta_min: 0.0 + metrics: + common: + - class_path: eva.metrics.AverageLoss + - class_path: eva.metrics.MulticlassClassificationMetrics + init_args: + num_classes: *NUM_CLASSES +data: + class_path: eva.DataModule + init_args: + datasets: + train: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: &DATASET_ARGS + root: *DATASET_EMBEDDINGS_ROOT + manifest_file: manifest.csv + split: train + val: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + 
init_args: + <<: *DATASET_ARGS + split: val + test: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: + <<: *DATASET_ARGS + split: test + predict: + - class_path: eva.vision.datasets.MultiWsiClassificationDataset + init_args: &PREDICT_DATASET_ARGS + root: ${oc.env:DATA_ROOT, ./slide_data}/panda + manifest_file: manifest_train.csv + n_samples: 100 + width: 224 + height: 224 + target_mpp: 0.5 + transforms: + class_path: eva.vision.data.transforms.common.ResizeAndCrop + init_args: + size: ${oc.env:RESIZE_DIM, 224} + mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} + std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} + - class_path: eva.vision.datasets.MultiWsiClassificationDataset + init_args: + <<: *PREDICT_DATASET_ARGS + manifest_file: manifest_val.csv + - class_path: eva.vision.datasets.MultiWsiClassificationDataset + init_args: + <<: *PREDICT_DATASET_ARGS + manifest_file: manifest_test.csv + dataloaders: + train: + batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 16} + shuffle: true + val: + batch_size: *BATCH_SIZE + predict: + batch_size: &PREDICT_BATCH_SIZE ${oc.env:PREDICT_BATCH_SIZE, 64} + num_workers: 12 #multiprocessing.cpu_count + prefetch_factor: 2 + diff --git a/src/eva/core/callbacks/writers/embeddings.py b/src/eva/core/callbacks/writers/embeddings.py index f6fb1c70..c3b2d617 100644 --- a/src/eva/core/callbacks/writers/embeddings.py +++ b/src/eva/core/callbacks/writers/embeddings.py @@ -85,10 +85,13 @@ def write_on_batch_end( input_name, save_name = self._construct_save_name( dataset.filename(global_idx), metadata, local_idx ) + # TODO: group multiple embeddings into one file and remove line below + save_name = save_name.split(".")[0] + f"_{local_idx}.pt" embeddings_buffer, target_buffer = io.BytesIO(), io.BytesIO() torch.save(embeddings[local_idx].clone(), embeddings_buffer) torch.save(targets[local_idx], target_buffer) # type: ignore - item = QUEUE_ITEM(embeddings_buffer, target_buffer, input_name, save_name, split) + slide_id = None if not metadata else metadata.get("slide_id")[local_idx] + item = QUEUE_ITEM(embeddings_buffer, target_buffer, input_name, save_name, split, slide_id) self._write_queue.put(item) self._write_process.check_exceptions() @@ -130,9 +133,9 @@ def _process_write_queue( if item is None: break - prediction_buffer, target_buffer, input_name, save_name, split = QUEUE_ITEM(*item) + prediction_buffer, target_buffer, input_name, save_name, split, slide_id = QUEUE_ITEM(*item) _save_prediction(prediction_buffer, save_name, output_dir) - _update_manifest(target_buffer, input_name, save_name, split, manifest_writer) + _update_manifest(target_buffer, input_name, save_name, split, slide_id, manifest_writer) manifest_file.close() @@ -154,7 +157,7 @@ def _init_manifest(output_dir: str, overwrite: bool = False) -> tuple[io.TextIOW ) manifest_file = open(manifest_path, "w", newline="") manifest_writer = csv.writer(manifest_file) - manifest_writer.writerow(["origin", "embeddings", "target", "split"]) + manifest_writer.writerow(["origin", "embeddings", "target", "split", "slide_id"]) return manifest_file, manifest_writer @@ -163,7 +166,8 @@ def _update_manifest( input_name: str, save_name: str, split: str | None, + slide_id: str | None, manifest_writer, ) -> None: target = torch.load(io.BytesIO(target_buffer.getbuffer()), map_location="cpu") - manifest_writer.writerow([input_name, save_name, target.item(), split]) + manifest_writer.writerow([input_name, save_name, target.item(), split, slide_id]) diff --git 
a/src/eva/core/callbacks/writers/typings.py b/src/eva/core/callbacks/writers/typings.py index bf6aa795..c3c3ea68 100644 --- a/src/eva/core/callbacks/writers/typings.py +++ b/src/eva/core/callbacks/writers/typings.py @@ -21,3 +21,6 @@ class QUEUE_ITEM(NamedTuple): split: str | None """The dataset split the item belongs to (e.g. train, val, test).""" + + slide_id: str | None = None + """Unique slide identifier.""" diff --git a/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py b/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py index 386b7104..d06d713c 100644 --- a/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py +++ b/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py @@ -73,10 +73,12 @@ def _load_embeddings(self, index: int) -> torch.Tensor: embedding_paths = self._data.loc[ self._data[self._column_mapping["multi_id"]] == multi_id, self._column_mapping["path"] ].to_list() - embedding_paths = [os.path.join(self._root, path) for path in embedding_paths] # Load embeddings and stack them accross the first dimension - embeddings = [torch.load(path, map_location="cpu") for path in embedding_paths] + embeddings = [] + for path in embedding_paths: + embedding = torch.load(os.path.join(self._root, path), map_location="cpu") + embeddings.append(embedding.unsqueeze(0) if embedding.ndim==1 else embedding) embeddings = torch.cat(embeddings, dim=0) if not embeddings.ndim == 2: @@ -103,4 +105,4 @@ def _load_target(self, index: int) -> np.ndarray: @override def __len__(self) -> int: - return len(self._data) + return len(self._multi_ids) diff --git a/src/eva/vision/data/datasets/classification/wsi.py b/src/eva/vision/data/datasets/classification/wsi.py index 97661712..a7414f74 100644 --- a/src/eva/vision/data/datasets/classification/wsi.py +++ b/src/eva/vision/data/datasets/classification/wsi.py @@ -1,5 +1,5 @@ import bisect -import random +import os from typing import Any, Dict import numpy as np @@ -16,10 +16,15 @@ def __getitem__(self, index: int) -> DATA_SAMPLE: return DATA_SAMPLE(data, target, metadata) + # TODO: create panda-specific dataset class for functions below def _load_target(self, index: int) -> np.ndarray: dataset_idx = bisect.bisect_right(self.cumulative_sizes, index) return self._manifest.at[dataset_idx, self._column_mapping["target"]] def _load_metadata(self, index: int) -> Dict[str, Any]: - # TODO: Implement metadata loading - return {"slide_id": random.randint(0, 100)} + return {"slide_id": self.filename(index).split(".")[0]} + + def filename(self, index: int) -> str: + dataset_idx = bisect.bisect_right(self.cumulative_sizes, index) + full_path = self._manifest.at[dataset_idx, self._column_mapping["path"]] + return os.path.basename(full_path) From 1faa3c072597f46d2f40a75af4c3ce986c77d8ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Tue, 23 Apr 2024 14:56:21 +0200 Subject: [PATCH 04/29] Add support for grid sampling to `WsiDataset` (#377) --- configs/vision/dino_vit/offline/panda.yaml | 5 +- pdm.lock | 23 +++- pyproject.toml | 1 + src/eva/core/callbacks/writers/embeddings.py | 12 +- .../classification/multi_embeddings.py | 2 +- .../data/datasets/classification/wsi.py | 28 ++-- src/eva/vision/data/datasets/wsi.py | 123 +++++++++--------- src/eva/vision/data/wsi/__init__.py | 9 +- src/eva/vision/data/wsi/backend.py | 19 --- src/eva/vision/data/wsi/backends/__init__.py | 31 +++++ src/eva/vision/data/wsi/backends/base.py | 75 
+++++++++++ src/eva/vision/data/wsi/backends/openslide.py | 47 +++++++ src/eva/vision/data/wsi/base.py | 53 -------- src/eva/vision/data/wsi/openslide.py | 46 ------- src/eva/vision/data/wsi/patching/__init__.py | 5 + .../vision/data/wsi/patching/coordinates.py | 58 +++++++++ src/eva/vision/data/wsi/patching/samplers.py | 107 +++++++++++++++ 17 files changed, 448 insertions(+), 196 deletions(-) delete mode 100644 src/eva/vision/data/wsi/backend.py create mode 100644 src/eva/vision/data/wsi/backends/__init__.py create mode 100644 src/eva/vision/data/wsi/backends/base.py create mode 100644 src/eva/vision/data/wsi/backends/openslide.py delete mode 100644 src/eva/vision/data/wsi/base.py delete mode 100644 src/eva/vision/data/wsi/openslide.py create mode 100644 src/eva/vision/data/wsi/patching/__init__.py create mode 100644 src/eva/vision/data/wsi/patching/coordinates.py create mode 100644 src/eva/vision/data/wsi/patching/samplers.py diff --git a/configs/vision/dino_vit/offline/panda.yaml b/configs/vision/dino_vit/offline/panda.yaml index 29df522e..5b5a04bf 100644 --- a/configs/vision/dino_vit/offline/panda.yaml +++ b/configs/vision/dino_vit/offline/panda.yaml @@ -96,7 +96,10 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./slide_data}/panda manifest_file: manifest_train.csv - n_samples: 100 + sampler: + class_path: eva.vision.data.wsi.patching.samplers.RandomSampler + init_args: + n_samples: 100 width: 224 height: 224 target_mpp: 0.5 diff --git a/pdm.lock b/pdm.lock index d97b9fee..5e5a89f4 100644 --- a/pdm.lock +++ b/pdm.lock @@ -5,7 +5,7 @@ groups = ["default", "dev", "docs", "all", "typecheck", "lint", "vision", "test"] strategy = ["cross_platform", "inherit_metadata"] lock_version = "4.4.1" -content_hash = "sha256:c10565d042a67f776991cc9989f4cc9aee39c75161829ad679c1b394d2bdb906" +content_hash = "sha256:01893abe4eedac47e0dc55b1097b0dd80675e09772227fd20bb46e4287760674" [[package]] name = "absl-py" @@ -1466,7 +1466,7 @@ groups = ["default"] dependencies = [ "coloredlogs", "flatbuffers", - "numpy>=1.24.2", + "numpy>=1.21.6", "packaging", "protobuf", "sympy", @@ -1515,6 +1515,25 @@ files = [ {file = "opencv_python_headless-4.9.0.80-cp37-abi3-win_amd64.whl", hash = "sha256:a8056c2cb37cd65dfcdf4153ca16f7362afcf3a50d600d6bb69c660fc61ee29c"}, ] +[[package]] +name = "openslide-python" +version = "1.3.1" +requires_python = ">=3.8" +summary = "Python interface to OpenSlide" +groups = ["vision"] +dependencies = [ + "Pillow", +] +files = [ + {file = "openslide-python-1.3.1.tar.gz", hash = "sha256:0909c6257cd8decfbbd0082e8c0cd94bbe3a89ad31e142cfa9accc8bb959294e"}, + {file = "openslide_python-1.3.1-cp310-cp310-win32.whl", hash = "sha256:7a5c0c5bddb518f3e643d0ce2e8d5dfe6b3a374a966ca2c316ef56196dd3c602"}, + {file = "openslide_python-1.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:d208e53d3da82313213303058b2ca9fc66c2d98365b9338e27ecc46ab8b07e9d"}, + {file = "openslide_python-1.3.1-cp311-cp311-win32.whl", hash = "sha256:c4720598ba39e7b879e757eff31195f8b80d4638dcb0fbb297ca9823039724ae"}, + {file = "openslide_python-1.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:53a468cd92bdd17cf5b56592242709519c0c7d7028b2f466d20d75264471cc6d"}, + {file = "openslide_python-1.3.1-cp312-cp312-win32.whl", hash = "sha256:d10caf1a1c1e1f598d80e7a5e1a266979ed9bccf9ba8bf45aa34cf04639d9f9e"}, + {file = "openslide_python-1.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:d834fbca0824b902da9d8541f7c34a3e62496823a42de5ac7bf6c35e4c799678"}, +] + [[package]] name = "packaging" version = "24.0" diff --git 
a/pyproject.toml b/pyproject.toml index 71ea6fdd..5873d3b2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,6 +61,7 @@ vision = [ "opencv-python-headless>=4.9.0.80", "timm>=0.9.12", "torchvision>=0.17.0", + "openslide-python>=1.3.1", ] all = [ "h5py>=3.10.0", diff --git a/src/eva/core/callbacks/writers/embeddings.py b/src/eva/core/callbacks/writers/embeddings.py index c3b2d617..191622cf 100644 --- a/src/eva/core/callbacks/writers/embeddings.py +++ b/src/eva/core/callbacks/writers/embeddings.py @@ -86,12 +86,18 @@ def write_on_batch_end( dataset.filename(global_idx), metadata, local_idx ) # TODO: group multiple embeddings into one file and remove line below - save_name = save_name.split(".")[0] + f"_{local_idx}.pt" + save_name = save_name.split(".")[0] + f"_{global_idx}.pt" embeddings_buffer, target_buffer = io.BytesIO(), io.BytesIO() torch.save(embeddings[local_idx].clone(), embeddings_buffer) torch.save(targets[local_idx], target_buffer) # type: ignore - slide_id = None if not metadata else metadata.get("slide_id")[local_idx] - item = QUEUE_ITEM(embeddings_buffer, target_buffer, input_name, save_name, split, slide_id) + slide_id = ( + list(metadata["slide_id"])[local_idx] + if isinstance(metadata, dict) and "slide_id" in metadata + else None + ) + item = QUEUE_ITEM( + embeddings_buffer, target_buffer, input_name, save_name, split, slide_id + ) self._write_queue.put(item) self._write_process.check_exceptions() diff --git a/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py b/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py index d06d713c..813eb28a 100644 --- a/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py +++ b/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py @@ -78,7 +78,7 @@ def _load_embeddings(self, index: int) -> torch.Tensor: embeddings = [] for path in embedding_paths: embedding = torch.load(os.path.join(self._root, path), map_location="cpu") - embeddings.append(embedding.unsqueeze(0) if embedding.ndim==1 else embedding) + embeddings.append(embedding.unsqueeze(0) if embedding.ndim == 1 else embedding) embeddings = torch.cat(embeddings, dim=0) if not embeddings.ndim == 2: diff --git a/src/eva/vision/data/datasets/classification/wsi.py b/src/eva/vision/data/datasets/classification/wsi.py index a7414f74..4d7aff61 100644 --- a/src/eva/vision/data/datasets/classification/wsi.py +++ b/src/eva/vision/data/datasets/classification/wsi.py @@ -1,14 +1,29 @@ +"""Dataset classes for whole-slide image classification.""" + import bisect import os from typing import Any, Dict -import numpy as np +import torch +from typing_extensions import override from eva.core.models.modules.typings import DATA_SAMPLE from eva.vision.data.datasets.wsi import MultiWsiDataset class MultiWsiClassificationDataset(MultiWsiDataset): + """Classification Dataset class for reading patches from multiple whole-slide images. + + # TODO: Replace this by dataset specific classes? 
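The `MultiEmbeddingsClassificationDataset` change above stacks per-patch embedding files that may be stored as 1-D or 2-D tensors; a sketch of that normalization, with illustrative shapes:

import torch

files = [torch.rand(384), torch.rand(2, 384)]  # loaded embedding files of mixed rank
stacked = torch.cat([e.unsqueeze(0) if e.ndim == 1 else e for e in files], dim=0)
assert stacked.shape == (3, 384)  # one row per patch embedding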
+ """ + + @override + def filename(self, index: int) -> str: + dataset_idx = bisect.bisect_right(self.cumulative_sizes, index) + full_path = self._manifest.at[dataset_idx, self._column_mapping["path"]] + return os.path.basename(full_path) + + @override def __getitem__(self, index: int) -> DATA_SAMPLE: data = super().__getitem__(index) target = self._load_target(index) @@ -16,15 +31,10 @@ def __getitem__(self, index: int) -> DATA_SAMPLE: return DATA_SAMPLE(data, target, metadata) - # TODO: create panda-specific dataset class for functions below - def _load_target(self, index: int) -> np.ndarray: + def _load_target(self, index: int) -> torch.Tensor: dataset_idx = bisect.bisect_right(self.cumulative_sizes, index) - return self._manifest.at[dataset_idx, self._column_mapping["target"]] + target = self._manifest.at[dataset_idx, self._column_mapping["target"]] + return torch.tensor(target) def _load_metadata(self, index: int) -> Dict[str, Any]: return {"slide_id": self.filename(index).split(".")[0]} - - def filename(self, index: int) -> str: - dataset_idx = bisect.bisect_right(self.cumulative_sizes, index) - full_path = self._manifest.at[dataset_idx, self._column_mapping["path"]] - return os.path.basename(full_path) diff --git a/src/eva/vision/data/datasets/wsi.py b/src/eva/vision/data/datasets/wsi.py index c257a2d5..2e4f7fff 100644 --- a/src/eva/vision/data/datasets/wsi.py +++ b/src/eva/vision/data/datasets/wsi.py @@ -1,9 +1,9 @@ +"""Dataset classes for whole-slide images.""" + import os -import random from functools import cached_property from typing import Callable, Dict -import numpy as np import pandas as pd import torch from torch.utils.data import dataset as torch_datasets @@ -11,93 +11,83 @@ from eva.vision.data import wsi from eva.vision.data.datasets import vision +from eva.vision.data.wsi.backends import wsi_backend +from eva.vision.data.wsi.patching import samplers class WsiDataset(vision.VisionDataset): + """Dataset class for reading patches from whole-slide images.""" + def __init__( self, file_path: str, - n_samples: int, width: int, height: int, target_mpp: float, - backend: wsi.WsiBackend = wsi.WsiBackend.OPENSLIDE, - transforms: Callable | None = None, + sampler: samplers.Sampler, + backend: str = "openslide", + transforms: Callable[..., torch.Tensor] | None = None, ): - """Args: - file_path: Path to the whole-slide image file. - n_samples: Number of patches to sample from each slide. - width: Width of the patches to be extracted, in pixels. - height: Height of the patches to be extracted, in pixels. - target_mpp: Target microns per pixel (mpp) for the patches. - backend: The backend to use for reading the whole-slide images. - transforms: A function that takes in an image and returns a transformed version. + """Initializes a new dataset instance. + + Args: + file_path: Path to the whole-slide image file. + width: Width of the patches to be extracted, in pixels. + height: Height of the patches to be extracted, in pixels. + target_mpp: Target microns per pixel (mpp) for the patches. + sampler: The sampler to use for sampling patch coordinates. + backend: The backend to use for reading the whole-slide images. + transforms: Transforms to apply to the extracted patch tensors. 
""" self._file_path = file_path - self._n_samples = n_samples self._width = width self._height = height self._target_mpp = target_mpp self._backend = backend + self._sampler = sampler self._transforms = transforms + @override def __len__(self): - return self._n_samples + return len(self._coords.x_y) @override - @property def filename(self, index: int) -> str: return f"{self._file_path}_{index}" @cached_property def _wsi(self) -> wsi.Wsi: - wsi_obj = wsi.get_wsi_class(self._backend)(self._file_path) - wsi_obj.open_slide() - return wsi_obj + # wsi_obj = wsi.get_wsi_class(self._backend)(self._file_path) + return wsi_backend(self._backend)(self._file_path) - def __getitem__(self, index: int) -> torch.Tensor: - # Calculate the desired zoom level based on target_mpp - level_idx, width, height = self._get_closest_level(self._wsi, self._target_mpp) - - # Random Sampling - # TODO: make sampling method configurable - # TODO: add support for masking of unwanted regions - x_max, y_max = self._wsi.level_dimensions[level_idx] - x = random.randint(0, x_max - width) - y = random.randint(0, y_max - height) + @cached_property + def _coords(self) -> wsi.PatchCoordinates: + return wsi.PatchCoordinates.from_file( + wsi_path=self._file_path, + width=self._width, + height=self._height, + target_mpp=self._target_mpp, + backend=self._backend, + sampler=self._sampler, + ) + @override + def __getitem__(self, index: int) -> torch.Tensor: + x, y = self._coords.x_y[index] + width, height, level_idx = self._coords.width, self._coords.height, self._coords.level_idx patch = self._wsi.read_region((x, y), (width, height), level_idx) - patch = self._apply_transforms(patch) + patch = self._apply_transforms(torch.from_numpy(patch).permute(2, 0, 1)) return patch - def _get_closest_level(self, slide: wsi.Wsi, target_mpp: float): - """Calculate the slide level closest to the target mpp.""" - # Calculate the mpp for each level - level_mpps = slide.mpp * np.array(slide.level_downsamples) - - # Ignore levels with higher mpp - level_mpps_filtered = level_mpps.copy() - level_mpps_filtered[level_mpps_filtered > target_mpp] = 0 - - if level_mpps_filtered.max() == 0: - # When all levels have higher mpp than target_mpp return the level with lowest mpp - level_idx = np.argmin(level_mpps) - else: - level_idx = np.argmax(level_mpps_filtered) - - # Calculate the width & height in pixels scaled to the selected level - mpp_ratio = slide.mpp / level_mpps[level_idx] - width, height = int(mpp_ratio * self._width), int(mpp_ratio * self._height) - - return level_idx, width, height - def _apply_transforms(self, tensor: torch.Tensor) -> torch.Tensor: if self._transforms: tensor = self._transforms(tensor) return tensor -class MultiWsiDataset(torch_datasets.ConcatDataset): +class MultiWsiDataset(torch_datasets.ConcatDataset, vision.VisionDataset): + """Dataset class for reading patches from multiple whole-slide images.""" + default_column_mapping: Dict[str, str] = { "path": "path", "target": "target", @@ -107,20 +97,37 @@ def __init__( self, root: str, manifest_file: str, - n_samples: int, width: int, height: int, target_mpp: float, - backend: wsi.WsiBackend = wsi.WsiBackend.OPENSLIDE, + sampler: samplers.Sampler, + backend: str = "openslide", transforms: Callable | None = None, column_mapping: Dict[str, str] = default_column_mapping, ): + """Initializes a new dataset instance. + + Args: + root: Root directory of the dataset. + manifest_file: The path to the manifest file, which is relative to + the `root` argument. 
+ width: Width of the patches to be extracted, in pixels. + height: Height of the patches to be extracted, in pixels. + target_mpp: Target microns per pixel (mpp) for the patches. + sampler: The sampler to use for sampling patch coordinates. + backend: The backend to use for reading the whole-slide images. + transforms: Transforms to apply to the extracted patch tensors. + column_mapping: Defines the map between the variables and the manifest + columns. It will overwrite the `default_column_mapping` with + the provided values, so that `column_mapping` can contain only the + values which are altered or missing. + """ self._root = root self._manifest_file = manifest_file - self._n_samples = n_samples self._width = width self._height = height self._target_mpp = target_mpp + self._sampler = sampler self._backend = backend self._transforms = transforms self._column_mapping = column_mapping @@ -130,18 +137,18 @@ def __init__( def _load_datasets(self) -> list[WsiDataset]: wsi_datasets = [] - for index, row in self._manifest.iterrows(): - file_path = os.path.join(self._root, row[self._column_mapping["path"]]) + for _, row in self._manifest.iterrows(): + file_path = os.path.join(self._root, str(row[self._column_mapping["path"]])) if not os.path.exists(file_path): raise FileNotFoundError(f"File not found: {file_path}") wsi_datasets.append( WsiDataset( file_path=file_path, - n_samples=self._n_samples, width=self._width, height=self._height, target_mpp=self._target_mpp, + sampler=self._sampler, backend=self._backend, transforms=self._transforms, ) diff --git a/src/eva/vision/data/wsi/__init__.py b/src/eva/vision/data/wsi/__init__.py index e9ccd558..72433a43 100644 --- a/src/eva/vision/data/wsi/__init__.py +++ b/src/eva/vision/data/wsi/__init__.py @@ -1,5 +1,6 @@ -from eva.vision.data.wsi.backend import WsiBackend, get_wsi_class -from eva.vision.data.wsi.base import Wsi -from eva.vision.data.wsi.openslide import WsiOpenslide +"""WSI API.""" -__all__ = ["Wsi", "WsiOpenslide", "WsiBackend", "get_wsi_class"] +from eva.vision.data.wsi.backends import Wsi, wsi_backend +from eva.vision.data.wsi.patching.coordinates import PatchCoordinates + +__all__ = ["Wsi", "PatchCoordinates", "wsi_backend"] diff --git a/src/eva/vision/data/wsi/backend.py b/src/eva/vision/data/wsi/backend.py deleted file mode 100644 index cecceb47..00000000 --- a/src/eva/vision/data/wsi/backend.py +++ /dev/null @@ -1,19 +0,0 @@ -import enum - -from eva.vision.data.wsi.base import Wsi -from eva.vision.data.wsi.openslide import WsiOpenslide - - -class WsiBackend(enum.Enum): - OPENSLIDE = 0 - AUTO = 1 - - -def get_wsi_class(backend: WsiBackend) -> Wsi: - match backend: - case WsiBackend.OPENSLIDE: - return WsiOpenslide - case WsiBackend.AUTO: - raise NotImplementedError - case _: - raise ValueError(f"Unknown WSI backend: {backend}") diff --git a/src/eva/vision/data/wsi/backends/__init__.py b/src/eva/vision/data/wsi/backends/__init__.py new file mode 100644 index 00000000..71c622df --- /dev/null +++ b/src/eva/vision/data/wsi/backends/__init__.py @@ -0,0 +1,31 @@ +"""WSI Backends API.""" + +import importlib.util +from typing import Callable + +from eva.vision.data.wsi.backends.base import Wsi + + +def is_openslide_available() -> bool: + """Whether the OpenSlide library is available.""" + return importlib.util.find_spec("openslide") is not None + + +def wsi_backend(backend: str = "openslide") -> Callable[..., Wsi]: + """Returns the backend to use for reading the whole-slide images.""" + match backend: + case "openslide": + if 
is_openslide_available(): + from eva.vision.data.wsi.backends.openslide import WsiOpenslide + + return WsiOpenslide + else: + raise ValueError( + "Missing optional dependency: openslide.\n" + "Please install using `pip install openslide-python`." + ) + case _: + raise ValueError(f"Unknown WSI backend selected: {backend}") + + +__all__ = ["Wsi", "wsi_backend", "is_openslide_available"] diff --git a/src/eva/vision/data/wsi/backends/base.py b/src/eva/vision/data/wsi/backends/base.py new file mode 100644 index 00000000..2bc5ce7b --- /dev/null +++ b/src/eva/vision/data/wsi/backends/base.py @@ -0,0 +1,75 @@ +"""Base Module for loading data from WSI files.""" + +import abc +from typing import Any, Sequence, Tuple + +import numpy as np + + +class Wsi(abc.ABC): + """Base class for loading data from Whole Slide Image (WSI) files.""" + + def __init__(self, file_path: str): + """Initializes a Wsi object. + + Args: + file_path: The path to the WSI file. + """ + self._wsi = self.open_file(file_path) + + @abc.abstractmethod + def open_file(self, file_path: str) -> Any: + """Opens the WSI file. + + Args: + file_path: The path to the WSI file. + """ + + @property + @abc.abstractmethod + def level_dimensions(self) -> Sequence[Tuple[int, int]]: + """A list of (width, height) tuples for each level, from highest to lowest resolution.""" + + @property + @abc.abstractmethod + def level_downsamples(self) -> Sequence[float]: + """A list of downsampling factors for each level, relative to the highest resolution.""" + + @property + @abc.abstractmethod + def mpp(self) -> float: + """Microns per pixel at the highest resolution.""" + + @abc.abstractmethod + def read_region( + self, location: Tuple[int, int], size: Tuple[int, int], level: int + ) -> np.ndarray: + """Reads and returns image data for a specified region and zoom level. + + Args: + location: Top-left corner (x, y) to start reading. + size: Region size as (width, height), relative to . + level: Zoom level, with 0 being the highest resolution. + """ + + def get_closest_level(self, target_mpp: float) -> int: + """Calculate the slide level that is closest to the target mpp. + + Args: + slide: The whole-slide image object. + target_mpp: The target microns per pixel (mpp) value. 
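A worked example of the level selection above, with illustrative values: a slide at 0.25 mpp with downsamples [1, 4, 16] and a target of 0.5 mpp:

import numpy as np

level_mpps = 0.25 * np.array([1.0, 4.0, 16.0])        # [0.25, 1.0, 4.0]
filtered = np.where(level_mpps > 0.5, 0, level_mpps)  # zero out levels coarser than the target
level_idx = int(np.argmax(filtered)) if filtered.max() > 0 else int(np.argmin(level_mpps))
assert level_idx == 0  # the largest mpp that does not exceed the target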
+ """ + # Calculate the mpp for each level + level_mpps = self.mpp * np.array(self.level_downsamples) + + # Ignore levels with higher mpp + level_mpps_filtered = level_mpps.copy() + level_mpps_filtered[level_mpps_filtered > target_mpp] = 0 + + if level_mpps_filtered.max() == 0: + # When all levels have higher mpp than target_mpp return the level with lowest mpp + level_idx = np.argmin(level_mpps) + else: + level_idx = np.argmax(level_mpps_filtered) + + return int(level_idx) diff --git a/src/eva/vision/data/wsi/backends/openslide.py b/src/eva/vision/data/wsi/backends/openslide.py new file mode 100644 index 00000000..6a7fbe0d --- /dev/null +++ b/src/eva/vision/data/wsi/backends/openslide.py @@ -0,0 +1,47 @@ +"""Module for loading data from WSI files using the OpenSlide library.""" + +from typing import Sequence, Tuple + +import numpy as np +import openslide +from typing_extensions import override + +from eva.vision.data.wsi.backends import base + + +class WsiOpenslide(base.Wsi): + """Class for loading data from WSI files using the OpenSlide library.""" + + _wsi: openslide.OpenSlide | openslide.ImageSlide + + @override + def open_file(self, file_path: str) -> openslide.OpenSlide | openslide.ImageSlide: + return openslide.open_slide(file_path) + + @property + @override + def level_dimensions(self) -> Sequence[Tuple[int, int]]: + return self._wsi.level_dimensions + + @property + @override + def level_downsamples(self) -> Sequence[float]: + return self._wsi.level_downsamples + + @property + @override + def mpp(self) -> float: + # TODO: add overwrite_mpp class attribute to allow setting a default value + x_mpp = float(self._wsi.properties["openslide.mpp-x"]) + y_mpp = float(self._wsi.properties["openslide.mpp-y"]) + return (x_mpp + y_mpp) / 2.0 + + @override + def read_region( + self, location: Tuple[int, int], size: Tuple[int, int], level: int + ) -> np.ndarray: + x_max, y_max = self._wsi.level_dimensions[level] + if location[0] + size[0] > x_max or location[1] + size[1] > y_max: + raise ValueError(f"Out of bounds region: {location}, {size}, {level}") + data = self._wsi.read_region(location, level, size) + return np.array(data.convert("RGB")) diff --git a/src/eva/vision/data/wsi/base.py b/src/eva/vision/data/wsi/base.py deleted file mode 100644 index 2110bf38..00000000 --- a/src/eva/vision/data/wsi/base.py +++ /dev/null @@ -1,53 +0,0 @@ -import abc -from typing import Any, List, Tuple - -import numpy as np - - -class Wsi(abc.ABC): - """Base class for loading data from WSI (whole slide image) files.""" - - def __init__(self, file_path: str): - """Initializes a new class instance. - - Args: - file_path: The path to the whole slide image file. - """ - self._file_path = file_path - self._wsi = None - - @property - @abc.abstractmethod - def level_dimensions(self) -> List[tuple[int, int]]: - """A list of (width, height) tuples for each level, from highest to lowest resolution.""" - - @property - @abc.abstractmethod - def level_downsamples(self) -> List[float]: - """A list of downsampling factors for each level, relative to the highest resolution.""" - - @property - @abc.abstractmethod - def mpp(self) -> float: - """Microns per pixel at the highest resolution.""" - - @abc.abstractmethod - def read_region( - self, location: Tuple[int, int], size: Tuple[int, int], level: int - ) -> np.ndarray: - """Reads and returns image data for a specified region and zoom level. - - Args: - location: Top-left corner (x, y) to start reading. - size: Region size as (width, height), relative to . 
- level: Zoom level, with 0 being the highest resolution. - """ - - @abc.abstractmethod - def open_slide(self) -> Any: - """Opens the WSI file. - - Note: This shouldn't be called in the constructor as wsi backends usually contain - C types or pointers, which the standard Python pickler cannot serialize, leading to - issues with torch.DataLoader in multiprocessing settings. - """ diff --git a/src/eva/vision/data/wsi/openslide.py b/src/eva/vision/data/wsi/openslide.py deleted file mode 100644 index 0f37d617..00000000 --- a/src/eva/vision/data/wsi/openslide.py +++ /dev/null @@ -1,46 +0,0 @@ -from typing import List, Tuple - -import numpy as np -import openslide -from typing_extensions import override - -from eva.vision.data.wsi import base - - -class WsiOpenslide(base.Wsi): - """Class for loading data from WSI files using the OpenSlide library.""" - - _wsi: openslide.OpenSlide - - @override - @property - def level_dimensions(self) -> List[Tuple[int, int]]: - return self._wsi.level_dimensions - - @override - @property - def level_downsamples(self) -> List[float]: - return self._wsi.level_downsamples - - @override - @property - def mpp(self) -> float: - try: - x_mpp = float(self._wsi.properties["openslide.mpp-x"]) - y_mpp = float(self._wsi.properties["openslide.mpp-y"]) - return (x_mpp + y_mpp) / 2.0 - except KeyError: - # TODO: add overwrite_mpp class attribute to allow setting a default value - raise ValueError("Microns per pixel (mpp) value is not available for this slide.") - - @override - def read_region( - self, location: Tuple[int, int], size: Tuple[int, int], level: int - ) -> np.ndarray: - data = self._wsi.read_region(location, level, size) - - return np.array(data.convert("RGB")) - - @override - def open_slide(self) -> openslide.OpenSlide: - self._wsi = openslide.open_slide(self._file_path) diff --git a/src/eva/vision/data/wsi/patching/__init__.py b/src/eva/vision/data/wsi/patching/__init__.py new file mode 100644 index 00000000..f14b1b51 --- /dev/null +++ b/src/eva/vision/data/wsi/patching/__init__.py @@ -0,0 +1,5 @@ +"""WSI Patching API.""" + +from eva.vision.data.wsi.patching.coordinates import PatchCoordinates + +__all__ = ["PatchCoordinates"] diff --git a/src/eva/vision/data/wsi/patching/coordinates.py b/src/eva/vision/data/wsi/patching/coordinates.py new file mode 100644 index 00000000..7f4151a2 --- /dev/null +++ b/src/eva/vision/data/wsi/patching/coordinates.py @@ -0,0 +1,58 @@ +"""A module for handling coordinates of patches from a whole-slide image.""" + +import dataclasses +from typing import List, Tuple + +from eva.vision.data.wsi import backends +from eva.vision.data.wsi.patching import samplers + + +@dataclasses.dataclass +class PatchCoordinates: + """A class to store coordinates of patches from a whole-slide image. + + Args: + x_y: A list of (x, y) coordinates of the patches. + width: The width of the patches, in pixels (refers to x-dim). + height: The height of the patches, in pixels (refers to y-dim). + level_idx: The level index of the patches. + """ + + x_y: List[Tuple[int, int]] + width: int + height: int + level_idx: int + + @classmethod + def from_file( + cls, + wsi_path: str, + width: int, + height: int, + target_mpp: float, + sampler: samplers.Sampler, + backend: str = "openslide", + ) -> "PatchCoordinates": + """Create a new instance of PatchCoordinates from a whole-slide image file. + + Patches will be read from the level that is closest to the specified target_mpp. + + Args: + wsi_path: The path to the whole-slide image file. 
+ width: The width of the patches to be extracted, in pixels. + height: The height of the patches to be extracted, in pixels. + target_mpp: The target microns per pixel (mpp) for the patches. + sampler: The sampler to use for sampling patch coordinates. + backend: The backend to use for reading the whole-slide images. + """ + wsi = backends.wsi_backend(backend)(wsi_path) + x_y = [] + level_idx = wsi.get_closest_level(target_mpp) + level_mpp = wsi.mpp * wsi.level_downsamples[level_idx] + mpp_ratio = target_mpp / level_mpp + scaled_width, scaled_height = int(mpp_ratio * width), int(mpp_ratio * height) + + for x, y in sampler.sample(scaled_width, scaled_height, wsi.level_dimensions[level_idx]): + x_y.append((x, y)) + + return cls(x_y, scaled_width, scaled_height, level_idx) diff --git a/src/eva/vision/data/wsi/patching/samplers.py b/src/eva/vision/data/wsi/patching/samplers.py new file mode 100644 index 00000000..66a8e876 --- /dev/null +++ b/src/eva/vision/data/wsi/patching/samplers.py @@ -0,0 +1,107 @@ +"""Samplers for WSI patch extraction.""" + +import abc +import random +from typing import Generator, Tuple + +import numpy as np + + +class Sampler(abc.ABC): + """Base class for samplers.""" + + @abc.abstractmethod + def sample( + self, + width: int, + height: int, + layer_shape: tuple[int, int], + ) -> Generator[Tuple[int, int], None, None]: + """Iterator that samples patches.""" + + +class RandomSampler(Sampler): + """Sample patch coordinates randomly. + + Args: + n_samples: The number of samples to return. + seed: The random seed. + """ + + def __init__(self, n_samples: int = 1, seed: int = 42): + """Initializes the sampler.""" + self.seed = seed + self.n_samples = n_samples + + def sample( + self, + width: int, + height: int, + layer_shape: tuple[int, int], + ) -> Generator[Tuple[int, int], None, None]: + """Sample random patches. + + Args: + width: The width of the patches. + height: The height of the patches. + layer_shape: The shape of the layer. + """ + _set_seed(self.seed) + + for _ in range(self.n_samples): + x_max, y_max = layer_shape[0], layer_shape[1] + x, y = random.randint(0, x_max - width), random.randint(0, y_max - height) # nosec + yield x, y + + +class GridSampler(Sampler): + """Sample patches based on a grid. + + Args: + max_samples: The maximum number of samples to return. + overlap: The overlap between patches in the grid. + seed: The random seed. + """ + + def __init__( + self, + max_samples: int | None = None, + overlap: tuple[int, int] = (0, 0), + seed: int = 42, + ): + """Initializes the sampler.""" + self.max_samples = max_samples + self.overlap = overlap + self.seed = seed + + def sample( + self, + width: int, + height: int, + layer_shape: tuple[int, int], + ) -> Generator[Tuple[int, int], None, None]: + """Sample patches from a grid. + + Args: + width: The width of the patches. + height: The height of the patches. + layer_shape: The shape of the layer. 
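Tying the patching pieces together, a worked example of the scaling in `from_file` followed by zero-overlap grid sampling; all numbers are illustrative:

mpp_ratio = 0.5 / 0.25                     # target_mpp / level_mpp
w = h = int(mpp_ratio * 224)               # 448 native pixels per 224-px patch at 0.5 mpp
layer = (4096, 4096)                       # level dimensions
xs = range(0, layer[0] - w, w)             # 0, 448, ..., 3584
ys = range(0, layer[1] - h, h)
coords = [(x, y) for x in xs for y in ys]  # 9 * 9 = 81 patch coordinates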
+ """ + _set_seed(self.seed) + + x_range = range(0, layer_shape[0] - width, width - self.overlap[0]) + y_range = range(0, layer_shape[1] - height, height - self.overlap[1]) + x_y = [(x, y) for x in x_range for y in y_range] + + shuffled_indices = ( + np.random.choice(len(x_y), self.max_samples, replace=False) + if self.max_samples + else range(len(x_y)) + ) + for i in shuffled_indices: + yield x_y[i] + + +def _set_seed(seed: int) -> None: + random.seed(seed) + np.random.seed(seed) From 278b7581de5ba4e68a5917b9ae65f2d1a621562a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Thu, 25 Apr 2024 14:36:02 +0200 Subject: [PATCH 05/29] Replaced cached_property in WsiDataset by LRU cache (#388) --- src/eva/vision/data/datasets/wsi.py | 15 +++++------- src/eva/vision/data/wsi/__init__.py | 6 ++--- src/eva/vision/data/wsi/backends/__init__.py | 11 ++++++++- .../vision/data/wsi/patching/coordinates.py | 23 +++++++++++++++++++ 4 files changed, 42 insertions(+), 13 deletions(-) diff --git a/src/eva/vision/data/datasets/wsi.py b/src/eva/vision/data/datasets/wsi.py index 2e4f7fff..f4b57b52 100644 --- a/src/eva/vision/data/datasets/wsi.py +++ b/src/eva/vision/data/datasets/wsi.py @@ -1,7 +1,6 @@ """Dataset classes for whole-slide images.""" import os -from functools import cached_property from typing import Callable, Dict import pandas as pd @@ -11,7 +10,6 @@ from eva.vision.data import wsi from eva.vision.data.datasets import vision -from eva.vision.data.wsi.backends import wsi_backend from eva.vision.data.wsi.patching import samplers @@ -55,20 +53,19 @@ def __len__(self): def filename(self, index: int) -> str: return f"{self._file_path}_{index}" - @cached_property + @property def _wsi(self) -> wsi.Wsi: - # wsi_obj = wsi.get_wsi_class(self._backend)(self._file_path) - return wsi_backend(self._backend)(self._file_path) + return wsi.get_cached_wsi(self._file_path, self._backend) - @cached_property + @property def _coords(self) -> wsi.PatchCoordinates: - return wsi.PatchCoordinates.from_file( - wsi_path=self._file_path, + return wsi.get_cached_coords( + file_path=self._file_path, width=self._width, height=self._height, target_mpp=self._target_mpp, - backend=self._backend, sampler=self._sampler, + backend=self._backend, ) @override diff --git a/src/eva/vision/data/wsi/__init__.py b/src/eva/vision/data/wsi/__init__.py index 72433a43..24c9aa4c 100644 --- a/src/eva/vision/data/wsi/__init__.py +++ b/src/eva/vision/data/wsi/__init__.py @@ -1,6 +1,6 @@ """WSI API.""" -from eva.vision.data.wsi.backends import Wsi, wsi_backend -from eva.vision.data.wsi.patching.coordinates import PatchCoordinates +from eva.vision.data.wsi.backends import Wsi, get_cached_wsi, wsi_backend +from eva.vision.data.wsi.patching.coordinates import PatchCoordinates, get_cached_coords -__all__ = ["Wsi", "PatchCoordinates", "wsi_backend"] +__all__ = ["Wsi", "PatchCoordinates", "get_cached_coords", "wsi_backend", "get_cached_wsi"] diff --git a/src/eva/vision/data/wsi/backends/__init__.py b/src/eva/vision/data/wsi/backends/__init__.py index 71c622df..4a6b23b2 100644 --- a/src/eva/vision/data/wsi/backends/__init__.py +++ b/src/eva/vision/data/wsi/backends/__init__.py @@ -1,10 +1,13 @@ """WSI Backends API.""" +import functools import importlib.util from typing import Callable from eva.vision.data.wsi.backends.base import Wsi +LRU_CACHE_SIZE = 32 + def is_openslide_available() -> bool: """Whether the OpenSlide library is available.""" @@ -28,4 +31,10 @@ def wsi_backend(backend: str = 
"openslide") -> Callable[..., Wsi]: raise ValueError(f"Unknown WSI backend selected: {backend}") -__all__ = ["Wsi", "wsi_backend", "is_openslide_available"] +@functools.lru_cache(LRU_CACHE_SIZE) +def get_cached_wsi(file_path: str, backend: str) -> Wsi: + """Returns a cached instance of the whole-slide image backend reader.""" + return wsi_backend(backend)(file_path) + + +__all__ = ["Wsi", "wsi_backend", "get_cached_wsi", "is_openslide_available"] diff --git a/src/eva/vision/data/wsi/patching/coordinates.py b/src/eva/vision/data/wsi/patching/coordinates.py index 7f4151a2..caeb50c6 100644 --- a/src/eva/vision/data/wsi/patching/coordinates.py +++ b/src/eva/vision/data/wsi/patching/coordinates.py @@ -1,11 +1,14 @@ """A module for handling coordinates of patches from a whole-slide image.""" import dataclasses +import functools from typing import List, Tuple from eva.vision.data.wsi import backends from eva.vision.data.wsi.patching import samplers +LRU_CACHE_SIZE = 32 + @dataclasses.dataclass class PatchCoordinates: @@ -56,3 +59,23 @@ def from_file( x_y.append((x, y)) return cls(x_y, scaled_width, scaled_height, level_idx) + + +@functools.lru_cache(LRU_CACHE_SIZE) +def get_cached_coords( + file_path: str, + width: int, + height: int, + target_mpp: float, + sampler: samplers.Sampler, + backend: str, +) -> PatchCoordinates: + """Get a cached instance of PatchCoordinates for the specified parameters.""" + return PatchCoordinates.from_file( + wsi_path=file_path, + width=width, + height=height, + target_mpp=target_mpp, + backend=backend, + sampler=sampler, + ) From 5ce416ca8e99e528d80205949c7fad43216bd21e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Fri, 26 Apr 2024 13:51:42 +0200 Subject: [PATCH 06/29] Updated `EmbeddingsWriter` to support multi-embedding file outputs (#384) --- configs/vision/dino_vit/offline/panda.yaml | 2 +- src/eva/core/callbacks/writers/embeddings.py | 164 ++++++++++++---- src/eva/core/callbacks/writers/typings.py | 24 ++- .../core/callbacks/writers/test_embeddings.py | 185 ++++++++++++++---- 4 files changed, 287 insertions(+), 88 deletions(-) diff --git a/configs/vision/dino_vit/offline/panda.yaml b/configs/vision/dino_vit/offline/panda.yaml index 5b5a04bf..69dc38c8 100644 --- a/configs/vision/dino_vit/offline/panda.yaml +++ b/configs/vision/dino_vit/offline/panda.yaml @@ -24,12 +24,12 @@ trainer: mode: *MONITOR_METRIC_MODE - class_path: eva.callbacks.EmbeddingsWriter init_args: - group_key: slide_id output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/panda dataloader_idx_map: 0: train 1: val 2: test + metadata_keys: ["slide_id"] backbone: class_path: eva.models.ModelFromFunction init_args: diff --git a/src/eva/core/callbacks/writers/embeddings.py b/src/eva/core/callbacks/writers/embeddings.py index 191622cf..a3c544da 100644 --- a/src/eva/core/callbacks/writers/embeddings.py +++ b/src/eva/core/callbacks/writers/embeddings.py @@ -3,7 +3,7 @@ import csv import io import os -from typing import Any, Dict, Sequence +from typing import Any, Dict, List, Sequence import lightning.pytorch as pl import torch @@ -12,7 +12,7 @@ from torch import multiprocessing, nn from typing_extensions import override -from eva.core.callbacks.writers.typings import QUEUE_ITEM +from eva.core.callbacks.writers.typings import ITEM_DICT_ENTRY, QUEUE_ITEM from eva.core.models.modules.typings import DATA_SAMPLE from eva.core.utils import multiprocessing as 
eva_multiprocessing @@ -25,8 +25,9 @@ def __init__( output_dir: str, backbone: nn.Module | None = None, dataloader_idx_map: Dict[int, str] | None = None, - group_key: str | None = None, + metadata_keys: List[str] | None = None, overwrite: bool = True, + save_every_n: int = 100, ) -> None: """Initializes a new EmbeddingsWriter instance. @@ -39,18 +40,20 @@ def __init__( it will be expected that the input batch returns the features directly. dataloader_idx_map: A dictionary mapping dataloader indices to their respective names (e.g. train, val, test). - group_key: The metadata key to group the embeddings by. If specified, the - embedding files will be saved in subdirectories named after the group_key. - If specified, the key must be present in the metadata of the input batch. + metadata_keys: An optional list of keys to extract from the batch metadata and store + as additional columns in the manifest file. overwrite: Whether to overwrite the output directory. Defaults to True. + save_every_n: Interval for number of iterations to save the embeddings to disk. + During this interval, the embeddings are accumulated in memory. """ super().__init__(write_interval="batch") self._output_dir = output_dir self._backbone = backbone self._dataloader_idx_map = dataloader_idx_map or {} - self._group_key = group_key self._overwrite = overwrite + self._save_every_n = save_every_n + self._metadata_keys = metadata_keys or [] self._write_queue: multiprocessing.Queue self._write_process: eva_multiprocessing.Process @@ -82,21 +85,14 @@ def write_on_batch_end( embeddings = self._get_embeddings(prediction) for local_idx, global_idx in enumerate(batch_indices[: len(embeddings)]): - input_name, save_name = self._construct_save_name( - dataset.filename(global_idx), metadata, local_idx - ) - # TODO: group multiple embeddings into one file and remove line below - save_name = save_name.split(".")[0] + f"_{global_idx}.pt" + input_name = dataset.filename(global_idx) + save_name = os.path.splitext(input_name)[0] + ".pt" embeddings_buffer, target_buffer = io.BytesIO(), io.BytesIO() torch.save(embeddings[local_idx].clone(), embeddings_buffer) torch.save(targets[local_idx], target_buffer) # type: ignore - slide_id = ( - list(metadata["slide_id"])[local_idx] - if isinstance(metadata, dict) and "slide_id" in metadata - else None - ) + item_metadata = self._get_item_metadata(metadata, local_idx) item = QUEUE_ITEM( - embeddings_buffer, target_buffer, input_name, save_name, split, slide_id + embeddings_buffer, target_buffer, input_name, save_name, split, item_metadata ) self._write_queue.put(item) @@ -111,7 +107,14 @@ def on_predict_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> def _initialize_write_process(self) -> None: self._write_queue = multiprocessing.Queue() self._write_process = eva_multiprocessing.Process( - target=_process_write_queue, args=(self._write_queue, self._output_dir, self._overwrite) + target=_process_write_queue, + args=( + self._write_queue, + self._output_dir, + self._metadata_keys, + self._save_every_n, + self._overwrite, + ), ) def _get_embeddings(self, prediction: torch.Tensor) -> torch.Tensor: @@ -122,38 +125,123 @@ def _get_embeddings(self, prediction: torch.Tensor) -> torch.Tensor: with torch.no_grad(): return self._backbone(prediction) - def _construct_save_name(self, input_name, metadata, local_idx): - group_name = metadata[self._group_key][local_idx] if self._group_key else None - save_name = os.path.splitext(input_name)[0] + ".pt" - if group_name: - save_name = 
os.path.join(group_name, save_name)
-        return input_name, save_name
+    def _get_item_metadata(
+        self, metadata: Dict[str, Any] | None, local_idx: int
+    ) -> Dict[str, Any] | None:
+        """Returns the metadata for the item at the given local index."""
+        if not metadata:
+            if self._metadata_keys:
+                raise ValueError("Metadata keys are provided but the batch metadata is empty.")
+            else:
+                return None
+
+        item_metadata = {}
+        for key in self._metadata_keys:
+            if key not in metadata:
+                raise KeyError(f"Metadata key '{key}' not found in the batch metadata.")
+            item_metadata[key] = metadata[key][local_idx]
+
+        return item_metadata
 
 
 def _process_write_queue(
-    write_queue: multiprocessing.Queue, output_dir: str, overwrite: bool = False
+    write_queue: multiprocessing.Queue,
+    output_dir: str,
+    metadata_keys: List[str],
+    save_every_n: int,
+    overwrite: bool = False,
 ) -> None:
-    manifest_file, manifest_writer = _init_manifest(output_dir, overwrite)
+    """Receives and processes items added to the queue by the main process."""
+    manifest_file, manifest_writer = _init_manifest(output_dir, metadata_keys, overwrite)
+
+    name_to_items: Dict[str, ITEM_DICT_ENTRY] = {}
+
+    counter = 0
     while True:
         item = write_queue.get()
         if item is None:
             break
-        prediction_buffer, target_buffer, input_name, save_name, split, slide_id = QUEUE_ITEM(*item)
-        _save_prediction(prediction_buffer, save_name, output_dir)
-        _update_manifest(target_buffer, input_name, save_name, split, slide_id, manifest_writer)
+        item = QUEUE_ITEM(*item)
+
+        if item.save_name in name_to_items:
+            name_to_items[item.save_name].items.append(item)
+        else:
+            name_to_items[item.save_name] = ITEM_DICT_ENTRY(items=[item], save_count=0)
+
+        if counter > 0 and counter % save_every_n == 0:
+            name_to_items = _save_items(name_to_items, metadata_keys, output_dir, manifest_writer)
+
+        counter += 1
+
+    if len(name_to_items) > 0:
+        _save_items(name_to_items, metadata_keys, output_dir, manifest_writer)
 
     manifest_file.close()
 
 
-def _save_prediction(prediction_buffer: io.BytesIO, save_name: str, output_dir: str) -> None:
-    save_path = os.path.join(output_dir, save_name)
-    prediction = torch.load(io.BytesIO(prediction_buffer.getbuffer()), map_location="cpu")
+def _save_items(
+    name_to_items: Dict[str, ITEM_DICT_ENTRY],
+    metadata_keys: List[str],
+    output_dir: str,
+    manifest_writer: Any,
+) -> Dict[str, ITEM_DICT_ENTRY]:
+    """Saves predictions to disk and updates the manifest file.
+
+    If multiple items share the same filename, the predictions are concatenated and saved
+    to the same file. Furthermore, the manifest file will only contain one entry for each
+    filename, which is why this function checks if it's the first time saving to a file.
+
+    Args:
+        name_to_items: A dictionary mapping save names to the corresponding queue items
+            holding the prediction tensors and the information for the manifest file.
+        metadata_keys: A list of keys to extract from the batch metadata. These will be
+            stored as additional columns in the manifest file.
+        output_dir: The directory where the embedding tensors & manifest will be saved.
+        manifest_writer: The CSV writer for writing to the manifest file.
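
Stripped to its essentials, the control flow above is a group-and-flush pattern (a minimal sketch, not the module's actual API; the hypothetical flush() stands in for _save_items):

    from collections import defaultdict
    from typing import Dict, List

    def flush(groups: Dict[str, list]) -> Dict[str, list]:
        # Stand-in for _save_items: persist each group's buffered items,
        # then reset the buffers while keeping the group keys.
        for save_name in groups:
            groups[save_name] = []
        return groups

    def consume(queue_items: List[dict], save_every_n: int = 100) -> None:
        groups: Dict[str, list] = defaultdict(list)
        for counter, item in enumerate(queue_items):
            groups[item["save_name"]].append(item)  # group patches by target file
            if counter > 0 and counter % save_every_n == 0:
                groups = flush(groups)
        flush(groups)  # final flush for whatever is still buffered

This bounds the number of items held in memory per interval while still letting all embeddings of one slide end up in a single file.
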
+ """ + for save_name, entry in name_to_items.items(): + if len(entry.items) > 0: + save_path = os.path.join(output_dir, save_name) + is_first_save = entry.save_count == 0 + if is_first_save: + _, target, input_name, _, split, metadata = QUEUE_ITEM(*entry.items[0]) + metadata = [metadata[key] for key in metadata_keys] # type: ignore + _update_manifest(target, input_name, save_name, split, metadata, manifest_writer) + prediction_buffers = [item.prediction_buffer for item in entry.items] + _save_predictions(prediction_buffers, save_path, is_first_save) + name_to_items[save_name].save_count += 1 + name_to_items[save_name].items = [] + + return name_to_items + + +def _save_predictions( + prediction_buffers: List[io.BytesIO], save_path: str, is_first_save: bool +) -> None: + """Saves the embedding tensors to .pt files. + + If it's not the first save to this save_path, the new predictions are concatenated + with the existing ones and saved to the same file. + + Example Usecase: Save all patch embeddings corresponding to the same WSI to a single file. + """ + predictions = [ + torch.load(io.BytesIO(buffer.getbuffer()), map_location="cpu") + for buffer in prediction_buffers + ] + predictions = torch.stack(predictions, dim=0) + + if not is_first_save: + predictions = torch.cat([torch.load(save_path), predictions], dim=0) + os.makedirs(os.path.dirname(save_path), exist_ok=True) - torch.save(prediction, save_path) + torch.save(predictions, save_path) -def _init_manifest(output_dir: str, overwrite: bool = False) -> tuple[io.TextIOWrapper, Any]: +def _init_manifest( + output_dir: str, metadata_keys: List[str] | None, overwrite: bool = False +) -> tuple[io.TextIOWrapper, Any]: manifest_path = os.path.join(output_dir, "manifest.csv") if os.path.exists(manifest_path) and not overwrite: raise FileExistsError( @@ -163,7 +251,7 @@ def _init_manifest(output_dir: str, overwrite: bool = False) -> tuple[io.TextIOW ) manifest_file = open(manifest_path, "w", newline="") manifest_writer = csv.writer(manifest_file) - manifest_writer.writerow(["origin", "embeddings", "target", "split", "slide_id"]) + manifest_writer.writerow(["origin", "embeddings", "target", "split"] + (metadata_keys or [])) return manifest_file, manifest_writer @@ -172,8 +260,8 @@ def _update_manifest( input_name: str, save_name: str, split: str | None, - slide_id: str | None, + metadata: List[str], manifest_writer, ) -> None: target = torch.load(io.BytesIO(target_buffer.getbuffer()), map_location="cpu") - manifest_writer.writerow([input_name, save_name, target.item(), split, slide_id]) + manifest_writer.writerow([input_name, save_name, target.item(), split] + metadata) diff --git a/src/eva/core/callbacks/writers/typings.py b/src/eva/core/callbacks/writers/typings.py index c3c3ea68..9e061de8 100644 --- a/src/eva/core/callbacks/writers/typings.py +++ b/src/eva/core/callbacks/writers/typings.py @@ -1,26 +1,38 @@ """Typing definitions for the writer callback functions.""" +import dataclasses import io -from typing import NamedTuple +from typing import Any, Dict, List, NamedTuple class QUEUE_ITEM(NamedTuple): """The default input batch data scheme.""" prediction_buffer: io.BytesIO - """IO buffer containing the prediction tensor""" + """IO buffer containing the prediction tensor.""" target_buffer: io.BytesIO - """IO buffer containing the target tensor""" + """IO buffer containing the target tensor.""" input_name: str """Name of the original input file that was used to generate the embedding.""" save_name: str - """Name to store the generated 
embedding""" + """Name to store the generated embedding.""" split: str | None """The dataset split the item belongs to (e.g. train, val, test).""" - slide_id: str | None = None - """Unique slide identifier.""" + metadata: Dict[str, Any] | None = None + """Dictionary holding additional metadata.""" + + +@dataclasses.dataclass +class ITEM_DICT_ENTRY: + """Typing for holding queue items and number of save operations.""" + + items: List[QUEUE_ITEM] + """List of queue items.""" + + save_count: int + """Number of prior item batch saves to same file.""" diff --git a/tests/eva/core/callbacks/writers/test_embeddings.py b/tests/eva/core/callbacks/writers/test_embeddings.py index c32d2f85..d9886d4e 100644 --- a/tests/eva/core/callbacks/writers/test_embeddings.py +++ b/tests/eva/core/callbacks/writers/test_embeddings.py @@ -1,14 +1,17 @@ """Tests the embeddings writer.""" +import functools import os import random import tempfile from pathlib import Path -from typing import List, Literal +from typing import List, Literal, Set import lightning.pytorch as pl import pandas as pd import pytest +import torch +from lightning.pytorch import callbacks from lightning.pytorch.demos import boring_classes from torch import nn from typing_extensions import override @@ -21,57 +24,126 @@ @pytest.mark.parametrize( - "batch_size, n_samples", + "batch_size, n_samples, metadata_keys, filenames", [ - (5, 7), - (8, 16), + (5, 7, None, None), + (5, 7, ["slide_id"], None), + (8, 16, None, None), + (8, 32, ["slide_id"], ["slide_1", "slide_2"]), ], ) def test_embeddings_writer(datamodule: datamodules.DataModule, model: modules.HeadModule) -> None: - """Tests the embeddings writer callback.""" + """Tests the embeddings writer callback. + + This test executes a lightning trainer predict operation and checks if the expected + embedding tensors & manifest files are correctly written to disk. 
+ """ with tempfile.TemporaryDirectory() as output_dir: - trainer = pl.Trainer( - logger=False, - callbacks=writers.EmbeddingsWriter( - output_dir=output_dir, - dataloader_idx_map={0: "train", 1: "val", 2: "test"}, - backbone=nn.Flatten(), - ), - ) - all_predictions = trainer.predict( - model=model, datamodule=datamodule, return_predictions=True + metadata_keys = datamodule.datasets.predict[0]._metadata_keys # type: ignore + expected_filenames = datamodule.datasets.predict[0]._filenames # type: ignore + grouping_enabled = expected_filenames is not None + callback = writers.EmbeddingsWriter( + output_dir=output_dir, + dataloader_idx_map={0: "train", 1: "val", 2: "test"}, + backbone=nn.Flatten(), + metadata_keys=metadata_keys, ) - files = Path(output_dir).glob("*.pt") - files = [f.relative_to(output_dir).as_posix() for f in files] + trainer = _init_and_run_trainer([callback], model, datamodule) assert isinstance(trainer.predict_dataloaders, list) assert len(trainer.predict_dataloaders) == 3 - assert isinstance(all_predictions, list) - assert len(all_predictions) == 3 - total_n_predictions = 0 + + unique_filenames = set() + tot_n_samples = 0 for dataloader_idx in range(len(trainer.predict_dataloaders)): + _check_embedding_dimensions(output_dir, grouping_enabled) dataset = trainer.predict_dataloaders[dataloader_idx].dataset + filenames = _check_if_embedding_files_exist(output_dir, dataset, expected_filenames) + unique_filenames.update(filenames) + tot_n_samples += len(dataset) + + expected_file_count = len(unique_filenames) if expected_filenames else tot_n_samples + _check_expected_n_files(output_dir, expected_file_count) + _check_manifest(output_dir, len(unique_filenames), metadata_keys) + + +def _init_and_run_trainer( + callbacks: List[callbacks.Callback], + model: pl.LightningModule, + datamodule: datamodules.DataModule, +): + """Initializes and runs the trainer with the given callbacks.""" + trainer = pl.Trainer( + logger=False, + accelerator="cpu", + callbacks=callbacks, + ) + trainer.predict(model=model, datamodule=datamodule, return_predictions=True) + + return trainer + + +def _check_if_embedding_files_exist( + output_dir: str, dataset: datasets.Dataset, expected_filenames: List[str] | None +) -> Set[str]: + """Checks if the expected embedding files exist in the output directory.""" + output_files = _get_output_filenames(output_dir) + + dataset_filenames = set() + for idx in range(len(dataset)): # type: ignore + filename = f"{dataset.filename(idx)}.pt" # type: ignore + assert filename in output_files + dataset_filenames.add(filename) + + if expected_filenames: + assert len(set(expected_filenames) - {Path(x).stem for x in output_files}) == 0 + + return dataset_filenames - # Check if the number of predictions is correct - predictions = all_predictions[dataloader_idx] - assert isinstance(predictions, list) - n_predictions = sum(len(p) for p in predictions) - assert len(dataset) == n_predictions - # Check if the expected files are present - for idx in range(len(dataset)): - filename = dataset.filename(idx) - assert f"{filename}.pt" in files +def _check_embedding_dimensions(output_dir: str, grouping_enabled: bool): + """Checks if the produced embeddings have the expected dimensions.""" + embedding_paths = Path(output_dir).glob("*.pt") - total_n_predictions += n_predictions + for path in embedding_paths: + tensor = torch.load(path) + assert tensor.ndim == 2 - # Check if the manifest file is in the expected format - df_manifest = pd.read_csv(os.path.join(output_dir, "manifest.csv")) - assert 
"origin" in df_manifest.columns - assert "embeddings" in df_manifest.columns - assert "target" in df_manifest.columns - assert "split" in df_manifest.columns - assert len(df_manifest) == total_n_predictions + if grouping_enabled: + assert tensor.shape[0] > 1 + else: + assert tensor.shape[0] == 1 + + +def _check_expected_n_files(output_dir: str, expected_file_count: int): + """Checks if the number of produced output files matches the expected count.""" + output_files = _get_output_filenames(output_dir) + assert len(output_files) == expected_file_count + + +def _check_manifest( + output_dir: str, expected_n_entries: int, metadata_keys: List[str] | None = None +): + """Checks if the manifest file contains the expected number of entries and columns.""" + manifest_path = os.path.join(output_dir, "manifest.csv") + assert os.path.isfile(manifest_path) + df_manifest = pd.read_csv(manifest_path) + + expected_columns = ["origin", "embeddings", "target", "split"] + (metadata_keys or []) + for column in expected_columns: + assert column in df_manifest.columns + + assert len(df_manifest) == expected_n_entries + + if metadata_keys: + assert all(key in df_manifest.columns for key in metadata_keys) + + +def _get_output_filenames(output_dir: str) -> List[str]: + """Returns the list of output embedding filenames in the output directory.""" + output_files = Path(output_dir).glob("*.pt") + output_files = [f.relative_to(output_dir).as_posix() for f in output_files] + return output_files @pytest.fixture(scope="function") @@ -87,11 +159,20 @@ def model(n_classes: int = 4) -> modules.HeadModule: @pytest.fixture(scope="function") def dataset( n_samples: int, + metadata_keys: List[str] | None, + filenames: List[str] | None, ) -> List[datasets.Dataset]: """Fake dataset fixture.""" - train_dataset = FakeDataset(split="train", length=n_samples, size=SAMPLE_SHAPE) - val_dataset = FakeDataset(split="val", length=n_samples, size=SAMPLE_SHAPE) - test_dataset = FakeDataset(split="test", length=n_samples, size=SAMPLE_SHAPE) + Dataset = functools.partial( + FakeDataset, + length=n_samples, + size=SAMPLE_SHAPE, + metadata_keys=metadata_keys, + filenames=filenames, + ) + train_dataset = Dataset(split="train") + val_dataset = Dataset(split="val") + test_dataset = Dataset(split="test") return [train_dataset, val_dataset, test_dataset] @@ -99,17 +180,35 @@ def dataset( class FakeDataset(boring_classes.RandomDataset, datasets.Dataset): """Fake prediction dataset.""" - def __init__(self, split: Literal["train", "val", "test"], size: int = 32, length: int = 10): + def __init__( + self, + split: Literal["train", "val", "test"], + size: int = 32, + length: int = 10, + metadata_keys: List[str] | None = None, + filenames: List[str] | None = None, + ): """Initializes the dataset.""" super().__init__(size=size, length=length) self._split = split + self._metadata_keys = metadata_keys + self._filenames = filenames def filename(self, index: int) -> str: """Returns the filename for the given index.""" - return f"{self._split}-{index}" + if self._filenames: + # This simulates the case where where multiple items can correspond to the same file. + # e.g. in WSI classification, multiple patches can belong to the same slide. 
+ return random.choice(self._filenames) + else: + return f"{self._split}-{index}" @override def __getitem__(self, index: int): data = boring_classes.RandomDataset.__getitem__(self, index) target = random.choice([0, 1]) - return data, target + if self._metadata_keys: + metadata = {key: random.choice([0, 1, 2]) for key in self._metadata_keys} + return data, target, metadata + else: + return data, target From 03196610b920c4627604abab64fb2e9dc489598f Mon Sep 17 00:00:00 2001 From: roman807 Date: Tue, 30 Apr 2024 11:03:23 +0200 Subject: [PATCH 07/29] Simple baseline sampler for foreground patches (#394) * add foreground kaggle * added random foreground sampler kaggle * refactored sampler * moved get_mask * typo * refactor sampler * refactor sampler * addressed comments * addressed comments --- configs/vision/dino_vit/offline/panda.yaml | 10 +- .../classification/multi_embeddings.py | 2 + src/eva/vision/data/datasets/wsi.py | 2 +- src/eva/vision/data/wsi/backends/base.py | 2 +- src/eva/vision/data/wsi/backends/openslide.py | 2 +- .../vision/data/wsi/patching/coordinates.py | 13 +- src/eva/vision/data/wsi/patching/samplers.py | 143 ++++++++++++++++-- src/eva/vision/utils/mask.py | 39 +++++ 8 files changed, 194 insertions(+), 19 deletions(-) create mode 100644 src/eva/vision/utils/mask.py diff --git a/configs/vision/dino_vit/offline/panda.yaml b/configs/vision/dino_vit/offline/panda.yaml index 69dc38c8..5954df88 100644 --- a/configs/vision/dino_vit/offline/panda.yaml +++ b/configs/vision/dino_vit/offline/panda.yaml @@ -81,6 +81,12 @@ data: root: *DATASET_EMBEDDINGS_ROOT manifest_file: manifest.csv split: train + embeddings_transforms: + class_path: eva.core.data.transforms.Pad2DTensor + init_args: + pad_size: 100 + pad_value: 0 + val: class_path: eva.datasets.MultiEmbeddingsClassificationDataset init_args: @@ -97,9 +103,9 @@ data: root: ${oc.env:DATA_ROOT, ./slide_data}/panda manifest_file: manifest_train.csv sampler: - class_path: eva.vision.data.wsi.patching.samplers.RandomSampler + class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler init_args: - n_samples: 100 + max_samples: 100 width: 224 height: 224 target_mpp: 0.5 diff --git a/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py b/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py index 813eb28a..b103b699 100644 --- a/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py +++ b/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py @@ -42,6 +42,8 @@ def __init__( the `root` argument. split: The dataset split to use. The `split` column of the manifest file will be splitted based on this value. + n_embeddings: Expected number of embeddings per sample. If less, the embeddings + will be padded with zeros. column_mapping: Defines the map between the variables and the manifest columns. 
It will overwrite the `default_column_mapping` with the provided values, so that `column_mapping` can contain only the diff --git a/src/eva/vision/data/datasets/wsi.py b/src/eva/vision/data/datasets/wsi.py index f4b57b52..962c5d48 100644 --- a/src/eva/vision/data/datasets/wsi.py +++ b/src/eva/vision/data/datasets/wsi.py @@ -72,7 +72,7 @@ def _coords(self) -> wsi.PatchCoordinates: def __getitem__(self, index: int) -> torch.Tensor: x, y = self._coords.x_y[index] width, height, level_idx = self._coords.width, self._coords.height, self._coords.level_idx - patch = self._wsi.read_region((x, y), (width, height), level_idx) + patch = self._wsi.read_region((x, y), level_idx, (width, height)) patch = self._apply_transforms(torch.from_numpy(patch).permute(2, 0, 1)) return patch diff --git a/src/eva/vision/data/wsi/backends/base.py b/src/eva/vision/data/wsi/backends/base.py index 2bc5ce7b..66c6f323 100644 --- a/src/eva/vision/data/wsi/backends/base.py +++ b/src/eva/vision/data/wsi/backends/base.py @@ -42,7 +42,7 @@ def mpp(self) -> float: @abc.abstractmethod def read_region( - self, location: Tuple[int, int], size: Tuple[int, int], level: int + self, location: Tuple[int, int], level: int, size: Tuple[int, int] ) -> np.ndarray: """Reads and returns image data for a specified region and zoom level. diff --git a/src/eva/vision/data/wsi/backends/openslide.py b/src/eva/vision/data/wsi/backends/openslide.py index 6a7fbe0d..dbfb4eea 100644 --- a/src/eva/vision/data/wsi/backends/openslide.py +++ b/src/eva/vision/data/wsi/backends/openslide.py @@ -38,7 +38,7 @@ def mpp(self) -> float: @override def read_region( - self, location: Tuple[int, int], size: Tuple[int, int], level: int + self, location: Tuple[int, int], level: int, size: Tuple[int, int] ) -> np.ndarray: x_max, y_max = self._wsi.level_dimensions[level] if location[0] + size[0] > x_max or location[1] + size[1] > y_max: diff --git a/src/eva/vision/data/wsi/patching/coordinates.py b/src/eva/vision/data/wsi/patching/coordinates.py index caeb50c6..85450cb8 100644 --- a/src/eva/vision/data/wsi/patching/coordinates.py +++ b/src/eva/vision/data/wsi/patching/coordinates.py @@ -6,6 +6,7 @@ from eva.vision.data.wsi import backends from eva.vision.data.wsi.patching import samplers +from eva.vision.utils.mask import get_mask LRU_CACHE_SIZE = 32 @@ -49,14 +50,20 @@ def from_file( backend: The backend to use for reading the whole-slide images. 
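
Taken together, extracting foreground-filtered patch coordinates could be driven like this (an illustrative sketch; the path is a placeholder and the parameter values mirror the panda.yaml config in this series):

    from eva.vision.data.wsi.patching import samplers
    from eva.vision.data.wsi.patching.coordinates import PatchCoordinates

    coords = PatchCoordinates.from_file(
        wsi_path="slide.svs",  # placeholder path
        width=224,
        height=224,
        target_mpp=0.5,
        sampler=samplers.ForegroundGridSampler(max_samples=100),
        backend="openslide",
    )
    print(len(coords.x_y), coords.level_idx)
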
""" wsi = backends.wsi_backend(backend)(wsi_path) - x_y = [] level_idx = wsi.get_closest_level(target_mpp) level_mpp = wsi.mpp * wsi.level_downsamples[level_idx] mpp_ratio = target_mpp / level_mpp scaled_width, scaled_height = int(mpp_ratio * width), int(mpp_ratio * height) - for x, y in sampler.sample(scaled_width, scaled_height, wsi.level_dimensions[level_idx]): - x_y.append((x, y)) + sample_args = { + "width": scaled_width, + "height": scaled_height, + "layer_shape": wsi.level_dimensions[level_idx], + } + if isinstance(sampler, samplers.ForegroundSampler): + sample_args["mask"] = get_mask(wsi, level_idx) + + x_y = [(x, y) for x, y in sampler.sample(**sample_args)] return cls(x_y, scaled_width, scaled_height, level_idx) diff --git a/src/eva/vision/data/wsi/patching/samplers.py b/src/eva/vision/data/wsi/patching/samplers.py index 66a8e876..f96b4a8e 100644 --- a/src/eva/vision/data/wsi/patching/samplers.py +++ b/src/eva/vision/data/wsi/patching/samplers.py @@ -16,10 +16,38 @@ def sample( width: int, height: int, layer_shape: tuple[int, int], + *args, ) -> Generator[Tuple[int, int], None, None]: """Iterator that samples patches.""" +class ForegroundSampler(Sampler): + """Base class for samplers with foreground filtering capabilities.""" + + @abc.abstractmethod + def sample( + self, + width: int, + height: int, + layer_shape: tuple[int, int], + mask: tuple[np.ndarray, float], + *args, + ) -> Generator[Tuple[int, int], None, None]: + """Iterator that samples patches.""" + + @abc.abstractmethod + def is_foreground( + self, + mask: tuple[np.ndarray, float], + x: int, + y: int, + width: int, + height: int, + min_foreground_ratio=0.35, + ) -> bool: + """Check if a patch contains sufficient foreground.""" + + class RandomSampler(Sampler): """Sample patch coordinates randomly. @@ -87,19 +115,112 @@ def sample( height: The height of the patches. layer_shape: The shape of the layer. """ - _set_seed(self.seed) + x_y, indices = _get_grid_coords_and_indices(layer_shape, width, height, self.overlap) + max_samples = len(indices) if self.max_samples is None else self.max_samples + for i in indices[:max_samples]: + yield x_y[i] - x_range = range(0, layer_shape[0] - width, width - self.overlap[0]) - y_range = range(0, layer_shape[1] - height, height - self.overlap[1]) - x_y = [(x, y) for x in x_range for y in y_range] - shuffled_indices = ( - np.random.choice(len(x_y), self.max_samples, replace=False) - if self.max_samples - else range(len(x_y)) - ) - for i in shuffled_indices: - yield x_y[i] +class ForegroundGridSampler(ForegroundSampler): + """Sample patches based on a grid, only returning patches containing foreground. + + Args: + max_samples: The maximum number of samples to return. + """ + + def __init__( + self, + max_samples: int = 20, + overlap: tuple[int, int] = (0, 0), + seed: int = 42, + ): + """Initializes the sampler.""" + self.max_samples = max_samples + self.overlap = overlap + self.seed = seed + + def sample( + self, + width: int, + height: int, + layer_shape: tuple[int, int], + mask: tuple[np.ndarray, float], + ): + """Sample patches from a grid containing foreground. + + Args: + width: The width of the patches. + height: The height of the patches. + layer_shape: The shape of the layer. + mask: The mask of the image. + mask_scale_factor: The scale factor of the mask. 
+ """ + x_y, indices = _get_grid_coords_and_indices(layer_shape, width, height, self.overlap) + + count = 0 + for i in indices: + if count >= self.max_samples: + break + if self.is_foreground(mask, x_y[i][0], x_y[i][1], width, height): + count += 1 + yield x_y[i] + + def is_foreground( + self, + mask: tuple[np.ndarray, float], + x: int, + y: int, + width: int, + height: int, + min_foreground_ratio=0.35, + ) -> bool: + """Check if a patch contains sufficient foreground. + + Args: + mask: The mask of the image. + x: The x-coordinate of the patch. + y: The y-coordinate of the patch. + width: The width of the patch. + height: The height of the patch. + mask_scale_factor: The scale factor of the mask. + min_foreground_ratio: The minimum amount of foreground in the patch. + """ + mask_array, mask_scale_factor = mask + x_, y_, width_, height_ = self.scale_coords(mask_scale_factor, x, y, width, height) + patch_mask = mask_array[y_ : y_ + height_, x_ : x_ + width_] + return patch_mask.sum() / patch_mask.size > min_foreground_ratio + + def scale_coords(self, scale_factor, *coords): + return tuple(int(coord * scale_factor) for coord in coords) + + +def _get_grid_coords_and_indices( + layer_shape: tuple[int, int], + width: int, + height: int, + overlap: tuple[int, int], + shuffle: bool = True, + seed: int = 42, +): + """Get grid coordinates and indices. + + Args: + layer_shape: The shape of the layer. + width: The width of the patches. + height: The height of the patches. + overlap: The overlap between patches in the grid. + shuffle: Whether to shuffle the indices. + seed: The random seed. + """ + x_range = range(0, layer_shape[0] - width, width - overlap[0]) + y_range = range(0, layer_shape[1] - height, height - overlap[1]) + x_y = [(x, y) for x in x_range for y in y_range] + + indices = list(range(len(x_y))) + if shuffle: + _set_seed(seed) + np.random.shuffle(indices) + return x_y, indices def _set_seed(seed: int) -> None: diff --git a/src/eva/vision/utils/mask.py b/src/eva/vision/utils/mask.py new file mode 100644 index 00000000..16d15568 --- /dev/null +++ b/src/eva/vision/utils/mask.py @@ -0,0 +1,39 @@ +import cv2 +import numpy as np + +from eva.vision.data.wsi.backends.base import Wsi + + +def get_mask( + wsi: Wsi, + level_idx: int, + kernel_size: tuple[int, int] = (7, 7), + gray_threshold: int = 220, + fill_holes: bool = False, +) -> tuple[np.ndarray, float]: + """Extracts a binary mask from an image. + + Args: + wsi: The WSI object. + level_idx: The level index to extract the mask from. + kernel_size: The size of the kernel for morphological operations. + gray_threshold: The threshold for the gray scale image. + fill_holes: Whether to fill holes in the mask. 
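
The thresholding step at the heart of get_mask, shown on a synthetic image (a sketch; the gray threshold of 220 matches the default above):

    import cv2
    import numpy as np

    image = np.full((100, 100, 3), 255, dtype=np.uint8)  # white background
    image[20:80, 20:80] = 120                            # darker "tissue" square

    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    mask = np.where(gray < 220, 1, 0).astype(np.uint8)
    print(int(mask.sum()))  # 3600 foreground pixels (the 60x60 square)
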
+ """ + image = np.array( + wsi.read_region([0, 0], len(wsi.level_dimensions) - 1, wsi.level_dimensions[-1]) + ) + + kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernel_size) + gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) + mask = np.where(gray < gray_threshold, 1, 0).astype(np.uint8) + + if fill_holes: + mask = cv2.dilate(mask, kernel, iterations=1) + contour, _ = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE) + for cnt in contour: + cv2.drawContours(mask, [cnt], 0, 1, -1) + + mask_scale_factor = wsi.level_dimensions[-1][0] / wsi.level_dimensions[level_idx][0] + + return mask, mask_scale_factor From 2d28f6117a490037f9c53078c3e44716928adce2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Tue, 30 Apr 2024 15:34:30 +0200 Subject: [PATCH 08/29] Fixed linting in WSI feature branch (#407) --- src/eva/core/callbacks/writers/embeddings.py | 4 +- src/eva/core/models/networks/mlp.py | 4 +- .../vision/data/wsi/patching/coordinates.py | 4 +- src/eva/vision/data/wsi/patching/samplers.py | 69 ++++++++++--------- src/eva/vision/utils/mask.py | 16 +++-- .../eva/vision/models/networks/test_abmil.py | 3 +- 6 files changed, 56 insertions(+), 44 deletions(-) diff --git a/src/eva/core/callbacks/writers/embeddings.py b/src/eva/core/callbacks/writers/embeddings.py index a3c544da..b1cc792a 100644 --- a/src/eva/core/callbacks/writers/embeddings.py +++ b/src/eva/core/callbacks/writers/embeddings.py @@ -3,7 +3,7 @@ import csv import io import os -from typing import Any, Dict, List, Sequence +from typing import Any, Dict, List, Sequence, Tuple import lightning.pytorch as pl import torch @@ -241,7 +241,7 @@ def _save_predictions( def _init_manifest( output_dir: str, metadata_keys: List[str] | None, overwrite: bool = False -) -> tuple[io.TextIOWrapper, Any]: +) -> Tuple[io.TextIOWrapper, Any]: manifest_path = os.path.join(output_dir, "manifest.csv") if os.path.exists(manifest_path) and not overwrite: raise FileExistsError( diff --git a/src/eva/core/models/networks/mlp.py b/src/eva/core/models/networks/mlp.py index 4decad2a..c8403dbe 100644 --- a/src/eva/core/models/networks/mlp.py +++ b/src/eva/core/models/networks/mlp.py @@ -1,6 +1,6 @@ """Multi-layer Perceptron (MLP) implemented in PyTorch.""" -from typing import Type +from typing import Tuple, Type import torch import torch.nn as nn @@ -13,7 +13,7 @@ def __init__( self, input_size: int, output_size: int, - hidden_layer_sizes: tuple[int, ...] | None = None, + hidden_layer_sizes: Tuple[int, ...] 
| None = None,
         hidden_activation_fn: Type[torch.nn.Module] | None = nn.ReLU,
         output_activation_fn: Type[torch.nn.Module] | None = None,
         dropout: float = 0.0,
diff --git a/src/eva/vision/data/wsi/patching/coordinates.py b/src/eva/vision/data/wsi/patching/coordinates.py
index 85450cb8..f38344b9 100644
--- a/src/eva/vision/data/wsi/patching/coordinates.py
+++ b/src/eva/vision/data/wsi/patching/coordinates.py
@@ -63,7 +63,9 @@ def from_file(
         if isinstance(sampler, samplers.ForegroundSampler):
             sample_args["mask"] = get_mask(wsi, level_idx)
 
-    x_y = [(x, y) for x, y in sampler.sample(**sample_args)]
+    x_y = []
+    for x, y in sampler.sample(**sample_args):
+        x_y.append((x, y))
 
     return cls(x_y, scaled_width, scaled_height, level_idx)
diff --git a/src/eva/vision/data/wsi/patching/samplers.py b/src/eva/vision/data/wsi/patching/samplers.py
index f96b4a8e..ed5f5966 100644
--- a/src/eva/vision/data/wsi/patching/samplers.py
+++ b/src/eva/vision/data/wsi/patching/samplers.py
@@ -15,35 +15,36 @@ def sample(
         self,
         width: int,
         height: int,
-        layer_shape: tuple[int, int],
-        *args,
+        layer_shape: Tuple[int, int],
+        mask: Tuple[np.ndarray, float] | None = None,
     ) -> Generator[Tuple[int, int], None, None]:
-        """Iterator that samples patches."""
+        """Sample patch coordinates.
+
+        Args:
+            width: The width of the patches.
+            height: The height of the patches.
+            layer_shape: The shape of the layer.
+            mask: Tuple containing the mask array and the scaling factor with respect to the
+                provided layer_shape. Optional, only required for samplers with foreground
+                filtering.
+
+        Returns:
+            A generator producing sampled patch coordinates.
+        """
 
 
 class ForegroundSampler(Sampler):
     """Base class for samplers with foreground filtering capabilities."""
 
-    @abc.abstractmethod
-    def sample(
-        self,
-        width: int,
-        height: int,
-        layer_shape: tuple[int, int],
-        mask: tuple[np.ndarray, float],
-        *args,
-    ) -> Generator[Tuple[int, int], None, None]:
-        """Iterator that samples patches."""
-
     @abc.abstractmethod
     def is_foreground(
         self,
-        mask: tuple[np.ndarray, float],
+        mask: Tuple[np.ndarray, float],
         x: int,
         y: int,
         width: int,
         height: int,
-        min_foreground_ratio=0.35,
+        min_foreground_ratio: float,
     ) -> bool:
         """Check if a patch contains sufficient foreground."""
@@ -65,7 +66,7 @@ def sample(
         self,
         width: int,
         height: int,
-        layer_shape: tuple[int, int],
+        layer_shape: Tuple[int, int],
     ) -> Generator[Tuple[int, int], None, None]:
         """Sample random patches.
@@ -94,7 +95,7 @@ def __init__(
         self,
         max_samples: int | None = None,
-        overlap: tuple[int, int] = (0, 0),
+        overlap: Tuple[int, int] = (0, 0),
         seed: int = 42,
     ):
         """Initializes the sampler."""
@@ -106,7 +107,7 @@ def sample(
         self,
         width: int,
         height: int,
-        layer_shape: tuple[int, int],
+        layer_shape: Tuple[int, int],
     ) -> Generator[Tuple[int, int], None, None]:
         """Sample patches from a grid.
@@ -126,25 +127,30 @@ class ForegroundGridSampler(ForegroundSampler):
 
     Args:
         max_samples: The maximum number of samples to return.
+        overlap: The overlap between patches in the grid.
+        min_foreground_ratio: The minimum amount of foreground within a sampled patch.
+        seed: The random seed.
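
Instantiated directly, the sampler configured in panda.yaml corresponds to the following (a sketch; the keyword values mirror that config and the defaults introduced in this patch):

    from eva.vision.data.wsi.patching import samplers

    sampler = samplers.ForegroundGridSampler(
        max_samples=100,            # as in the panda.yaml config
        overlap=(0, 0),
        min_foreground_ratio=0.35,  # default per the Args listed above
        seed=42,
    )

Compared to GridSampler, the same grid is traversed, but patches failing the foreground test are skipped until max_samples coordinates have been collected.
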
""" def __init__( self, max_samples: int = 20, - overlap: tuple[int, int] = (0, 0), + overlap: Tuple[int, int] = (0, 0), + min_foreground_ratio: float = 0.35, seed: int = 42, ): """Initializes the sampler.""" self.max_samples = max_samples self.overlap = overlap + self.min_foreground_ratio = min_foreground_ratio self.seed = seed def sample( self, width: int, height: int, - layer_shape: tuple[int, int], - mask: tuple[np.ndarray, float], + layer_shape: Tuple[int, int], + mask: Tuple[np.ndarray, float], ): """Sample patches from a grid containing foreground. @@ -153,7 +159,6 @@ def sample( height: The height of the patches. layer_shape: The shape of the layer. mask: The mask of the image. - mask_scale_factor: The scale factor of the mask. """ x_y, indices = _get_grid_coords_and_indices(layer_shape, width, height, self.overlap) @@ -161,18 +166,20 @@ def sample( for i in indices: if count >= self.max_samples: break - if self.is_foreground(mask, x_y[i][0], x_y[i][1], width, height): + if self.is_foreground( + mask, x_y[i][0], x_y[i][1], width, height, self.min_foreground_ratio + ): count += 1 yield x_y[i] def is_foreground( self, - mask: tuple[np.ndarray, float], + mask: Tuple[np.ndarray, float], x: int, y: int, width: int, height: int, - min_foreground_ratio=0.35, + min_foreground_ratio: float, ) -> bool: """Check if a patch contains sufficient foreground. @@ -182,23 +189,23 @@ def is_foreground( y: The y-coordinate of the patch. width: The width of the patch. height: The height of the patch. - mask_scale_factor: The scale factor of the mask. min_foreground_ratio: The minimum amount of foreground in the patch. """ mask_array, mask_scale_factor = mask - x_, y_, width_, height_ = self.scale_coords(mask_scale_factor, x, y, width, height) + x_, y_, width_, height_ = self._scale_coords(mask_scale_factor, x, y, width, height) patch_mask = mask_array[y_ : y_ + height_, x_ : x_ + width_] + # TODO: look into warning "RuntimeWarning: invalid value encountered in divide" return patch_mask.sum() / patch_mask.size > min_foreground_ratio - def scale_coords(self, scale_factor, *coords): + def _scale_coords(self, scale_factor, *coords): return tuple(int(coord * scale_factor) for coord in coords) def _get_grid_coords_and_indices( - layer_shape: tuple[int, int], + layer_shape: Tuple[int, int], width: int, height: int, - overlap: tuple[int, int], + overlap: Tuple[int, int], shuffle: bool = True, seed: int = 42, ): diff --git a/src/eva/vision/utils/mask.py b/src/eva/vision/utils/mask.py index 16d15568..4f1cd023 100644 --- a/src/eva/vision/utils/mask.py +++ b/src/eva/vision/utils/mask.py @@ -1,3 +1,7 @@ +"""Functions for extracting foreground masks.""" + +from typing import Tuple + import cv2 import numpy as np @@ -7,10 +11,10 @@ def get_mask( wsi: Wsi, level_idx: int, - kernel_size: tuple[int, int] = (7, 7), + kernel_size: Tuple[int, int] = (7, 7), gray_threshold: int = 220, fill_holes: bool = False, -) -> tuple[np.ndarray, float]: +) -> Tuple[np.ndarray, float]: """Extracts a binary mask from an image. Args: @@ -20,19 +24,17 @@ def get_mask( gray_threshold: The threshold for the gray scale image. fill_holes: Whether to fill holes in the mask. 
""" - image = np.array( - wsi.read_region([0, 0], len(wsi.level_dimensions) - 1, wsi.level_dimensions[-1]) - ) + image = wsi.read_region((0, 0), len(wsi.level_dimensions) - 1, wsi.level_dimensions[-1]) kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernel_size) - gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) + gray = np.array(cv2.cvtColor(image, cv2.COLOR_RGB2GRAY), dtype=np.uint8) mask = np.where(gray < gray_threshold, 1, 0).astype(np.uint8) if fill_holes: mask = cv2.dilate(mask, kernel, iterations=1) contour, _ = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE) for cnt in contour: - cv2.drawContours(mask, [cnt], 0, 1, -1) + cv2.drawContours(mask, [cnt], 0, (1,), -1) mask_scale_factor = wsi.level_dimensions[-1][0] / wsi.level_dimensions[level_idx][0] diff --git a/tests/eva/vision/models/networks/test_abmil.py b/tests/eva/vision/models/networks/test_abmil.py index 7ca80a02..b89fb01d 100644 --- a/tests/eva/vision/models/networks/test_abmil.py +++ b/tests/eva/vision/models/networks/test_abmil.py @@ -1,6 +1,7 @@ """ABMIL network tests.""" import itertools +from typing import Tuple import pytest import torch @@ -15,7 +16,7 @@ def test_masked_abmil( input_size: int, output_size: int, - hidden_sizes_mlp: tuple[int], + hidden_sizes_mlp: Tuple[int], batch_size: int, n_instances: int, masked_fraction: float, From d504b2c1986c4b4ab41c1fa142cd96fb5003a30f Mon Sep 17 00:00:00 2001 From: Nicolas Kaenzig Date: Mon, 6 May 2024 16:37:42 +0200 Subject: [PATCH 09/29] added openslide-python to all dependencies --- pdm.lock | 10 +++++----- pyproject.toml | 1 + 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/pdm.lock b/pdm.lock index 5e5a89f4..5b0fb979 100644 --- a/pdm.lock +++ b/pdm.lock @@ -5,7 +5,7 @@ groups = ["default", "dev", "docs", "all", "typecheck", "lint", "vision", "test"] strategy = ["cross_platform", "inherit_metadata"] lock_version = "4.4.1" -content_hash = "sha256:01893abe4eedac47e0dc55b1097b0dd80675e09772227fd20bb46e4287760674" +content_hash = "sha256:f1e852d1f3aa89e7061fc859e3446b3abfc3bbd019323fa885dc81a6b5cd2659" [[package]] name = "absl-py" @@ -760,7 +760,7 @@ files = [ [[package]] name = "lightning" -version = "2.2.1" +version = "2.2.4" requires_python = ">=3.8" summary = "The Deep Learning framework to train, deploy, and ship AI products Lightning fast." 
groups = ["default"] @@ -777,8 +777,8 @@ dependencies = [ "typing-extensions<6.0,>=4.4.0", ] files = [ - {file = "lightning-2.2.1-py3-none-any.whl", hash = "sha256:fec9b49d29a6019e8fe49e825082bab8d5ea3fde8e4b36dcf5c8896c2bdb86c3"}, - {file = "lightning-2.2.1.tar.gz", hash = "sha256:b3e46d596b32cafd1fb9b21fdba1b1767df97b1af5cc702693d1c51df60b19aa"}, + {file = "lightning-2.2.4-py3-none-any.whl", hash = "sha256:b44cb8692253f2719b2f84237e94ff84451fe219922c7f04447b52524471379e"}, + {file = "lightning-2.2.4.tar.gz", hash = "sha256:4cc3fb3edf04fcd63c0ecf75087d2fa06163759fc8c1fc500b16404ac1854f77"}, ] [[package]] @@ -1520,7 +1520,7 @@ name = "openslide-python" version = "1.3.1" requires_python = ">=3.8" summary = "Python interface to OpenSlide" -groups = ["vision"] +groups = ["all", "vision"] dependencies = [ "Pillow", ] diff --git a/pyproject.toml b/pyproject.toml index 6e722e83..d60cf684 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,6 +69,7 @@ all = [ "opencv-python-headless>=4.9.0.80", "timm>=0.9.12", "torchvision>=0.17.0", + "openslide-python>=1.3.1", ] [project.scripts] From ecaa5eeacdf7dc91347c800ea8de8f6e931e71c5 Mon Sep 17 00:00:00 2001 From: roman807 Date: Mon, 6 May 2024 16:50:37 +0200 Subject: [PATCH 10/29] Fix input batch class name (#414) --- src/eva/core/models/modules/head.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/eva/core/models/modules/head.py b/src/eva/core/models/modules/head.py index 903a8100..8e09bc55 100644 --- a/src/eva/core/models/modules/head.py +++ b/src/eva/core/models/modules/head.py @@ -11,7 +11,7 @@ from eva.core.metrics import structs as metrics_lib from eva.core.models.modules import module -from eva.core.models.modules.typings import INPUT_BATCH, MODEL_TYPE +from eva.core.models.modules.typings import DATA_SAMPLE, MODEL_TYPE from eva.core.models.modules.utils import batch_postprocess, grad @@ -72,23 +72,23 @@ def forward(self, tensor: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tens return self.head(features).squeeze(-1) @override - def training_step(self, batch: INPUT_BATCH, *args: Any, **kwargs: Any) -> STEP_OUTPUT: + def training_step(self, batch: DATA_SAMPLE, *args: Any, **kwargs: Any) -> STEP_OUTPUT: return self._batch_step(batch) @override - def validation_step(self, batch: INPUT_BATCH, *args: Any, **kwargs: Any) -> STEP_OUTPUT: + def validation_step(self, batch: DATA_SAMPLE, *args: Any, **kwargs: Any) -> STEP_OUTPUT: return self._batch_step(batch) @override - def test_step(self, batch: INPUT_BATCH, *args: Any, **kwargs: Any) -> STEP_OUTPUT: + def test_step(self, batch: DATA_SAMPLE, *args: Any, **kwargs: Any) -> STEP_OUTPUT: return self._batch_step(batch) @override - def predict_step(self, batch: INPUT_BATCH, *args: Any, **kwargs: Any) -> torch.Tensor: - tensor = INPUT_BATCH(*batch).data + def predict_step(self, batch: DATA_SAMPLE, *args: Any, **kwargs: Any) -> torch.Tensor: + tensor = DATA_SAMPLE(*batch).data return tensor if self.backbone is None else self.backbone(tensor) - def _batch_step(self, batch: INPUT_BATCH) -> STEP_OUTPUT: + def _batch_step(self, batch: DATA_SAMPLE) -> STEP_OUTPUT: """Performs a model forward step and calculates the loss. Args: @@ -97,7 +97,7 @@ def _batch_step(self, batch: INPUT_BATCH) -> STEP_OUTPUT: Returns: The batch step output. 
""" - data, targets, metadata = INPUT_BATCH(*batch) + data, targets, metadata = DATA_SAMPLE(*batch) predictions = self(data) loss = self.criterion(predictions, targets) return { @@ -105,4 +105,4 @@ def _batch_step(self, batch: INPUT_BATCH) -> STEP_OUTPUT: "targets": targets, "predictions": predictions, "metadata": metadata, - } \ No newline at end of file + } From 15874f542858da2ce7b2e511f3c7f71dcaa1328d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Tue, 7 May 2024 08:44:39 +0200 Subject: [PATCH 11/29] Add lower bound for wsi resolution level during mask generation (#412) --- src/eva/vision/data/wsi/backends/base.py | 9 +- src/eva/vision/data/wsi/backends/openslide.py | 27 +++-- .../vision/data/wsi/patching/coordinates.py | 38 +++---- src/eva/vision/data/wsi/patching/mask.py | 98 +++++++++++++++++++ src/eva/vision/data/wsi/patching/samplers.py | 21 ++-- src/eva/vision/utils/mask.py | 41 -------- 6 files changed, 155 insertions(+), 79 deletions(-) create mode 100644 src/eva/vision/data/wsi/patching/mask.py delete mode 100644 src/eva/vision/utils/mask.py diff --git a/src/eva/vision/data/wsi/backends/base.py b/src/eva/vision/data/wsi/backends/base.py index 66c6f323..830a213b 100644 --- a/src/eva/vision/data/wsi/backends/base.py +++ b/src/eva/vision/data/wsi/backends/base.py @@ -38,7 +38,7 @@ def level_downsamples(self) -> Sequence[float]: @property @abc.abstractmethod def mpp(self) -> float: - """Microns per pixel at the highest resolution.""" + """Microns per pixel at the highest resolution (level 0).""" @abc.abstractmethod def read_region( @@ -47,9 +47,10 @@ def read_region( """Reads and returns image data for a specified region and zoom level. Args: - location: Top-left corner (x, y) to start reading. - size: Region size as (width, height), relative to . - level: Zoom level, with 0 being the highest resolution. + location: Top-left corner (x, y) to start reading at level 0. + level: WSI level to read from. + size: Region size as (width, height) in pixels at the selected read level. + Remember to scale the size correctly. 
""" def get_closest_level(self, target_mpp: float) -> int: diff --git a/src/eva/vision/data/wsi/backends/openslide.py b/src/eva/vision/data/wsi/backends/openslide.py index dbfb4eea..4173b8cf 100644 --- a/src/eva/vision/data/wsi/backends/openslide.py +++ b/src/eva/vision/data/wsi/backends/openslide.py @@ -12,11 +12,11 @@ class WsiOpenslide(base.Wsi): """Class for loading data from WSI files using the OpenSlide library.""" - _wsi: openslide.OpenSlide | openslide.ImageSlide + _wsi: openslide.OpenSlide @override - def open_file(self, file_path: str) -> openslide.OpenSlide | openslide.ImageSlide: - return openslide.open_slide(file_path) + def open_file(self, file_path: str) -> openslide.OpenSlide: + return openslide.OpenSlide(file_path) @property @override @@ -40,8 +40,21 @@ def mpp(self) -> float: def read_region( self, location: Tuple[int, int], level: int, size: Tuple[int, int] ) -> np.ndarray: - x_max, y_max = self._wsi.level_dimensions[level] - if location[0] + size[0] > x_max or location[1] + size[1] > y_max: + x_max, y_max = self.level_dimensions[0] + + x_scale = x_max / self._wsi.level_dimensions[level][0] + y_scale = y_max / self._wsi.level_dimensions[level][1] + + if ( + int(location[0] + x_scale * size[0]) > x_max + or int(location[1] + y_scale * size[1]) > y_max + ): raise ValueError(f"Out of bounds region: {location}, {size}, {level}") - data = self._wsi.read_region(location, level, size) - return np.array(data.convert("RGB")) + + data = np.array(self._wsi.read_region(location, level, size)) + + if data.shape[2] == 4: + # Change color to white where the alpha channel is 0 + data[data[:, :, 3] == 0] = 255 + + return data[:, :, :3] diff --git a/src/eva/vision/data/wsi/patching/coordinates.py b/src/eva/vision/data/wsi/patching/coordinates.py index f38344b9..0600db98 100644 --- a/src/eva/vision/data/wsi/patching/coordinates.py +++ b/src/eva/vision/data/wsi/patching/coordinates.py @@ -6,7 +6,7 @@ from eva.vision.data.wsi import backends from eva.vision.data.wsi.patching import samplers -from eva.vision.utils.mask import get_mask +from eva.vision.data.wsi.patching.mask import Mask, get_mask, get_mask_level LRU_CACHE_SIZE = 32 @@ -16,16 +16,18 @@ class PatchCoordinates: """A class to store coordinates of patches from a whole-slide image. Args: - x_y: A list of (x, y) coordinates of the patches. - width: The width of the patches, in pixels (refers to x-dim). - height: The height of the patches, in pixels (refers to y-dim). - level_idx: The level index of the patches. + x_y: A list of (x, y) coordinates of the patches (refer to level 0). + width: The width of the patches, in pixels (refers to level_idx). + height: The height of the patches, in pixels (refers to level_idx). + level_idx: The level index at which to extract the patches. + mask: The foreground mask of the wsi. """ x_y: List[Tuple[int, int]] width: int height: int level_idx: int + mask: Mask | None = None @classmethod def from_file( @@ -50,24 +52,26 @@ def from_file( backend: The backend to use for reading the whole-slide images. 
""" wsi = backends.wsi_backend(backend)(wsi_path) - level_idx = wsi.get_closest_level(target_mpp) - level_mpp = wsi.mpp * wsi.level_downsamples[level_idx] - mpp_ratio = target_mpp / level_mpp - scaled_width, scaled_height = int(mpp_ratio * width), int(mpp_ratio * height) + # Sample patch coordinates at level 0 + mpp_ratio_0 = target_mpp / wsi.mpp sample_args = { - "width": scaled_width, - "height": scaled_height, - "layer_shape": wsi.level_dimensions[level_idx], + "width": int(mpp_ratio_0 * width), + "height": int(mpp_ratio_0 * height), + "layer_shape": wsi.level_dimensions[0], } if isinstance(sampler, samplers.ForegroundSampler): - sample_args["mask"] = get_mask(wsi, level_idx) + mask_level_idx = get_mask_level(wsi, width, height, target_mpp) + sample_args["mask"] = get_mask(wsi, mask_level_idx) + + x_y = list(sampler.sample(**sample_args)) - x_y = [] - for x, y in sampler.sample(**sample_args): - x_y.append((x, y)) + # Scale dimensions to level that is closest to the target_mpp + level_idx = wsi.get_closest_level(target_mpp) + mpp_ratio = target_mpp / (wsi.mpp * wsi.level_downsamples[level_idx]) + scaled_width, scaled_height = int(mpp_ratio * width), int(mpp_ratio * height) - return cls(x_y, scaled_width, scaled_height, level_idx) + return cls(x_y, scaled_width, scaled_height, level_idx, sample_args.get("mask")) @functools.lru_cache(LRU_CACHE_SIZE) diff --git a/src/eva/vision/data/wsi/patching/mask.py b/src/eva/vision/data/wsi/patching/mask.py new file mode 100644 index 00000000..3dc1d9bb --- /dev/null +++ b/src/eva/vision/data/wsi/patching/mask.py @@ -0,0 +1,98 @@ +"""Functions for extracting foreground masks.""" + +import dataclasses +from typing import Tuple + +import cv2 +import numpy as np + +from eva.vision.data.wsi.backends.base import Wsi + + +@dataclasses.dataclass +class Mask: + """A class to store the mask of a whole-slide image.""" + + mask_array: np.ndarray + """Binary mask array where 1s represent the foreground and 0s represent the background.""" + + mask_level_idx: int + """WSI level index at which the mask_array was extracted.""" + + scale_factors: Tuple[float, float] + """Factors to scale x/y coordinates from mask_level_idx to level 0.""" + + +def get_mask( + wsi: Wsi, + mask_level_idx: int, + kernel_size: Tuple[int, int] = (7, 7), + gray_threshold: int = 220, + fill_holes: bool = False, +) -> Mask: + """Extracts a binary mask from an image. + + Args: + wsi: The WSI object. + mask_level_idx: The level index of the WSI at which we want to extract the mask. + kernel_size: The size of the kernel for morphological operations. + gray_threshold: The threshold for the gray scale image. + fill_holes: Whether to fill holes in the mask. 
+    """
+    image = wsi.read_region((0, 0), mask_level_idx, wsi.level_dimensions[mask_level_idx])
+
+    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernel_size)
+    gray = np.array(cv2.cvtColor(image, cv2.COLOR_RGB2GRAY), dtype=np.uint8)
+    mask_array = np.where(gray < gray_threshold, 1, 0).astype(np.uint8)
+
+    if fill_holes:
+        mask_array = cv2.dilate(mask_array, kernel, iterations=1)
+        contour, _ = cv2.findContours(mask_array, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
+        for cnt in contour:
+            cv2.drawContours(mask_array, [cnt], 0, (1,), -1)
+
+    scale_factors = (
+        wsi.level_dimensions[0][0] / wsi.level_dimensions[mask_level_idx][0],
+        wsi.level_dimensions[0][1] / wsi.level_dimensions[mask_level_idx][1],
+    )
+
+    return Mask(mask_array=mask_array, mask_level_idx=mask_level_idx, scale_factors=scale_factors)
+
+
+def get_mask_level(
+    wsi: Wsi,
+    width: int,
+    height: int,
+    target_mpp: float,
+    min_mask_patch_pixels: int = 3 * 3,
+) -> int:
+    """For performance reasons, we generate the mask at the lowest resolution level possible.
+
+    However, if the minimum resolution level has too few pixels, the patches scaled to that
+    level will be too small or even collapse to a single pixel. This function finds the lowest
+    resolution level that still yields mask patches with at least `min_mask_patch_pixels` pixels.
+    For example, with a `wsi.mpp` of 0.25, level downsamples of (1, 4, 16, 64), a `target_mpp`
+    of 0.5 and 224 x 224 patches, the lowest resolution level yields mask patches of
+    7 x 7 = 49 pixels, which passes the default threshold of 3 x 3 = 9.
+
+    Args:
+        wsi: The WSI object.
+        width: The width of the patches to be extracted, in pixels (at target_mpp).
+        height: The height of the patches to be extracted, in pixels (at target_mpp).
+        target_mpp: The target microns per pixel (mpp) for the patches.
+        min_mask_patch_pixels: The minimum number of pixels required for the mask patches.
+            Mask patch refers to width / height at target_mpp scaled down to the WSI level
+            at which the mask is generated.
+    """
+    level_mpps = wsi.mpp * np.array(wsi.level_downsamples)
+    mask_level_idx = None
+
+    for level_idx, level_mpp in reversed(list(enumerate(level_mpps))):
+        mpp_ratio = target_mpp / level_mpp
+        scaled_width, scaled_height = int(mpp_ratio * width), int(mpp_ratio * height)
+
+        if scaled_width * scaled_height >= min_mask_patch_pixels:
+            mask_level_idx = level_idx
+            break
+
+    if mask_level_idx is None:
+        raise ValueError("No level with the specified minimum number of patch pixels available.")
+
+    return mask_level_idx
diff --git a/src/eva/vision/data/wsi/patching/samplers.py b/src/eva/vision/data/wsi/patching/samplers.py
index ed5f5966..58df27fd 100644
--- a/src/eva/vision/data/wsi/patching/samplers.py
+++ b/src/eva/vision/data/wsi/patching/samplers.py
@@ -6,6 +6,8 @@
 
 import numpy as np
 
+from eva.vision.data.wsi.patching.mask import Mask
+
 
 class Sampler(abc.ABC):
     """Base class for samplers."""
@@ -16,7 +18,7 @@ def sample(
         width: int,
         height: int,
         layer_shape: Tuple[int, int],
-        mask: Tuple[np.ndarray, float] | None = None,
+        mask: Mask | None = None,
     ) -> Generator[Tuple[int, int], None, None]:
         """Sample patche coordinates.
 
@@ -39,7 +41,7 @@ class ForegroundSampler(Sampler):
     @abc.abstractmethod
     def is_foreground(
         self,
-        mask: Tuple[np.ndarray, float],
+        mask: Mask,
         x: int,
         y: int,
         width: int,
@@ -150,7 +152,7 @@ def sample(
         width: int,
         height: int,
         layer_shape: Tuple[int, int],
-        mask: Tuple[np.ndarray, float],
+        mask: Mask,
     ):
         """Sample patches from a grid containing foreground.
 
@@ -174,7 +176,7 @@ def sample(
 
     def is_foreground(
         self,
-        mask: Tuple[np.ndarray, float],
+        mask: Mask,
         x: int,
         y: int,
         width: int,
@@ -191,14 +193,13 @@ def is_foreground(
             height: The height of the patch.
             min_foreground_ratio: The minimum amount of foreground in the patch. 
""" - mask_array, mask_scale_factor = mask - x_, y_, width_, height_ = self._scale_coords(mask_scale_factor, x, y, width, height) - patch_mask = mask_array[y_ : y_ + height_, x_ : x_ + width_] - # TODO: look into warning "RuntimeWarning: invalid value encountered in divide" + x_, y_ = self._scale_coords(x, y, mask.scale_factors) + width_, height_ = self._scale_coords(width, height, mask.scale_factors) + patch_mask = mask.mask_array[y_ : y_ + height_, x_ : x_ + width_] return patch_mask.sum() / patch_mask.size > min_foreground_ratio - def _scale_coords(self, scale_factor, *coords): - return tuple(int(coord * scale_factor) for coord in coords) + def _scale_coords(self, x: int, y: int, scale_factors: Tuple[float, float]) -> Tuple[int, int]: + return int(x / scale_factors[0]), int(y / scale_factors[1]) def _get_grid_coords_and_indices( diff --git a/src/eva/vision/utils/mask.py b/src/eva/vision/utils/mask.py deleted file mode 100644 index 4f1cd023..00000000 --- a/src/eva/vision/utils/mask.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Functions for extracting foreground masks.""" - -from typing import Tuple - -import cv2 -import numpy as np - -from eva.vision.data.wsi.backends.base import Wsi - - -def get_mask( - wsi: Wsi, - level_idx: int, - kernel_size: Tuple[int, int] = (7, 7), - gray_threshold: int = 220, - fill_holes: bool = False, -) -> Tuple[np.ndarray, float]: - """Extracts a binary mask from an image. - - Args: - wsi: The WSI object. - level_idx: The level index to extract the mask from. - kernel_size: The size of the kernel for morphological operations. - gray_threshold: The threshold for the gray scale image. - fill_holes: Whether to fill holes in the mask. - """ - image = wsi.read_region((0, 0), len(wsi.level_dimensions) - 1, wsi.level_dimensions[-1]) - - kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernel_size) - gray = np.array(cv2.cvtColor(image, cv2.COLOR_RGB2GRAY), dtype=np.uint8) - mask = np.where(gray < gray_threshold, 1, 0).astype(np.uint8) - - if fill_holes: - mask = cv2.dilate(mask, kernel, iterations=1) - contour, _ = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE) - for cnt in contour: - cv2.drawContours(mask, [cnt], 0, (1,), -1) - - mask_scale_factor = wsi.level_dimensions[-1][0] / wsi.level_dimensions[level_idx][0] - - return mask, mask_scale_factor From b6c5f52894a6b9c046fc75f94a78abe542322ae6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Tue, 7 May 2024 15:26:48 +0200 Subject: [PATCH 12/29] Move sampler logic to `samplers` module and add unit tests (#420) --- src/eva/vision/data/wsi/patching/samplers.py | 236 ------------------ .../data/wsi/patching/samplers/__init__.py | 8 + .../data/wsi/patching/samplers/_utils.py | 50 ++++ .../vision/data/wsi/patching/samplers/base.py | 48 ++++ .../wsi/patching/samplers/foreground_grid.py | 87 +++++++ .../vision/data/wsi/patching/samplers/grid.py | 47 ++++ .../data/wsi/patching/samplers/random.py | 41 +++ tests/eva/vision/data/wsi/__init__.py | 1 + .../eva/vision/data/wsi/patching/__init__.py | 1 + .../data/wsi/patching/samplers/__init__.py | 1 + .../patching/samplers/test_foreground_grid.py | 93 +++++++ .../data/wsi/patching/samplers/test_grid.py | 69 +++++ .../data/wsi/patching/samplers/test_random.py | 48 ++++ 13 files changed, 494 insertions(+), 236 deletions(-) delete mode 100644 src/eva/vision/data/wsi/patching/samplers.py create mode 100644 src/eva/vision/data/wsi/patching/samplers/__init__.py create mode 100644 
src/eva/vision/data/wsi/patching/samplers/_utils.py create mode 100644 src/eva/vision/data/wsi/patching/samplers/base.py create mode 100644 src/eva/vision/data/wsi/patching/samplers/foreground_grid.py create mode 100644 src/eva/vision/data/wsi/patching/samplers/grid.py create mode 100644 src/eva/vision/data/wsi/patching/samplers/random.py create mode 100644 tests/eva/vision/data/wsi/__init__.py create mode 100644 tests/eva/vision/data/wsi/patching/__init__.py create mode 100644 tests/eva/vision/data/wsi/patching/samplers/__init__.py create mode 100644 tests/eva/vision/data/wsi/patching/samplers/test_foreground_grid.py create mode 100644 tests/eva/vision/data/wsi/patching/samplers/test_grid.py create mode 100644 tests/eva/vision/data/wsi/patching/samplers/test_random.py diff --git a/src/eva/vision/data/wsi/patching/samplers.py b/src/eva/vision/data/wsi/patching/samplers.py deleted file mode 100644 index 58df27fd..00000000 --- a/src/eva/vision/data/wsi/patching/samplers.py +++ /dev/null @@ -1,236 +0,0 @@ -"""Samplers for WSI patch extraction.""" - -import abc -import random -from typing import Generator, Tuple - -import numpy as np - -from eva.vision.data.wsi.patching.mask import Mask - - -class Sampler(abc.ABC): - """Base class for samplers.""" - - @abc.abstractmethod - def sample( - self, - width: int, - height: int, - layer_shape: Tuple[int, int], - mask: Mask | None = None, - ) -> Generator[Tuple[int, int], None, None]: - """Sample patche coordinates. - - Args: - width: The width of the patches. - height: The height of the patches. - layer_shape: The shape of the layer. - mask: Tuple containing the mask array and the scaling factor with respect to the - provided layer_shape. Optional, only required for samplers with foreground - filtering. - - Returns: - A generator producing sampled patch coordinates. - """ - - -class ForegroundSampler(Sampler): - """Base class for samplers with foreground filtering capabilities.""" - - @abc.abstractmethod - def is_foreground( - self, - mask: Mask, - x: int, - y: int, - width: int, - height: int, - min_foreground_ratio: float, - ) -> bool: - """Check if a patch contains sufficient foreground.""" - - -class RandomSampler(Sampler): - """Sample patch coordinates randomly. - - Args: - n_samples: The number of samples to return. - seed: The random seed. - """ - - def __init__(self, n_samples: int = 1, seed: int = 42): - """Initializes the sampler.""" - self.seed = seed - self.n_samples = n_samples - - def sample( - self, - width: int, - height: int, - layer_shape: Tuple[int, int], - ) -> Generator[Tuple[int, int], None, None]: - """Sample random patches. - - Args: - width: The width of the patches. - height: The height of the patches. - layer_shape: The shape of the layer. - """ - _set_seed(self.seed) - - for _ in range(self.n_samples): - x_max, y_max = layer_shape[0], layer_shape[1] - x, y = random.randint(0, x_max - width), random.randint(0, y_max - height) # nosec - yield x, y - - -class GridSampler(Sampler): - """Sample patches based on a grid. - - Args: - max_samples: The maximum number of samples to return. - overlap: The overlap between patches in the grid. - seed: The random seed. 
- """ - - def __init__( - self, - max_samples: int | None = None, - overlap: Tuple[int, int] = (0, 0), - seed: int = 42, - ): - """Initializes the sampler.""" - self.max_samples = max_samples - self.overlap = overlap - self.seed = seed - - def sample( - self, - width: int, - height: int, - layer_shape: Tuple[int, int], - ) -> Generator[Tuple[int, int], None, None]: - """Sample patches from a grid. - - Args: - width: The width of the patches. - height: The height of the patches. - layer_shape: The shape of the layer. - """ - x_y, indices = _get_grid_coords_and_indices(layer_shape, width, height, self.overlap) - max_samples = len(indices) if self.max_samples is None else self.max_samples - for i in indices[:max_samples]: - yield x_y[i] - - -class ForegroundGridSampler(ForegroundSampler): - """Sample patches based on a grid, only returning patches containing foreground. - - Args: - max_samples: The maximum number of samples to return. - overlap: The overlap between patches in the grid. - min_foreground_ratio: The minimum amount of foreground within a sampled patch. - seed: The random seed. - """ - - def __init__( - self, - max_samples: int = 20, - overlap: Tuple[int, int] = (0, 0), - min_foreground_ratio: float = 0.35, - seed: int = 42, - ): - """Initializes the sampler.""" - self.max_samples = max_samples - self.overlap = overlap - self.min_foreground_ratio = min_foreground_ratio - self.seed = seed - - def sample( - self, - width: int, - height: int, - layer_shape: Tuple[int, int], - mask: Mask, - ): - """Sample patches from a grid containing foreground. - - Args: - width: The width of the patches. - height: The height of the patches. - layer_shape: The shape of the layer. - mask: The mask of the image. - """ - x_y, indices = _get_grid_coords_and_indices(layer_shape, width, height, self.overlap) - - count = 0 - for i in indices: - if count >= self.max_samples: - break - if self.is_foreground( - mask, x_y[i][0], x_y[i][1], width, height, self.min_foreground_ratio - ): - count += 1 - yield x_y[i] - - def is_foreground( - self, - mask: Mask, - x: int, - y: int, - width: int, - height: int, - min_foreground_ratio: float, - ) -> bool: - """Check if a patch contains sufficient foreground. - - Args: - mask: The mask of the image. - x: The x-coordinate of the patch. - y: The y-coordinate of the patch. - width: The width of the patch. - height: The height of the patch. - min_foreground_ratio: The minimum amount of foreground in the patch. - """ - x_, y_ = self._scale_coords(x, y, mask.scale_factors) - width_, height_ = self._scale_coords(width, height, mask.scale_factors) - patch_mask = mask.mask_array[y_ : y_ + height_, x_ : x_ + width_] - return patch_mask.sum() / patch_mask.size > min_foreground_ratio - - def _scale_coords(self, x: int, y: int, scale_factors: Tuple[float, float]) -> Tuple[int, int]: - return int(x / scale_factors[0]), int(y / scale_factors[1]) - - -def _get_grid_coords_and_indices( - layer_shape: Tuple[int, int], - width: int, - height: int, - overlap: Tuple[int, int], - shuffle: bool = True, - seed: int = 42, -): - """Get grid coordinates and indices. - - Args: - layer_shape: The shape of the layer. - width: The width of the patches. - height: The height of the patches. - overlap: The overlap between patches in the grid. - shuffle: Whether to shuffle the indices. - seed: The random seed. 
-    """
-    x_range = range(0, layer_shape[0] - width, width - overlap[0])
-    y_range = range(0, layer_shape[1] - height, height - overlap[1])
-    x_y = [(x, y) for x in x_range for y in y_range]
-
-    indices = list(range(len(x_y)))
-    if shuffle:
-        _set_seed(seed)
-        np.random.shuffle(indices)
-    return x_y, indices
-
-
-def _set_seed(seed: int) -> None:
-    random.seed(seed)
-    np.random.seed(seed)
diff --git a/src/eva/vision/data/wsi/patching/samplers/__init__.py b/src/eva/vision/data/wsi/patching/samplers/__init__.py
new file mode 100644
index 00000000..49860968
--- /dev/null
+++ b/src/eva/vision/data/wsi/patching/samplers/__init__.py
@@ -0,0 +1,8 @@
+"""Patch Sampler API."""
+
+from eva.vision.data.wsi.patching.samplers.base import ForegroundSampler, Sampler
+from eva.vision.data.wsi.patching.samplers.foreground_grid import ForegroundGridSampler
+from eva.vision.data.wsi.patching.samplers.grid import GridSampler
+from eva.vision.data.wsi.patching.samplers.random import RandomSampler
+
+__all__ = ["Sampler", "ForegroundSampler", "RandomSampler", "GridSampler", "ForegroundGridSampler"]
diff --git a/src/eva/vision/data/wsi/patching/samplers/_utils.py b/src/eva/vision/data/wsi/patching/samplers/_utils.py
new file mode 100644
index 00000000..af8418df
--- /dev/null
+++ b/src/eva/vision/data/wsi/patching/samplers/_utils.py
@@ -0,0 +1,50 @@
+import random
+from typing import Tuple
+
+import numpy as np
+
+
+def set_seed(seed: int) -> None:
+    random.seed(seed)
+    np.random.seed(seed)
+
+
+def get_grid_coords_and_indices(
+    layer_shape: Tuple[int, int],
+    width: int,
+    height: int,
+    overlap: Tuple[int, int],
+    shuffle: bool = True,
+    seed: int = 42,
+):
+    """Get grid coordinates and indices.
+
+    Args:
+        layer_shape: The shape of the layer.
+        width: The width of the patches.
+        height: The height of the patches.
+        overlap: The overlap between patches in the grid.
+        shuffle: Whether to shuffle the indices.
+        seed: The random seed.
+    """
+    x_range = range(0, layer_shape[0] - width + 1, width - overlap[0])
+    y_range = range(0, layer_shape[1] - height + 1, height - overlap[1])
+    x_y = [(x, y) for x in x_range for y in y_range]
+
+    indices = list(range(len(x_y)))
+    if shuffle:
+        set_seed(seed)
+        np.random.shuffle(indices)
+    return x_y, indices
+
+
+def validate_dimensions(width: int, height: int, layer_shape: Tuple[int, int]) -> None:
+    """Checks if the width / height is bigger than the layer shape.
+
+    Args:
+        width: The width of the patches.
+        height: The height of the patches.
+        layer_shape: The shape of the layer.
+    """
+    if width > layer_shape[0] or height > layer_shape[1]:
+        raise ValueError("The width / height cannot be bigger than the layer shape.")
diff --git a/src/eva/vision/data/wsi/patching/samplers/base.py b/src/eva/vision/data/wsi/patching/samplers/base.py
new file mode 100644
index 00000000..fa9a24ac
--- /dev/null
+++ b/src/eva/vision/data/wsi/patching/samplers/base.py
@@ -0,0 +1,48 @@
+"""Base classes for samplers."""
+
+import abc
+from typing import Generator, Tuple
+
+from eva.vision.data.wsi.patching.mask import Mask
+
+
+class Sampler(abc.ABC):
+    """Base class for samplers."""
+
+    @abc.abstractmethod
+    def sample(
+        self,
+        width: int,
+        height: int,
+        layer_shape: Tuple[int, int],
+        mask: Mask | None = None,
+    ) -> Generator[Tuple[int, int], None, None]:
+        """Sample patch coordinates.
+
+        Args:
+            width: The width of the patches.
+            height: The height of the patches.
+            layer_shape: The shape of the layer. 
+            mask: The foreground mask of the WSI, as produced by
+                `eva.vision.data.wsi.patching.mask.get_mask`. Optional, only required
+                for samplers with foreground filtering.
+
+        Returns:
+            A generator producing sampled patch coordinates.
+        """
+
+
+class ForegroundSampler(Sampler):
+    """Base class for samplers with foreground filtering capabilities."""
+
+    @abc.abstractmethod
+    def is_foreground(
+        self,
+        mask: Mask,
+        x: int,
+        y: int,
+        width: int,
+        height: int,
+        min_foreground_ratio: float,
+    ) -> bool:
+        """Check if a patch contains sufficient foreground."""
diff --git a/src/eva/vision/data/wsi/patching/samplers/foreground_grid.py b/src/eva/vision/data/wsi/patching/samplers/foreground_grid.py
new file mode 100644
index 00000000..e062caf5
--- /dev/null
+++ b/src/eva/vision/data/wsi/patching/samplers/foreground_grid.py
@@ -0,0 +1,87 @@
+"""Foreground grid sampler."""
+
+from typing import Tuple
+
+from eva.vision.data.wsi.patching.mask import Mask
+from eva.vision.data.wsi.patching.samplers import _utils, base
+
+
+class ForegroundGridSampler(base.ForegroundSampler):
+    """Sample patches based on a grid, only returning patches containing foreground.
+
+    Args:
+        max_samples: The maximum number of samples to return.
+        overlap: The overlap between patches in the grid.
+        min_foreground_ratio: The minimum amount of foreground within a sampled patch.
+        seed: The random seed.
+    """
+
+    def __init__(
+        self,
+        max_samples: int = 20,
+        overlap: Tuple[int, int] = (0, 0),
+        min_foreground_ratio: float = 0.35,
+        seed: int = 42,
+    ):
+        """Initializes the sampler."""
+        self.max_samples = max_samples
+        self.overlap = overlap
+        self.min_foreground_ratio = min_foreground_ratio
+        self.seed = seed
+
+    def sample(
+        self,
+        width: int,
+        height: int,
+        layer_shape: Tuple[int, int],
+        mask: Mask,
+    ):
+        """Sample patches from a grid containing foreground.
+
+        Args:
+            width: The width of the patches.
+            height: The height of the patches.
+            layer_shape: The shape of the layer.
+            mask: The mask of the image.
+        """
+        _utils.validate_dimensions(width, height, layer_shape)
+        x_y, indices = _utils.get_grid_coords_and_indices(
+            layer_shape, width, height, self.overlap, seed=self.seed
+        )
+
+        count = 0
+        for i in indices:
+            if count >= self.max_samples:
+                break
+            if self.is_foreground(
+                mask, x_y[i][0], x_y[i][1], width, height, self.min_foreground_ratio
+            ):
+                count += 1
+                yield x_y[i]
+
+    def is_foreground(
+        self,
+        mask: Mask,
+        x: int,
+        y: int,
+        width: int,
+        height: int,
+        min_foreground_ratio: float,
+    ) -> bool:
+        """Check if a patch contains sufficient foreground.
+
+        Args:
+            mask: The mask of the image.
+            x: The x-coordinate of the patch.
+            y: The y-coordinate of the patch.
+            width: The width of the patch.
+            height: The height of the patch.
+            min_foreground_ratio: The minimum amount of foreground in the patch. 
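+
+        Example (an illustrative, worked case): with scale_factors of (4.0, 4.0),
+            a patch at x=100, y=200 with width=height=224 maps to x_=25, y_=50 and
+            width_=height_=56, and is checked against mask.mask_array[50:106, 25:81].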
+ """ + x_, y_ = self._scale_coords(x, y, mask.scale_factors) + width_, height_ = self._scale_coords(width, height, mask.scale_factors) + patch_mask = mask.mask_array[y_ : y_ + height_, x_ : x_ + width_] + return patch_mask.sum() / patch_mask.size >= min_foreground_ratio + + def _scale_coords(self, x: int, y: int, scale_factors: Tuple[float, float]) -> Tuple[int, int]: + return int(x / scale_factors[0]), int(y / scale_factors[1]) diff --git a/src/eva/vision/data/wsi/patching/samplers/grid.py b/src/eva/vision/data/wsi/patching/samplers/grid.py new file mode 100644 index 00000000..3f2b0081 --- /dev/null +++ b/src/eva/vision/data/wsi/patching/samplers/grid.py @@ -0,0 +1,47 @@ +"""Grid sampler.""" + +from typing import Generator, Tuple + +from eva.vision.data.wsi.patching.samplers import _utils, base + + +class GridSampler(base.Sampler): + """Sample patches based on a grid. + + Args: + max_samples: The maximum number of samples to return. + overlap: The overlap between patches in the grid. + seed: The random seed. + """ + + def __init__( + self, + max_samples: int | None = None, + overlap: Tuple[int, int] = (0, 0), + seed: int = 42, + ): + """Initializes the sampler.""" + self.max_samples = max_samples + self.overlap = overlap + self.seed = seed + + def sample( + self, + width: int, + height: int, + layer_shape: Tuple[int, int], + ) -> Generator[Tuple[int, int], None, None]: + """Sample patches from a grid. + + Args: + width: The width of the patches. + height: The height of the patches. + layer_shape: The shape of the layer. + """ + _utils.validate_dimensions(width, height, layer_shape) + x_y, indices = _utils.get_grid_coords_and_indices( + layer_shape, width, height, self.overlap, seed=self.seed + ) + max_samples = len(indices) if self.max_samples is None else self.max_samples + for i in indices[:max_samples]: + yield x_y[i] diff --git a/src/eva/vision/data/wsi/patching/samplers/random.py b/src/eva/vision/data/wsi/patching/samplers/random.py new file mode 100644 index 00000000..09ae5729 --- /dev/null +++ b/src/eva/vision/data/wsi/patching/samplers/random.py @@ -0,0 +1,41 @@ +"""Random sampler.""" + +import random +from typing import Generator, Tuple + +from eva.vision.data.wsi.patching.samplers import _utils, base + + +class RandomSampler(base.Sampler): + """Sample patch coordinates randomly. + + Args: + n_samples: The number of samples to return. + seed: The random seed. + """ + + def __init__(self, n_samples: int = 1, seed: int = 42): + """Initializes the sampler.""" + self.seed = seed + self.n_samples = n_samples + + def sample( + self, + width: int, + height: int, + layer_shape: Tuple[int, int], + ) -> Generator[Tuple[int, int], None, None]: + """Sample random patches. + + Args: + width: The width of the patches. + height: The height of the patches. + layer_shape: The shape of the layer. 
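+
+        Example (an illustrative sketch):
+
+            sampler = RandomSampler(n_samples=2, seed=42)
+            coords = list(sampler.sample(width=224, height=224, layer_shape=(10000, 8000)))
+            # two reproducible (x, y) top-left corners within the layer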
+ """ + _utils.validate_dimensions(width, height, layer_shape) + _utils.set_seed(self.seed) + + x_max, y_max = layer_shape[0], layer_shape[1] + for _ in range(self.n_samples): + x, y = random.randint(0, x_max - width), random.randint(0, y_max - height) # nosec + yield x, y diff --git a/tests/eva/vision/data/wsi/__init__.py b/tests/eva/vision/data/wsi/__init__.py new file mode 100644 index 00000000..c3adfdd3 --- /dev/null +++ b/tests/eva/vision/data/wsi/__init__.py @@ -0,0 +1 @@ +"""WSI module tests.""" diff --git a/tests/eva/vision/data/wsi/patching/__init__.py b/tests/eva/vision/data/wsi/patching/__init__.py new file mode 100644 index 00000000..686c6e8d --- /dev/null +++ b/tests/eva/vision/data/wsi/patching/__init__.py @@ -0,0 +1 @@ +"""WSI patch extraction tests.""" diff --git a/tests/eva/vision/data/wsi/patching/samplers/__init__.py b/tests/eva/vision/data/wsi/patching/samplers/__init__.py new file mode 100644 index 00000000..e7064022 --- /dev/null +++ b/tests/eva/vision/data/wsi/patching/samplers/__init__.py @@ -0,0 +1 @@ +"""WSI patch samplers tests.""" diff --git a/tests/eva/vision/data/wsi/patching/samplers/test_foreground_grid.py b/tests/eva/vision/data/wsi/patching/samplers/test_foreground_grid.py new file mode 100644 index 00000000..9a5510ac --- /dev/null +++ b/tests/eva/vision/data/wsi/patching/samplers/test_foreground_grid.py @@ -0,0 +1,93 @@ +"""ForegroundGridSampler tests.""" + +import numpy as np +import pytest + +from eva.vision.data.wsi.patching import mask, samplers + +TEST_MASK = mask.Mask( + mask_array=np.array( + [ + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0], + ] + ), + mask_level_idx=3, + scale_factors=(6.0, 6.0), +) + +TEST_ARGS = {"width": 12, "height": 12, "layer_shape": (36, 36), "mask": TEST_MASK} + + +@pytest.mark.parametrize( + "min_foreground_ratio, max_samples, expected_n_samples", + [(0.0, 3, 3), (0.0, 100, 9), (0.5, 100, 5), (0.9, 100, 1)], +) +def test_length(min_foreground_ratio: float, max_samples: int, expected_n_samples: int) -> None: + """Tests if the sampler returns the correct number of samples.""" + sampler = samplers.ForegroundGridSampler( + max_samples=max_samples, min_foreground_ratio=min_foreground_ratio + ) + + x_y = list(sampler.sample(**TEST_ARGS)) + + assert len(x_y) == expected_n_samples + + +@pytest.mark.parametrize("n_samples, seed", [(10, 8), (22, 42)]) +def test_same_seed(n_samples: int, seed: int) -> None: + """Tests if the sampler returns the same samples for the same seed.""" + sampler = samplers.ForegroundGridSampler( + max_samples=n_samples, seed=seed, min_foreground_ratio=0.5 + ) + + x_y_1 = list(sampler.sample(**TEST_ARGS)) + x_y_2 = list(sampler.sample(**TEST_ARGS)) + + assert x_y_1 == x_y_2 + + +@pytest.mark.parametrize("n_samples, seed_1, seed_2", [(3, 1, 2), (5, 3, 4)]) +def test_different_seed(n_samples: int, seed_1: int, seed_2: int) -> None: + """Tests if the sampler returns different samples for different seeds.""" + sampler_1 = samplers.ForegroundGridSampler(max_samples=n_samples, seed=seed_1) + sampler_2 = samplers.ForegroundGridSampler(max_samples=n_samples, seed=seed_2) + + x_y_1 = list(sampler_1.sample(**TEST_ARGS)) + x_y_2 = list(sampler_2.sample(**TEST_ARGS)) + + assert x_y_1 != x_y_2 + + +def test_invalid_width_height() -> None: + """Tests if the sampler raises an error when width / height is bigger than layer_shape.""" + sampler = samplers.ForegroundGridSampler(max_samples=10, seed=42) + + with pytest.raises(ValueError): + 
list(sampler.sample(width=200, height=200, layer_shape=(100, 100), mask=TEST_MASK))
+
+
+@pytest.mark.parametrize("min_foreground_ratio", [0.0, 0.5, 0.9])
+def test_min_foreground_ratio(min_foreground_ratio: float) -> None:
+    """Tests if sampled coordinates respect the min_foreground_ratio."""
+    sampler = samplers.ForegroundGridSampler(
+        max_samples=100, min_foreground_ratio=min_foreground_ratio
+    )
+
+    x_y = list(sampler.sample(**TEST_ARGS))
+
+    mask = TEST_MASK
+    width, height = TEST_ARGS["width"], TEST_ARGS["height"]
+
+    for x, y in x_y:
+        x_, y_ = sampler._scale_coords(x, y, mask.scale_factors)
+        width_, height_ = sampler._scale_coords(width, height, mask.scale_factors)
+
+        patch_mask = mask.mask_array[y_ : y_ + height_, x_ : x_ + width_]
+        foreground_ratio = patch_mask.sum() / patch_mask.size
+
+        assert foreground_ratio >= min_foreground_ratio
diff --git a/tests/eva/vision/data/wsi/patching/samplers/test_grid.py b/tests/eva/vision/data/wsi/patching/samplers/test_grid.py
new file mode 100644
index 00000000..efeecf54
--- /dev/null
+++ b/tests/eva/vision/data/wsi/patching/samplers/test_grid.py
@@ -0,0 +1,69 @@
+"""GridSampler tests."""
+
+from typing import Tuple
+
+import pytest
+
+from eva.vision.data.wsi.patching import samplers
+
+TEST_ARGS = {"width": 10, "height": 10, "layer_shape": (100, 100)}
+
+
+@pytest.mark.parametrize("max_samples, expected_n_samples", [(3, 3), (10, 10), (200, 100)])
+def test_length(max_samples: int, expected_n_samples: int) -> None:
+    """Tests if the sampler returns the correct number of samples."""
+    sampler = samplers.GridSampler(max_samples=max_samples)
+
+    x_y = list(sampler.sample(**TEST_ARGS))
+
+    assert len(x_y) == expected_n_samples
+
+
+@pytest.mark.parametrize("max_samples, seed", [(10, 8), (22, 42)])
+def test_same_seed(max_samples: int, seed: int) -> None:
+    """Tests if the sampler returns the same samples for the same seed."""
+    sampler = samplers.GridSampler(max_samples=max_samples, seed=seed)
+
+    x_y_1 = list(sampler.sample(**TEST_ARGS))
+    x_y_2 = list(sampler.sample(**TEST_ARGS))
+
+    assert x_y_1 == x_y_2
+
+
+@pytest.mark.parametrize("max_samples, seed_1, seed_2", [(3, 1, 2), (5, 3, 4)])
+def test_different_seed(max_samples: int, seed_1: int, seed_2: int) -> None:
+    """Tests if the sampler returns different samples for different seeds."""
+    sampler_1 = samplers.GridSampler(max_samples=max_samples, seed=seed_1)
+    sampler_2 = samplers.GridSampler(max_samples=max_samples, seed=seed_2)
+
+    x_y_1 = list(sampler_1.sample(**TEST_ARGS))
+    x_y_2 = list(sampler_2.sample(**TEST_ARGS))
+
+    assert x_y_1 != x_y_2
+
+
+def test_invalid_width_height() -> None:
+    """Tests if the sampler raises an error when width / height is bigger than layer_shape."""
+    sampler = samplers.GridSampler(max_samples=10, seed=42)
+
+    with pytest.raises(ValueError):
+        list(sampler.sample(width=200, height=200, layer_shape=(100, 100)))
+
+
+@pytest.mark.parametrize(
+    "width, height, layer_shape",
+    [
+        (5, 5, (25, 25)),
+        (5, 5, (100, 100)),
+        (224, 224, (1000, 1000)),
+    ],
+)
+def test_expected_n_patches(width: int, height: int, layer_shape: Tuple[int, int]) -> None:
+    """Tests if the sampler yields the expected number of patches for a full grid."""
+    sampler = samplers.GridSampler(max_samples=None)
+
+    expected_max_samples = (layer_shape[0] // width) * (layer_shape[1] // height)
+
+    x_y = list(sampler.sample(width=width, height=height, layer_shape=layer_shape))
+
+    assert len(x_y) == expected_max_samples
diff --git a/tests/eva/vision/data/wsi/patching/samplers/test_random.py 
b/tests/eva/vision/data/wsi/patching/samplers/test_random.py new file mode 100644 index 00000000..85110a6c --- /dev/null +++ b/tests/eva/vision/data/wsi/patching/samplers/test_random.py @@ -0,0 +1,48 @@ +"""RandomSampler tests.""" + +import pytest + +from eva.vision.data.wsi.patching import samplers + +TEST_ARGS = {"width": 10, "height": 10, "layer_shape": (100, 100)} + + +@pytest.mark.parametrize("n_samples", [3, 10, 22]) +def test_length(n_samples: int) -> None: + """Tests if the sampler returns the correct number of samples.""" + sampler = samplers.RandomSampler(n_samples=n_samples) + + x_y = list(sampler.sample(**TEST_ARGS)) + + assert len(x_y) == n_samples + + +@pytest.mark.parametrize("n_samples, seed", [(10, 8), (22, 42)]) +def test_same_seed(n_samples: int, seed: int) -> None: + """Tests if the sampler returns the same samples for the same seed.""" + sampler = samplers.RandomSampler(n_samples=n_samples, seed=seed) + + x_y_1 = list(sampler.sample(**TEST_ARGS)) + x_y_2 = list(sampler.sample(**TEST_ARGS)) + + assert x_y_1 == x_y_2 + + +@pytest.mark.parametrize("n_samples, seed_1, seed_2", [(10, 1, 2), (22, 3, 4)]) +def test_different_seed(n_samples: int, seed_1: int, seed_2: int) -> None: + """Tests if the sampler returns different samples for different seeds.""" + sampler_1 = samplers.RandomSampler(n_samples=n_samples, seed=seed_1) + sampler_2 = samplers.RandomSampler(n_samples=n_samples, seed=seed_2) + + x_y_1 = list(sampler_1.sample(**TEST_ARGS)) + x_y_2 = list(sampler_2.sample(**TEST_ARGS)) + + assert x_y_1 != x_y_2 + + +def test_invalid_width_height() -> None: + """Tests if the sampler raises an error when width / height is bigger than layer_shape.""" + sampler = samplers.RandomSampler(n_samples=10, seed=42) + + with pytest.raises(ValueError): + list(sampler.sample(width=200, height=200, layer_shape=(100, 100))) From 4bdecbe22180446d423b631630b7f8a9b807d6a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Wed, 8 May 2024 16:38:20 +0200 Subject: [PATCH 13/29] Add `WsiClassificationDataset` (#429) --- .gitattributes | 1 + .github/workflows/ci.yaml | 6 + configs/vision/dino_vit/offline/panda.yaml | 45 +++---- src/eva/core/callbacks/writers/embeddings.py | 6 +- src/eva/core/data/datasets/embeddings/base.py | 2 +- src/eva/core/models/modules/head.py | 16 +-- src/eva/core/models/modules/inference.py | 6 +- src/eva/core/models/modules/module.py | 8 +- src/eva/core/models/modules/typings.py | 2 +- src/eva/vision/data/datasets/__init__.py | 4 +- .../data/datasets/classification/__init__.py | 4 +- .../data/datasets/classification/base.py | 8 +- .../data/datasets/classification/wsi.py | 111 ++++++++++++++---- src/eva/vision/data/datasets/wsi.py | 67 +++++------ .../datasets/multi-embeddings/manifest.csv | 4 +- tests/eva/assets/vision/datasets/wsi/0/a.tiff | 3 + tests/eva/assets/vision/datasets/wsi/0/b.tiff | 3 + tests/eva/assets/vision/datasets/wsi/1/a.tiff | 3 + .../assets/vision/datasets/wsi/manifest.csv | 3 + .../core/callbacks/writers/test_embeddings.py | 4 +- .../data/datasets/classification/test_bach.py | 6 +- .../data/datasets/classification/test_crc.py | 6 +- .../datasets/classification/test_mhist.py | 6 +- .../classification/test_patch_camelyon.py | 6 +- .../classification/test_total_segmentator.py | 6 +- .../data/datasets/classification/test_wsi.py | 91 ++++++++++++++ tests/eva/vision/data/datasets/test_wsi.py | 93 +++++++++++++++ 27 files changed, 382 insertions(+), 138 deletions(-) create mode 100644 
tests/eva/assets/vision/datasets/wsi/0/a.tiff create mode 100644 tests/eva/assets/vision/datasets/wsi/0/b.tiff create mode 100644 tests/eva/assets/vision/datasets/wsi/1/a.tiff create mode 100644 tests/eva/assets/vision/datasets/wsi/manifest.csv create mode 100644 tests/eva/vision/data/datasets/classification/test_wsi.py create mode 100644 tests/eva/vision/data/datasets/test_wsi.py diff --git a/.gitattributes b/.gitattributes index b04fc3fa..6aa148d8 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2,5 +2,6 @@ tests/eva/assets/**/*.h5 filter=lfs diff=lfs merge=lfs -text tests/eva/assets/**/*.png filter=lfs diff=lfs merge=lfs -text tests/eva/assets/**/*.jpg filter=lfs diff=lfs merge=lfs -text tests/eva/assets/**/*.tif filter=lfs diff=lfs merge=lfs -text +tests/eva/assets/**/*.tiff filter=lfs diff=lfs merge=lfs -text tests/eva/assets/**/*.csv filter=lfs diff=lfs merge=lfs -text tests/eva/assets/**/*.pt filter=lfs diff=lfs merge=lfs -text diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c004a64c..01998020 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -53,6 +53,12 @@ jobs: - "3.10" runs-on: ${{ matrix.os }} steps: + - name: Install OS dependencies + run: | + sudo apt update + sudo apt install -y software-properties-common + sudo add-apt-repository ppa:openslide/openslide + sudo apt install -y openslide-tools - name: Checkout uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 with: diff --git a/configs/vision/dino_vit/offline/panda.yaml b/configs/vision/dino_vit/offline/panda.yaml index 5954df88..97eca774 100644 --- a/configs/vision/dino_vit/offline/panda.yaml +++ b/configs/vision/dino_vit/offline/panda.yaml @@ -29,7 +29,7 @@ trainer: 0: train 1: val 2: test - metadata_keys: ["slide_id"] + metadata_keys: ["wsi_id"] backbone: class_path: eva.models.ModelFromFunction init_args: @@ -86,7 +86,6 @@ data: init_args: pad_size: 100 pad_value: 0 - val: class_path: eva.datasets.MultiEmbeddingsClassificationDataset init_args: @@ -98,31 +97,23 @@ data: <<: *DATASET_ARGS split: test predict: - - class_path: eva.vision.datasets.MultiWsiClassificationDataset - init_args: &PREDICT_DATASET_ARGS - root: ${oc.env:DATA_ROOT, ./slide_data}/panda - manifest_file: manifest_train.csv - sampler: - class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler - init_args: - max_samples: 100 - width: 224 - height: 224 - target_mpp: 0.5 - transforms: - class_path: eva.vision.data.transforms.common.ResizeAndCrop - init_args: - size: ${oc.env:RESIZE_DIM, 224} - mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} - std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} - - class_path: eva.vision.datasets.MultiWsiClassificationDataset - init_args: - <<: *PREDICT_DATASET_ARGS - manifest_file: manifest_val.csv - - class_path: eva.vision.datasets.MultiWsiClassificationDataset - init_args: - <<: *PREDICT_DATASET_ARGS - manifest_file: manifest_test.csv + class_path: eva.vision.datasets.WsiClassificationDataset + init_args: + root: ${oc.env:DATA_ROOT, ./slide_data}/panda + manifest_file: manifest.csv + sampler: + class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler + init_args: + max_samples: 100 + width: 224 + height: 224 + target_mpp: 0.5 + image_transforms: + class_path: eva.vision.data.transforms.common.ResizeAndCrop + init_args: + size: ${oc.env:RESIZE_DIM, 224} + mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} + std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} dataloaders: train: batch_size: &BATCH_SIZE 
${oc.env:BATCH_SIZE, 16} diff --git a/src/eva/core/callbacks/writers/embeddings.py b/src/eva/core/callbacks/writers/embeddings.py index b1cc792a..292344f1 100644 --- a/src/eva/core/callbacks/writers/embeddings.py +++ b/src/eva/core/callbacks/writers/embeddings.py @@ -13,7 +13,7 @@ from typing_extensions import override from eva.core.callbacks.writers.typings import ITEM_DICT_ENTRY, QUEUE_ITEM -from eva.core.models.modules.typings import DATA_SAMPLE +from eva.core.models.modules.typings import INPUT_BATCH from eva.core.utils import multiprocessing as eva_multiprocessing @@ -75,12 +75,12 @@ def write_on_batch_end( pl_module: pl.LightningModule, prediction: Any, batch_indices: Sequence[int], - batch: DATA_SAMPLE, + batch: INPUT_BATCH, batch_idx: int, dataloader_idx: int, ) -> None: dataset = trainer.predict_dataloaders[dataloader_idx].dataset # type: ignore - _, targets, metadata = DATA_SAMPLE(*batch) + _, targets, metadata = INPUT_BATCH(*batch) split = self._dataloader_idx_map.get(dataloader_idx) embeddings = self._get_embeddings(prediction) diff --git a/src/eva/core/data/datasets/embeddings/base.py b/src/eva/core/data/datasets/embeddings/base.py index bcac1b24..37b78138 100644 --- a/src/eva/core/data/datasets/embeddings/base.py +++ b/src/eva/core/data/datasets/embeddings/base.py @@ -16,7 +16,7 @@ "path": "embeddings", "target": "target", "split": "split", - "multi_id": "slide_id", + "multi_id": "wsi_id", } """The default column mapping of the variables to the manifest columns.""" diff --git a/src/eva/core/models/modules/head.py b/src/eva/core/models/modules/head.py index 8e09bc55..95748f70 100644 --- a/src/eva/core/models/modules/head.py +++ b/src/eva/core/models/modules/head.py @@ -11,7 +11,7 @@ from eva.core.metrics import structs as metrics_lib from eva.core.models.modules import module -from eva.core.models.modules.typings import DATA_SAMPLE, MODEL_TYPE +from eva.core.models.modules.typings import INPUT_BATCH, MODEL_TYPE from eva.core.models.modules.utils import batch_postprocess, grad @@ -72,23 +72,23 @@ def forward(self, tensor: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tens return self.head(features).squeeze(-1) @override - def training_step(self, batch: DATA_SAMPLE, *args: Any, **kwargs: Any) -> STEP_OUTPUT: + def training_step(self, batch: INPUT_BATCH, *args: Any, **kwargs: Any) -> STEP_OUTPUT: return self._batch_step(batch) @override - def validation_step(self, batch: DATA_SAMPLE, *args: Any, **kwargs: Any) -> STEP_OUTPUT: + def validation_step(self, batch: INPUT_BATCH, *args: Any, **kwargs: Any) -> STEP_OUTPUT: return self._batch_step(batch) @override - def test_step(self, batch: DATA_SAMPLE, *args: Any, **kwargs: Any) -> STEP_OUTPUT: + def test_step(self, batch: INPUT_BATCH, *args: Any, **kwargs: Any) -> STEP_OUTPUT: return self._batch_step(batch) @override - def predict_step(self, batch: DATA_SAMPLE, *args: Any, **kwargs: Any) -> torch.Tensor: - tensor = DATA_SAMPLE(*batch).data + def predict_step(self, batch: INPUT_BATCH, *args: Any, **kwargs: Any) -> torch.Tensor: + tensor = INPUT_BATCH(*batch).data return tensor if self.backbone is None else self.backbone(tensor) - def _batch_step(self, batch: DATA_SAMPLE) -> STEP_OUTPUT: + def _batch_step(self, batch: INPUT_BATCH) -> STEP_OUTPUT: """Performs a model forward step and calculates the loss. Args: @@ -97,7 +97,7 @@ def _batch_step(self, batch: DATA_SAMPLE) -> STEP_OUTPUT: Returns: The batch step output. 
""" - data, targets, metadata = DATA_SAMPLE(*batch) + data, targets, metadata = INPUT_BATCH(*batch) predictions = self(data) loss = self.criterion(predictions, targets) return { diff --git a/src/eva/core/models/modules/inference.py b/src/eva/core/models/modules/inference.py index 10f092d0..2d0d9a3d 100644 --- a/src/eva/core/models/modules/inference.py +++ b/src/eva/core/models/modules/inference.py @@ -5,7 +5,7 @@ from typing_extensions import override from eva.core.models.modules import module -from eva.core.models.modules.typings import DATA_SAMPLE, MODEL_TYPE +from eva.core.models.modules.typings import INPUT_BATCH, MODEL_TYPE class InferenceModule(module.ModelModule): @@ -28,10 +28,10 @@ def forward(self, tensor: torch.Tensor) -> torch.Tensor: @override def predict_step( self, - batch: DATA_SAMPLE, + batch: INPUT_BATCH, batch_idx: int, dataloader_idx: int = 0, ) -> STEP_OUTPUT: - data, *_ = DATA_SAMPLE(*batch) + data, *_ = INPUT_BATCH(*batch) predictions = self(data) return predictions diff --git a/src/eva/core/models/modules/module.py b/src/eva/core/models/modules/module.py index b1b6ddd9..cb5e222a 100644 --- a/src/eva/core/models/modules/module.py +++ b/src/eva/core/models/modules/module.py @@ -9,7 +9,7 @@ from typing_extensions import override from eva.core.metrics import structs as metrics_lib -from eva.core.models.modules.typings import DATA_SAMPLE +from eva.core.models.modules.typings import INPUT_BATCH from eva.core.models.modules.utils import batch_postprocess @@ -50,7 +50,7 @@ def default_postprocess(self) -> batch_postprocess.BatchPostProcess: def on_train_batch_end( self, outputs: STEP_OUTPUT, - batch: DATA_SAMPLE, + batch: INPUT_BATCH, batch_idx: int, ) -> None: outputs = self._common_batch_end(outputs) @@ -63,7 +63,7 @@ def on_train_batch_end( def on_validation_batch_end( self, outputs: STEP_OUTPUT, - batch: DATA_SAMPLE, + batch: INPUT_BATCH, batch_idx: int, dataloader_idx: int = 0, ) -> None: @@ -82,7 +82,7 @@ def on_validation_epoch_end(self) -> None: def on_test_batch_end( self, outputs: STEP_OUTPUT, - batch: DATA_SAMPLE, + batch: INPUT_BATCH, batch_idx: int, dataloader_idx: int = 0, ) -> None: diff --git a/src/eva/core/models/modules/typings.py b/src/eva/core/models/modules/typings.py index 67c79fb6..fa476bd1 100644 --- a/src/eva/core/models/modules/typings.py +++ b/src/eva/core/models/modules/typings.py @@ -10,7 +10,7 @@ """The expected model type.""" -class DATA_SAMPLE(NamedTuple): +class INPUT_BATCH(NamedTuple): """The default input batch data scheme.""" data: torch.Tensor diff --git a/src/eva/vision/data/datasets/__init__.py b/src/eva/vision/data/datasets/__init__.py index 557fdcba..d9705124 100644 --- a/src/eva/vision/data/datasets/__init__.py +++ b/src/eva/vision/data/datasets/__init__.py @@ -4,9 +4,9 @@ BACH, CRC, MHIST, - MultiWsiClassificationDataset, PatchCamelyon, TotalSegmentatorClassification, + WsiClassificationDataset, ) from eva.vision.data.datasets.segmentation import ImageSegmentation, TotalSegmentator2D from eva.vision.data.datasets.vision import VisionDataset @@ -23,5 +23,5 @@ "VisionDataset", "WsiDataset", "MultiWsiDataset", - "MultiWsiClassificationDataset", + "WsiClassificationDataset", ] diff --git a/src/eva/vision/data/datasets/classification/__init__.py b/src/eva/vision/data/datasets/classification/__init__.py index 55198b9e..ca74c25a 100644 --- a/src/eva/vision/data/datasets/classification/__init__.py +++ b/src/eva/vision/data/datasets/classification/__init__.py @@ -5,7 +5,7 @@ from eva.vision.data.datasets.classification.mhist import MHIST 
from eva.vision.data.datasets.classification.patch_camelyon import PatchCamelyon from eva.vision.data.datasets.classification.total_segmentator import TotalSegmentatorClassification -from eva.vision.data.datasets.classification.wsi import MultiWsiClassificationDataset +from eva.vision.data.datasets.classification.wsi import WsiClassificationDataset __all__ = [ "BACH", @@ -13,5 +13,5 @@ "MHIST", "PatchCamelyon", "TotalSegmentatorClassification", - "MultiWsiClassificationDataset", + "WsiClassificationDataset", ] diff --git a/src/eva/vision/data/datasets/classification/base.py b/src/eva/vision/data/datasets/classification/base.py index 7c28877e..56f95082 100644 --- a/src/eva/vision/data/datasets/classification/base.py +++ b/src/eva/vision/data/datasets/classification/base.py @@ -38,12 +38,11 @@ def classes(self) -> List[str] | None: def class_to_idx(self) -> Dict[str, int] | None: """Returns a mapping of the class name to its target index.""" - def load_metadata(self, index: int | None) -> Dict[str, Any] | List[Dict[str, Any]] | None: + def load_metadata(self, index: int) -> Dict[str, Any] | None: """Returns the dataset metadata. Args: index: The index of the data sample to return the metadata of. - If `None`, it will return the metadata of the current dataset. Returns: The sample metadata. @@ -77,10 +76,11 @@ def __len__(self) -> int: raise NotImplementedError @override - def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray]: + def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]: image = self.load_image(index) target = self.load_target(index) - return self._apply_transforms(image, target) + image, target = self._apply_transforms(image, target) + return image, target, self.load_metadata(index) or {} def _apply_transforms( self, image: np.ndarray, target: np.ndarray diff --git a/src/eva/vision/data/datasets/classification/wsi.py b/src/eva/vision/data/datasets/classification/wsi.py index 4d7aff61..d34cde8b 100644 --- a/src/eva/vision/data/datasets/classification/wsi.py +++ b/src/eva/vision/data/datasets/classification/wsi.py @@ -1,40 +1,103 @@ -"""Dataset classes for whole-slide image classification.""" +"""WSI classification dataset.""" -import bisect import os -from typing import Any, Dict +from typing import Any, Callable, Dict, Literal, Tuple -import torch +import numpy as np +import pandas as pd from typing_extensions import override -from eva.core.models.modules.typings import DATA_SAMPLE -from eva.vision.data.datasets.wsi import MultiWsiDataset +from eva.vision.data.datasets import wsi +from eva.vision.data.datasets.classification import base +from eva.vision.data.wsi.patching import samplers -class MultiWsiClassificationDataset(MultiWsiDataset): - """Classification Dataset class for reading patches from multiple whole-slide images. +class WsiClassificationDataset(wsi.MultiWsiDataset, base.ImageClassification): + """A general dataset class for whole-slide image classification using manifest files.""" - # TODO: Replace this by dataset specific classes? - """ + default_column_mapping: Dict[str, str] = { + "path": "path", + "target": "target", + "split": "split", + } + + def __init__( + self, + root: str, + manifest_file: str, + width: int, + height: int, + target_mpp: float, + sampler: samplers.Sampler, + backend: str = "openslide", + split: Literal["train", "val", "test"] | None = None, + image_transforms: Callable | None = None, + column_mapping: Dict[str, str] = default_column_mapping, + ): + """Initializes the dataset. 
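+
+        The manifest is expected to contain one row per whole-slide image; an
+        illustrative example using the default column mapping:
+
+            path,target,split
+            0/a.tiff,0,train
+            1/a.tiff,1,val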
+ + Args: + root: Root directory of the dataset. + manifest_file: The path to the manifest file, relative to + the `root` argument. The `path` column is expected to contain + relative paths to the whole-slide images. + width: Width of the patches to be extracted, in pixels. + height: Height of the patches to be extracted, in pixels. + target_mpp: Target microns per pixel (mpp) for the patches. + sampler: The sampler to use for sampling patch coordinates. + backend: The backend to use for reading the whole-slide images. + split: The split of the dataset to load. + image_transforms: Transforms to apply to the extracted image patches. + column_mapping: Mapping of the columns in the manifest file. + """ + self._split = split + self._column_mapping = self.default_column_mapping | column_mapping + self._manifest = self._load_manifest(os.path.join(root, manifest_file)) + + wsi.MultiWsiDataset.__init__( + self, + root=root, + file_paths=self._manifest[self._column_mapping["path"]].tolist(), + width=width, + height=height, + sampler=sampler, + target_mpp=target_mpp, + backend=backend, + image_transforms=image_transforms, + ) @override def filename(self, index: int) -> str: - dataset_idx = bisect.bisect_right(self.cumulative_sizes, index) - full_path = self._manifest.at[dataset_idx, self._column_mapping["path"]] - return os.path.basename(full_path) + path = self._manifest.at[self._get_dataset_idx(index), self._column_mapping["path"]] + return os.path.basename(path) if os.path.isabs(path) else path + + @override + def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]: + return base.ImageClassification.__getitem__(self, index) @override - def __getitem__(self, index: int) -> DATA_SAMPLE: - data = super().__getitem__(index) - target = self._load_target(index) - metadata = self._load_metadata(index) + def load_image(self, index: int) -> np.ndarray: + return wsi.MultiWsiDataset.__getitem__(self, index) + + @override + def load_target(self, index: int) -> np.ndarray: + target = self._manifest.at[self._get_dataset_idx(index), self._column_mapping["target"]] + return np.asarray(target) + + @override + def load_metadata(self, index: int) -> Dict[str, Any]: + return {"wsi_id": self.filename(index).split(".")[0]} + + def _load_manifest(self, manifest_path: str) -> pd.DataFrame: + df = pd.read_csv(manifest_path) - return DATA_SAMPLE(data, target, metadata) + missing_columns = set(self._column_mapping.values()) - set(df.columns) + if self._split is None: + missing_columns = missing_columns - {self._column_mapping["split"]} + if missing_columns: + raise ValueError(f"Missing columns in the manifest file: {missing_columns}") - def _load_target(self, index: int) -> torch.Tensor: - dataset_idx = bisect.bisect_right(self.cumulative_sizes, index) - target = self._manifest.at[dataset_idx, self._column_mapping["target"]] - return torch.tensor(target) + if self._split is not None: + df = df.loc[df[self._column_mapping["split"]] == self._split] - def _load_metadata(self, index: int) -> Dict[str, Any]: - return {"slide_id": self.filename(index).split(".")[0]} + return df.reset_index(drop=True) diff --git a/src/eva/vision/data/datasets/wsi.py b/src/eva/vision/data/datasets/wsi.py index 962c5d48..8c5817f1 100644 --- a/src/eva/vision/data/datasets/wsi.py +++ b/src/eva/vision/data/datasets/wsi.py @@ -1,10 +1,11 @@ """Dataset classes for whole-slide images.""" +import bisect import os -from typing import Callable, Dict +from typing import Callable, List -import pandas as pd -import torch +import 
numpy as np +from loguru import logger from torch.utils.data import dataset as torch_datasets from typing_extensions import override @@ -24,7 +25,7 @@ def __init__( target_mpp: float, sampler: samplers.Sampler, backend: str = "openslide", - transforms: Callable[..., torch.Tensor] | None = None, + image_transforms: Callable | None = None, ): """Initializes a new dataset instance. @@ -35,15 +36,15 @@ def __init__( target_mpp: Target microns per pixel (mpp) for the patches. sampler: The sampler to use for sampling patch coordinates. backend: The backend to use for reading the whole-slide images. - transforms: Transforms to apply to the extracted patch tensors. + image_transforms: Transforms to apply to the extracted image patches. """ self._file_path = file_path self._width = width self._height = height self._target_mpp = target_mpp - self._backend = backend self._sampler = sampler - self._transforms = transforms + self._backend = backend + self._image_transforms = image_transforms @override def __len__(self): @@ -69,73 +70,65 @@ def _coords(self) -> wsi.PatchCoordinates: ) @override - def __getitem__(self, index: int) -> torch.Tensor: + def __getitem__(self, index: int) -> np.ndarray: x, y = self._coords.x_y[index] width, height, level_idx = self._coords.width, self._coords.height, self._coords.level_idx patch = self._wsi.read_region((x, y), level_idx, (width, height)) - patch = self._apply_transforms(torch.from_numpy(patch).permute(2, 0, 1)) + patch = self._apply_transforms(patch) return patch - def _apply_transforms(self, tensor: torch.Tensor) -> torch.Tensor: - if self._transforms: - tensor = self._transforms(tensor) - return tensor + def _apply_transforms(self, image: np.ndarray) -> np.ndarray: + if self._image_transforms is not None: + image = self._image_transforms(image) + return image -class MultiWsiDataset(torch_datasets.ConcatDataset, vision.VisionDataset): +class MultiWsiDataset(torch_datasets.ConcatDataset): """Dataset class for reading patches from multiple whole-slide images.""" - default_column_mapping: Dict[str, str] = { - "path": "path", - "target": "target", - } - def __init__( self, root: str, - manifest_file: str, + file_paths: List[str], width: int, height: int, target_mpp: float, sampler: samplers.Sampler, backend: str = "openslide", - transforms: Callable | None = None, - column_mapping: Dict[str, str] = default_column_mapping, + image_transforms: Callable | None = None, ): """Initializes a new dataset instance. Args: root: Root directory of the dataset. - manifest_file: The path to the manifest file, which is relative to - the `root` argument. + file_paths: List of paths to the whole-slide image files, relative to the root. width: Width of the patches to be extracted, in pixels. height: Height of the patches to be extracted, in pixels. target_mpp: Target microns per pixel (mpp) for the patches. sampler: The sampler to use for sampling patch coordinates. backend: The backend to use for reading the whole-slide images. - transforms: Transforms to apply to the extracted patch tensors. + image_transforms: Transforms to apply to the extracted image patches. column_mapping: Defines the map between the variables and the manifest columns. It will overwrite the `default_column_mapping` with the provided values, so that `column_mapping` can contain only the values which are altered or missing. 
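+
+        Example (an illustrative sketch; the root and file paths are placeholders):
+
+            dataset = MultiWsiDataset(
+                root="/data/slides",
+                file_paths=["0/a.tiff", "1/a.tiff"],
+                width=224,
+                height=224,
+                target_mpp=0.5,
+                sampler=samplers.RandomSampler(n_samples=10),
+            )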
""" self._root = root - self._manifest_file = manifest_file + self._file_paths = file_paths self._width = width self._height = height self._target_mpp = target_mpp self._sampler = sampler self._backend = backend - self._transforms = transforms - self._column_mapping = column_mapping + self._image_transforms = image_transforms - self._manifest = self._load_manifest(os.path.join(self._root, self._manifest_file)) super().__init__(self._load_datasets()) def _load_datasets(self) -> list[WsiDataset]: + logger.info(f"Initializing {len(self._file_paths)} WSI datasets ...") wsi_datasets = [] - for _, row in self._manifest.iterrows(): - file_path = os.path.join(self._root, str(row[self._column_mapping["path"]])) + for file_path in self._file_paths: + file_path = os.path.join(self._root, file_path) if self._root else file_path if not os.path.exists(file_path): raise FileNotFoundError(f"File not found: {file_path}") @@ -147,16 +140,10 @@ def _load_datasets(self) -> list[WsiDataset]: target_mpp=self._target_mpp, sampler=self._sampler, backend=self._backend, - transforms=self._transforms, + image_transforms=self._image_transforms, ) ) return wsi_datasets - def _load_manifest(self, manifest_path: str) -> pd.DataFrame: - df = pd.read_csv(manifest_path) - - missing_columns = set(self._column_mapping.values()) - set(df.columns) - if missing_columns: - raise ValueError(f"Missing columns in the manifest file: {missing_columns}") - - return df + def _get_dataset_idx(self, index: int) -> int: + return bisect.bisect_right(self.cumulative_sizes, index) diff --git a/tests/eva/assets/core/datasets/multi-embeddings/manifest.csv b/tests/eva/assets/core/datasets/multi-embeddings/manifest.csv index 1eb25968..084e97ea 100644 --- a/tests/eva/assets/core/datasets/multi-embeddings/manifest.csv +++ b/tests/eva/assets/core/datasets/multi-embeddings/manifest.csv @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e884b93aa81257148dbb73564d734045ffe110463f60a6064814bf95aa82044 -size 514 +oid sha256:f810f5b5bde999b0b655d0107d27c4e257094a6f11b6ad507cc3240bb68d81d2 +size 512 diff --git a/tests/eva/assets/vision/datasets/wsi/0/a.tiff b/tests/eva/assets/vision/datasets/wsi/0/a.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/wsi/0/a.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/wsi/0/b.tiff b/tests/eva/assets/vision/datasets/wsi/0/b.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/wsi/0/b.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/wsi/1/a.tiff b/tests/eva/assets/vision/datasets/wsi/1/a.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/wsi/1/a.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/wsi/manifest.csv b/tests/eva/assets/vision/datasets/wsi/manifest.csv new file mode 100644 index 00000000..d9e7d867 --- /dev/null +++ b/tests/eva/assets/vision/datasets/wsi/manifest.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ac6feb39305e51bc126f0599bbb097af2525bfcbfd2e028d71bfebb7a29fdcab +size 65 diff --git a/tests/eva/core/callbacks/writers/test_embeddings.py b/tests/eva/core/callbacks/writers/test_embeddings.py index d9886d4e..c1f0570d 100644 --- a/tests/eva/core/callbacks/writers/test_embeddings.py +++ b/tests/eva/core/callbacks/writers/test_embeddings.py @@ -27,9 +27,9 @@ "batch_size, n_samples, metadata_keys, filenames", [ (5, 7, None, None), - (5, 7, ["slide_id"], None), + (5, 7, ["wsi_id"], None), (8, 16, None, None), - (8, 32, ["slide_id"], ["slide_1", "slide_2"]), + (8, 32, ["wsi_id"], ["slide_1", "slide_2"]), ], ) def test_embeddings_writer(datamodule: datamodules.DataModule, model: modules.HeadModule) -> None: diff --git a/tests/eva/vision/data/datasets/classification/test_bach.py b/tests/eva/vision/data/datasets/classification/test_bach.py index fcae0e31..2d063a77 100644 --- a/tests/eva/vision/data/datasets/classification/test_bach.py +++ b/tests/eva/vision/data/datasets/classification/test_bach.py @@ -29,12 +29,12 @@ def test_length(bach_dataset: datasets.BACH, expected_length: int) -> None: ) def test_sample(bach_dataset: datasets.BACH, index: int) -> None: """Tests the format of a dataset sample.""" - # assert data sample is a tuple sample = bach_dataset[index] + # assert data sample is a tuple assert isinstance(sample, tuple) - assert len(sample) == 2 + assert len(sample) == 3 # assert the format of the `image` and `target` - image, target = sample + image, target, _ = sample assert isinstance(image, np.ndarray) assert image.shape == (16, 16, 3) assert isinstance(target, np.ndarray) diff --git a/tests/eva/vision/data/datasets/classification/test_crc.py b/tests/eva/vision/data/datasets/classification/test_crc.py index 6bf0a51d..1fb276bd 100644 --- a/tests/eva/vision/data/datasets/classification/test_crc.py +++ b/tests/eva/vision/data/datasets/classification/test_crc.py @@ -20,12 +20,12 @@ ) def test_sample(crc_dataset: datasets.CRC, index: int) -> None: """Tests the format of a dataset sample.""" - # assert data sample is a tuple sample = crc_dataset[index] + # assert data sample is a tuple assert isinstance(sample, tuple) - assert len(sample) == 2 + assert len(sample) == 3 # assert the format of the `image` and `target` - image, target = sample + image, target, _ = sample assert isinstance(image, np.ndarray) assert image.shape == (16, 16, 3) assert isinstance(target, np.ndarray) diff --git a/tests/eva/vision/data/datasets/classification/test_mhist.py b/tests/eva/vision/data/datasets/classification/test_mhist.py index 94b32ed5..f9e70105 100644 --- a/tests/eva/vision/data/datasets/classification/test_mhist.py +++ b/tests/eva/vision/data/datasets/classification/test_mhist.py @@ -29,12 +29,12 @@ def test_length(mhist_dataset: datasets.BACH, expected_length: int) -> None: ) def test_sample(mhist_dataset: datasets.MHIST, index: int) -> None: """Tests the format of a dataset sample.""" - # assert data sample is a tuple sample = mhist_dataset[index] + # assert data sample is a tuple assert isinstance(sample, tuple) - assert len(sample) == 2 + assert len(sample) == 3 # assert the format of the `image` and `target` - image, target = sample + image, target, _ = sample assert isinstance(image, np.ndarray) assert image.shape == (224, 224, 3) assert isinstance(target, np.ndarray) diff --git a/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py b/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py index 8205b333..9f9270f3 100644 --- 
a/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py +++ b/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py @@ -24,12 +24,12 @@ def test_length(patch_camelyon_dataset: datasets.PatchCamelyon, expected_length: ) def test_sample(patch_camelyon_dataset: datasets.PatchCamelyon) -> None: """Tests the format of a dataset sample.""" - # assert data sample is a tuple sample = patch_camelyon_dataset[0] + # assert data sample is a tuple assert isinstance(sample, tuple) - assert len(sample) == 2 + assert len(sample) == 3 # assert the format of the `image` and `target` - image, target = sample + image, target, _ = sample assert isinstance(image, np.ndarray) assert image.shape == (96, 96, 3) assert isinstance(target, np.ndarray) diff --git a/tests/eva/vision/data/datasets/classification/test_total_segmentator.py b/tests/eva/vision/data/datasets/classification/test_total_segmentator.py index dcd92fdd..1c694f7b 100644 --- a/tests/eva/vision/data/datasets/classification/test_total_segmentator.py +++ b/tests/eva/vision/data/datasets/classification/test_total_segmentator.py @@ -29,12 +29,12 @@ def test_length( ) def test_sample(total_segmentator_dataset: datasets.TotalSegmentatorClassification) -> None: """Tests the format of a dataset sample.""" - # assert data sample is a tuple sample = total_segmentator_dataset[0] + # assert data sample is a tuple assert isinstance(sample, tuple) - assert len(sample) == 2 + assert len(sample) == 3 # assert the format of the `image` and `target` - image, target = sample + image, target, _ = sample assert isinstance(image, np.ndarray) assert image.shape == (16, 16, 3) assert isinstance(target, np.ndarray) diff --git a/tests/eva/vision/data/datasets/classification/test_wsi.py b/tests/eva/vision/data/datasets/classification/test_wsi.py new file mode 100644 index 00000000..6aa151f5 --- /dev/null +++ b/tests/eva/vision/data/datasets/classification/test_wsi.py @@ -0,0 +1,91 @@ +"""WsiClassificationDataset tests.""" + +import os +import pickle +import re +from typing import Any + +import numpy as np +import pytest +import torch +import torchvision.transforms.v2 as torch_transforms + +from eva.vision.data import datasets +from eva.vision.data import transforms as eva_transforms +from eva.vision.data.wsi.patching import samplers + +TARGET_SIZE = 224 +DEFAULT_ARGS = { + "manifest_file": "manifest.csv", + "width": 32, + "height": 32, + "target_mpp": 0.25, + "sampler": samplers.GridSampler(None), + "backend": "openslide", + "image_transforms": torch_transforms.Compose([eva_transforms.ResizeAndCrop(size=TARGET_SIZE)]), +} + + +def test_pickleable(dataset: datasets.WsiClassificationDataset): + """Tests if the dataset is pickleable (required for multi-worker torch data loaders).""" + pickled = pickle.dumps(dataset) + + # Check if it works after unpickling + unpickled_dataset = pickle.loads(pickled) + for batch in unpickled_dataset: + _check_batch_shape(batch) + + +def test_split(root: str): + """Test loading the dataset with different splits.""" + dataset = datasets.WsiClassificationDataset(root=root, split=None, **DEFAULT_ARGS) + assert len(dataset) == 192 + _check_batch_shape(dataset[0]) + + train_dataset = datasets.WsiClassificationDataset(root=root, split="train", **DEFAULT_ARGS) + assert len(train_dataset) == 64 + _check_batch_shape(train_dataset[0]) + + +def test_filename(dataset: datasets.WsiClassificationDataset): + """Tests the filename method.""" + pattern = r"^\d+/[a-z]\.tiff$" + for i in range(len(dataset)): + assert 
bool(re.match(pattern, dataset.filename(i))) + + +def test_missing_columns(root: str): + """Test if error is raised if columns are missing in the manifest file.""" + with pytest.raises(ValueError, match="Missing columns in the manifest file"): + datasets.WsiClassificationDataset( + root=root, + column_mapping={"target": "label"}, + **DEFAULT_ARGS, + ) + + +def _check_batch_shape(batch: Any): + assert isinstance(batch, tuple) + assert len(batch) == 3 + + image, target, metadata = batch + assert isinstance(image, torch.Tensor) + assert image.shape == (3, TARGET_SIZE, TARGET_SIZE) + + assert isinstance(target, np.ndarray) + assert target.size == 1 + + assert isinstance(metadata, dict) + assert "wsi_id" in metadata + + +@pytest.fixture +def dataset(root: str) -> datasets.WsiClassificationDataset: + """Fixture returning a dataset instance.""" + return datasets.WsiClassificationDataset(root=root, **DEFAULT_ARGS) + + +@pytest.fixture +def root(assets_path: str) -> str: + """Fixture returning the root path to the test dataset assets.""" + return os.path.join(assets_path, "vision/datasets/wsi") diff --git a/tests/eva/vision/data/datasets/test_wsi.py b/tests/eva/vision/data/datasets/test_wsi.py new file mode 100644 index 00000000..b79b2f6f --- /dev/null +++ b/tests/eva/vision/data/datasets/test_wsi.py @@ -0,0 +1,93 @@ +"""WsiDataset & MultiWsiDataset tests.""" + +import os +from typing import Tuple + +import pytest + +from eva.vision.data import datasets +from eva.vision.data.wsi.patching import samplers + + +@pytest.mark.parametrize( + "width, height, overlap", + [ + (4, 4, (0, 0)), + (4, 4, (2, 2)), + (33, 33, (0, 0)), + (224, 224, (0, 0)), + ], +) +def test_len(width: int, height: int, root: str, overlap: Tuple[int, int]): + """Test the length of the dataset using different patch dimensions.""" + dataset = datasets.WsiDataset( + file_path=os.path.join(root, "0/a.tiff"), + width=width, + height=height, + target_mpp=0.25, + sampler=samplers.GridSampler(max_samples=None, overlap=overlap), + backend="openslide", + ) + + layer_shape = dataset._wsi.level_dimensions[0] + assert len(dataset) == _expected_n_patches(layer_shape, width, height, overlap) + + +@pytest.mark.parametrize( + "width, height, target_mpp", + [(4, 4, 0.25), (4, 4, 1.3)], +) +def test_patch_shape(width: int, height: int, target_mpp: float, root: str): + """Test the shape of the extracted patches.""" + dataset = datasets.WsiDataset( + file_path=os.path.join(root, "0/a.tiff"), + width=width, + height=height, + target_mpp=target_mpp, + sampler=samplers.GridSampler(max_samples=None), + backend="openslide", + ) + + mpp_ratio = target_mpp / ( + dataset._wsi.mpp * dataset._wsi.level_downsamples[dataset._coords.level_idx] + ) + scaled_width, scaled_height = int(mpp_ratio * width), int(mpp_ratio * height) + assert dataset[0].shape == (scaled_width, scaled_height, 3) + + +def test_multi_dataset(root: str): + """Test MultiWsiDataset with multiple whole-slide image paths.""" + file_paths = [ + os.path.join(root, "0/a.tiff"), + os.path.join(root, "0/b.tiff"), + os.path.join(root, "1/a.tiff"), + ] + + width, height = 32, 32 + dataset = datasets.MultiWsiDataset( + root=root, + file_paths=file_paths, + width=width, + height=height, + target_mpp=0.25, + sampler=samplers.GridSampler(max_samples=None), + backend="openslide", + ) + + assert isinstance(dataset.datasets[0], datasets.WsiDataset) + layer_shape = dataset.datasets[0]._wsi.level_dimensions[0] + assert len(dataset) == _expected_n_patches(layer_shape, width, height, (0, 0)) * len(file_paths) + 
assert dataset.cumulative_sizes == [64, 128, 192] + + +def _expected_n_patches(layer_shape, width, height, overlap): + """Calculate the expected number of patches.""" + n_patches_x = (layer_shape[0] - width) // (width - overlap[0]) + 1 + n_patches_y = (layer_shape[1] - height) // (height - overlap[1]) + 1 + return n_patches_x * n_patches_y + + +@pytest.fixture +def root(assets_path: str) -> str: + """Fixture returning the root path to the test dataset assets.""" + return os.path.join(assets_path, "vision/datasets/wsi") From 1211e5a17d6b9200228ba01bcbc79df965089260 Mon Sep 17 00:00:00 2001 From: roman807 Date: Fri, 10 May 2024 08:16:14 +0200 Subject: [PATCH 14/29] Retrieve MPP from WSIs (#432) * add mpp conversion * formatting * addressed comments * addressed comments * addressed comments * formatting * formatting * formatting --- src/eva/vision/data/wsi/backends/openslide.py | 34 +++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/src/eva/vision/data/wsi/backends/openslide.py b/src/eva/vision/data/wsi/backends/openslide.py index 4173b8cf..e5f141d1 100644 --- a/src/eva/vision/data/wsi/backends/openslide.py +++ b/src/eva/vision/data/wsi/backends/openslide.py @@ -32,8 +32,26 @@ def level_downsamples(self) -> Sequence[float]: @override def mpp(self) -> float: # TODO: add overwrite_mpp class attribute to allow setting a default value - x_mpp = float(self._wsi.properties["openslide.mpp-x"]) - y_mpp = float(self._wsi.properties["openslide.mpp-y"]) + if self._wsi.properties.get(openslide.PROPERTY_NAME_MPP_X) and self._wsi.properties.get( + openslide.PROPERTY_NAME_MPP_Y + ): + x_mpp = float(self._wsi.properties[openslide.PROPERTY_NAME_MPP_X]) + y_mpp = float(self._wsi.properties[openslide.PROPERTY_NAME_MPP_Y]) + elif ( + self._wsi.properties.get("tiff.XResolution") + and self._wsi.properties.get("tiff.YResolution") + and self._wsi.properties.get("tiff.ResolutionUnit") + ): + unit = self._wsi.properties.get("tiff.ResolutionUnit") + if unit not in _conversion_factor_to_micrometer: + raise ValueError(f"Unit {unit} not supported.") + + conversion_factor = float(_conversion_factor_to_micrometer.get(unit)) # type: ignore + x_mpp = conversion_factor / float(self._wsi.properties["tiff.XResolution"]) + y_mpp = conversion_factor / float(self._wsi.properties["tiff.YResolution"]) + else: + raise ValueError("`mpp` cannot be obtained for this slide.") + return (x_mpp + y_mpp) / 2.0 @override @@ -58,3 +76,15 @@ def read_region( data[data[:, :, 3] == 0] = 255 return data[:, :, :3] + + +_conversion_factor_to_micrometer = { + "meter": 10**6, + "decimeter": 10**5, + "centimeter": 10**4, + "millimeter": 10**3, + "micrometer": 1, + "nanometer": 10**-3, + "picometer": 10**-6, + "femtometer": 10**-9, +} From 7fb4445c554c8e706f7415f3d9c100b2ff4a2b19 Mon Sep 17 00:00:00 2001 From: roman807 Date: Mon, 13 May 2024 13:04:38 +0200 Subject: [PATCH 15/29] updated panda config (#437) --- configs/vision/dino_vit/offline/panda.yaml | 44 +++++++++++++--------- 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/configs/vision/dino_vit/offline/panda.yaml b/configs/vision/dino_vit/offline/panda.yaml index 97eca774..c33b9e35 100644 --- a/configs/vision/dino_vit/offline/panda.yaml +++ b/configs/vision/dino_vit/offline/panda.yaml @@ -97,23 +97,32 @@ data: <<: *DATASET_ARGS split: test predict: - class_path: eva.vision.datasets.WsiClassificationDataset - init_args: - root: ${oc.env:DATA_ROOT, ./slide_data}/panda - manifest_file: manifest.csv - sampler: - class_path: 
eva.vision.data.wsi.patching.samplers.ForegroundGridSampler
+          init_args:
+            max_samples: 100
+        width: 224
+        height: 224
+        target_mpp: 0.5
+        split: train
+        image_transforms:
+          class_path: eva.vision.data.transforms.common.ResizeAndCrop
+          init_args:
+            size: ${oc.env:RESIZE_DIM, 224}
+            mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]}
+            std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]}
+    - class_path: eva.vision.datasets.WsiClassificationDataset
+      init_args:
+        <<: *PREDICT_DATASET_ARGS
+        split: val
+    - class_path: eva.vision.datasets.WsiClassificationDataset
+      init_args:
+        <<: *PREDICT_DATASET_ARGS
+        split: test
   dataloaders:
     train:
       batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 16}
@@ -124,4 +133,3 @@ data:
       batch_size: &PREDICT_BATCH_SIZE ${oc.env:PREDICT_BATCH_SIZE, 64}
       num_workers: 12 #multiprocessing.cpu_count
       prefetch_factor: 2
-

From e3e5671eb24a2d134d9efdeb5e01891a921547d6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com>
Date: Tue, 21 May 2024 08:48:24 +0200
Subject: [PATCH 16/29] update WSI foreground segmentation algorithm (#456)

---
 src/eva/vision/data/wsi/__init__.py           | 12 ++-
 src/eva/vision/data/wsi/patching/mask.py      | 41 +++++++--
 .../eva/vision/data/wsi/patching/test_mask.py | 91 +++++++++++++++++++
 3 files changed, 135 insertions(+), 9 deletions(-)
 create mode 100644 tests/eva/vision/data/wsi/patching/test_mask.py

diff --git a/src/eva/vision/data/wsi/__init__.py b/src/eva/vision/data/wsi/__init__.py
index 24c9aa4c..116fec74 100644
--- a/src/eva/vision/data/wsi/__init__.py
+++ b/src/eva/vision/data/wsi/__init__.py
@@ -2,5 +2,15 @@
 
 from eva.vision.data.wsi.backends import Wsi, get_cached_wsi, wsi_backend
 from eva.vision.data.wsi.patching.coordinates import PatchCoordinates, get_cached_coords
+from eva.vision.data.wsi.patching.mask import Mask, get_mask, get_mask_level
 
-__all__ = ["Wsi", "PatchCoordinates", "get_cached_coords", "wsi_backend", "get_cached_wsi"]
+__all__ = [
+    "Wsi",
+    "PatchCoordinates",
+    "Mask",
+    "get_cached_coords",
+    "wsi_backend",
+    "get_cached_wsi",
+    "get_mask",
+    "get_mask_level",
+]
diff --git a/src/eva/vision/data/wsi/patching/mask.py b/src/eva/vision/data/wsi/patching/mask.py
index 3dc1d9bb..d73a0d59 100644
--- a/src/eva/vision/data/wsi/patching/mask.py
+++ b/src/eva/vision/data/wsi/patching/mask.py
@@ -26,31 +26,56 @@ class Mask:
 def get_mask(
     wsi: Wsi,
     mask_level_idx: int,
-    kernel_size: Tuple[int, int] = (7, 7),
-    gray_threshold: int = 220,
+    saturation_threshold: int = 20,
+    median_blur_threshold: int | None = 7,
     fill_holes: bool = False,
+    kernel_size: Tuple[int, int] = (7, 7),
+    use_otsu: bool = False,
 ) -> Mask:
-    """Extracts a binary mask from an image.
+    """Generates a binary foreground mask for a given WSI.
+
+    This is a simplified version of the algorithm proposed in [1] (CLAM):
+    1. Convert the image to the HSV color space (easier to separate specific colors than in RGB).
+    2. 
(optional) Apply a median blur to the saturation channel to reduce noise
+       & closing small gaps in the mask. While yields cleaner masks, this step is the most
+       computationally expensive.
+    3. Calculate binary mask by thresholding across the saturation channel.
+
+    [1] Lu, Ming Y., et al. "Data-efficient and weakly supervised computational
+    pathology on whole-slide images." Nature biomedical engineering 5.6 (2021): 555-570.
+    https://github.com/mahmoodlab/CLAM
 
     Args:
         wsi: The WSI object.
         mask_level_idx: The level index of the WSI at which we want to extract the mask.
-        kernel_size: The size of the kernel for morphological operations.
-        gray_threshold: The threshold for the gray scale image.
+        saturation_threshold: The threshold value for the saturation channel.
+        median_blur_threshold: The threshold value for the median blur operation.
+        kernel_size: The size of the kernel for morphological operations to fill holes.
         fill_holes: Whether to fill holes in the mask.
+        use_otsu: Whether to use Otsu's method for the thresholding operation. If False,
+            a fixed threshold value is used.
+
+    Returns: A Mask object instance.
     """
     image = wsi.read_region((0, 0), mask_level_idx, wsi.level_dimensions[mask_level_idx])
+    image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
+    image = (
+        cv2.medianBlur(image[:, :, 1], median_blur_threshold)
+        if median_blur_threshold
+        else image[:, :, 1]
+    )
 
-    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernel_size)
-    gray = np.array(cv2.cvtColor(image, cv2.COLOR_RGB2GRAY), dtype=np.uint8)
-    mask_array = np.where(gray < gray_threshold, 1, 0).astype(np.uint8)
+    threshold_type = cv2.THRESH_BINARY + cv2.THRESH_OTSU if use_otsu else cv2.THRESH_BINARY
+    _, mask_array = cv2.threshold(image, saturation_threshold, 1, threshold_type)
 
     if fill_holes:
+        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernel_size)
         mask_array = cv2.dilate(mask_array, kernel, iterations=1)
         contour, _ = cv2.findContours(mask_array, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
         for cnt in contour:
             cv2.drawContours(mask_array, [cnt], 0, (1,), -1)
 
+    mask_array = mask_array.astype(np.uint8)
     scale_factors = (
         wsi.level_dimensions[0][0] / wsi.level_dimensions[mask_level_idx][0],
         wsi.level_dimensions[0][1] / wsi.level_dimensions[mask_level_idx][1],
     )
diff --git a/tests/eva/vision/data/wsi/patching/test_mask.py b/tests/eva/vision/data/wsi/patching/test_mask.py
new file mode 100644
index 00000000..072f81b6
--- /dev/null
+++ b/tests/eva/vision/data/wsi/patching/test_mask.py
@@ -0,0 +1,91 @@
+"""WSI foreground mask tests."""
+
+import os
+
+import numpy as np
+import pytest
+
+from eva.vision.data import wsi as eva_wsi
+
+DEFAULT_ARGS = {
+    "saturation_threshold": 20,
+    "median_blur_threshold": 7,
+    "fill_holes": False,
+    "use_otsu": False,
+    "kernel_size": (7, 7),
+}
+
+
+@pytest.mark.parametrize(
+    "mask_level_idx, mask_args",
+    [
+        (0, DEFAULT_ARGS),
+        (1, DEFAULT_ARGS),
+        (0, DEFAULT_ARGS | {"median_blur_threshold": None}),
+        (0, DEFAULT_ARGS | {"fill_holes": True}),
+        (0, DEFAULT_ARGS | {"use_otsu": True}),
+        (0, DEFAULT_ARGS | {"fill_holes": True, "use_otsu": True}),
+    ],
+)
+def test_get_mask(wsi: eva_wsi.Wsi, mask_level_idx: int, mask_args: dict):
+    """Tests the foreground mask generation with different configurations."""
+    mask = eva_wsi.get_mask(wsi, mask_level_idx=mask_level_idx, **mask_args)
+
+    assert isinstance(mask, eva_wsi.Mask)
+    assert isinstance(mask.mask_array, np.ndarray)
+    assert mask.mask_array.shape == wsi.level_dimensions[mask.mask_level_idx]
+    assert np.all(np.isin(mask.mask_array, [0, 1]))
+
+    if 
mask.mask_level_idx == 0:
+        assert mask.scale_factors == (1.0, 1.0)
+    elif mask_level_idx == 1:
+        assert mask.scale_factors == (2.0, 2.0)
+
+
+@pytest.mark.parametrize(
+    "width, height, target_mpp, expected_level",
+    [
+        (4, 4, 0.25, 0),
+        (16, 16, 0.05, 0),
+        (4, 4, 0.5, 1),
+    ],
+)
+def test_get_mask_level(
+    wsi: eva_wsi.Wsi, width: int, height: int, target_mpp: float, expected_level: int
+):
+    """Tests the selection of the mask level based on the patch dimensions."""
+    level = eva_wsi.get_mask_level(wsi, width, height, target_mpp)
+    assert level == expected_level
+
+
+@pytest.mark.parametrize(
+    "width, height, target_mpp",
+    [
+        (4, 4, 0.1),
+        (16, 16, 0.01),
+        (2, 2, 0.25),
+    ],
+)
+def test_no_suitable_level_available(wsi: eva_wsi.Wsi, width: int, height: int, target_mpp: float):
+    """Tests the case where no suitable mask level is available.
+
+    This can happen for instance when the patch dimensions scaled to the selected mask level
+    are too small or even collapse to zero pixels.
+    """
+    with pytest.raises(
+        ValueError, match="No level with the specified minimum number of patch pixels available."
+    ):
+        eva_wsi.get_mask_level(wsi, width, height, target_mpp)
+
+
+@pytest.fixture
+def wsi(assets_path: str) -> eva_wsi.Wsi:
+    """Fixture for loading a WSI object.
+
+    The test WSI slide has the following specs:
+    - level_dimensions: ((256, 256), (128, 128))
+    - level_downsamples: (1.0, 2.0)
+    - mpp (level 0): 0.25
+    """
+    path = os.path.join(assets_path, "vision/datasets/wsi/0/a.tiff")
+    return eva_wsi.wsi_backend("openslide")(path)

From b3f0f432c68f53fceda2b1ebcafa52e8ac53ec90 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com>
Date: Wed, 22 May 2024 10:08:42 +0200
Subject: [PATCH 17/29] Add `PANDA` dataset class (#430)

---
 configs/vision/dino_vit/offline/panda.yaml    |  16 +-
 docs/datasets/index.md                        |   7 +
 docs/datasets/panda.md                        |  68 +++++++
 mkdocs.yml                                    |  13 +-
 src/eva/core/data/splitting/__init__.py       |   5 +
 src/eva/core/data/splitting/stratified.py     |  56 ++++++
 src/eva/vision/data/datasets/__init__.py      |   2 +
 src/eva/vision/data/datasets/_validators.py   |   4 +-
 .../data/datasets/classification/__init__.py  |   2 +
 .../data/datasets/classification/panda.py     | 188 ++++++++++++++++++
 src/eva/vision/data/datasets/wsi.py           |   2 +-
 src/eva/vision/data/wsi/patching/mask.py      |  18 +-
 .../0214df71ae527e2144021178c453d204.tiff     |   3 +
 .../02d302a8d723fa00331f373091b29135.tiff     |   3 +
 .../157565e23ba28d5a42f63f34f3dd4425.tiff     |   3 +
 .../682a1fd346b6fff340afbdb80c2f7caf.tiff     |   3 +
 .../8582b59b41635fa38401d1bddad66707.tiff     |   3 +
 .../8c357871e57c5c60277230412f2d9028.tiff     |   3 +
 .../979cf5a2fa4079eaf74343d6ff5e1b51.tiff     |   3 +
 .../9dd40c0127d217bc4917e4db40e06e94.tiff     |   3 +
 .../9ed8ec7bf90653bc4ca86b3ca53cbb96.tiff     |   3 +
 .../a04310d441e8d2c7a5066627baeec9b6.tiff     |   3 +
 .../fb8886059879eaac70139336cb525838.tiff     |   3 +
 .../panda/train_with_noisy_labels.csv         |   3 +
 tests/eva/core/data/splitting/__init__.py     |   1 +
 .../core/data/splitting/test_stratified.py    |  70 +++++++
 .../datasets/classification/test_panda.py     | 104 ++++++++++
 .../data/datasets/classification/test_wsi.py  |   2 +-
 .../eva/vision/data/wsi/patching/test_mask.py |   6 +-
 29 files changed, 570 insertions(+), 30 deletions(-)
 create mode 100644 docs/datasets/panda.md
 create mode 100644 src/eva/core/data/splitting/__init__.py
 create mode 100644 src/eva/core/data/splitting/stratified.py
 create mode 100644 src/eva/vision/data/datasets/classification/panda.py
 create mode 100644 
tests/eva/assets/vision/datasets/panda/train_images/0214df71ae527e2144021178c453d204.tiff create mode 100644 tests/eva/assets/vision/datasets/panda/train_images/02d302a8d723fa00331f373091b29135.tiff create mode 100644 tests/eva/assets/vision/datasets/panda/train_images/157565e23ba28d5a42f63f34f3dd4425.tiff create mode 100644 tests/eva/assets/vision/datasets/panda/train_images/682a1fd346b6fff340afbdb80c2f7caf.tiff create mode 100644 tests/eva/assets/vision/datasets/panda/train_images/8582b59b41635fa38401d1bddad66707.tiff create mode 100644 tests/eva/assets/vision/datasets/panda/train_images/8c357871e57c5c60277230412f2d9028.tiff create mode 100644 tests/eva/assets/vision/datasets/panda/train_images/979cf5a2fa4079eaf74343d6ff5e1b51.tiff create mode 100644 tests/eva/assets/vision/datasets/panda/train_images/9dd40c0127d217bc4917e4db40e06e94.tiff create mode 100644 tests/eva/assets/vision/datasets/panda/train_images/9ed8ec7bf90653bc4ca86b3ca53cbb96.tiff create mode 100644 tests/eva/assets/vision/datasets/panda/train_images/a04310d441e8d2c7a5066627baeec9b6.tiff create mode 100644 tests/eva/assets/vision/datasets/panda/train_images/fb8886059879eaac70139336cb525838.tiff create mode 100644 tests/eva/assets/vision/datasets/panda/train_with_noisy_labels.csv create mode 100644 tests/eva/core/data/splitting/__init__.py create mode 100644 tests/eva/core/data/splitting/test_stratified.py create mode 100644 tests/eva/vision/data/datasets/classification/test_panda.py diff --git a/configs/vision/dino_vit/offline/panda.yaml b/configs/vision/dino_vit/offline/panda.yaml index c33b9e35..8977a7f1 100644 --- a/configs/vision/dino_vit/offline/panda.yaml +++ b/configs/vision/dino_vit/offline/panda.yaml @@ -84,8 +84,7 @@ data: embeddings_transforms: class_path: eva.core.data.transforms.Pad2DTensor init_args: - pad_size: 100 - pad_value: 0 + pad_size: &N_PATCHES 1000 val: class_path: eva.datasets.MultiEmbeddingsClassificationDataset init_args: @@ -97,14 +96,13 @@ data: <<: *DATASET_ARGS split: test predict: - - class_path: eva.vision.datasets.WsiClassificationDataset + - class_path: eva.vision.datasets.PANDA init_args: &PREDICT_DATASET_ARGS - root: ${oc.env:DATA_ROOT, ./slide_data}/panda - manifest_file: manifest.csv + root: ${oc.env:DATA_ROOT, ./data}/panda/prostate-cancer-grade-assessment sampler: class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler init_args: - max_samples: 100 + max_samples: *N_PATCHES width: 224 height: 224 target_mpp: 0.5 @@ -112,14 +110,14 @@ data: image_transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: - size: ${oc.env:RESIZE_DIM, 224} + size: ${oc.env:RESIZE_DIM, 224} mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} - - class_path: eva.vision.datasets.WsiClassificationDataset + - class_path: eva.vision.datasets.PANDA init_args: <<: *PREDICT_DATASET_ARGS split: val - - class_path: eva.vision.datasets.WsiClassificationDataset + - class_path: eva.vision.datasets.PANDA init_args: <<: *PREDICT_DATASET_ARGS split: test diff --git a/docs/datasets/index.md b/docs/datasets/index.md index 963de114..120978bb 100644 --- a/docs/datasets/index.md +++ b/docs/datasets/index.md @@ -6,6 +6,13 @@ ### Whole Slide (WSI) and microscopy image datasets +#### Slide-level +| Dataset | #Slides | Slide Size | Magnification (μm/px) | Task | Cancer Type | +|------------------------------------|----------|------------|------------------------|----------------------------|------------------| +| [PANDA](panda.md) | 
3,152    | ~20k x 20k x 3 | 20x (0.5)              | Classification (6 classes) | Prostate         |
+
+
+#### Patch-level
 | Dataset                            | #Patches | Patch Size | Magnification (μm/px) | Task                       | Cancer Type     |
 |------------------------------------|----------|------------|------------------------|----------------------------|------------------|
 | [BACH](bach.md)                    | 400      | 2048x1536  | 20x (0.5)              | Classification (4 classes) | Breast          |
diff --git a/docs/datasets/panda.md b/docs/datasets/panda.md
new file mode 100644
index 00000000..16e7da8d
--- /dev/null
+++ b/docs/datasets/panda.md
@@ -0,0 +1,68 @@
+# PANDA (Prostate cANcer graDe Assessment)
+
+The PANDA dataset consists of 10616 whole-slide images of digitized H&E-stained prostate tissue biopsies originating from two medical centers. After the biopsy, the slides were classified into Gleason patterns (3, 4 or 5) based on the architectural growth patterns of the tumor, which are then converted into an ISUP grade on a 0-5 scale.
+
+The Gleason grading system is the most important prognostic marker for prostate cancer and the ISUP grade has a crucial role when deciding how a patient should be treated. However, the system suffers from significant inter-observer variability between pathologists, leading to imperfect and noisy labels.
+
+Source: https://www.kaggle.com/competitions/prostate-cancer-grade-assessment
+
+
+## Raw data
+
+### Key stats
+
+|                           |                                                          |
+|---------------------------|----------------------------------------------------------|
+| **Modality**              | Vision (WSI)                                             |
+| **Task**                  | Multiclass classification (6 classes)                    |
+| **Cancer type**           | Prostate                                                 |
+| **Data size**             | 347 GB                                                   |
+| **Image dimension**       | ~20k x 20k x 3                                           |
+| **Magnification (μm/px)** | 20x (0.5) - Level 0                                      |
+| **Files format**          | `.tiff`                                                  |
+| **Number of images**      | 10616 (9555 after removing noisy labels)                 |
+
+
+### Organization
+
+The data `prostate-cancer-grade-assessment.zip` from [kaggle](https://www.kaggle.com/competitions/prostate-cancer-grade-assessment/data) is organized as follows:
+
+```
+prostate-cancer-grade-assessment
+├── train_images
+│   ├── 0005f7aaab2800f6170c399693a96917.tiff
+│   └── ...
+├── train_label_masks (not used in eva)
+│   ├── 0005f7aaab2800f6170c399693a96917_mask.tiff
+│   └── ...
+├── train.csv (contains Gleason & ISUP labels)
+├── test.csv
+├── sample_submission.csv
+```
+
+## Download and preprocessing
+
+The `PANDA` dataset class doesn't download the data during runtime; the data must be downloaded manually from [kaggle](https://www.kaggle.com/competitions/prostate-cancer-grade-assessment/data).
+
+As done in other studies [1] we exclude ~10% of the samples with noisy labels according to kaggle's [6th place solution](https://www.kaggle.com/competitions/prostate-cancer-grade-assessment/discussion/169230), resulting in a total dataset size of 9555 WSIs.
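+
+A minimal sketch of this filtering step, together with the split generation described below (it assumes that `noise_ratio_10 == 0` marks a slide whose label is considered clean in the bundled CSV):
+
+```python
+import glob
+import os
+
+import pandas as pd
+
+from eva.core.data import splitting
+
+root = "./data/panda/prostate-cancer-grade-assessment"  # assumed local path
+labels = pd.read_csv(os.path.join(root, "train_with_noisy_labels.csv"), index_col="image_id")
+clean_ids = set(labels[labels["noise_ratio_10"] == 0].index)  # assumption: 0 == clean label
+
+paths = [
+    p
+    for p in sorted(glob.glob(os.path.join(root, "train_images", "*.tiff")))
+    if os.path.basename(p).removesuffix(".tiff") in clean_ids
+]
+targets = [labels.loc[os.path.basename(p).removesuffix(".tiff"), "isup_grade"] for p in paths]
+
+train_idx, val_idx, test_idx = splitting.stratified_split(
+    samples=paths, targets=targets, train_ratio=0.7, val_ratio=0.15, test_ratio=0.15, seed=42
+)
+```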
+
+We then generate random stratified train / validation and test splits using a 0.7 / 0.15 / 0.15 ratio:
+
+
+| Splits   | Train       | Validation  | Test       |
+|----------|-------------|-------------|------------|
+| #Samples | 6686 (70%)  | 1430 (15%)  | 1439 (15%) |
+
+
+## Relevant links
+
+* [Kaggle Challenge](https://www.kaggle.com/competitions/prostate-cancer-grade-assessment)
+* [Noisy Labels](https://github.com/analokmaus/kaggle-panda-challenge-public)
+
+
+## License
+
+[CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/deed.en)
+
+## References
+[1] : [A General-Purpose Self-Supervised Model for Computational Pathology](https://arxiv.org/abs/2308.15474)
\ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
index a1642ff8..4acc4511 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -78,11 +78,14 @@ nav:
       - user-guide/advanced/model_wrappers.md
     - Datasets:
       - datasets/index.md
-      - WSI-patches:
-        - BACH: datasets/bach.md
-        - CRC: datasets/crc.md
-        - MHIST: datasets/mhist.md
-        - PatchCamelyon: datasets/patch_camelyon.md
+      - WSI:
+        - Slide-level:
+          - PANDA: datasets/panda.md
+        - Patch-level:
+          - BACH: datasets/bach.md
+          - CRC: datasets/crc.md
+          - MHIST: datasets/mhist.md
+          - PatchCamelyon: datasets/patch_camelyon.md
       - Radiology:
         - TotalSegmentator: datasets/total_segmentator.md
     - Reference API:
diff --git a/src/eva/core/data/splitting/__init__.py b/src/eva/core/data/splitting/__init__.py
new file mode 100644
index 00000000..5faeccd6
--- /dev/null
+++ b/src/eva/core/data/splitting/__init__.py
@@ -0,0 +1,5 @@
+"""Dataset splitting API."""
+
+from eva.core.data.splitting.stratified import stratified_split
+
+__all__ = ["stratified_split"]
diff --git a/src/eva/core/data/splitting/stratified.py b/src/eva/core/data/splitting/stratified.py
new file mode 100644
index 00000000..ad9377a7
--- /dev/null
+++ b/src/eva/core/data/splitting/stratified.py
@@ -0,0 +1,56 @@
+"""Functions for stratified splitting."""
+
+from typing import Any, List, Sequence, Tuple
+
+import numpy as np
+
+
+def stratified_split(
+    samples: Sequence[Any],
+    targets: Sequence[Any],
+    train_ratio: float,
+    val_ratio: float,
+    test_ratio: float = 0.0,
+    seed: int = 42,
+) -> Tuple[List[int], List[int], List[int] | None]:
+    """Splits the samples into stratified train, validation, and test (optional) sets.
+
+    Args:
+        samples: The samples to split.
+        targets: The corresponding targets used for stratification.
+        train_ratio: The ratio of the training set.
+        val_ratio: The ratio of the validation set.
+        test_ratio: The ratio of the test set (optional).
+        seed: The seed for reproducibility.
+
+    Returns:
+        The indices of the train, validation, and test sets.
+ """ + if len(samples) != len(targets): + raise ValueError("The number of samples and targets must be equal.") + if train_ratio + val_ratio + (test_ratio or 0) != 1: + raise ValueError("The sum of the ratios must be equal to 1.") + + np.random.seed(seed) + unique_classes, y_indices = np.unique(targets, return_inverse=True) + n_classes = unique_classes.shape[0] + + train_indices, val_indices, test_indices = [], [], [] + + for c in range(n_classes): + class_indices = np.where(y_indices == c)[0] + np.random.shuffle(class_indices) + + n_train = int(np.floor(train_ratio * len(class_indices))) or 1 + n_val = ( + len(class_indices) - n_train + if test_ratio == 0.0 + else int(np.floor(val_ratio * len(class_indices))) or 1 + ) + + train_indices.extend(class_indices[:n_train]) + val_indices.extend(class_indices[n_train : n_train + n_val]) + if test_ratio > 0.0: + test_indices.extend(class_indices[n_train + n_val :]) + + return train_indices, val_indices, test_indices or None diff --git a/src/eva/vision/data/datasets/__init__.py b/src/eva/vision/data/datasets/__init__.py index d9705124..599aaca8 100644 --- a/src/eva/vision/data/datasets/__init__.py +++ b/src/eva/vision/data/datasets/__init__.py @@ -4,6 +4,7 @@ BACH, CRC, MHIST, + PANDA, PatchCamelyon, TotalSegmentatorClassification, WsiClassificationDataset, @@ -18,6 +19,7 @@ "MHIST", "ImageSegmentation", "PatchCamelyon", + "PANDA", "TotalSegmentatorClassification", "TotalSegmentator2D", "VisionDataset", diff --git a/src/eva/vision/data/datasets/_validators.py b/src/eva/vision/data/datasets/_validators.py index 9989bc45..b6c17fe5 100644 --- a/src/eva/vision/data/datasets/_validators.py +++ b/src/eva/vision/data/datasets/_validators.py @@ -13,7 +13,7 @@ def check_dataset_integrity( dataset: vision.VisionDataset, *, - length: int, + length: int | None, n_classes: int, first_and_last_labels: Tuple[str, str], ) -> None: @@ -23,7 +23,7 @@ def check_dataset_integrity( ValueError: If the input dataset's values do not match the expected ones. """ - if len(dataset) != length: + if length and len(dataset) != length: raise ValueError( f"Dataset's '{dataset.__class__.__qualname__}' length " f"({len(dataset)}) does not match the expected one ({length}). 
" diff --git a/src/eva/vision/data/datasets/classification/__init__.py b/src/eva/vision/data/datasets/classification/__init__.py index ca74c25a..265c9740 100644 --- a/src/eva/vision/data/datasets/classification/__init__.py +++ b/src/eva/vision/data/datasets/classification/__init__.py @@ -3,6 +3,7 @@ from eva.vision.data.datasets.classification.bach import BACH from eva.vision.data.datasets.classification.crc import CRC from eva.vision.data.datasets.classification.mhist import MHIST +from eva.vision.data.datasets.classification.panda import PANDA from eva.vision.data.datasets.classification.patch_camelyon import PatchCamelyon from eva.vision.data.datasets.classification.total_segmentator import TotalSegmentatorClassification from eva.vision.data.datasets.classification.wsi import WsiClassificationDataset @@ -14,4 +15,5 @@ "PatchCamelyon", "TotalSegmentatorClassification", "WsiClassificationDataset", + "PANDA", ] diff --git a/src/eva/vision/data/datasets/classification/panda.py b/src/eva/vision/data/datasets/classification/panda.py new file mode 100644 index 00000000..faef1734 --- /dev/null +++ b/src/eva/vision/data/datasets/classification/panda.py @@ -0,0 +1,188 @@ +"""PANDA dataset class.""" + +import functools +import glob +import os +from typing import Any, Callable, Dict, List, Literal, Tuple + +import numpy as np +import pandas as pd +import torch +from torchvision.datasets import utils +from typing_extensions import override + +from eva.core.data import splitting +from eva.vision.data.datasets import _validators, structs, wsi +from eva.vision.data.datasets.classification import base +from eva.vision.data.wsi.patching import samplers + + +class PANDA(wsi.MultiWsiDataset, base.ImageClassification): + """Dataset class for PANDA images and corresponding targets.""" + + _train_split_ratio: float = 0.7 + """Train split ratio.""" + + _val_split_ratio: float = 0.15 + """Validation split ratio.""" + + _test_split_ratio: float = 0.15 + """Test split ratio.""" + + _resources: List[structs.DownloadResource] = [ + structs.DownloadResource( + filename="train_with_noisy_labels.csv", + url="https://raw.githubusercontent.com/analokmaus/kaggle-panda-challenge-public/master/train.csv", + md5="5e4bfc78bda9603d2e2faf3ed4b21dfa", + ) + ] + """Download resources.""" + + def __init__( + self, + root: str, + sampler: samplers.Sampler, + split: Literal["train", "val", "test"] | None = None, + width: int = 224, + height: int = 224, + target_mpp: float = 0.5, + backend: str = "openslide", + image_transforms: Callable | None = None, + seed: int = 42, + ) -> None: + """Initializes the dataset. + + Args: + root: Root directory of the dataset. + sampler: The sampler to use for sampling patch coordinates. + split: Dataset split to use. If `None`, the entire dataset is used. + width: Width of the patches to be extracted, in pixels. + height: Height of the patches to be extracted, in pixels. + target_mpp: Target microns per pixel (mpp) for the patches. + backend: The backend to use for reading the whole-slide images. + image_transforms: Transforms to apply to the extracted image patches. + seed: Random seed for reproducibility. 
+        """
+        self._split = split
+        self._root = root
+        self._width = width
+        self._height = height
+        self._target_mpp = target_mpp
+        self._seed = seed
+
+        self._download_resources()
+
+        wsi.MultiWsiDataset.__init__(
+            self,
+            root=root,
+            file_paths=self._load_file_paths(split),
+            width=width,
+            height=height,
+            sampler=sampler,
+            target_mpp=target_mpp,
+            backend=backend,
+            image_transforms=image_transforms,
+        )
+
+    @property
+    @override
+    def classes(self) -> List[str]:
+        return ["0", "1", "2", "3", "4", "5"]
+
+    @functools.cached_property
+    def annotations(self) -> pd.DataFrame:
+        """Loads the dataset labels."""
+        path = os.path.join(self._root, "train_with_noisy_labels.csv")
+        return pd.read_csv(path, index_col="image_id")
+
+    @override
+    def prepare_data(self) -> None:
+        _validators.check_dataset_exists(self._root, False)
+
+        if not os.path.isdir(os.path.join(self._root, "train_images")):
+            raise FileNotFoundError("'train_images' directory not found in the root folder.")
+        if not os.path.isfile(os.path.join(self._root, "train_with_noisy_labels.csv")):
+            raise FileNotFoundError(
+                "'train_with_noisy_labels.csv' file not found in the root folder."
+            )
+
+    def _download_resources(self) -> None:
+        """Downloads the dataset resources."""
+        for resource in self._resources:
+            utils.download_url(resource.url, self._root, resource.filename, resource.md5)
+
+    @override
+    def validate(self) -> None:
+        _validators.check_dataset_integrity(
+            self,
+            length=None,
+            n_classes=6,
+            first_and_last_labels=("0", "5"),
+        )
+
+    @override
+    def filename(self, index: int) -> str:
+        return os.path.basename(self._file_paths[self._get_dataset_idx(index)])
+
+    @override
+    def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]:
+        return base.ImageClassification.__getitem__(self, index)
+
+    @override
+    def load_image(self, index: int) -> torch.Tensor:
+        return wsi.MultiWsiDataset.__getitem__(self, index)
+
+    @override
+    def load_target(self, index: int) -> np.ndarray:
+        file_path = self._file_paths[self._get_dataset_idx(index)]
+        return np.asarray(self._get_target_from_path(file_path))
+
+    @override
+    def load_metadata(self, index: int) -> Dict[str, Any]:
+        return {"wsi_id": self.filename(index).split(".")[0]}
+
+    def _load_file_paths(self, split: Literal["train", "val", "test"] | None = None) -> List[str]:
+        """Loads the file paths of the corresponding dataset split."""
+        image_dir = os.path.join(self._root, "train_images")
+        file_paths = sorted(glob.glob(os.path.join(image_dir, "*.tiff")))
+        if len(file_paths) != len(self.annotations):
+            raise ValueError(
+                f"Expected {len(self.annotations)} images, found {len(file_paths)} in {image_dir}."
+            )
+        file_paths = self._filter_noisy_labels(file_paths)
+        targets = [self._get_target_from_path(file_path) for file_path in file_paths]
+
+        train_indices, val_indices, test_indices = splitting.stratified_split(
+            samples=file_paths,
+            targets=targets,
+            train_ratio=self._train_split_ratio,
+            val_ratio=self._val_split_ratio,
+            test_ratio=self._test_split_ratio,
+            seed=self._seed,
+        )
+
+        match split:
+            case "train":
+                return [file_paths[i] for i in train_indices]
+            case "val":
+                return [file_paths[i] for i in val_indices]
+            case "test":
+                return [file_paths[i] for i in test_indices or []]
+            case None:
+                return file_paths
+            case _:
+                raise ValueError("Invalid split. 
Use 'train', 'val', 'test' or `None`.")
+
+    def _filter_noisy_labels(self, file_paths: List[str]):
+        is_noisy_filter = self.annotations["noise_ratio_10"] == 0
+        non_noisy_image_ids = set(self.annotations.loc[~is_noisy_filter].index)
+        filtered_file_paths = [
+            file_path
+            for file_path in file_paths
+            if self._get_id_from_path(file_path) in non_noisy_image_ids
+        ]
+        return filtered_file_paths
+
+    def _get_target_from_path(self, file_path: str) -> int:
+        return self.annotations.loc[self._get_id_from_path(file_path), "isup_grade"]
+
+    def _get_id_from_path(self, file_path: str) -> str:
+        return os.path.basename(file_path).replace(".tiff", "")
diff --git a/src/eva/vision/data/datasets/wsi.py b/src/eva/vision/data/datasets/wsi.py
index 8c5817f1..f90df016 100644
--- a/src/eva/vision/data/datasets/wsi.py
+++ b/src/eva/vision/data/datasets/wsi.py
@@ -125,7 +125,7 @@ def __init__(
         super().__init__(self._load_datasets())
 
     def _load_datasets(self) -> list[WsiDataset]:
-        logger.info(f"Initializing {len(self._file_paths)} WSI datasets ...")
+        logger.info(f"Initializing dataset with {len(self._file_paths)} WSIs ...")
         wsi_datasets = []
         for file_path in self._file_paths:
             file_path = os.path.join(self._root, file_path) if self._root else file_path
diff --git a/src/eva/vision/data/wsi/patching/mask.py b/src/eva/vision/data/wsi/patching/mask.py
index d73a0d59..2a69425b 100644
--- a/src/eva/vision/data/wsi/patching/mask.py
+++ b/src/eva/vision/data/wsi/patching/mask.py
@@ -27,9 +27,9 @@ def get_mask(
     wsi: Wsi,
     mask_level_idx: int,
     saturation_threshold: int = 20,
-    median_blur_threshold: int | None = 7,
+    median_blur_kernel_size: int | None = None,
     fill_holes: bool = False,
-    kernel_size: Tuple[int, int] = (7, 7),
+    holes_kernel_size: Tuple[int, int] = (7, 7),
     use_otsu: bool = False,
 ) -> Mask:
     """Generates a binary foreground mask for a given WSI.
@@ -37,8 +37,8 @@
     This is a simplified version of the algorithm proposed in [1] (CLAM):
     1. Convert the image to the HSV color space (easier to separate specific colors than in RGB).
     2. (optional) Apply a median blur to the saturation channel to reduce noise
-       & closing small gaps in the mask. While yields cleaner masks, this step is the most
-       computationally expensive.
+       & closing small gaps in the mask. While this yields cleaner masks, this step is the most
+       computationally expensive and thus disabled by default (CLAM uses a value of 7).
     3. Calculate binary mask by thresholding across the saturation channel.
 
     [1] Lu, Ming Y., et al. "Data-efficient and weakly supervised computational
     pathology on whole-slide images." Nature biomedical engineering 5.6 (2021): 555-570.
     https://github.com/mahmoodlab/CLAM
 
     Args:
         wsi: The WSI object.
         mask_level_idx: The level index of the WSI at which we want to extract the mask.
         saturation_threshold: The threshold value for the saturation channel.
-        median_blur_threshold: The threshold value for the median blur operation.
-        kernel_size: The size of the kernel for morphological operations to fill holes.
+        median_blur_kernel_size: Kernel size for the median blur operation.
+        holes_kernel_size: The size of the kernel for morphological operations to fill holes.
         fill_holes: Whether to fill holes in the mask.
         use_otsu: Whether to use Otsu's method for the thresholding operation. If False,
             a fixed threshold value is used.
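In isolation, the masking recipe described by this docstring reduces to a few OpenCV calls. A minimal sketch using the renamed parameters above (an illustration of the approach, not the library's `get_mask` itself):

```python
import cv2
import numpy as np


def saturation_mask(
    image_rgb: np.ndarray,
    saturation_threshold: int = 20,
    median_blur_kernel_size: int | None = None,
    use_otsu: bool = False,
) -> np.ndarray:
    """Sketch of the CLAM-style foreground mask; returns an array with values in {0, 1}."""
    saturation = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2HSV)[:, :, 1]
    if median_blur_kernel_size:  # optional smoothing; the kernel size must be odd
        saturation = cv2.medianBlur(saturation, median_blur_kernel_size)
    threshold_type = cv2.THRESH_BINARY + (cv2.THRESH_OTSU if use_otsu else 0)
    _, mask = cv2.threshold(saturation, saturation_threshold, 1, threshold_type)
    return mask.astype(np.uint8)
```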
@@ -60,8 +60,8 @@ def get_mask( image = wsi.read_region((0, 0), mask_level_idx, wsi.level_dimensions[mask_level_idx]) image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) image = ( - cv2.medianBlur(image[:, :, 1], median_blur_threshold) - if median_blur_threshold + cv2.medianBlur(image[:, :, 1], median_blur_kernel_size) + if median_blur_kernel_size else image[:, :, 1] ) @@ -69,7 +69,7 @@ def get_mask( _, mask_array = cv2.threshold(image, saturation_threshold, 1, threshold_type) if fill_holes: - kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernel_size) + kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, holes_kernel_size) mask_array = cv2.dilate(mask_array, kernel, iterations=1) contour, _ = cv2.findContours(mask_array, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE) for cnt in contour: diff --git a/tests/eva/assets/vision/datasets/panda/train_images/0214df71ae527e2144021178c453d204.tiff b/tests/eva/assets/vision/datasets/panda/train_images/0214df71ae527e2144021178c453d204.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/panda/train_images/0214df71ae527e2144021178c453d204.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/panda/train_images/02d302a8d723fa00331f373091b29135.tiff b/tests/eva/assets/vision/datasets/panda/train_images/02d302a8d723fa00331f373091b29135.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/panda/train_images/02d302a8d723fa00331f373091b29135.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/panda/train_images/157565e23ba28d5a42f63f34f3dd4425.tiff b/tests/eva/assets/vision/datasets/panda/train_images/157565e23ba28d5a42f63f34f3dd4425.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/panda/train_images/157565e23ba28d5a42f63f34f3dd4425.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/panda/train_images/682a1fd346b6fff340afbdb80c2f7caf.tiff b/tests/eva/assets/vision/datasets/panda/train_images/682a1fd346b6fff340afbdb80c2f7caf.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/panda/train_images/682a1fd346b6fff340afbdb80c2f7caf.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/panda/train_images/8582b59b41635fa38401d1bddad66707.tiff b/tests/eva/assets/vision/datasets/panda/train_images/8582b59b41635fa38401d1bddad66707.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/panda/train_images/8582b59b41635fa38401d1bddad66707.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/panda/train_images/8c357871e57c5c60277230412f2d9028.tiff b/tests/eva/assets/vision/datasets/panda/train_images/8c357871e57c5c60277230412f2d9028.tiff new file mode 100644 index 00000000..64bc6f24 --- 
/dev/null +++ b/tests/eva/assets/vision/datasets/panda/train_images/8c357871e57c5c60277230412f2d9028.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/panda/train_images/979cf5a2fa4079eaf74343d6ff5e1b51.tiff b/tests/eva/assets/vision/datasets/panda/train_images/979cf5a2fa4079eaf74343d6ff5e1b51.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/panda/train_images/979cf5a2fa4079eaf74343d6ff5e1b51.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/panda/train_images/9dd40c0127d217bc4917e4db40e06e94.tiff b/tests/eva/assets/vision/datasets/panda/train_images/9dd40c0127d217bc4917e4db40e06e94.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/panda/train_images/9dd40c0127d217bc4917e4db40e06e94.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/panda/train_images/9ed8ec7bf90653bc4ca86b3ca53cbb96.tiff b/tests/eva/assets/vision/datasets/panda/train_images/9ed8ec7bf90653bc4ca86b3ca53cbb96.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/panda/train_images/9ed8ec7bf90653bc4ca86b3ca53cbb96.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/panda/train_images/a04310d441e8d2c7a5066627baeec9b6.tiff b/tests/eva/assets/vision/datasets/panda/train_images/a04310d441e8d2c7a5066627baeec9b6.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/panda/train_images/a04310d441e8d2c7a5066627baeec9b6.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/panda/train_images/fb8886059879eaac70139336cb525838.tiff b/tests/eva/assets/vision/datasets/panda/train_images/fb8886059879eaac70139336cb525838.tiff new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/panda/train_images/fb8886059879eaac70139336cb525838.tiff @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/panda/train_with_noisy_labels.csv b/tests/eva/assets/vision/datasets/panda/train_with_noisy_labels.csv new file mode 100644 index 00000000..db3d8230 --- /dev/null +++ b/tests/eva/assets/vision/datasets/panda/train_with_noisy_labels.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd9cec7cd6b94b2cb845ab5093659dd127d1e31ad2b94a8f97effd9c0184bfff +size 465 diff --git a/tests/eva/core/data/splitting/__init__.py b/tests/eva/core/data/splitting/__init__.py new file mode 100644 index 00000000..18a90221 --- /dev/null +++ b/tests/eva/core/data/splitting/__init__.py @@ -0,0 +1 @@ +"""Tests core splitting module.""" diff --git a/tests/eva/core/data/splitting/test_stratified.py 
b/tests/eva/core/data/splitting/test_stratified.py new file mode 100644 index 00000000..2b65ccd8 --- /dev/null +++ b/tests/eva/core/data/splitting/test_stratified.py @@ -0,0 +1,70 @@ +"""Tests for the stratified split function.""" + +import pytest + +from eva.core.data import splitting + + +@pytest.mark.parametrize( + "targets, train_ratio, val_ratio, test_ratio", + [ + ([0] * 50 + [1] * 50, 0.8, 0.2, 0.0), + ([0] * 50 + [1] * 50, 0.7, 0.15, 0.15), + ([0] * 30 + [1] * 70, 0.8, 0.2, 0.0), + ([0] * 30 + [1] * 70, 0.7, 0.15, 0.15), + ], +) +def test_stratification( + targets: list[int], train_ratio: float, val_ratio: float, test_ratio: float +): + """Tests if the stratified split maintains the class proportions.""" + samples = list(range(len(targets))) + train_indices, val_indices, test_indices = splitting.stratified_split( + samples, targets, train_ratio, val_ratio, test_ratio + ) + train_classes = [targets[i] for i in train_indices] + val_classes = [targets[i] for i in val_indices] + + for c in set(targets): + expected_train_proportion = train_ratio * targets.count(c) + expected_val_proportion = val_ratio * targets.count(c) + assert train_classes.count(c) == pytest.approx(expected_train_proportion, abs=1) + assert val_classes.count(c) == pytest.approx(expected_val_proportion, abs=1) + + assert len(train_indices) + len(val_indices) + len(test_indices or []) == len(samples) + + +@pytest.mark.parametrize("train_ratio, val_ratio, test_ratio", [(0.6, 0.3, 0.0), (0.6, 0.4, 0.3)]) +def test_invalid_ratio_sums(train_ratio: float, val_ratio: float, test_ratio: float): + """Tests if the function raises an error when the ratios do not sum to 1.""" + samples = list(range(100)) + targets = [0] * 50 + [1] * 50 + expected_error = "The sum of the ratios must be equal to 1." 
diff --git a/tests/eva/vision/data/datasets/classification/test_panda.py b/tests/eva/vision/data/datasets/classification/test_panda.py
new file mode 100644
index 00000000..72a5b7ff
--- /dev/null
+++ b/tests/eva/vision/data/datasets/classification/test_panda.py
@@ -0,0 +1,104 @@
+"""PANDA dataset tests."""
+
+import os
+from typing import Any, Literal
+from unittest.mock import patch
+
+import numpy as np
+import pytest
+import torch
+import torchvision.transforms.v2 as torch_transforms
+
+from eva.vision.data import datasets
+from eva.vision.data import transforms as eva_transforms
+from eva.vision.data.wsi.patching import samplers
+
+TARGET_SIZE = 224
+DEFAULT_ARGS = {
+    "width": 16,
+    "height": 16,
+    "target_mpp": 0.5,
+    "sampler": samplers.GridSampler(),
+    "backend": "openslide",
+    "image_transforms": torch_transforms.Compose([eva_transforms.ResizeAndCrop(size=TARGET_SIZE)]),
+}
+
+
+def test_split_and_expected_shapes(root: str):
+    """Test loading the dataset with different splits."""
+    train_dataset = datasets.PANDA(root=root, split="train", **DEFAULT_ARGS)
+    val_dataset = datasets.PANDA(root=root, split="val", **DEFAULT_ARGS)
+    test_dataset = datasets.PANDA(root=root, split="test", **DEFAULT_ARGS)
+
+    assert len(train_dataset.datasets) == 6
+    assert len(val_dataset.datasets) == 2
+    assert len(test_dataset.datasets) == 2
+
+    assert len(train_dataset) == 384
+    assert len(val_dataset) == 128
+    assert len(test_dataset) == 128
+
+    _check_batch_shape(train_dataset[0])
+    _check_batch_shape(val_dataset[0])
+    _check_batch_shape(test_dataset[0])
+
+
+@pytest.mark.parametrize("split", ["train", "val", "test", None])
+def test_filenames(root: str, split: Literal["train", "val", "test"]):
+    """Tests that the number of filenames matches the dataset size."""
+    dataset = datasets.PANDA(root=root, split=split, **DEFAULT_ARGS)
+
+    filenames = set()
+    for i in range(len(dataset)):
+        filenames.add(dataset.filename(i))
+
+    assert len(filenames) == len(dataset.datasets)
+
+
+def
test_same_split_same_seed(root: str): + """Test that the generated split is deterministic when using the same seed.""" + dataset1 = datasets.PANDA(root=root, split="train", seed=42, **DEFAULT_ARGS) + dataset2 = datasets.PANDA(root=root, split="train", seed=42, **DEFAULT_ARGS) + + assert len(dataset1) == len(dataset2) + assert dataset1._file_paths == dataset2._file_paths + + for i in range(len(dataset1)): + assert np.allclose(dataset1[i][1], dataset2[i][1]) + + +def test_different_seed_different_split(root: str): + """Test that the generated split is different when using a different seed.""" + dataset1 = datasets.PANDA(root=root, split="train", seed=42, **DEFAULT_ARGS) + dataset2 = datasets.PANDA(root=root, split="train", seed=43, **DEFAULT_ARGS) + + assert len(dataset1) == len(dataset2) + assert dataset1._file_paths != dataset2._file_paths + + +def _check_batch_shape(batch: Any): + assert isinstance(batch, tuple) + assert len(batch) == 3 + + image, target, metadata = batch + assert isinstance(image, torch.Tensor) + assert image.shape == (3, TARGET_SIZE, TARGET_SIZE) + + assert isinstance(target, np.ndarray) + assert target.size == 1 + + assert isinstance(metadata, dict) + assert "wsi_id" in metadata + + +@pytest.fixture +def root(assets_path: str) -> str: + """Fixture returning the root directory of the dataset.""" + return os.path.join(assets_path, "vision/datasets/panda") + + +@pytest.fixture(autouse=True) +def mock_download(): + """Mocks the download function to avoid downloading resources when running tests.""" + with patch.object(datasets.PANDA, "_download_resources", return_value=None): + yield diff --git a/tests/eva/vision/data/datasets/classification/test_wsi.py b/tests/eva/vision/data/datasets/classification/test_wsi.py index 6aa151f5..ca020c2b 100644 --- a/tests/eva/vision/data/datasets/classification/test_wsi.py +++ b/tests/eva/vision/data/datasets/classification/test_wsi.py @@ -20,7 +20,7 @@ "width": 32, "height": 32, "target_mpp": 0.25, - "sampler": samplers.GridSampler(None), + "sampler": samplers.GridSampler(), "backend": "openslide", "image_transforms": torch_transforms.Compose([eva_transforms.ResizeAndCrop(size=TARGET_SIZE)]), } diff --git a/tests/eva/vision/data/wsi/patching/test_mask.py b/tests/eva/vision/data/wsi/patching/test_mask.py index 072f81b6..e63375ea 100644 --- a/tests/eva/vision/data/wsi/patching/test_mask.py +++ b/tests/eva/vision/data/wsi/patching/test_mask.py @@ -9,10 +9,10 @@ DEFAULT_ARGS = { "saturation_threshold": 20, - "median_blur_threshold": 7, + "median_blur_kernel_size": 7, "fill_holes": False, "use_otsu": False, - "kernel_size": (7, 7), + "holes_kernel_size": (7, 7), } @@ -21,7 +21,7 @@ [ (0, DEFAULT_ARGS), (1, DEFAULT_ARGS), - (0, DEFAULT_ARGS | {"median_blur_threshold": None}), + (0, DEFAULT_ARGS | {"median_blur_kernel_size": None}), (0, DEFAULT_ARGS | {"fill_holes": True}), (0, DEFAULT_ARGS | {"use_otsu": True}), (0, DEFAULT_ARGS | {"fill_holes": True, "use_otsu": True}), From 28e69be751b6249372b6ecbc050e1a70a30521fd Mon Sep 17 00:00:00 2001 From: roman807 Date: Wed, 22 May 2024 14:30:05 +0200 Subject: [PATCH 18/29] fixed panda (#463) --- src/eva/vision/data/datasets/classification/panda.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/eva/vision/data/datasets/classification/panda.py b/src/eva/vision/data/datasets/classification/panda.py index faef1734..000099e0 100644 --- a/src/eva/vision/data/datasets/classification/panda.py +++ b/src/eva/vision/data/datasets/classification/panda.py @@ -65,9 +65,6 @@ def __init__( 
""" self._split = split self._root = root - self._width = width - self._height = height - self._target_mpp = target_mpp self._seed = seed self._download_resources() @@ -143,6 +140,7 @@ def _load_file_paths(self, split: Literal["train", "val", "test"] | None = None) """Loads the file paths of the corresponding dataset split.""" image_dir = os.path.join(self._root, "train_images") file_paths = sorted(glob.glob(os.path.join(image_dir, "*.tiff"))) + file_paths = [os.path.relpath(path, self._root) for path in file_paths] if len(file_paths) != len(self.annotations): raise ValueError( f"Expected {len(self.annotations)} images, found {len(file_paths)} in {image_dir}." From 7afc09c30d72c8c5cfddbf8b6fa07c002752563d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Thu, 23 May 2024 13:59:18 +0200 Subject: [PATCH 19/29] Update `EmbeddingsWriter` to store tensors as lists (#465) --- src/eva/core/callbacks/writers/embeddings.py | 8 +++++--- .../datasets/embeddings/classification/embeddings.py | 6 ++++++ .../embeddings/classification/multi_embeddings.py | 8 +++++--- .../embeddings/embeddings/tensor_2_shape_8.pt | 3 --- .../embeddings/embeddings/tensor_2_shape_8_list.pt | 3 +++ .../embeddings/embeddings/tensor_3_shape_8.pt | 3 --- .../embeddings/embeddings/tensor_3_shape_8_list.pt | 3 +++ .../embeddings/embeddings/tensor_6_shape_1x8.pt | 3 --- .../embeddings/embeddings/tensor_6_shape_1x8_list.pt | 3 +++ .../embeddings/embeddings/tensor_7_shape_1x8.pt | 3 --- .../embeddings/embeddings/tensor_7_shape_1x8_list.pt | 3 +++ .../eva/assets/core/datasets/embeddings/manifest.csv | 4 ++-- .../multi-embeddings/embeddings/tensor_6_shape_1x8.pt | 3 --- .../embeddings/tensor_6_shape_1x8_list.pt | 3 +++ .../multi-embeddings/embeddings/tensor_7_shape_6x8.pt | 3 --- .../embeddings/tensor_7_shape_6x8_list.pt | 3 +++ .../multi-embeddings/embeddings/tensor_8_shape_2x8.pt | 3 --- .../embeddings/tensor_8_shape_2x8_list.pt | 3 +++ .../multi-embeddings/embeddings/tensor_9_shape_5x8.pt | 3 --- .../embeddings/tensor_9_shape_5x8_list.pt | 3 +++ .../core/datasets/multi-embeddings/manifest.csv | 4 ++-- tests/eva/core/callbacks/writers/test_embeddings.py | 11 +++++++---- 22 files changed, 51 insertions(+), 38 deletions(-) delete mode 100644 tests/eva/assets/core/datasets/embeddings/embeddings/tensor_2_shape_8.pt create mode 100644 tests/eva/assets/core/datasets/embeddings/embeddings/tensor_2_shape_8_list.pt delete mode 100644 tests/eva/assets/core/datasets/embeddings/embeddings/tensor_3_shape_8.pt create mode 100644 tests/eva/assets/core/datasets/embeddings/embeddings/tensor_3_shape_8_list.pt delete mode 100644 tests/eva/assets/core/datasets/embeddings/embeddings/tensor_6_shape_1x8.pt create mode 100644 tests/eva/assets/core/datasets/embeddings/embeddings/tensor_6_shape_1x8_list.pt delete mode 100644 tests/eva/assets/core/datasets/embeddings/embeddings/tensor_7_shape_1x8.pt create mode 100644 tests/eva/assets/core/datasets/embeddings/embeddings/tensor_7_shape_1x8_list.pt delete mode 100644 tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_6_shape_1x8.pt create mode 100644 tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_6_shape_1x8_list.pt delete mode 100644 tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_7_shape_6x8.pt create mode 100644 tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_7_shape_6x8_list.pt delete mode 100644 
tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_8_shape_2x8.pt create mode 100644 tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_8_shape_2x8_list.pt delete mode 100644 tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_9_shape_5x8.pt create mode 100644 tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_9_shape_5x8_list.pt diff --git a/src/eva/core/callbacks/writers/embeddings.py b/src/eva/core/callbacks/writers/embeddings.py index 292344f1..4c8d4520 100644 --- a/src/eva/core/callbacks/writers/embeddings.py +++ b/src/eva/core/callbacks/writers/embeddings.py @@ -219,7 +219,7 @@ def _save_items( def _save_predictions( prediction_buffers: List[io.BytesIO], save_path: str, is_first_save: bool ) -> None: - """Saves the embedding tensors to .pt files. + """Saves the embedding tensors as list to .pt files. If it's not the first save to this save_path, the new predictions are concatenated with the existing ones and saved to the same file. @@ -230,10 +230,12 @@ def _save_predictions( torch.load(io.BytesIO(buffer.getbuffer()), map_location="cpu") for buffer in prediction_buffers ] - predictions = torch.stack(predictions, dim=0) if not is_first_save: - predictions = torch.cat([torch.load(save_path), predictions], dim=0) + previous_predictions = torch.load(save_path, map_location="cpu") + if not isinstance(previous_predictions, list): + raise ValueError("Previous predictions should be a list of tensors.") + predictions = predictions + previous_predictions os.makedirs(os.path.dirname(save_path), exist_ok=True) torch.save(predictions, save_path) diff --git a/src/eva/core/data/datasets/embeddings/classification/embeddings.py b/src/eva/core/data/datasets/embeddings/classification/embeddings.py index a321cac2..8904394d 100644 --- a/src/eva/core/data/datasets/embeddings/classification/embeddings.py +++ b/src/eva/core/data/datasets/embeddings/classification/embeddings.py @@ -54,6 +54,12 @@ def _load_embeddings(self, index: int) -> torch.Tensor: filename = self.filename(index) embeddings_path = os.path.join(self._root, filename) tensor = torch.load(embeddings_path, map_location="cpu") + if isinstance(tensor, list): + if len(tensor) > 1: + raise ValueError( + f"Expected a single tensor in the .pt file, but found {len(tensor)}." + ) + tensor = tensor[0] return tensor.squeeze(0) @override diff --git a/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py b/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py index b103b699..4cb031da 100644 --- a/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py +++ b/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py @@ -32,9 +32,9 @@ def __init__( The manifest must have a `column_mapping["multi_id"]` column that contains the unique identifier group of embeddings. For oncology datasets, this would be usually the slide id. Each row in the manifest file points to a .pt file that can contain - one or multiple embeddings. There can also be multiple rows for the same `multi_id`, - in which case the embeddings from the different .pt files corresponding to that same - `multi_id` will be stacked along the first dimension. + one or multiple embeddings (either as a list or stacked tensors). There can also be + multiple rows for the same `multi_id`, in which case the embeddings from the different + .pt files corresponding to that same `multi_id` will be stacked along the first dimension. Args: root: Root directory of the dataset. 
@@ -80,6 +80,8 @@ def _load_embeddings(self, index: int) -> torch.Tensor: embeddings = [] for path in embedding_paths: embedding = torch.load(os.path.join(self._root, path), map_location="cpu") + if isinstance(embedding, list): + embedding = torch.stack(embedding, dim=0) embeddings.append(embedding.unsqueeze(0) if embedding.ndim == 1 else embedding) embeddings = torch.cat(embeddings, dim=0) diff --git a/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_2_shape_8.pt b/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_2_shape_8.pt deleted file mode 100644 index 4356f915..00000000 --- a/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_2_shape_8.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:998be8c190a910135c2ea2722543c2750ddc070280427b7ab211db3da59ee9b8 -size 1225 diff --git a/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_2_shape_8_list.pt b/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_2_shape_8_list.pt new file mode 100644 index 00000000..abe16557 --- /dev/null +++ b/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_2_shape_8_list.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ea1a50d4c8e714ea4d13cbe41f7ab3f455f8fb8963be1ba1787a7b5ab9a4545 +size 1250 diff --git a/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_3_shape_8.pt b/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_3_shape_8.pt deleted file mode 100644 index 4356f915..00000000 --- a/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_3_shape_8.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:998be8c190a910135c2ea2722543c2750ddc070280427b7ab211db3da59ee9b8 -size 1225 diff --git a/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_3_shape_8_list.pt b/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_3_shape_8_list.pt new file mode 100644 index 00000000..f42c1040 --- /dev/null +++ b/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_3_shape_8_list.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf79fa09a95cc06d3b07a0badde28badc86a5412a50c43836e86c2e1215aeddf +size 1250 diff --git a/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_6_shape_1x8.pt b/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_6_shape_1x8.pt deleted file mode 100644 index fdab67c6..00000000 --- a/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_6_shape_1x8.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:85c2a3fe8ded76eda86274f0066d8f34445a88d1e90c23e2b598194d2bd6542c -size 1235 diff --git a/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_6_shape_1x8_list.pt b/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_6_shape_1x8_list.pt new file mode 100644 index 00000000..35b400a5 --- /dev/null +++ b/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_6_shape_1x8_list.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eabea1dec7432f00698f3a0aa2ed9d838a4dc126450979e7d807cc9c90feb6bc +size 1324 diff --git a/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_7_shape_1x8.pt b/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_7_shape_1x8.pt deleted file mode 100644 index fdab67c6..00000000 --- a/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_7_shape_1x8.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:85c2a3fe8ded76eda86274f0066d8f34445a88d1e90c23e2b598194d2bd6542c -size 1235 diff --git a/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_7_shape_1x8_list.pt b/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_7_shape_1x8_list.pt new file mode 100644 index 00000000..d32d60f9 --- /dev/null +++ b/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_7_shape_1x8_list.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa9320e36efabc9dc6fa9f70e3c8232a6a906a8e622b92d81318aa855e6d044e +size 1324 diff --git a/tests/eva/assets/core/datasets/embeddings/manifest.csv b/tests/eva/assets/core/datasets/embeddings/manifest.csv index c0a54e26..ea0224b9 100644 --- a/tests/eva/assets/core/datasets/embeddings/manifest.csv +++ b/tests/eva/assets/core/datasets/embeddings/manifest.csv @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0fc8758e7b95dfb048d9cfba4c64667f553c9980dde874066ed795382980b2d0 -size 337 +oid sha256:5798f6f5031188227f211531f20d79e6df1916e620eaf653777dc4417840b65d +size 357 diff --git a/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_6_shape_1x8.pt b/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_6_shape_1x8.pt deleted file mode 100644 index 417ac877..00000000 --- a/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_6_shape_1x8.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7a7f391ab7f206a92cca08d630538a430c0d5cadf3eaadb3d3f845724d76692 -size 1235 diff --git a/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_6_shape_1x8_list.pt b/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_6_shape_1x8_list.pt new file mode 100644 index 00000000..79d7ed13 --- /dev/null +++ b/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_6_shape_1x8_list.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:356b1db144d75d6a6cc2bb78e553bfeb0248cb78ce5272c1614fdf0c1c11a9ea +size 1324 diff --git a/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_7_shape_6x8.pt b/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_7_shape_6x8.pt deleted file mode 100644 index 4fe6454c..00000000 --- a/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_7_shape_6x8.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6ea1a6a0588de18e5ca1e744a6d7e2cd933773cdae280a92985e78eca06a7c62 -size 1427 diff --git a/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_7_shape_6x8_list.pt b/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_7_shape_6x8_list.pt new file mode 100644 index 00000000..f6213ac3 --- /dev/null +++ b/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_7_shape_6x8_list.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b4b1cc0cfc033ee5643c1c7b1d1f911641ba115f4dfd7d74c4fe488fc07c51e +size 1772 diff --git a/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_8_shape_2x8.pt b/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_8_shape_2x8.pt deleted file mode 100644 index 0c37da5d..00000000 --- a/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_8_shape_2x8.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d730238ef61f0dc4b98719e72ecff2fcee0f3c69b5c634554429828792f6a251 -size 1299 diff --git 
a/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_8_shape_2x8_list.pt b/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_8_shape_2x8_list.pt new file mode 100644 index 00000000..043d17e8 --- /dev/null +++ b/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_8_shape_2x8_list.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3323f1ad0aa408b081c0a6eac1c9b3c46d0d1d61e5fbfe084ea39794c2c3cee2 +size 1452 diff --git a/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_9_shape_5x8.pt b/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_9_shape_5x8.pt deleted file mode 100644 index fa63fd0f..00000000 --- a/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_9_shape_5x8.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6ad0657aaa9a8d0521ce9f51ae2fcc748eaf4c94a7e27845b32d9587542b98de -size 1363 diff --git a/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_9_shape_5x8_list.pt b/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_9_shape_5x8_list.pt new file mode 100644 index 00000000..e5d062b7 --- /dev/null +++ b/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_9_shape_5x8_list.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c1de7941fe339d557a99fcedeae0a8ab7f1f52302f44a400573ce7aa2f8e66a +size 1644 diff --git a/tests/eva/assets/core/datasets/multi-embeddings/manifest.csv b/tests/eva/assets/core/datasets/multi-embeddings/manifest.csv index 084e97ea..71e152bf 100644 --- a/tests/eva/assets/core/datasets/multi-embeddings/manifest.csv +++ b/tests/eva/assets/core/datasets/multi-embeddings/manifest.csv @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f810f5b5bde999b0b655d0107d27c4e257094a6f11b6ad507cc3240bb68d81d2 -size 512 +oid sha256:9c414260a55131f60431d23ca34baf5da47f3eb18d614cd8e71d5e51da402cef +size 532 diff --git a/tests/eva/core/callbacks/writers/test_embeddings.py b/tests/eva/core/callbacks/writers/test_embeddings.py index c1f0570d..ec04515c 100644 --- a/tests/eva/core/callbacks/writers/test_embeddings.py +++ b/tests/eva/core/callbacks/writers/test_embeddings.py @@ -106,13 +106,16 @@ def _check_embedding_dimensions(output_dir: str, grouping_enabled: bool): embedding_paths = Path(output_dir).glob("*.pt") for path in embedding_paths: - tensor = torch.load(path) - assert tensor.ndim == 2 + tensor_list = torch.load(path) + assert isinstance(tensor_list, list) + for t in tensor_list: + assert isinstance(t, torch.Tensor) + assert t.ndim == 1 if grouping_enabled: - assert tensor.shape[0] > 1 + assert len(tensor_list) > 1 else: - assert tensor.shape[0] == 1 + assert len(tensor_list) == 1 def _check_expected_n_files(output_dir: str, expected_file_count: int): From bd0c83dcf1332834260a7afaac871fd9339054a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Fri, 24 May 2024 12:25:02 +0200 Subject: [PATCH 20/29] add tiffslide backend (#468) --- src/eva/vision/data/wsi/backends/__init__.py | 31 ++++++++++++-- src/eva/vision/data/wsi/backends/base.py | 37 ++++++++++++++++ src/eva/vision/data/wsi/backends/openslide.py | 21 +--------- src/eva/vision/data/wsi/backends/tiffslide.py | 42 +++++++++++++++++++ tests/eva/vision/data/datasets/test_wsi.py | 36 +++++++++++----- 5 files changed, 134 insertions(+), 33 deletions(-) create mode 100644 src/eva/vision/data/wsi/backends/tiffslide.py diff --git 
a/src/eva/vision/data/wsi/backends/__init__.py b/src/eva/vision/data/wsi/backends/__init__.py
index 4a6b23b2..273707bf 100644
--- a/src/eva/vision/data/wsi/backends/__init__.py
+++ b/src/eva/vision/data/wsi/backends/__init__.py
@@ -9,16 +9,31 @@
 LRU_CACHE_SIZE = 32
 
 
-def is_openslide_available() -> bool:
+def _is_openslide_available() -> bool:
     """Whether the OpenSlide library is available."""
     return importlib.util.find_spec("openslide") is not None
 
 
+def _is_tiffslide_available() -> bool:
+    """Whether the TiffSlide library is available."""
+    return importlib.util.find_spec("tiffslide") is not None
+
+
+def is_backend_available(backend: str) -> bool:
+    """Whether the specified backend is available."""
+    match backend:
+        case "openslide":
+            return _is_openslide_available()
+        case "tiffslide":
+            return _is_tiffslide_available()
+    return False
+
+
 def wsi_backend(backend: str = "openslide") -> Callable[..., Wsi]:
     """Returns the backend to use for reading the whole-slide images."""
     match backend:
         case "openslide":
-            if is_openslide_available():
+            if _is_openslide_available():
                 from eva.vision.data.wsi.backends.openslide import WsiOpenslide
 
                 return WsiOpenslide
@@ -27,6 +42,16 @@
                     "Missing optional dependency: openslide.\n"
                     "Please install using `pip install openslide-python`."
                 )
+        case "tiffslide":
+            if _is_tiffslide_available():
+                from eva.vision.data.wsi.backends.tiffslide import WsiTiffslide
+
+                return WsiTiffslide
+            else:
+                raise ValueError(
+                    "Missing optional dependency: tiffslide.\n"
+                    "Please install using `pip install tiffslide`."
+                )
         case _:
             raise ValueError(f"Unknown WSI backend selected: {backend}")
 
@@ -37,4 +62,4 @@
 def get_cached_wsi(file_path: str, backend: str) -> Wsi:
     return wsi_backend(backend)(file_path)
 
-__all__ = ["Wsi", "wsi_backend", "get_cached_wsi", "is_openslide_available"]
+__all__ = ["Wsi", "wsi_backend", "get_cached_wsi", "is_backend_available"]
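For orientation, a minimal usage sketch of this backend factory (the slide path is a placeholder; it assumes at least one of the optional backend libraries is installed):

```python
from eva.vision.data.wsi import backends

# Prefer tiffslide when it is importable, otherwise fall back to openslide.
backend = "tiffslide" if backends.is_backend_available("tiffslide") else "openslide"

# wsi_backend() returns the backend *class*; instantiating it only stores the
# file path, the slide itself is opened later (e.g. by the dataset's setup).
wsi = backends.wsi_backend(backend)("slides/example.tiff")  # placeholder path
```

`get_cached_wsi` wraps the same call in an LRU cache keyed on `(file_path, backend)`, so repeated patch reads from the same slide reuse one handle.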
+ """ + x_max, y_max = self.level_dimensions[0] + x_scale = x_max / self.level_dimensions[0][0] + y_scale = y_max / self.level_dimensions[0][1] + + if ( + int(location[0] + x_scale * size[0]) > x_max + or int(location[1] + y_scale * size[1]) > y_max + ): + raise ValueError(f"Out of bounds region: {location}, {size}") + + def _read_postprocess(self, data: np.ndarray) -> np.ndarray: + """Post-processes the read region data. + + Args: + data: The read region data as a numpy array of shape (height, width, channels). + """ + # Change color to white where the alpha channel is 0 + if data.shape[2] == 4: + data[data[:, :, 3] == 0] = 255 + + return data[:, :, :3] diff --git a/src/eva/vision/data/wsi/backends/openslide.py b/src/eva/vision/data/wsi/backends/openslide.py index e5f141d1..10c7f8a9 100644 --- a/src/eva/vision/data/wsi/backends/openslide.py +++ b/src/eva/vision/data/wsi/backends/openslide.py @@ -55,27 +55,10 @@ def mpp(self) -> float: return (x_mpp + y_mpp) / 2.0 @override - def read_region( + def _read_region( self, location: Tuple[int, int], level: int, size: Tuple[int, int] ) -> np.ndarray: - x_max, y_max = self.level_dimensions[0] - - x_scale = x_max / self._wsi.level_dimensions[level][0] - y_scale = y_max / self._wsi.level_dimensions[level][1] - - if ( - int(location[0] + x_scale * size[0]) > x_max - or int(location[1] + y_scale * size[1]) > y_max - ): - raise ValueError(f"Out of bounds region: {location}, {size}, {level}") - - data = np.array(self._wsi.read_region(location, level, size)) - - if data.shape[2] == 4: - # Change color to white where the alpha channel is 0 - data[data[:, :, 3] == 0] = 255 - - return data[:, :, :3] + return np.array(self._wsi.read_region(location, level, size)) _conversion_factor_to_micrometer = { diff --git a/src/eva/vision/data/wsi/backends/tiffslide.py b/src/eva/vision/data/wsi/backends/tiffslide.py new file mode 100644 index 00000000..7577e19e --- /dev/null +++ b/src/eva/vision/data/wsi/backends/tiffslide.py @@ -0,0 +1,42 @@ +"""Module for loading data from WSI files using the OpenSlide library.""" + +from typing import Sequence, Tuple + +import numpy as np +import tiffslide # type: ignore +from typing_extensions import override + +from eva.vision.data.wsi.backends import base + + +class WsiTiffslide(base.Wsi): + """Class for loading data from WSI files using the TiffSlide library.""" + + _wsi: tiffslide.TiffSlide + + @override + def open_file(self, file_path: str) -> tiffslide.TiffSlide: + return tiffslide.TiffSlide(file_path) + + @property + @override + def level_dimensions(self) -> Sequence[Tuple[int, int]]: + return self._wsi.level_dimensions + + @property + @override + def level_downsamples(self) -> Sequence[float]: + return self._wsi.level_downsamples + + @property + @override + def mpp(self) -> float: + x_mpp = float(self._wsi.properties[tiffslide.PROPERTY_NAME_MPP_X]) + y_mpp = float(self._wsi.properties[tiffslide.PROPERTY_NAME_MPP_Y]) + return (x_mpp + y_mpp) / 2.0 + + @override + def _read_region( + self, location: Tuple[int, int], level: int, size: Tuple[int, int] + ) -> np.ndarray: + return np.array(self._wsi.read_region(location, level, size)) diff --git a/tests/eva/vision/data/datasets/test_wsi.py b/tests/eva/vision/data/datasets/test_wsi.py index b79b2f6f..6385264d 100644 --- a/tests/eva/vision/data/datasets/test_wsi.py +++ b/tests/eva/vision/data/datasets/test_wsi.py @@ -6,27 +6,34 @@ import pytest from eva.vision.data import datasets +from eva.vision.data.wsi.backends import is_backend_available from eva.vision.data.wsi.patching 
import samplers @pytest.mark.parametrize( - "width, height, overlap", + "width, height, overlap, backend", [ - (4, 4, (0, 0)), - (4, 4, (2, 2)), - (33, 33, (0, 0)), - (224, 224, (0, 0)), + (4, 4, (0, 0), "openslide"), + (4, 4, (2, 2), "openslide"), + (33, 33, (0, 0), "openslide"), + (224, 224, (0, 0), "openslide"), + (4, 4, (0, 0), "tiffslide"), + (4, 4, (2, 2), "tiffslide"), + (33, 33, (0, 0), "tiffslide"), + (224, 224, (0, 0), "tiffslide"), ], ) -def test_len(width: int, height: int, root: str, overlap: Tuple[int, int]): +def test_len(width: int, height: int, root: str, overlap: Tuple[int, int], backend: str): """Test the length of the dataset using different patch dimensions.""" + if not is_backend_available(backend): + pytest.skip(f"{backend} backend is not available.") dataset = datasets.WsiDataset( file_path=os.path.join(root, "0/a.tiff"), width=width, height=height, target_mpp=0.25, sampler=samplers.GridSampler(max_samples=None, overlap=overlap), - backend="openslide", + backend=backend, ) layer_shape = dataset._wsi.level_dimensions[0] @@ -34,18 +41,25 @@ def test_len(width: int, height: int, root: str, overlap: Tuple[int, int]): @pytest.mark.parametrize( - "width, height, target_mpp", - [(4, 4, 0.25), (4, 4, 1.3)], + "width, height, target_mpp, backend", + [ + (4, 4, 0.25, "openslide"), + (4, 4, 1.3, "openslide"), + (4, 4, 0.25, "tiffslide"), + (4, 4, 1.3, "tiffslide"), + ], ) -def test_patch_shape(width: int, height: int, target_mpp: float, root: str): +def test_patch_shape(width: int, height: int, target_mpp: float, root: str, backend: str): """Test the shape of the extracted patches.""" + if not is_backend_available(backend): + pytest.skip(f"{backend} backend is not available.") dataset = datasets.WsiDataset( file_path=os.path.join(root, "0/a.tiff"), width=width, height=height, target_mpp=target_mpp, sampler=samplers.GridSampler(max_samples=None), - backend="openslide", + backend=backend, ) mpp_ratio = target_mpp / ( From e9021cfc17506a8877a133e62bc39ac0cd3ba00c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Mon, 27 May 2024 10:23:41 +0200 Subject: [PATCH 21/29] added panda cli unit tests (#470) --- configs/vision/tests/offline/panda.yaml | 128 ++++++++++++++++++ .../vision/tests/offline/patch_camelyon.yaml | 2 +- tests/eva/vision/test_vision_cli.py | 10 ++ 3 files changed, 139 insertions(+), 1 deletion(-) create mode 100644 configs/vision/tests/offline/panda.yaml diff --git a/configs/vision/tests/offline/panda.yaml b/configs/vision/tests/offline/panda.yaml new file mode 100644 index 00000000..5cbd0456 --- /dev/null +++ b/configs/vision/tests/offline/panda.yaml @@ -0,0 +1,128 @@ +--- +trainer: + class_path: eva.Trainer + init_args: + default_root_dir: &LIGHTNING_ROOT ${oc.env:LIGHTNING_ROOT, logs/test/offline/panda} + max_epochs: &MAX_EPOCHS 1 + limit_train_batches: 2 + limit_val_batches: 2 + callbacks: + - class_path: eva.callbacks.EmbeddingsWriter + init_args: + output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT}/panda + dataloader_idx_map: + 0: train + 1: val + 2: test + metadata_keys: ["wsi_id"] + backbone: + class_path: eva.models.ModelFromFunction + init_args: + path: torch.hub.load + arguments: + repo_or_dir: facebookresearch/dino:main + model: dino_vits16 + pretrained: false + checkpoint_path: &CHECKPOINT_PATH ${oc.env:CHECKPOINT_PATH, null} +model: + class_path: eva.HeadModule + init_args: + head: + class_path: eva.vision.models.networks.ABMIL + init_args: + input_size: ${oc.env:IN_FEATURES, 
384} + output_size: &NUM_CLASSES 6 + criterion: torch.nn.CrossEntropyLoss + optimizer: + class_path: torch.optim.SGD + init_args: + lr: &LR_VALUE ${oc.env:LR_VALUE, 0.00004} + momentum: 0.9 + weight_decay: 0.0 + lr_scheduler: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: *MAX_EPOCHS + eta_min: 0.0 + metrics: + common: + - class_path: eva.metrics.AverageLoss + - class_path: eva.metrics.MulticlassClassificationMetrics + init_args: + num_classes: *NUM_CLASSES +data: + class_path: eva.DataModule + init_args: + datasets: + train: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: &DATASET_ARGS + root: *DATASET_EMBEDDINGS_ROOT + manifest_file: manifest.csv + split: train + embeddings_transforms: + class_path: eva.core.data.transforms.Pad2DTensor + init_args: + pad_size: &N_PATCHES 5 + val: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: + <<: *DATASET_ARGS + split: val + test: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: + <<: *DATASET_ARGS + split: test + predict: + - class_path: eva.vision.datasets.PANDA + init_args: &PREDICT_DATASET_ARGS + root: ${oc.env:TESTS_ROOT, tests/eva}/assets/vision/datasets/panda + sampler: + class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler + init_args: + max_samples: *N_PATCHES + width: 2 + height: 2 + target_mpp: 0.5 + split: train + image_transforms: + class_path: eva.vision.data.transforms.common.ResizeAndCrop + init_args: + size: ${oc.env:RESIZE_DIM, 224} + mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} + std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} + - class_path: eva.vision.datasets.PANDA + init_args: + <<: *PREDICT_DATASET_ARGS + split: val + - class_path: eva.vision.datasets.PANDA + init_args: + <<: *PREDICT_DATASET_ARGS + split: test + dataloaders: + train: + batch_size: &BATCH_SIZE 2 + shuffle: true + num_workers: 0 + pin_memory: false + persistent_workers: false + prefetch_factor: null + val: + batch_size: *BATCH_SIZE + num_workers: 0 + pin_memory: false + persistent_workers: false + prefetch_factor: null + test: + batch_size: *BATCH_SIZE + num_workers: 0 + pin_memory: false + persistent_workers: false + prefetch_factor: null + predict: + batch_size: &PREDICT_BATCH_SIZE 2 + num_workers: 0 + pin_memory: false + persistent_workers: false + prefetch_factor: null diff --git a/configs/vision/tests/offline/patch_camelyon.yaml b/configs/vision/tests/offline/patch_camelyon.yaml index 2b8e106f..b9155881 100644 --- a/configs/vision/tests/offline/patch_camelyon.yaml +++ b/configs/vision/tests/offline/patch_camelyon.yaml @@ -15,7 +15,7 @@ trainer: 1: val 2: test backbone: - class_path: eva.core.models.networks.wrappers.ModelFromFunction + class_path: eva.models.ModelFromFunction init_args: path: torch.hub.load arguments: diff --git a/tests/eva/vision/test_vision_cli.py b/tests/eva/vision/test_vision_cli.py index a1c69963..f8cd169e 100644 --- a/tests/eva/vision/test_vision_cli.py +++ b/tests/eva/vision/test_vision_cli.py @@ -3,6 +3,7 @@ import os import tempfile from unittest import mock +from unittest.mock import patch import pytest @@ -21,6 +22,7 @@ "configs/vision/dino_vit/offline/crc.yaml", "configs/vision/dino_vit/offline/mhist.yaml", "configs/vision/dino_vit/offline/patch_camelyon.yaml", + "configs/vision/dino_vit/offline/panda.yaml", "configs/vision/owkin/phikon/offline/bach.yaml", "configs/vision/owkin/phikon/offline/crc.yaml", "configs/vision/owkin/phikon/offline/mhist.yaml", @@ -61,6 +63,7 @@ def 
test_fit_from_configuration(configuration_file: str, lib_path: str) -> None: "configuration_file", [ "configs/vision/tests/offline/patch_camelyon.yaml", + "configs/vision/tests/offline/panda.yaml", ], ) def test_predict_fit_from_configuration(configuration_file: str, lib_path: str) -> None: @@ -80,3 +83,10 @@ def test_predict_fit_from_configuration(configuration_file: str, lib_path: str) def _skip_dataset_validation() -> None: """Mocks the validation step of the datasets.""" datasets.PatchCamelyon.validate = mock.MagicMock(return_value=None) + + +@pytest.fixture(autouse=True) +def mock_download(): + """Mocks the download functions to avoid downloading resources when running tests.""" + with patch.object(datasets.PANDA, "_download_resources", return_value=None): + yield From 2efbca5f564945e8e4fd2950db4de8f5597b83d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Wed, 29 May 2024 09:00:09 +0200 Subject: [PATCH 22/29] move wsi initialization to setup method of `MultiWsiDataset` (#472) --- src/eva/vision/data/datasets/wsi.py | 9 ++++----- .../vision/data/datasets/classification/test_panda.py | 9 +++++++++ .../eva/vision/data/datasets/classification/test_wsi.py | 6 +++++- tests/eva/vision/data/datasets/test_wsi.py | 1 + 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/src/eva/vision/data/datasets/wsi.py b/src/eva/vision/data/datasets/wsi.py index f90df016..07bd5542 100644 --- a/src/eva/vision/data/datasets/wsi.py +++ b/src/eva/vision/data/datasets/wsi.py @@ -9,6 +9,7 @@ from torch.utils.data import dataset as torch_datasets from typing_extensions import override +from eva.core.data.datasets import base from eva.vision.data import wsi from eva.vision.data.datasets import vision from eva.vision.data.wsi.patching import samplers @@ -83,7 +84,7 @@ def _apply_transforms(self, image: np.ndarray) -> np.ndarray: return image -class MultiWsiDataset(torch_datasets.ConcatDataset): +class MultiWsiDataset(torch_datasets.ConcatDataset, base.Dataset): """Dataset class for reading patches from multiple whole-slide images.""" def __init__( @@ -108,10 +109,6 @@ def __init__( sampler: The sampler to use for sampling patch coordinates. backend: The backend to use for reading the whole-slide images. image_transforms: Transforms to apply to the extracted image patches. - column_mapping: Defines the map between the variables and the manifest - columns. It will overwrite the `default_column_mapping` with - the provided values, so that `column_mapping` can contain only the - values which are altered or missing. 
""" self._root = root self._file_paths = file_paths @@ -122,6 +119,8 @@ def __init__( self._backend = backend self._image_transforms = image_transforms + @override + def setup(self): super().__init__(self._load_datasets()) def _load_datasets(self) -> list[WsiDataset]: diff --git a/tests/eva/vision/data/datasets/classification/test_panda.py b/tests/eva/vision/data/datasets/classification/test_panda.py index 72a5b7ff..8f523aeb 100644 --- a/tests/eva/vision/data/datasets/classification/test_panda.py +++ b/tests/eva/vision/data/datasets/classification/test_panda.py @@ -29,6 +29,7 @@ def test_split_and_expected_shapes(root: str): train_dataset = datasets.PANDA(root=root, split="train", **DEFAULT_ARGS) val_dataset = datasets.PANDA(root=root, split="val", **DEFAULT_ARGS) test_dataset = datasets.PANDA(root=root, split="test", **DEFAULT_ARGS) + _setup_datasets(train_dataset, val_dataset, test_dataset) assert len(train_dataset.datasets) == 6 assert len(val_dataset.datasets) == 2 @@ -47,6 +48,7 @@ def test_split_and_expected_shapes(root: str): def test_filenames(root: str, split: Literal["train", "val", "test"]): """Tests that the number of filenames matches the dataset size.""" dataset = datasets.PANDA(root=root, split=split, **DEFAULT_ARGS) + _setup_datasets(dataset) filenames = set() for i in range(len(dataset)): @@ -59,6 +61,7 @@ def test_same_split_same_seed(root: str): """Test that the generated split is deterministic when using the same seed.""" dataset1 = datasets.PANDA(root=root, split="train", seed=42, **DEFAULT_ARGS) dataset2 = datasets.PANDA(root=root, split="train", seed=42, **DEFAULT_ARGS) + _setup_datasets(dataset1, dataset2) assert len(dataset1) == len(dataset2) assert dataset1._file_paths == dataset2._file_paths @@ -71,6 +74,7 @@ def test_different_seed_different_split(root: str): """Test that the generated split is different when using a different seed.""" dataset1 = datasets.PANDA(root=root, split="train", seed=42, **DEFAULT_ARGS) dataset2 = datasets.PANDA(root=root, split="train", seed=43, **DEFAULT_ARGS) + _setup_datasets(dataset1, dataset2) assert len(dataset1) == len(dataset2) assert dataset1._file_paths != dataset2._file_paths @@ -102,3 +106,8 @@ def mock_download(): """Mocks the download function to avoid downloading resources when running tests.""" with patch.object(datasets.PANDA, "_download_resources", return_value=None): yield + + +def _setup_datasets(*datasets: datasets.PANDA): + for dataset in datasets: + dataset.setup() diff --git a/tests/eva/vision/data/datasets/classification/test_wsi.py b/tests/eva/vision/data/datasets/classification/test_wsi.py index ca020c2b..d14573d8 100644 --- a/tests/eva/vision/data/datasets/classification/test_wsi.py +++ b/tests/eva/vision/data/datasets/classification/test_wsi.py @@ -39,10 +39,12 @@ def test_pickleable(dataset: datasets.WsiClassificationDataset): def test_split(root: str): """Test loading the dataset with different splits.""" dataset = datasets.WsiClassificationDataset(root=root, split=None, **DEFAULT_ARGS) + dataset.setup() assert len(dataset) == 192 _check_batch_shape(dataset[0]) train_dataset = datasets.WsiClassificationDataset(root=root, split="train", **DEFAULT_ARGS) + train_dataset.setup() assert len(train_dataset) == 64 _check_batch_shape(train_dataset[0]) @@ -82,7 +84,9 @@ def _check_batch_shape(batch: Any): @pytest.fixture def dataset(root: str) -> datasets.WsiClassificationDataset: """Fixture returning a dataset instance.""" - return datasets.WsiClassificationDataset(root=root, **DEFAULT_ARGS) + dataset = 
datasets.WsiClassificationDataset(root=root, **DEFAULT_ARGS) + dataset.setup() + return dataset @pytest.fixture diff --git a/tests/eva/vision/data/datasets/test_wsi.py b/tests/eva/vision/data/datasets/test_wsi.py index 6385264d..de816475 100644 --- a/tests/eva/vision/data/datasets/test_wsi.py +++ b/tests/eva/vision/data/datasets/test_wsi.py @@ -87,6 +87,7 @@ def test_multi_dataset(root: str): sampler=samplers.GridSampler(max_samples=None), backend="openslide", ) + dataset.setup() assert isinstance(dataset.datasets[0], datasets.WsiDataset) layer_shape = dataset.datasets[0]._wsi.level_dimensions[0] From 80ec72e1c7e0f187b8432654a412fe9e5d5a6143 Mon Sep 17 00:00:00 2001 From: roman807 Date: Thu, 6 Jun 2024 13:07:53 +0200 Subject: [PATCH 23/29] 459 add camelyon16 slide level task (#476) * added panda dataset class * clean up * remove samples with noisy labels * clean up table in dataset readme * added function for stratified splits * added unit tests * cleanup * addressed comments * fixed issue with resource download * validation fix * updated readme * added to mkdocs * added image_dir to exception print * updated root path in yaml config * added panda to datasets overview table in docs * added md5 hash for downloaded resources * update init * added camelyon16 * added camelyon16 * updated camelyon16 class * added tests and config * formatting * formatting * formatting * formatting * added test files * formatting * lint * added target transforms * formatting * fixed dataset * addressed comments * addressed comments * fix test * fix test * fixed test * addressed comments * updated loss * fix annotations * lint --------- Co-authored-by: Nicolas Kaenzig --- .../vision/dino_vit/offline/camelyon16.yaml | 135 ++++++++++ docs/datasets/camelyon16.md | 68 +++++ docs/datasets/index.md | 7 +- src/eva/vision/data/datasets/__init__.py | 2 + src/eva/vision/data/datasets/_validators.py | 13 + .../data/datasets/classification/__init__.py | 2 + .../datasets/classification/camelyon16.py | 245 ++++++++++++++++++ .../camelyon16/testing/images/test_001.tif | 3 + .../camelyon16/testing/images/test_002.tif | 3 + .../datasets/camelyon16/testing/reference.csv | 3 + .../camelyon16/training/normal/normal_001.tif | 3 + .../camelyon16/training/normal/normal_002.tif | 3 + .../camelyon16/training/tumor/tumor_001.tif | 3 + .../camelyon16/training/tumor/tumor_002.tif | 3 + .../classification/test_camelyon16.py | 83 ++++++ tests/eva/vision/test_vision_cli.py | 1 + 16 files changed, 574 insertions(+), 3 deletions(-) create mode 100644 configs/vision/dino_vit/offline/camelyon16.yaml create mode 100644 docs/datasets/camelyon16.md create mode 100644 src/eva/vision/data/datasets/classification/camelyon16.py create mode 100644 tests/eva/assets/vision/datasets/camelyon16/testing/images/test_001.tif create mode 100644 tests/eva/assets/vision/datasets/camelyon16/testing/images/test_002.tif create mode 100644 tests/eva/assets/vision/datasets/camelyon16/testing/reference.csv create mode 100644 tests/eva/assets/vision/datasets/camelyon16/training/normal/normal_001.tif create mode 100644 tests/eva/assets/vision/datasets/camelyon16/training/normal/normal_002.tif create mode 100644 tests/eva/assets/vision/datasets/camelyon16/training/tumor/tumor_001.tif create mode 100644 tests/eva/assets/vision/datasets/camelyon16/training/tumor/tumor_002.tif create mode 100644 tests/eva/vision/data/datasets/classification/test_camelyon16.py diff --git a/configs/vision/dino_vit/offline/camelyon16.yaml b/configs/vision/dino_vit/offline/camelyon16.yaml new file 
mode 100644 index 00000000..90a1bf9b --- /dev/null +++ b/configs/vision/dino_vit/offline/camelyon16.yaml @@ -0,0 +1,135 @@ +--- +trainer: + class_path: eva.Trainer + init_args: + n_runs: &N_RUNS ${oc.env:N_RUNS, 3} + default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/camelyon16} + max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} + callbacks: + - class_path: lightning.pytorch.callbacks.LearningRateMonitor + init_args: + logging_interval: epoch + - class_path: lightning.pytorch.callbacks.ModelCheckpoint + init_args: + filename: best + save_last: true + save_top_k: 1 + monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy} + mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + min_delta: 0 + patience: 74 + monitor: *MONITOR_METRIC + mode: *MONITOR_METRIC_MODE + - class_path: eva.callbacks.EmbeddingsWriter + init_args: + output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/camelyon16 + dataloader_idx_map: + 0: train + 1: val + 2: test + metadata_keys: ["wsi_id"] + backbone: + class_path: eva.models.ModelFromFunction + init_args: + path: torch.hub.load + arguments: + repo_or_dir: ${oc.env:REPO_OR_DIR, facebookresearch/dino:main} + model: ${oc.env:DINO_BACKBONE, dino_vits16} + pretrained: ${oc.env:PRETRAINED, true} + force_reload: ${oc.env:FORCE_RELOAD, false} + checkpoint_path: ${oc.env:CHECKPOINT_PATH, null} + logger: + - class_path: lightning.pytorch.loggers.TensorBoardLogger + init_args: + save_dir: *OUTPUT_ROOT + name: "" +model: + class_path: eva.HeadModule + init_args: + head: + class_path: eva.vision.models.networks.ABMIL + init_args: + input_size: ${oc.env:IN_FEATURES, 768} + output_size: &NUM_CLASSES 1 + projected_input_size: 128 + criterion: torch.nn.BCEWithLogitsLoss + optimizer: + class_path: torch.optim.AdamW + init_args: + lr: &LR_VALUE 0.000039 + betas: [0.9, 0.999] + lr_scheduler: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: *MAX_STEPS + eta_min: 0.0 + metrics: + common: + - class_path: eva.metrics.AverageLoss + - class_path: eva.metrics.BinaryClassificationMetrics +data: + class_path: eva.DataModule + init_args: + datasets: + train: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: &DATASET_ARGS + root: *DATASET_EMBEDDINGS_ROOT + manifest_file: manifest.csv + split: train + embeddings_transforms: + class_path: eva.core.data.transforms.Pad2DTensor + init_args: + pad_size: 10_000 + target_transforms: + class_path: eva.core.data.transforms.dtype.ArrayToFloatTensor + val: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: + <<: *DATASET_ARGS + split: val + test: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: + <<: *DATASET_ARGS + split: test + predict: + - class_path: eva.vision.datasets.Camelyon16 + init_args: &PREDICT_DATASET_ARGS + root: ${oc.env:DATA_ROOT, ./data}/camelyon16 + sampler: + class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler + init_args: + max_samples: 10_000 + width: 224 + height: 224 + target_mpp: 0.25 + split: train + image_transforms: + class_path: eva.vision.data.transforms.common.ResizeAndCrop + init_args: + size: ${oc.env:RESIZE_DIM, 224} + mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} + std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} + - class_path: eva.vision.datasets.Camelyon16 + init_args: + <<: 
*PREDICT_DATASET_ARGS
+            split: val
+        - class_path: eva.vision.datasets.Camelyon16
+          init_args:
+            <<: *PREDICT_DATASET_ARGS
+            split: test
+    dataloaders:
+      train:
+        batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 16}
+        shuffle: true
+      val:
+        batch_size: *BATCH_SIZE
+      test:
+        batch_size: *BATCH_SIZE
+      predict:
+        batch_size: &PREDICT_BATCH_SIZE ${oc.env:PREDICT_BATCH_SIZE, 64}
+        num_workers: 12  # multiprocessing.cpu_count
+        prefetch_factor: 2
diff --git a/docs/datasets/camelyon16.md b/docs/datasets/camelyon16.md
new file mode 100644
index 00000000..96584406
--- /dev/null
+++ b/docs/datasets/camelyon16.md
@@ -0,0 +1,68 @@
+# Camelyon16
+
+The Camelyon16 dataset consists of 400 WSIs of lymph nodes for breast cancer metastasis classification. The dataset is a combination of two independent datasets, collected from two separate medical centers in the Netherlands (Radboud University Medical Center and University Medical Center Utrecht). The dataset contains the slides from which [PatchCamelyon](patch_camelyon.md)-patches were extracted.
+
+The dataset is divided into a train set (270 slides) and a test set (130 slides), both containing images from both centers.
+
+The task was part of [Grand Challenge](https://grand-challenge.org/) in 2016 and was later replaced by Camelyon17.
+
+Source: https://camelyon16.grand-challenge.org
+
+## Raw data
+
+### Key stats
+
+|                           |                                                          |
+|---------------------------|----------------------------------------------------------|
+| **Modality**              | Vision (Slide-level)                                     |
+| **Task**                  | Binary classification                                    |
+| **Cancer type**           | Breast                                                   |
+| **Data size**             | ~700 GB                                                  |
+| **Image dimension**       | ~100-250k x ~100-250k x 3                                |
+| **Magnification (μm/px)** | 40x (0.25) - Level 0                                     |
+| **Files format**          | `.tif`                                                   |
+| **Number of images**      | 400 (270 train, 130 test)                                |
+
+
+### Organization
+
+The `CAMELYON16` data (download links [here](https://camelyon17.grand-challenge.org/Data/)) is organized as follows:
+
+```
+CAMELYON16
+├── training
+│   ├── normal
+│   │   ├── normal_001.tif
+│   │   └── ...
+│   ├── tumor
+│   │   ├── tumor_001.tif
+│   │   └── ...
+│   └── lesion_annotations.zip
+├── testing
+│   ├── images
+│   │   ├── test_001.tif
+│   │   └── ...
+│   ├── evaluation       # masks not in use
+│   ├── reference.csv    # targets
+│   └── lesion_annotations.zip
+```
+
+## Download and preprocessing
+
+The `Camelyon16` dataset class doesn't download the data during runtime; it must be downloaded manually from the links provided [here](https://camelyon17.grand-challenge.org/Data/).
+
+The dataset is split into train / test. Additionally, we split the train set into train/val using the same splits as [PatchCamelyon](patch_camelyon.md) (see metadata CSV files on [Zenodo](https://zenodo.org/records/2546921)).
+
+| Splits   | Train       | Validation  | Test        |
+|----------|-------------|-------------|-------------|
+| #Samples | 216 (54%)   | 54 (13.5%)  | 130 (32.5%) |
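To make the split handling concrete, here is a minimal usage sketch of the dataset class added in this patch; the `root` path is a placeholder, and the sampler settings simply mirror the config above:

```python
from eva.vision.data import datasets
from eva.vision.data.wsi.patching import samplers

dataset = datasets.Camelyon16(
    root="./data/camelyon16",  # placeholder path to a local CAMELYON16 copy
    split="train",             # "train" | "val" | "test" | None
    sampler=samplers.ForegroundGridSampler(max_samples=10_000),
    width=224,
    height=224,
    target_mpp=0.25,
)
dataset.prepare_data()  # checks the expected folder layout and reference.csv
dataset.setup()         # builds the per-slide patch datasets

# Each item is a (patch, target, metadata) triple:
image, target, metadata = dataset[0]  # target: 0 = normal, 1 = tumor; metadata: {"wsi_id": ...}
```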
+
+## Relevant links
+
+* [Grand Challenge dataset description](https://camelyon16.grand-challenge.org/Data/)
+* [Download links](https://camelyon17.grand-challenge.org/Data/)
+
+
+## References
+
+1. [A General-Purpose Self-Supervised Model for Computational Pathology](https://arxiv.org/abs/2308.15474)
\ No newline at end of file
diff --git a/docs/datasets/index.md b/docs/datasets/index.md
index 120978bb..a2a46920 100644
--- a/docs/datasets/index.md
+++ b/docs/datasets/index.md
@@ -7,9 +7,10 @@
 ### Whole Slide (WSI) and microscopy image datasets
 
 #### Slide-level
-| Dataset                            | #Slides  | Slide Size     | Magnification (μm/px)  | Task                       | Cancer Type      |
-|------------------------------------|----------|----------------|------------------------|----------------------------|------------------|
-| [PANDA](panda.md)                  | 3,152    | ~20k x 20k x 3 | 20x (0.5)              | Classification (6 classes) | Prostate         |
+| Dataset                            | #Slides  | Slide Size                | Magnification (μm/px)  | Task                       | Cancer Type      |
+|------------------------------------|----------|---------------------------|------------------------|----------------------------|------------------|
+| [Camelyon16](camelyon16.md)        | 400      | ~100-250k x ~100-250k x 3 | 40x (0.25)             | Classification (2 classes) | Breast           |
+| [PANDA](panda.md)                  | 10,616   | ~20k x 20k x 3            | 20x (0.5)              | Classification (6 classes) | Prostate         |
 
 #### Patch-level
diff --git a/src/eva/vision/data/datasets/__init__.py b/src/eva/vision/data/datasets/__init__.py
index 599aaca8..864d5a4a 100644
--- a/src/eva/vision/data/datasets/__init__.py
+++ b/src/eva/vision/data/datasets/__init__.py
@@ -5,6 +5,7 @@
     CRC,
     MHIST,
     PANDA,
+    Camelyon16,
     PatchCamelyon,
     TotalSegmentatorClassification,
     WsiClassificationDataset,
@@ -20,6 +21,7 @@
     "ImageSegmentation",
     "PatchCamelyon",
     "PANDA",
+    "Camelyon16",
     "TotalSegmentatorClassification",
     "TotalSegmentator2D",
     "VisionDataset",
diff --git a/src/eva/vision/data/datasets/_validators.py b/src/eva/vision/data/datasets/_validators.py
index b6c17fe5..ef6407e4 100644
--- a/src/eva/vision/data/datasets/_validators.py
+++ b/src/eva/vision/data/datasets/_validators.py
@@ -57,3 +57,16 @@
     if download_available:
         error_message += " You can set `download=True` to download the dataset automatically."
     raise FileNotFoundError(error_message)
+
+
+def check_number_of_files(file_paths: List[str], expected_length: int, split: str | None) -> None:
+    """Verifies the number of files in the dataset.
+
+    Raises:
+        ValueError: If the number of files in the dataset does not match the expected one.
+    """
+    if len(file_paths) != expected_length:
+        raise ValueError(
+            f"Expected {expected_length} files for split '{split}', found {len(file_paths)}.
" + f"{_SUFFIX_ERROR_MESSAGE}" + ) diff --git a/src/eva/vision/data/datasets/classification/__init__.py b/src/eva/vision/data/datasets/classification/__init__.py index 265c9740..0b86ee5c 100644 --- a/src/eva/vision/data/datasets/classification/__init__.py +++ b/src/eva/vision/data/datasets/classification/__init__.py @@ -1,6 +1,7 @@ """Image classification datasets API.""" from eva.vision.data.datasets.classification.bach import BACH +from eva.vision.data.datasets.classification.camelyon16 import Camelyon16 from eva.vision.data.datasets.classification.crc import CRC from eva.vision.data.datasets.classification.mhist import MHIST from eva.vision.data.datasets.classification.panda import PANDA @@ -16,4 +17,5 @@ "TotalSegmentatorClassification", "WsiClassificationDataset", "PANDA", + "Camelyon16", ] diff --git a/src/eva/vision/data/datasets/classification/camelyon16.py b/src/eva/vision/data/datasets/classification/camelyon16.py new file mode 100644 index 00000000..e0072906 --- /dev/null +++ b/src/eva/vision/data/datasets/classification/camelyon16.py @@ -0,0 +1,245 @@ +"""Camelyon16 dataset class.""" + +import functools +import glob +import os +from typing import Any, Callable, Dict, List, Literal, Tuple + +import numpy as np +import pandas as pd +import torch +from typing_extensions import override + +from eva.vision.data.datasets import _validators, wsi +from eva.vision.data.datasets.classification import base +from eva.vision.data.wsi.patching import samplers + + +class Camelyon16(wsi.MultiWsiDataset, base.ImageClassification): + """Dataset class for Camelyon16 images and corresponding targets.""" + + _val_slides = [ + "normal_010", + "normal_013", + "normal_016", + "normal_017", + "normal_019", + "normal_020", + "normal_025", + "normal_030", + "normal_031", + "normal_032", + "normal_052", + "normal_056", + "normal_057", + "normal_067", + "normal_076", + "normal_079", + "normal_085", + "normal_095", + "normal_098", + "normal_099", + "normal_101", + "normal_102", + "normal_105", + "normal_106", + "normal_109", + "normal_129", + "normal_132", + "normal_137", + "normal_142", + "normal_143", + "normal_148", + "normal_152", + "tumor_001", + "tumor_005", + "tumor_011", + "tumor_012", + "tumor_013", + "tumor_019", + "tumor_031", + "tumor_037", + "tumor_043", + "tumor_046", + "tumor_057", + "tumor_065", + "tumor_069", + "tumor_071", + "tumor_073", + "tumor_079", + "tumor_080", + "tumor_081", + "tumor_082", + "tumor_085", + "tumor_097", + "tumor_109", + ] + """Validation slide names, same as the ones in patch camelyon.""" + + def __init__( + self, + root: str, + sampler: samplers.Sampler, + split: Literal["train", "val", "test"] | None = None, + width: int = 224, + height: int = 224, + target_mpp: float = 0.5, + backend: str = "openslide", + image_transforms: Callable | None = None, + seed: int = 42, + ) -> None: + """Initializes the dataset. + + Args: + root: Root directory of the dataset. + sampler: The sampler to use for sampling patch coordinates. + split: Dataset split to use. If `None`, the entire dataset is used. + width: Width of the patches to be extracted, in pixels. + height: Height of the patches to be extracted, in pixels. + target_mpp: Target microns per pixel (mpp) for the patches. + backend: The backend to use for reading the whole-slide images. + image_transforms: Transforms to apply to the extracted image patches. + seed: Random seed for reproducibility. 
+ """ + self._split = split + self._root = root + self._width = width + self._height = height + self._target_mpp = target_mpp + self._seed = seed + + wsi.MultiWsiDataset.__init__( + self, + root=root, + file_paths=self._load_file_paths(split), + width=width, + height=height, + sampler=sampler, + target_mpp=target_mpp, + backend=backend, + image_transforms=image_transforms, + ) + + @property + @override + def classes(self) -> List[str]: + return ["normal", "tumor"] + + @property + @override + def class_to_idx(self) -> Dict[str, int]: + return {"normal": 0, "tumor": 1} + + @functools.cached_property + def annotations_test_set(self) -> Dict[str, str]: + """Loads the dataset labels.""" + path = os.path.join(self._root, "testing/reference.csv") + reference_df = pd.read_csv(path, header=None) + return {k: v.lower() for k, v in reference_df[[0, 1]].itertuples(index=False)} + + @functools.cached_property + def annotations(self) -> Dict[str, str]: + """Loads the dataset labels.""" + annotations = {} + if self._split in ["test", None]: + path = os.path.join(self._root, "testing/reference.csv") + reference_df = pd.read_csv(path, header=None) + annotations.update( + {k: v.lower() for k, v in reference_df[[0, 1]].itertuples(index=False)} + ) + + if self._split in ["train", "val", None]: + annotations.update( + { + self._get_id_from_path(file_path): self._get_class_from_path(file_path) + for file_path in self._file_paths + if "test" not in file_path + } + ) + return annotations + + @override + def prepare_data(self) -> None: + _validators.check_dataset_exists(self._root, True) + + expected_directories = ["training/normal", "training/tumor", "testing/images"] + for resource in expected_directories: + if not os.path.isdir(os.path.join(self._root, resource)): + raise FileNotFoundError(f"'{resource}' not found in the root folder.") + + if not os.path.isfile(os.path.join(self._root, "testing/reference.csv")): + raise FileNotFoundError("'reference.csv' file not found in the testing folder.") + + @override + def validate(self) -> None: + + expected_n_files = { + "train": 216, + "val": 54, + "test": 129, + None: 399, + } + length = expected_n_files[self._split] + _validators.check_number_of_files(self._file_paths, length, self._split) + _validators.check_dataset_integrity( + self, + length=None, + n_classes=2, + first_and_last_labels=("normal", "tumor"), + ) + + @override + def filename(self, index: int) -> str: + return os.path.basename(self._file_paths[self._get_dataset_idx(index)]) + + @override + def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]: + return base.ImageClassification.__getitem__(self, index) + + @override + def load_image(self, index: int) -> torch.Tensor: + return wsi.MultiWsiDataset.__getitem__(self, index) + + @override + def load_target(self, index: int) -> np.ndarray: + file_path = self._file_paths[self._get_dataset_idx(index)] + class_name = self.annotations[self._get_id_from_path(file_path)] + return np.asarray(self.class_to_idx[class_name], dtype=np.int64) + + @override + def load_metadata(self, index: int) -> Dict[str, Any]: + return {"wsi_id": self.filename(index).split(".")[0]} + + def _load_file_paths(self, split: Literal["train", "val", "test"] | None = None) -> List[str]: + """Loads the file paths of the corresponding dataset split.""" + train_paths, val_paths = [], [] + for path in glob.glob(os.path.join(self._root, "training/**/*.tif")): + if self._get_id_from_path(path) in self._val_slides: + val_paths.append(path) + else: + 
train_paths.append(path)
+        test_paths = glob.glob(os.path.join(self._root, "testing/images", "*.tif"))
+
+        match split:
+            case "train":
+                paths = train_paths
+            case "val":
+                paths = val_paths
+            case "test":
+                paths = test_paths
+            case None:
+                paths = train_paths + val_paths + test_paths
+            case _:
+                raise ValueError("Invalid split. Use 'train', 'val', 'test' or `None`.")
+        return sorted([os.path.relpath(path, self._root) for path in paths])
+
+    def _get_id_from_path(self, file_path: str) -> str:
+        """Extracts the slide ID from the file path."""
+        return os.path.basename(file_path).replace(".tif", "")
+
+    def _get_class_from_path(self, file_path: str) -> str:
+        """Extracts the class name from the file path."""
+        class_name = self._get_id_from_path(file_path).split("_")[0]
+        if class_name not in self.classes:
+            raise ValueError(f"Invalid class name '{class_name}' in file path '{file_path}'.")
+        return class_name
diff --git a/tests/eva/assets/vision/datasets/camelyon16/testing/images/test_001.tif b/tests/eva/assets/vision/datasets/camelyon16/testing/images/test_001.tif
new file mode 100644
index 00000000..64bc6f24
--- /dev/null
+++ b/tests/eva/assets/vision/datasets/camelyon16/testing/images/test_001.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062
+size 246784
diff --git a/tests/eva/assets/vision/datasets/camelyon16/testing/images/test_002.tif b/tests/eva/assets/vision/datasets/camelyon16/testing/images/test_002.tif
new file mode 100644
index 00000000..64bc6f24
--- /dev/null
+++ b/tests/eva/assets/vision/datasets/camelyon16/testing/images/test_002.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062
+size 246784
diff --git a/tests/eva/assets/vision/datasets/camelyon16/testing/reference.csv b/tests/eva/assets/vision/datasets/camelyon16/testing/reference.csv
new file mode 100644
index 00000000..5b36aa91
--- /dev/null
+++ b/tests/eva/assets/vision/datasets/camelyon16/testing/reference.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe2d8f0df36ba2b44f1ff875300019aa7df443fe1f428d7142dcc2f4ddc1a908
+size 50
diff --git a/tests/eva/assets/vision/datasets/camelyon16/training/normal/normal_001.tif b/tests/eva/assets/vision/datasets/camelyon16/training/normal/normal_001.tif
new file mode 100644
index 00000000..64bc6f24
--- /dev/null
+++ b/tests/eva/assets/vision/datasets/camelyon16/training/normal/normal_001.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062
+size 246784
diff --git a/tests/eva/assets/vision/datasets/camelyon16/training/normal/normal_002.tif b/tests/eva/assets/vision/datasets/camelyon16/training/normal/normal_002.tif
new file mode 100644
index 00000000..64bc6f24
--- /dev/null
+++ b/tests/eva/assets/vision/datasets/camelyon16/training/normal/normal_002.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062
+size 246784
diff --git a/tests/eva/assets/vision/datasets/camelyon16/training/tumor/tumor_001.tif b/tests/eva/assets/vision/datasets/camelyon16/training/tumor/tumor_001.tif
new file mode 100644
index 00000000..64bc6f24
--- /dev/null
+++ b/tests/eva/assets/vision/datasets/camelyon16/training/tumor/tumor_001.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid 
sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/assets/vision/datasets/camelyon16/training/tumor/tumor_002.tif b/tests/eva/assets/vision/datasets/camelyon16/training/tumor/tumor_002.tif new file mode 100644 index 00000000..64bc6f24 --- /dev/null +++ b/tests/eva/assets/vision/datasets/camelyon16/training/tumor/tumor_002.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a0877005a9da2360e67107b25c4657696516a54504a5f903b895ebdfad5062 +size 246784 diff --git a/tests/eva/vision/data/datasets/classification/test_camelyon16.py b/tests/eva/vision/data/datasets/classification/test_camelyon16.py new file mode 100644 index 00000000..26125254 --- /dev/null +++ b/tests/eva/vision/data/datasets/classification/test_camelyon16.py @@ -0,0 +1,83 @@ +"""Camelyon16 dataset tests.""" + +import os +from typing import Any, Literal + +import numpy as np +import pytest +import torch +import torchvision.transforms.v2 as torch_transforms + +from eva.vision.data import datasets +from eva.vision.data import transforms as eva_transforms +from eva.vision.data.wsi.patching import samplers + +TARGET_SIZE = 224 +DEFAULT_ARGS = { + "width": 16, + "height": 16, + "target_mpp": 0.5, + "sampler": samplers.GridSampler(), + "backend": "openslide", + "image_transforms": torch_transforms.Compose([eva_transforms.ResizeAndCrop(size=TARGET_SIZE)]), +} + + +def test_split_and_expected_shapes(root: str): + """Test loading the dataset with different splits.""" + train_dataset = datasets.Camelyon16(root=root, split="train", **DEFAULT_ARGS) + val_dataset = datasets.Camelyon16(root=root, split="val", **DEFAULT_ARGS) + test_dataset = datasets.Camelyon16(root=root, split="test", **DEFAULT_ARGS) + + _setup_datasets(train_dataset, val_dataset, test_dataset) + + assert len(train_dataset.datasets) == 3 + assert len(val_dataset.datasets) == 1 + assert len(test_dataset.datasets) == 2 + + assert len(train_dataset) == 192 + assert len(val_dataset) == 64 + assert len(test_dataset) == 128 + + _check_batch_shape(train_dataset[0]) + _check_batch_shape(val_dataset[0]) + _check_batch_shape(test_dataset[0]) + + +@pytest.mark.parametrize("split", ["train", "val", "test", None]) +def test_filenames(root: str, split: Literal["train", "val", "test"]): + """Tests that the number of filenames matches the dataset size.""" + dataset = datasets.Camelyon16(root=root, split=split, **DEFAULT_ARGS) + _setup_datasets(dataset) + + filenames = set() + for i in range(len(dataset)): + filenames.add(dataset.filename(i)) + + assert len(filenames) == len(dataset.datasets) + + +def _check_batch_shape(batch: Any): + assert isinstance(batch, tuple) + assert len(batch) == 3 + + image, target, metadata = batch + assert isinstance(image, torch.Tensor) + assert image.shape == (3, TARGET_SIZE, TARGET_SIZE) + + assert isinstance(target, np.ndarray) + assert target.size == 1 + + assert isinstance(metadata, dict) + assert "wsi_id" in metadata + + +@pytest.fixture +def root(assets_path: str) -> str: + """Fixture returning the root directory of the dataset.""" + return os.path.join(assets_path, "vision/datasets/camelyon16") + + +def _setup_datasets(*datasets: datasets.Camelyon16): + for dataset in datasets: + dataset.setup() diff --git a/tests/eva/vision/test_vision_cli.py b/tests/eva/vision/test_vision_cli.py index f8cd169e..c09a1cae 100644 --- a/tests/eva/vision/test_vision_cli.py +++ b/tests/eva/vision/test_vision_cli.py @@ -23,6 +23,7 @@ "configs/vision/dino_vit/offline/mhist.yaml", 
"configs/vision/dino_vit/offline/patch_camelyon.yaml", "configs/vision/dino_vit/offline/panda.yaml", + "configs/vision/dino_vit/offline/camelyon16.yaml", "configs/vision/owkin/phikon/offline/bach.yaml", "configs/vision/owkin/phikon/offline/crc.yaml", "configs/vision/owkin/phikon/offline/mhist.yaml", From b91f1b56eacfd537ead200ecada954197a6ef5c5 Mon Sep 17 00:00:00 2001 From: roman807 Date: Mon, 10 Jun 2024 12:16:20 +0200 Subject: [PATCH 24/29] 475 define slide level evaluation protocol (#511) * updated configs * adjust patience * addressed comments * fixed typo * remove prefetch factor --- .../vision/dino_vit/offline/camelyon16.yaml | 20 ++- configs/vision/dino_vit/offline/panda.yaml | 24 ++-- .../owkin/phikon/offline/camelyon16.yaml | 129 ++++++++++++++++++ .../vision/owkin/phikon/offline/panda.yaml | 128 +++++++++++++++++ tests/eva/vision/test_vision_cli.py | 2 + 5 files changed, 280 insertions(+), 23 deletions(-) create mode 100644 configs/vision/owkin/phikon/offline/camelyon16.yaml create mode 100644 configs/vision/owkin/phikon/offline/panda.yaml diff --git a/configs/vision/dino_vit/offline/camelyon16.yaml b/configs/vision/dino_vit/offline/camelyon16.yaml index 90a1bf9b..29a55c1d 100644 --- a/configs/vision/dino_vit/offline/camelyon16.yaml +++ b/configs/vision/dino_vit/offline/camelyon16.yaml @@ -2,9 +2,9 @@ trainer: class_path: eva.Trainer init_args: - n_runs: &N_RUNS ${oc.env:N_RUNS, 3} - default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/camelyon16} - max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} + n_runs: &N_RUNS ${oc.env:N_RUNS, 5} + default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/camelyon16} + max_epochs: &MAX_EPOCHS ${oc.env:MAX_EPOCHS, 100} callbacks: - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: @@ -19,12 +19,12 @@ trainer: - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 - patience: 74 + patience: ${oc.env:PATIENCE, 10} monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - class_path: eva.callbacks.EmbeddingsWriter init_args: - output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/camelyon16 + output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/${oc.env:DINO_BACKBONE, dino_vits16}/camelyon16} dataloader_idx_map: 0: train 1: val @@ -51,19 +51,19 @@ model: head: class_path: eva.vision.models.networks.ABMIL init_args: - input_size: ${oc.env:IN_FEATURES, 768} + input_size: ${oc.env:IN_FEATURES, 384} output_size: &NUM_CLASSES 1 projected_input_size: 128 criterion: torch.nn.BCEWithLogitsLoss optimizer: class_path: torch.optim.AdamW init_args: - lr: &LR_VALUE 0.000039 + lr: ${oc.env:LR_VALUE, 0.001} betas: [0.9, 0.999] lr_scheduler: class_path: torch.optim.lr_scheduler.CosineAnnealingLR init_args: - T_max: *MAX_STEPS + T_max: *MAX_EPOCHS eta_min: 0.0 metrics: common: @@ -123,7 +123,7 @@ data: split: test dataloaders: train: - batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 16} + batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 32} shuffle: true val: batch_size: *BATCH_SIZE @@ -131,5 +131,3 @@ data: batch_size: *BATCH_SIZE predict: batch_size: &PREDICT_BATCH_SIZE ${oc.env:PREDICT_BATCH_SIZE, 64} - num_workers: 12 #multiprocessing.cpu_count - prefetch_factor: 2 diff --git a/configs/vision/dino_vit/offline/panda.yaml b/configs/vision/dino_vit/offline/panda.yaml index 8977a7f1..600bfb12 100644 --- 
a/configs/vision/dino_vit/offline/panda.yaml +++ b/configs/vision/dino_vit/offline/panda.yaml @@ -2,9 +2,9 @@ trainer: class_path: eva.Trainer init_args: - n_runs: &N_RUNS ${oc.env:N_RUNS, 1} + n_runs: &N_RUNS ${oc.env:N_RUNS, 5} default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/panda} - max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} + max_epochs: &MAX_EPOCHS ${oc.env:MAX_EPOCHS, 49} callbacks: - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: @@ -19,12 +19,12 @@ trainer: - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 - patience: 13 + patience: ${oc.env:PATIENCE, 8} monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - class_path: eva.callbacks.EmbeddingsWriter init_args: - output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/panda + output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/${oc.env:DINO_BACKBONE, dino_vits16}/panda} dataloader_idx_map: 0: train 1: val @@ -53,17 +53,17 @@ model: init_args: input_size: ${oc.env:IN_FEATURES, 384} output_size: &NUM_CLASSES 6 + projected_input_size: 128 criterion: torch.nn.CrossEntropyLoss optimizer: - class_path: torch.optim.SGD + class_path: torch.optim.AdamW init_args: - lr: &LR_VALUE ${oc.env:LR_VALUE, 0.00004} - momentum: 0.9 - weight_decay: 0.0 + lr: ${oc.env:LR_VALUE, 0.001} + betas: [0.9, 0.999] lr_scheduler: class_path: torch.optim.lr_scheduler.CosineAnnealingLR init_args: - T_max: *MAX_STEPS + T_max: *MAX_EPOCHS eta_min: 0.0 metrics: common: @@ -123,11 +123,11 @@ data: split: test dataloaders: train: - batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 16} + batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 32} shuffle: true val: batch_size: *BATCH_SIZE + test: + batch_size: *BATCH_SIZE predict: batch_size: &PREDICT_BATCH_SIZE ${oc.env:PREDICT_BATCH_SIZE, 64} - num_workers: 12 #multiprocessing.cpu_count - prefetch_factor: 2 diff --git a/configs/vision/owkin/phikon/offline/camelyon16.yaml b/configs/vision/owkin/phikon/offline/camelyon16.yaml new file mode 100644 index 00000000..f7aca73f --- /dev/null +++ b/configs/vision/owkin/phikon/offline/camelyon16.yaml @@ -0,0 +1,129 @@ +--- +trainer: + class_path: eva.Trainer + init_args: + n_runs: &N_RUNS ${oc.env:N_RUNS, 5} + default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/owkin/phikon/offline/camelyon16} + max_epochs: &MAX_EPOCHS ${oc.env:MAX_EPOCHS, 100} + callbacks: + - class_path: lightning.pytorch.callbacks.LearningRateMonitor + init_args: + logging_interval: epoch + - class_path: lightning.pytorch.callbacks.ModelCheckpoint + init_args: + filename: best + save_last: true + save_top_k: 1 + monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy} + mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + min_delta: 0 + patience: ${oc.env:PATIENCE, 10} + monitor: *MONITOR_METRIC + mode: *MONITOR_METRIC_MODE + - class_path: eva.callbacks.EmbeddingsWriter + init_args: + output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/owkin/phikon/camelyon16} + dataloader_idx_map: + 0: train + 1: val + 2: test + metadata_keys: ["wsi_id"] + backbone: + class_path: eva.models.HuggingFaceModel + init_args: + model_name_or_path: owkin/phikon + tensor_transforms: + class_path: eva.core.models.networks.transforms.ExtractCLSFeatures + logger: + - class_path: lightning.pytorch.loggers.TensorBoardLogger 
+ init_args: + save_dir: *OUTPUT_ROOT + name: "" +model: + class_path: eva.HeadModule + init_args: + head: + class_path: eva.vision.models.networks.ABMIL + init_args: + input_size: ${oc.env:IN_FEATURES, 768} + output_size: &NUM_CLASSES 1 + projected_input_size: 128 + criterion: torch.nn.BCEWithLogitsLoss + optimizer: + class_path: torch.optim.AdamW + init_args: + lr: ${oc.env:LR_VALUE, 0.001} + betas: [0.9, 0.999] + lr_scheduler: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: *MAX_EPOCHS + eta_min: 0.0 + metrics: + common: + - class_path: eva.metrics.AverageLoss + - class_path: eva.metrics.BinaryClassificationMetrics +data: + class_path: eva.DataModule + init_args: + datasets: + train: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: &DATASET_ARGS + root: *DATASET_EMBEDDINGS_ROOT + manifest_file: manifest.csv + split: train + embeddings_transforms: + class_path: eva.core.data.transforms.Pad2DTensor + init_args: + pad_size: 10_000 + target_transforms: + class_path: eva.core.data.transforms.dtype.ArrayToFloatTensor + val: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: + <<: *DATASET_ARGS + split: val + test: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: + <<: *DATASET_ARGS + split: test + predict: + - class_path: eva.vision.datasets.Camelyon16 + init_args: &PREDICT_DATASET_ARGS + root: ${oc.env:DATA_ROOT, ./data}/camelyon16 + sampler: + class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler + init_args: + max_samples: 10_000 + width: 224 + height: 224 + target_mpp: 0.25 + split: train + image_transforms: + class_path: eva.vision.data.transforms.common.ResizeAndCrop + init_args: + size: ${oc.env:RESIZE_DIM, 224} + mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} + std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} + - class_path: eva.vision.datasets.Camelyon16 + init_args: + <<: *PREDICT_DATASET_ARGS + split: val + - class_path: eva.vision.datasets.Camelyon16 + init_args: + <<: *PREDICT_DATASET_ARGS + split: test + dataloaders: + train: + batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 32} + shuffle: true + val: + batch_size: *BATCH_SIZE + test: + batch_size: *BATCH_SIZE + predict: + batch_size: &PREDICT_BATCH_SIZE ${oc.env:PREDICT_BATCH_SIZE, 64} diff --git a/configs/vision/owkin/phikon/offline/panda.yaml b/configs/vision/owkin/phikon/offline/panda.yaml new file mode 100644 index 00000000..32194554 --- /dev/null +++ b/configs/vision/owkin/phikon/offline/panda.yaml @@ -0,0 +1,128 @@ +--- +trainer: + class_path: eva.Trainer + init_args: + n_runs: &N_RUNS ${oc.env:N_RUNS, 5} + default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/owkin/phikon/offline/panda} + max_epochs: &MAX_EPOCHS ${oc.env:MAX_EPOCHS, 49} + callbacks: + - class_path: lightning.pytorch.callbacks.LearningRateMonitor + init_args: + logging_interval: epoch + - class_path: lightning.pytorch.callbacks.ModelCheckpoint + init_args: + filename: best + save_last: true + save_top_k: 1 + monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/MulticlassAccuracy} + mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + min_delta: 0 + patience: ${oc.env:PATIENCE, 8} + monitor: *MONITOR_METRIC + mode: *MONITOR_METRIC_MODE + - class_path: eva.callbacks.EmbeddingsWriter + init_args: + output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/owkin/phikon/panda} + dataloader_idx_map: + 0: train + 1: 
val + 2: test + metadata_keys: ["wsi_id"] + backbone: + class_path: eva.models.HuggingFaceModel + init_args: + model_name_or_path: owkin/phikon + tensor_transforms: + class_path: eva.core.models.networks.transforms.ExtractCLSFeatures + logger: + - class_path: lightning.pytorch.loggers.TensorBoardLogger + init_args: + save_dir: *OUTPUT_ROOT + name: "" +model: + class_path: eva.HeadModule + init_args: + head: + class_path: eva.vision.models.networks.ABMIL + init_args: + input_size: ${oc.env:IN_FEATURES, 768} + output_size: &NUM_CLASSES 6 + criterion: torch.nn.CrossEntropyLoss + optimizer: + class_path: torch.optim.AdamW + init_args: + lr: ${oc.env:LR_VALUE, 0.001} + betas: [0.9, 0.999] + lr_scheduler: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: *MAX_EPOCHS + eta_min: 0.0 + metrics: + common: + - class_path: eva.metrics.AverageLoss + - class_path: eva.metrics.MulticlassClassificationMetrics + init_args: + num_classes: *NUM_CLASSES +data: + class_path: eva.DataModule + init_args: + datasets: + train: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: &DATASET_ARGS + root: *DATASET_EMBEDDINGS_ROOT + manifest_file: manifest.csv + split: train + embeddings_transforms: + class_path: eva.core.data.transforms.Pad2DTensor + init_args: + pad_size: &N_PATCHES 1000 + val: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: + <<: *DATASET_ARGS + split: val + test: + class_path: eva.datasets.MultiEmbeddingsClassificationDataset + init_args: + <<: *DATASET_ARGS + split: test + predict: + - class_path: eva.vision.datasets.PANDA + init_args: &PREDICT_DATASET_ARGS + root: ${oc.env:DATA_ROOT, ./data}/panda/prostate-cancer-grade-assessment + sampler: + class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler + init_args: + max_samples: *N_PATCHES + width: 224 + height: 224 + target_mpp: 0.5 + split: train + image_transforms: + class_path: eva.vision.data.transforms.common.ResizeAndCrop + init_args: + size: ${oc.env:RESIZE_DIM, 224} + mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} + std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} + - class_path: eva.vision.datasets.PANDA + init_args: + <<: *PREDICT_DATASET_ARGS + split: val + - class_path: eva.vision.datasets.PANDA + init_args: + <<: *PREDICT_DATASET_ARGS + split: test + dataloaders: + train: + batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 32} + shuffle: true + val: + batch_size: *BATCH_SIZE + test: + batch_size: *BATCH_SIZE + predict: + batch_size: &PREDICT_BATCH_SIZE ${oc.env:PREDICT_BATCH_SIZE, 64} diff --git a/tests/eva/vision/test_vision_cli.py b/tests/eva/vision/test_vision_cli.py index c09a1cae..174f0c32 100644 --- a/tests/eva/vision/test_vision_cli.py +++ b/tests/eva/vision/test_vision_cli.py @@ -28,6 +28,8 @@ "configs/vision/owkin/phikon/offline/crc.yaml", "configs/vision/owkin/phikon/offline/mhist.yaml", "configs/vision/owkin/phikon/offline/patch_camelyon.yaml", + "configs/vision/owkin/phikon/offline/panda.yaml", + "configs/vision/owkin/phikon/offline/camelyon16.yaml", ], ) def test_configuration_initialization(configuration_file: str, lib_path: str) -> None: From 094b9e4d635ec5c3c7582eb31851a3825b93998c Mon Sep 17 00:00:00 2001 From: roman807 Date: Tue, 11 Jun 2024 14:14:49 +0200 Subject: [PATCH 25/29] update 360-aggregated-feature before PR to main (#527) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Updated developer guide (#418) * Update `TotalSegmentator2D` dataset to fetch all the slices 
(#416) * Move metrics to CPU when using single device (#446) * Remove total segmentator classification dataset (#450) * updated eva logo (#454) * updated eva logo * renamed files * Update actions/checkout digest to a5ac7e5 (#458) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> * Add configuration logger (#466) * Update `README` with paper citation (#474) * update docs (#482) * Update img shields of README (#480) * Fix `torch` and `jsonargparse` versions (#483) * update depedencies * update * bump micro version (#486) * update config links (#487) * Update paper citation format (#489) * Update the vision dataset return types to `tv_tensors` (#478) * Refactor embeddings writer (#461) * fixed phikon configs (#493) * Refactor embeddings datasets (#495) * Add doc tests and minor fixes (#492) * support setting download as env-variable (#514) * updated confis and doc * typo * update datasets * fixed types * src/eva/core/callbacks/writers/embeddings/base.py * formatting * types --------- Co-authored-by: Nicolas Känzig <36882833+nkaenzig@users.noreply.github.com> Co-authored-by: ioangatop Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/ci.yaml | 62 +- .github/workflows/docs.yaml | 2 +- .github/workflows/release.yaml | 3 +- README.md | 46 +- configs/vision/dino_vit/offline/bach.yaml | 7 +- .../vision/dino_vit/offline/camelyon16.yaml | 4 +- configs/vision/dino_vit/offline/crc.yaml | 7 +- configs/vision/dino_vit/offline/mhist.yaml | 9 +- configs/vision/dino_vit/offline/panda.yaml | 4 +- .../dino_vit/offline/patch_camelyon.yaml | 11 +- configs/vision/dino_vit/online/bach.yaml | 3 +- configs/vision/dino_vit/online/crc.yaml | 3 +- configs/vision/dino_vit/online/mhist.yaml | 5 +- .../dino_vit/online/patch_camelyon.yaml | 5 +- configs/vision/owkin/phikon/offline/bach.yaml | 9 +- .../owkin/phikon/offline/camelyon16.yaml | 4 +- configs/vision/owkin/phikon/offline/crc.yaml | 9 +- .../vision/owkin/phikon/offline/mhist.yaml | 11 +- .../vision/owkin/phikon/offline/panda.yaml | 4 +- .../owkin/phikon/offline/patch_camelyon.yaml | 13 +- configs/vision/tests/offline/panda.yaml | 2 +- .../vision/tests/offline/patch_camelyon.yaml | 9 +- .../vision/tests/online/patch_camelyon.yaml | 4 +- docs/DEVELOPER_GUIDE.md | 5 +- docs/images/eva-logo.png | Bin 11913 -> 18602 bytes docs/images/eva-stripes.png | Bin 3578 -> 5275 bytes docs/index.md | 5 +- docs/reference/core/callbacks.md | 2 +- docs/reference/vision/data/datasets.md | 1 - docs/user-guide/advanced/model_wrappers.md | 2 +- .../advanced/replicate_evaluations.md | 2 +- docs/user-guide/getting-started/how_to_use.md | 2 +- .../user-guide/tutorials/offline_vs_online.md | 4 +- noxfile.py | 3 - pdm.lock | 681 ++++++++++-------- pyproject.toml | 5 +- src/eva/core/callbacks/__init__.py | 5 +- src/eva/core/callbacks/config.py | 143 ++++ src/eva/core/callbacks/writers/__init__.py | 4 +- src/eva/core/callbacks/writers/embeddings.py | 269 ------- .../callbacks/writers/embeddings/__init__.py | 5 + .../callbacks/writers/embeddings/_manifest.py | 68 ++ .../core/callbacks/writers/embeddings/base.py | 172 +++++ .../writers/embeddings/classification.py | 112 +++ .../writers/{ => embeddings}/typings.py | 4 +- src/eva/core/data/datasets/__init__.py | 4 +- .../data/datasets/classification/__init__.py | 8 + .../classification/embeddings.py | 11 +- .../classification/multi_embeddings.py | 6 +- .../{embeddings/base.py => embeddings.py} | 67 +- .../core/data/datasets/embeddings/__init__.py | 13 - 
.../embeddings/classification/__init__.py | 10 - src/eva/core/loggers/__init__.py | 6 +- src/eva/core/loggers/experimental_loggers.py | 8 + src/eva/core/loggers/log/__init__.py | 5 + src/eva/core/loggers/log/parameters.py | 64 ++ src/eva/core/loggers/log/utils.py | 13 + src/eva/core/models/modules/module.py | 26 +- src/eva/core/models/modules/typings.py | 2 +- src/eva/vision/data/datasets/__init__.py | 2 - src/eva/vision/data/datasets/_utils.py | 6 +- .../data/datasets/classification/__init__.py | 2 - .../data/datasets/classification/bach.py | 25 +- .../data/datasets/classification/base.py | 35 +- .../datasets/classification/camelyon16.py | 14 +- .../data/datasets/classification/crc.py | 25 +- .../data/datasets/classification/mhist.py | 25 +- .../data/datasets/classification/panda.py | 14 +- .../datasets/classification/patch_camelyon.py | 29 +- .../classification/total_segmentator.py | 213 ------ .../data/datasets/classification/wsi.py | 4 +- .../segmentation/total_segmentator.py | 109 +-- src/eva/vision/data/datasets/wsi.py | 8 +- .../data/transforms/common/resize_and_crop.py | 13 +- src/eva/vision/utils/io/__init__.py | 3 +- src/eva/vision/utils/io/image.py | 23 +- .../semantic_labels/masks.nii.gz | Bin 0 -> 79 bytes .../semantic_labels/masks.nii.gz | Bin 0 -> 79 bytes .../semantic_labels/masks.nii.gz | Bin 0 -> 79 bytes .../callbacks/writers/embeddings/__init__.py | 1 + .../test_classification.py} | 2 +- .../classification/__init__.py | 0 .../classification/test_embeddings.py | 5 +- .../classification/test_multi_embeddings.py | 2 +- .../core/data/datasets/embeddings/__init__.py | 1 - .../data/datasets/classification/test_bach.py | 11 +- .../classification/test_camelyon16.py | 8 +- .../data/datasets/classification/test_crc.py | 9 +- .../datasets/classification/test_mhist.py | 9 +- .../datasets/classification/test_panda.py | 7 +- .../classification/test_patch_camelyon.py | 9 +- .../classification/test_total_segmentator.py | 63 -- .../segmentation/test_total_segmentator.py | 6 +- tests/eva/vision/data/datasets/test_wsi.py | 2 +- .../transforms/common/test_resize_and_crop.py | 28 +- 95 files changed, 1426 insertions(+), 1260 deletions(-) create mode 100644 src/eva/core/callbacks/config.py delete mode 100644 src/eva/core/callbacks/writers/embeddings.py create mode 100644 src/eva/core/callbacks/writers/embeddings/__init__.py create mode 100644 src/eva/core/callbacks/writers/embeddings/_manifest.py create mode 100644 src/eva/core/callbacks/writers/embeddings/base.py create mode 100644 src/eva/core/callbacks/writers/embeddings/classification.py rename src/eva/core/callbacks/writers/{ => embeddings}/typings.py (89%) create mode 100644 src/eva/core/data/datasets/classification/__init__.py rename src/eva/core/data/datasets/{embeddings => }/classification/embeddings.py (87%) rename src/eva/core/data/datasets/{embeddings => }/classification/multi_embeddings.py (94%) rename src/eva/core/data/datasets/{embeddings/base.py => embeddings.py} (91%) delete mode 100644 src/eva/core/data/datasets/embeddings/__init__.py delete mode 100644 src/eva/core/data/datasets/embeddings/classification/__init__.py create mode 100644 src/eva/core/loggers/experimental_loggers.py create mode 100644 src/eva/core/loggers/log/__init__.py create mode 100644 src/eva/core/loggers/log/parameters.py create mode 100644 src/eva/core/loggers/log/utils.py delete mode 100644 src/eva/vision/data/datasets/classification/total_segmentator.py create mode 100644 
tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/semantic_labels/masks.nii.gz create mode 100644 tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/segmentations/semantic_labels/masks.nii.gz create mode 100644 tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/segmentations/semantic_labels/masks.nii.gz create mode 100644 tests/eva/core/callbacks/writers/embeddings/__init__.py rename tests/eva/core/callbacks/writers/{test_embeddings.py => embeddings/test_classification.py} (99%) rename tests/eva/core/data/datasets/{embeddings => }/classification/__init__.py (100%) rename tests/eva/core/data/datasets/{embeddings => }/classification/test_embeddings.py (91%) rename tests/eva/core/data/datasets/{embeddings => }/classification/test_multi_embeddings.py (98%) delete mode 100644 tests/eva/core/data/datasets/embeddings/__init__.py delete mode 100644 tests/eva/vision/data/datasets/classification/test_total_segmentator.py diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 01998020..7b6cf4ed 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -7,30 +7,11 @@ on: branches: jobs: - security: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 - - name: Perform gitleaks checks - run: | - # Download and check - curl -LJO https://github.com/gitleaks/gitleaks/releases/download/v8.18.2/gitleaks_8.18.2_linux_x64.tar.gz - curl -LJO https://github.com/gitleaks/gitleaks/releases/download/v8.18.2/gitleaks_8.18.2_checksums.txt - shasum -a 256 --ignore-missing --quiet -c gitleaks_8.18.2_checksums.txt - if [ $? != 0 ]; then exit 1; fi - # Extract gitleaks - tar -zxvf gitleaks_8.18.2_linux_x64.tar.gz gitleaks - # Run gitleaks - ./gitleaks detect \ - --config .gitleaks.toml \ - --gitleaks-ignore-path .gitleaksignore \ - --no-git quality: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - name: Setting up PDM uses: pdm-project/setup-pdm@568ddd69406b30de1774ec0044b73ae06e716aa4 # v4 with: @@ -60,7 +41,7 @@ jobs: sudo add-apt-repository ppa:openslide/openslide sudo apt install -y openslide-tools - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: lfs: true - name: Setting up PDM @@ -74,3 +55,42 @@ jobs: python-versions: ${{ matrix.python-version }} - name: Executing unit tests run: nox -s test + docs: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + - name: Setting up PDM + uses: pdm-project/setup-pdm@568ddd69406b30de1774ec0044b73ae06e716aa4 # v4 + with: + python-version: "3.10" + architecture: x64 + - name: Setting up nox + uses: wntrblm/nox@5656fcedc31a1ea37d016e4d94d00185330cc528 # 2024.04.15 + with: + python-versions: "3.10" + - name: Configure Git Credentials + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + - name: Building docs + run: nox -s docs -- deploy --update-aliases dev + security: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + - name: Perform gitleaks checks + run: | + # Download and check + curl -LJO 
https://github.com/gitleaks/gitleaks/releases/download/v8.18.2/gitleaks_8.18.2_linux_x64.tar.gz + curl -LJO https://github.com/gitleaks/gitleaks/releases/download/v8.18.2/gitleaks_8.18.2_checksums.txt + shasum -a 256 --ignore-missing --quiet -c gitleaks_8.18.2_checksums.txt + if [ $? != 0 ]; then exit 1; fi + # Extract gitleaks + tar -zxvf gitleaks_8.18.2_linux_x64.tar.gz gitleaks + # Run gitleaks + ./gitleaks detect \ + --config .gitleaks.toml \ + --gitleaks-ignore-path .gitleaksignore \ + --no-git diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 7805b489..e540ccd2 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -13,7 +13,7 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: fetch-depth: 0 - name: Setting up PDM diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index b3214f57..07bf4275 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -2,6 +2,7 @@ name: Release on: + workflow_dispatch: push: tags: - "*" @@ -13,7 +14,7 @@ jobs: id-token: write contents: write steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - name: Setting up PDM uses: pdm-project/setup-pdm@568ddd69406b30de1774ec0044b73ae06e716aa4 # v4 with: diff --git a/README.md b/README.md index d30f8ac0..4cbbdf5f 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,18 @@
-[previous eva logo image]
+[updated eva logo image]
_Oncology FM Evaluation Framework by kaiko.ai_ [![PyPI](https://img.shields.io/pypi/v/kaiko-eva.svg?logo=python)](https://pypi.python.org/pypi/kaiko-eva) -[![docs](https://img.shields.io/badge/docs-latest-green)](https://kaiko-ai.github.io/eva/latest) -[![license](https://img.shields.io/badge/License-Apache%202.0-blue.svg?labelColor=gray)](https://github.com/kaiko-ai/eva#license) +[![docs](https://img.shields.io/badge/📚_docs-latest-green)](https://kaiko-ai.github.io/eva/latest) +[![license](https://img.shields.io/badge/⚖️_License-Apache%202.0-blue.svg?labelColor=gray)](https://github.com/kaiko-ai/eva#license)
+[![paper](http://img.shields.io/badge/OpenReview-MIDL_2024-B31B1B.svg)](https://openreview.net/forum?id=FNBQOPj18N&noteId=FNBQOPj18N)

Installation • @@ -59,12 +63,12 @@ eva --version ## How To Use -_eva_ can be used directly from the terminal as a CLI tool as follows: +_`eva`_ can be used directly from the terminal as a CLI tool as follows: ```sh eva {fit,predict,predict_fit} --config url/or/path/to/the/config.yaml ``` -When used as a CLI tool, `_eva_` supports configuration files (`.yaml`) as an argument to define its functionality. +When used as a CLI tool, _`eva`_ supports configuration files (`.yaml`) as an argument to define its functionality. Native supported configs can be found at the [configs](https://github.com/kaiko-ai/eva/tree/main/configs) directory of the repo. Apart from cloning the repo, you can download the latest config folder as `.zip` from your browser from [here](https://download-directory.github.io/?url=https://github.com/kaiko-ai/eva/tree/main/configs). Alternatively, @@ -98,7 +102,7 @@ and [tutorials](https://kaiko-ai.github.io/eva/dev/user-guide/advanced/replicate ## Benchmarks -In this section you will find model benchmarks which were generated with `_eva_`. +In this section you will find model benchmarks which were generated with _`eva`_. ### Table I: WSI patch-level benchmark @@ -129,15 +133,15 @@ over 5 runs, with an average standard deviation of ±0.003._
_References_: -1. _"Emerging properties in self-supervised vision transformers”_ -2. _"Benchmarking self-supervised learning on diverse pathology datasets”_ -3. _"Scaling self-supervised learning for histopathology with masked image modeling”_ -4. _"A General-Purpose Self-Supervised Model for Computational Pathology”_ -5. _"Towards Training Large-Scale Pathology Foundation Models: from TCGA to Hospital Scale”_ +1. _"Emerging properties in self-supervised vision transformers”_, [arXiv](https://arxiv.org/abs/2104.14294) +2. _"Benchmarking self-supervised learning on diverse pathology datasets”_, [arXiv](https://arxiv.org/abs/2212.04690) +3. _"Scaling self-supervised learning for histopathology with masked image modeling”_, [medRxiv](https://www.medrxiv.org/content/10.1101/2023.07.21.23292757v1) +4. _"A General-Purpose Self-Supervised Model for Computational Pathology”_, [arXiv](https://arxiv.org/abs/2308.15474) +5. _"Towards Training Large-Scale Pathology Foundation Models: from TCGA to Hospital Scale”_, [arXiv](https://arxiv.org/pdf/2404.15217) ## Contributing -_eva_ is an open source project and welcomes contributions of all kinds. Please checkout the [developer](./docs/DEVELOPER_GUIDE.md) +_`eva`_ is an open source project and welcomes contributions of all kinds. Please checkout the [developer](./docs/DEVELOPER_GUIDE.md) and [contributing guide](./docs/CONTRIBUTING.md) for help on how to do so. All contributors must follow the [code of conduct](./docs/CODE_OF_CONDUCT.md). @@ -162,7 +166,23 @@ Our codebase is built using multiple opensource contributions

---- + +## Citation + +If you find this repository useful, please consider giving a star ⭐ and adding the following citation: + +```bibtex +@inproceedings{kaiko.ai2024eva, + title={eva: Evaluation framework for pathology foundation models}, + author={kaiko.ai and Ioannis Gatopoulos and Nicolas K{\"a}nzig and Roman Moser and Sebastian Ot{\'a}lora}, + booktitle={Medical Imaging with Deep Learning}, + year={2024}, + url={https://openreview.net/forum?id=FNBQOPj18N} +} +``` + +
+
diff --git a/configs/vision/dino_vit/offline/bach.yaml b/configs/vision/dino_vit/offline/bach.yaml index 3d1dd721..926371db 100644 --- a/configs/vision/dino_vit/offline/bach.yaml +++ b/configs/vision/dino_vit/offline/bach.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/bach} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 400 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/bach dataloader_idx_map: @@ -89,12 +90,12 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/bach split: train - download: false + download: ${oc.env:DOWNLOAD, false} # Set `download: true` to download the dataset from https://zenodo.org/records/3632035 # The BACH dataset is distributed under the following license # Attribution-NonCommercial-NoDerivs 4.0 International license # (see: https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/dino_vit/offline/camelyon16.yaml b/configs/vision/dino_vit/offline/camelyon16.yaml index 29a55c1d..c165b37e 100644 --- a/configs/vision/dino_vit/offline/camelyon16.yaml +++ b/configs/vision/dino_vit/offline/camelyon16.yaml @@ -22,7 +22,7 @@ trainer: patience: ${oc.env:PATIENCE, 10} monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/${oc.env:DINO_BACKBONE, dino_vits16}/camelyon16} dataloader_idx_map: @@ -98,7 +98,7 @@ data: predict: - class_path: eva.vision.datasets.Camelyon16 init_args: &PREDICT_DATASET_ARGS - root: ${oc.env:DATA_ROOT, ./data}/camelyon16 + root: ${oc.env:DATA_ROOT, ./data/camelyon16} sampler: class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler init_args: diff --git a/configs/vision/dino_vit/offline/crc.yaml b/configs/vision/dino_vit/offline/crc.yaml index 1790d610..56985ee2 100644 --- a/configs/vision/dino_vit/offline/crc.yaml +++ b/configs/vision/dino_vit/offline/crc.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/crc} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 24 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/crc dataloader_idx_map: @@ -89,11 +90,11 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/crc split: train - download: false + download: ${oc.env:DOWNLOAD, false} # Set `download: true` to 
download the dataset from https://zenodo.org/records/1214456 # The CRC dataset is distributed under the following license: "CC BY 4.0 LEGAL CODE" # (see: https://creativecommons.org/licenses/by/4.0/legalcode) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} diff --git a/configs/vision/dino_vit/offline/mhist.yaml b/configs/vision/dino_vit/offline/mhist.yaml index 2fac6964..77cd7cde 100644 --- a/configs/vision/dino_vit/offline/mhist.yaml +++ b/configs/vision/dino_vit/offline/mhist.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/mhist} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 51 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/mhist dataloader_idx_map: @@ -78,7 +79,9 @@ data: manifest_file: manifest.csv split: train target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor + class_path: torchvision.transforms.v2.ToDtype + init_args: + dtype: torch.float32 val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -89,7 +92,7 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/mhist split: train - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/dino_vit/offline/panda.yaml b/configs/vision/dino_vit/offline/panda.yaml index 600bfb12..57f34696 100644 --- a/configs/vision/dino_vit/offline/panda.yaml +++ b/configs/vision/dino_vit/offline/panda.yaml @@ -22,7 +22,7 @@ trainer: patience: ${oc.env:PATIENCE, 8} monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/${oc.env:DINO_BACKBONE, dino_vits16}/panda} dataloader_idx_map: @@ -98,7 +98,7 @@ data: predict: - class_path: eva.vision.datasets.PANDA init_args: &PREDICT_DATASET_ARGS - root: ${oc.env:DATA_ROOT, ./data}/panda/prostate-cancer-grade-assessment + root: ${oc.env:DATA_ROOT, ./data/panda/prostate-cancer-grade-assessment} sampler: class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler init_args: diff --git a/configs/vision/dino_vit/offline/patch_camelyon.yaml b/configs/vision/dino_vit/offline/patch_camelyon.yaml index 1a6a7b98..bb9fa5d3 100644 --- a/configs/vision/dino_vit/offline/patch_camelyon.yaml +++ b/configs/vision/dino_vit/offline/patch_camelyon.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/patch_camelyon} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 9 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + 
- class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/patch_camelyon dataloader_idx_map: @@ -79,7 +80,9 @@ data: manifest_file: manifest.csv split: train target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor + class_path: torchvision.transforms.v2.ToDtype + init_args: + dtype: torch.float32 val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -95,12 +98,12 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/patch_camelyon split: train - download: false + download: ${oc.env:DOWNLOAD, false} # Set `download: true` to download the dataset from https://zenodo.org/records/1494286 # The PatchCamelyon dataset is distributed under the following license: # "Creative Commons Zero v1.0 Universal" # (see: https://choosealicense.com/licenses/cc0-1.0/) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/dino_vit/online/bach.yaml b/configs/vision/dino_vit/online/bach.yaml index 6171eda2..a72689e1 100644 --- a/configs/vision/dino_vit/online/bach.yaml +++ b/configs/vision/dino_vit/online/bach.yaml @@ -5,6 +5,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/online/bach} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -76,7 +77,7 @@ data: # The BACH dataset is distributed under the following license # Attribution-NonCommercial-NoDerivs 4.0 International license # (see: https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/dino_vit/online/crc.yaml b/configs/vision/dino_vit/online/crc.yaml index f90c0cfc..102f3b44 100644 --- a/configs/vision/dino_vit/online/crc.yaml +++ b/configs/vision/dino_vit/online/crc.yaml @@ -5,6 +5,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/online/crc} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -75,7 +76,7 @@ data: # Set `download: true` to download the dataset from https://zenodo.org/records/1214456 # The CRC dataset is distributed under the following license: "CC BY 4.0 LEGAL CODE" # (see: https://creativecommons.org/licenses/by/4.0/legalcode) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} diff --git a/configs/vision/dino_vit/online/mhist.yaml b/configs/vision/dino_vit/online/mhist.yaml index cf4c6770..62b659c1 100644 --- a/configs/vision/dino_vit/online/mhist.yaml +++ b/configs/vision/dino_vit/online/mhist.yaml @@ -5,6 +5,7 @@ trainer: default_root_dir: &LIGHTNING_ROOT ${oc.env:LIGHTNING_ROOT, logs/dino_vits16/online/mhist} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -69,14 +70,12 @@ 
data: init_args: &DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/mhist split: train - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} - target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor val: class_path: eva.vision.datasets.MHIST init_args: diff --git a/configs/vision/dino_vit/online/patch_camelyon.yaml b/configs/vision/dino_vit/online/patch_camelyon.yaml index 0f3d2e2c..f594a3ee 100644 --- a/configs/vision/dino_vit/online/patch_camelyon.yaml +++ b/configs/vision/dino_vit/online/patch_camelyon.yaml @@ -5,6 +5,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/online/patch_camelyon} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -74,14 +75,12 @@ data: # The PatchCamelyon dataset is distributed under the following license: # "Creative Commons Zero v1.0 Universal" # (see: https://choosealicense.com/licenses/cc0-1.0/) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} - target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor val: class_path: eva.vision.datasets.PatchCamelyon init_args: diff --git a/configs/vision/owkin/phikon/offline/bach.yaml b/configs/vision/owkin/phikon/offline/bach.yaml index 35fe73e6..12ad9c50 100644 --- a/configs/vision/owkin/phikon/offline/bach.yaml +++ b/configs/vision/owkin/phikon/offline/bach.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/bach} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 400 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &EMBEDDINGS_DIR ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, owkin/phikon}/bach dataloader_idx_map: @@ -75,8 +76,6 @@ data: root: *EMBEDDINGS_DIR manifest_file: manifest.csv split: train - column_mapping: - path: embedding val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -87,12 +86,12 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/bach split: train - download: false + download: ${oc.env:DOWNLOAD, false} # Set `download: true` to download the dataset from https://zenodo.org/records/3632035 # The BACH dataset is distributed under the following license # Attribution-NonCommercial-NoDerivs 4.0 International license # (see: https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/owkin/phikon/offline/camelyon16.yaml b/configs/vision/owkin/phikon/offline/camelyon16.yaml index f7aca73f..d44bbc58 100644 --- 
a/configs/vision/owkin/phikon/offline/camelyon16.yaml +++ b/configs/vision/owkin/phikon/offline/camelyon16.yaml @@ -22,7 +22,7 @@ trainer: patience: ${oc.env:PATIENCE, 10} monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/owkin/phikon/camelyon16} dataloader_idx_map: @@ -94,7 +94,7 @@ data: predict: - class_path: eva.vision.datasets.Camelyon16 init_args: &PREDICT_DATASET_ARGS - root: ${oc.env:DATA_ROOT, ./data}/camelyon16 + root: ${oc.env:DATA_ROOT, ./data/camelyon16} sampler: class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler init_args: diff --git a/configs/vision/owkin/phikon/offline/crc.yaml b/configs/vision/owkin/phikon/offline/crc.yaml index a1abcca6..c823aea6 100644 --- a/configs/vision/owkin/phikon/offline/crc.yaml +++ b/configs/vision/owkin/phikon/offline/crc.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/crc} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 24 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &EMBEDDINGS_DIR ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, owkin/phikon}/crc dataloader_idx_map: @@ -75,8 +76,6 @@ data: root: *EMBEDDINGS_DIR manifest_file: manifest.csv split: train - column_mapping: - path: embedding val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -87,11 +86,11 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/crc split: train - download: false + download: ${oc.env:DOWNLOAD, false} # Set `download: true` to download the dataset from https://zenodo.org/records/1214456 # The CRC dataset is distributed under the following license: "CC BY 4.0 LEGAL CODE" # (see: https://creativecommons.org/licenses/by/4.0/legalcode) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} diff --git a/configs/vision/owkin/phikon/offline/mhist.yaml b/configs/vision/owkin/phikon/offline/mhist.yaml index a4dbf234..f4dce943 100644 --- a/configs/vision/owkin/phikon/offline/mhist.yaml +++ b/configs/vision/owkin/phikon/offline/mhist.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/mhist} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 51 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &EMBEDDINGS_DIR ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, owkin/phikon}/mhist dataloader_idx_map: @@ -73,10 +74,10 @@ data: root: *EMBEDDINGS_DIR manifest_file: manifest.csv split: train - column_mapping: - path: embedding target_transforms: - class_path: 
eva.core.data.transforms.ArrayToFloatTensor + class_path: torchvision.transforms.v2.ToDtype + init_args: + dtype: torch.float32 val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -87,7 +88,7 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/mhist split: train - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/owkin/phikon/offline/panda.yaml b/configs/vision/owkin/phikon/offline/panda.yaml index 32194554..462c2b53 100644 --- a/configs/vision/owkin/phikon/offline/panda.yaml +++ b/configs/vision/owkin/phikon/offline/panda.yaml @@ -22,7 +22,7 @@ trainer: patience: ${oc.env:PATIENCE, 8} monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/owkin/phikon/panda} dataloader_idx_map: @@ -93,7 +93,7 @@ data: predict: - class_path: eva.vision.datasets.PANDA init_args: &PREDICT_DATASET_ARGS - root: ${oc.env:DATA_ROOT, ./data}/panda/prostate-cancer-grade-assessment + root: ${oc.env:DATA_ROOT, ./data/panda/prostate-cancer-grade-assessment} sampler: class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler init_args: diff --git a/configs/vision/owkin/phikon/offline/patch_camelyon.yaml b/configs/vision/owkin/phikon/offline/patch_camelyon.yaml index be2bc4a7..8d27ba00 100644 --- a/configs/vision/owkin/phikon/offline/patch_camelyon.yaml +++ b/configs/vision/owkin/phikon/offline/patch_camelyon.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/patch_camelyon} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 9 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &EMBEDDINGS_DIR ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, owkin/phikon}/patch_camelyon dataloader_idx_map: @@ -74,10 +75,10 @@ data: root: *EMBEDDINGS_DIR manifest_file: manifest.csv split: train - column_mapping: - path: embedding target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor + class_path: torchvision.transforms.v2.ToDtype + init_args: + dtype: torch.float32 val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -93,12 +94,12 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/patch_camelyon split: train - download: false + download: ${oc.env:DOWNLOAD, false} # Set `download: true` to download the dataset from https://zenodo.org/records/1494286 # The PatchCamelyon dataset is distributed under the following license: # "Creative Commons Zero v1.0 Universal" # (see: https://choosealicense.com/licenses/cc0-1.0/) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/tests/offline/panda.yaml b/configs/vision/tests/offline/panda.yaml index 5cbd0456..28844dd1 100644 --- a/configs/vision/tests/offline/panda.yaml +++ b/configs/vision/tests/offline/panda.yaml @@ -7,7 +7,7 @@ 
trainer: limit_train_batches: 2 limit_val_batches: 2 callbacks: - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT}/panda dataloader_idx_map: diff --git a/configs/vision/tests/offline/patch_camelyon.yaml b/configs/vision/tests/offline/patch_camelyon.yaml index b9155881..16286058 100644 --- a/configs/vision/tests/offline/patch_camelyon.yaml +++ b/configs/vision/tests/offline/patch_camelyon.yaml @@ -7,7 +7,8 @@ trainer: limit_train_batches: 2 limit_val_batches: 2 callbacks: - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ConfigurationLogger + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT}/patch_camelyon dataloader_idx_map: @@ -71,7 +72,9 @@ data: manifest_file: manifest.csv split: train target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor + class_path: torchvision.transforms.v2.ToDtype + init_args: + dtype: torch.float32 val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -83,7 +86,7 @@ data: root: ${oc.env:TESTS_ROOT, tests/eva}/assets/vision/datasets/patch_camelyon split: train download: false - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} diff --git a/configs/vision/tests/online/patch_camelyon.yaml b/configs/vision/tests/online/patch_camelyon.yaml index 52c3b466..073fb82a 100644 --- a/configs/vision/tests/online/patch_camelyon.yaml +++ b/configs/vision/tests/online/patch_camelyon.yaml @@ -42,13 +42,11 @@ data: root: ${oc.env:TESTS_ROOT, tests/eva}/assets/vision/datasets/patch_camelyon split: train download: &DOWNLOAD_DATA false - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} - target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor val: class_path: eva.vision.datasets.PatchCamelyon init_args: diff --git a/docs/DEVELOPER_GUIDE.md b/docs/DEVELOPER_GUIDE.md index 92f562ad..a7f97bc1 100644 --- a/docs/DEVELOPER_GUIDE.md +++ b/docs/DEVELOPER_GUIDE.md @@ -17,10 +17,7 @@ Add a new dependency to the `core` submodule:
`pdm add <package_name>`

Add a new dependency to the `vision` submodule:
-`pdm add -G vision <package_name>`
-
-After adding a new dependency, you also need to update the `pdm.lock` file:
-`pdm update`
+`pdm add -G vision -G all <package_name>`

For more information about managing dependencies please look [here](https://pdm-project.org/latest/usage/dependency/#manage-dependencies).
diff --git a/docs/images/eva-logo.png b/docs/images/eva-logo.png
index 7c0f7eaef2eac73e1eaf7a8ca47adb42d9d344e3..c31d3ffe900c1c9f464e516cffbf9e11de42d1a2 100644
GIT binary patch
[base85-encoded binary image data omitted]
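Several hunks earlier in this patch swap the removed `eva.core.data.transforms.ArrayToFloatTensor` target transform for `torchvision.transforms.v2.ToDtype` with `dtype: torch.float32`. A minimal sketch of what the replacement does to an integer label — assuming torchvision's v2 behaviour of treating a plain tensor as an image, with `scale` left at its `False` default:

```python
import torch
from torchvision.transforms import v2

# Equivalent of the `target_transforms` entry in the configs above:
# cast the label tensor to float32 without rescaling its value.
to_float = v2.ToDtype(dtype=torch.float32)

label = torch.tensor(1)   # integer class label
out = to_float(label)
print(out, out.dtype)     # tensor(1.) torch.float32
```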
diff --git a/docs/images/eva-stripes.png b/docs/images/eva-stripes.png
index 274c1c7c2827c0a592bdea8776392a2872d62af3..ff7528b23b92f67eab447b169fd7377377e8aafb 100644
GIT binary patch
[base85-encoded binary image data omitted]
zygJP9(Wa<|vYlCe~ZfgPflS&sP7;n`X3Y1s&& z_ZxFLe5d2mpT8wdX4K#6=J4|(b_%WYl{t!Jj-oxOEiERz(X+-i+e|bqT~0uRPEGpK z8jsu&tW3V@M^5hUqP}aq`$+O;bZ1uq^dfv4?iAUug40hUINn^PZ#kXaoesD$m|6Sw z*t}iR;gv6N#BqtTl*pT{)T+OLka)@DT*BB(ibw{5D*KoQnmrZ5pQJ**7$n1pbNczr zFd+>(pwP*w1J5y&Ge1TEuIarOH;?&{J=YH2ArF400ZFzbwZ3o?WhpT3bbf)NjnA(U;ziQ&2gCb(2rr zMr{<2B!0fFckyxBdDTa}{M(`!|9qKS?i$M(Zrh}Nz_-rV5qdK|fHvw?A{IDOKXsNe z!7mvY)j3x%cOkO_7d?8-bLoCr+SWt9jIkxlvFt-@*3MmHCM^QN8<$ko-Ns@#NU;K6 zScZXn%6#3*le88n!QrJYG_n-WE#3B*KOTNizstJHl2MyQTq^sUNF`^$-XTH{b?#7U zO^b%(#AW9AtBf1j-CnVwvF zIO(%)ncNgB&Ec!_t7;%GY3yg`4pZiTxEOb5V7r&OJ(N{$!|F(wc|^p?p@i_#hMiF> z-;dT;S$@yX*4s%oKF<##8XYlA`1sfU{hg6K&$?cRCp?&n9g`i>c=-x2F*0>`v7C|NBPozu||9`SDa(|69Kx%@P6>ZU$-UsU{Kc5Hd=L L$O>1y(f9j*6Y%pj diff --git a/docs/images/eva-stripes.png b/docs/images/eva-stripes.png index 274c1c7c2827c0a592bdea8776392a2872d62af3..ff7528b23b92f67eab447b169fd7377377e8aafb 100644 GIT binary patch literal 5275 zcmeHLi(69J7DpS$u#p~)V=scV$tg?J9;PBpqfOWOLd}{?OGyjK6fgl1O>w3is~N}2 zX)IsV?RK>kQAx3fGHH&zL~kjisOY4SBH~0qc;uW5)=YhO?)?Yu=Q-d0_S$=`-&$+0 zwf5fUJEuZ}x7*t-wS&Q6_CbFO41>X}wwN1Y?uX_XP8y9-~^6py1vmK<$&aM+=HmQ3Tx8>9_ zr>GlC;v#cxj|;+vWECMGB1gZB+oZa(^2U}_{i|B#qg+PmopwkaUh?bVb(GVblW7DJ z>6d@4U*7lNvv%jmbxXcVCDQNQYb$j56o*OqBp|<8+DzRL6PQiQZajM$8-2Iv0Y#he zp??N_onv&T`;oSgED!n*p3@Qb?8czE81BG09j_# zw^O*!`egcaN&URCP+)F>F=XA%*uq!B--gUVjwS64iX#wUYt8grn6=d+*c>xuWqx5f z@vskOXc%m@xrf2#ocj)DYwj1BTj(Y0Ke$#~F3tIqMpy*5?g|JBGWWX@7~MUOW5IikVbKjIYf&kxu;T+ED#tr)z?f?NQ@YCDkYIh_f7S8o{{i1 z)0SzBGkQjZk>KyVmk^2$pe2*go83L!J)CiNXfzs|d??O0Ebz;>aP!FD`7nc# z(D+nRVnkp(#T-?$Yd9}&Pwb4Dzi{3;`Ud$mos=9vqnLpV+`F{A1;5YyPvDHnz3)tV zdwRWZ@;%ObNXrs@Lz3go)?zF%#F^dv%iLReEXESx_X7SVn=`ZKbmHu=m_JJcXP17n z+a3m6KnM!lx;xcMEh8ka3R-A0`14}P;_&C#>f_oZ@4a3w3s;=IMtyw2=Eb$~p04o1 zJ*M=uj8fgl*Ph%twyz9tGNLqeT$#ZjG&W&4>1>m}wrM~))rXTxMFt^J(kj(mV9I43 z!1Rc&OxMSiu2+EuqMo9H*)s&sU=$%(G zRC%V5+&(&@v*SBb1y@RBnIf^+ZrF@_|Y2tx+M8hj>C5j zTy2BnAr%Pk>`~yN3b-UdxsBYU<(hOm0b*QSM-zmf&~e!%j(w3lwLQON6btkp_vh4T?=S%zPf0Kd)?)oe8o$8iWt6H>XC50o z`!@_hStiT|0g&@VFahPInG83?USn+mtvaAKg3$gIwV90C(4py*e5FEEn}(SgnNHqi zMx+dQgk-4dQQ?OHD`2atJrzOkpetUci4m<1rGow_6}%q*Ar}|*C<7#lpp3LVC>!zG z!F5YZSr9ZP<Eco?=R(&nPM=rrscdSipR(?<1GcX* zlt5SXqQ{RF8Iv_4{16CLOF6j-wtdl7SnvgluTad-DpT0qs^hT-3CrbEX{y8 zT`JL+Ox(8q@I8s`L&_7KzJ!y!2HDn)Qb{&)cYc~{L-4cqgL)JB8)$ z)CI4H*F@Y^qxPxvY?@OMbel z`#>w;o9To2vuZ{U6!r_QuFfwK%(eHkh z3U~=`K>nox81R#Z)i#E{0>FXv9hb!je zpId%AoDp<3;+|EPZ9X$J~$ z(eI-UQ-uOi=Tm&)Kv9LZTu0;bzo#45)Z_V^n75_}j2shKP}pPfnr+^$6U4cdD#4KB z0l?s^e0f4_9D%UM26jlF>MtuQUx@Afo~(9a~a zIXw|t^h5FQoMF&uNlzNFIJ@P}#nPiQc`U1&L*?I#`??PQ0LxNm6^e3;Jc>S*`u1}a zo&^TFqS=>liRZ^EZ`Uevy$#$mMcPeWDv&)Lr9?E6W#HmfA=4-UbT!VhD_x=?|B3_~ zR(98e8lVXo>aYW_-?&l;_PYs?k*c$P_w^xHw9wU3Igi)I77t?wY(OA|C+TK%QCPkTq zmnTgc?Q>tpx3#RO!yjrG4qx4_v0Z&!Y;2qhG?;Ed!1IuDwQ_&12GAqCm$14o7NRiY z*mUbj{2n%44X@`kPTlXJ__k)=!PYA`gUf!i<>vY8p=Z!UB-cm8p-oi+5P4d2Yzvt0 zgz5%WAU+S+5@>qdi7kLxS?5|?R~=oO=KML@89nIy!|4U+&d<<;OUwT5YW;z;wY7V5 z`P^Bd*$~WD!R$tuJqu=!!`T~Q_EP*m{VtfKaq=^-sFVJ|xBQ0++7=vG^~Hg2{|yZ7 Bfc5|Y literal 3578 zcmeAS@N?(olHy`uVBq!ia0y~yU~B{85Dqq=h(g)ygFx|vo-U3d6^w7MZuDgi6mbch zHcO*%g4BwmYj^NhMXRW!FR5RmEvsI(#n>wU>CSs`^N-i=ulvQvWwY*e%Iov%pa1>* zvDWC&W`9nZI`e;7zyDj5?)`lJ@AJPE_VcwG85o&ZyqMY+mgvTE3Md!|81OKs2(oYp zXmCojUD{fr>d&^W0fQ6ttnoQZ{l zhlS@bLnOztE R{{j1S44$rjF6*2UngGsGuA=|| diff --git a/docs/index.md b/docs/index.md index 20e637cd..cee477de 100644 --- a/docs/index.md +++ b/docs/index.md @@ -5,8 +5,11 @@ hide:
[HTML logo markup omitted]
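The next hunk updates the API reference for the renamed writer callback (`EmbeddingsWriter` → `ClassificationEmbeddingsWriter`), which the configs above instantiate from YAML. A hedged sketch of the equivalent direct construction — the argument names mirror the `init_args` keys in the configs, while the example values and the split mapping are assumptions:

```python
from eva.core.callbacks.writers import ClassificationEmbeddingsWriter

# Hypothetical values: `output_dir` mirrors the resolved default of
# &DATASET_EMBEDDINGS_ROOT above; the dataloader-index-to-split mapping
# is assumed, not taken from the (truncated) config excerpts.
writer = ClassificationEmbeddingsWriter(
    output_dir="./data/embeddings/dino_vits16/patch_camelyon",
    dataloader_idx_map={0: "train", 1: "val", 2: "test"},
)
```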
diff --git a/docs/reference/core/callbacks.md b/docs/reference/core/callbacks.md
index 0e50d155..910f22ab 100644
--- a/docs/reference/core/callbacks.md
+++ b/docs/reference/core/callbacks.md
@@ -1,4 +1,4 @@
# Callbacks

## Writers
-::: eva.core.callbacks.writers.EmbeddingsWriter
\ No newline at end of file
+::: eva.core.callbacks.writers.ClassificationEmbeddingsWriter
\ No newline at end of file
diff --git a/docs/reference/vision/data/datasets.md b/docs/reference/vision/data/datasets.md
index 426df296..32a42f10 100644
--- a/docs/reference/vision/data/datasets.md
+++ b/docs/reference/vision/data/datasets.md
@@ -6,7 +6,6 @@
## Classification datasets
::: eva.vision.data.datasets.BACH
::: eva.vision.data.datasets.PatchCamelyon
-::: eva.vision.data.datasets.TotalSegmentatorClassification

## Segmentation datasets
::: eva.vision.data.datasets.ImageSegmentation
diff --git a/docs/user-guide/advanced/model_wrappers.md b/docs/user-guide/advanced/model_wrappers.md
index e3ae2dd9..957dad0a 100644
--- a/docs/user-guide/advanced/model_wrappers.md
+++ b/docs/user-guide/advanced/model_wrappers.md
@@ -1,7 +1,7 @@
# Model Wrappers

-This document shows how to use *eva*'s [Model Wrapper API](../../../reference/core/models/networks/#wrappers) (`eva.models.networks.wrappers`) to load different model formats from a series of sources such as PyTorch Hub, HuggingFace Model Hub and ONNX.
+This document shows how to use *eva*'s [Model Wrapper API](../../reference/core/models/networks.md#wrappers) (`eva.models.networks.wrappers`) to load different model formats from a series of sources such as PyTorch Hub, HuggingFace Model Hub and ONNX.

## Loading PyTorch models
The *eva* framework is built on top of PyTorch Lightning and thus naturally supports loading PyTorch models.
diff --git a/docs/user-guide/advanced/replicate_evaluations.md b/docs/user-guide/advanced/replicate_evaluations.md
index 61c26f07..d3770586 100644
--- a/docs/user-guide/advanced/replicate_evaluations.md
+++ b/docs/user-guide/advanced/replicate_evaluations.md
@@ -4,7 +4,7 @@ To produce the evaluation results presented [here](../../index.md#evaluation-res
Make sure to replace `<task>` in the commands below with `bach`, `crc`, `mhist` or `patch_camelyon`.

-*Note that to run the commands below you will need to first download the data. [BACH](../../datasets/bach.md), [CRC](../../datasets/crc.md) and [PatchCamelyon](../../datasets/patch_camelyon.md) provide automatic download by setting the argument `download: true` in their respective config-files. In the case of [MHIST](../../datasets/mhist.md) you will need to download the data manually by following the instructions provided [here](../../datasets/mhist.md#download-and-preprocessing).*
+Note that to run the commands below you will need to first download the data. [BACH](../../datasets/bach.md), [CRC](../../datasets/crc.md) and [PatchCamelyon](../../datasets/patch_camelyon.md) provide automatic download by setting the argument `download: true` (either modify the config-files or set the environment variable `DOWNLOAD=true`).
In the case of MHIST you will need to download the data manually by following the instructions provided [here](../../datasets/mhist.md#download-and-preprocessing).

## DINO ViT-S16 (random weights)

diff --git a/docs/user-guide/getting-started/how_to_use.md b/docs/user-guide/getting-started/how_to_use.md
index 496d8785..21380fd7 100644
--- a/docs/user-guide/getting-started/how_to_use.md
+++ b/docs/user-guide/getting-started/how_to_use.md
@@ -34,7 +34,7 @@ The setup for an *eva* run is provided in a `.yaml` config file which is defined

A config file specifies the setup for the *trainer* (including callback for the model backbone), the *model* (setup of the trainable decoder) and *data* module.

-The config files for the datasets and models that *eva* supports out of the box, you can find on [GitHub](https://github.com/kaiko-ai/eva/tree/main/configs). We recommend that you inspect some of them to get a better understanding of their structure and content.
+You can find the config files for the datasets and models that *eva* supports out of the box on [GitHub](https://github.com/kaiko-ai/eva/tree/0.0.2). We recommend that you inspect some of them to get a better understanding of their structure and content.

### Environment variables

diff --git a/docs/user-guide/tutorials/offline_vs_online.md b/docs/user-guide/tutorials/offline_vs_online.md
index 8f4f25d3..059b0f83 100644
--- a/docs/user-guide/tutorials/offline_vs_online.md
+++ b/docs/user-guide/tutorials/offline_vs_online.md
@@ -3,11 +3,11 @@ In this tutorial we run *eva* with the three subcommands `predict`, `fit` and `predict_fit`, and take a look at the difference between *offline* and *online* workflows.

### Before you start
-If you haven't downloaded the config files yet, please download them from [GitHub](https://github.com/kaiko-ai/eva/tree/main/configs).
+If you haven't downloaded the config files yet, please download them from [GitHub](https://github.com/kaiko-ai/eva/tree/0.0.2).

For this tutorial we use the [BACH](../../datasets/bach.md) classification task which is available on [Zenodo](https://zenodo.org/records/3632035) and is distributed under [*Attribution-NonCommercial-NoDerivs 4.0 International*](https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode) license.

-To let *eva* automatically handle the dataset download, you can open `configs/vision/dino_vit/offline/bach.yaml` and set `download: true`. Before doing so, please make sure that your use case is compliant with the dataset license.
+To let *eva* automatically handle the dataset download, set `download: true` in `configs/vision/dino_vit/offline/bach.yaml` (you may also enable automatic download by setting the environment variable `DOWNLOAD=true`). Before doing so, please make sure that your use case is compliant with the dataset license.
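The `${oc.env:DOWNLOAD, false}` interpolation these configs switch to is OmegaConf's environment-variable resolver (available here through `jsonargparse[omegaconf]`, see the lockfile below). A small sketch of how it resolves — with the assumption that the surrounding CLI then coerces a string value to a boolean:

```python
import os
from omegaconf import OmegaConf

cfg = OmegaConf.create({"download": "${oc.env:DOWNLOAD, false}"})

# Unset variable: the typed default from the interpolation is used.
print(OmegaConf.to_container(cfg, resolve=True))  # {'download': False}

# Set variable: `oc.env` returns the raw string from the environment,
# which downstream YAML/CLI parsing is assumed to turn into a bool.
os.environ["DOWNLOAD"] = "true"
print(OmegaConf.to_container(cfg, resolve=True))  # {'download': 'true'}
```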
## *Offline* evaluations diff --git a/noxfile.py b/noxfile.py index 2a60bada..916a7779 100644 --- a/noxfile.py +++ b/noxfile.py @@ -24,9 +24,6 @@ import nox -PACKAGE = "eva" -"""The name of the library.""" - PYTHON_VERSIONS = ["3.10"] """The python versions to test on.""" diff --git a/pdm.lock b/pdm.lock index 5b0fb979..8555ed7c 100644 --- a/pdm.lock +++ b/pdm.lock @@ -5,7 +5,7 @@ groups = ["default", "dev", "docs", "all", "typecheck", "lint", "vision", "test"] strategy = ["cross_platform", "inherit_metadata"] lock_version = "4.4.1" -content_hash = "sha256:f1e852d1f3aa89e7061fc859e3446b3abfc3bbd019323fa885dc81a6b5cd2659" +content_hash = "sha256:d3ac381de08b8a3051ba9c70002f748ee6c600fe9b20da760cb71fc7686d8cda" [[package]] name = "absl-py" @@ -168,7 +168,7 @@ files = [ [[package]] name = "black" -version = "24.3.0" +version = "24.4.2" requires_python = ">=3.8" summary = "The uncompromising code formatter." groups = ["dev", "lint"] @@ -182,20 +182,20 @@ dependencies = [ "typing-extensions>=4.0.1; python_version < \"3.11\"", ] files = [ - {file = "black-24.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395"}, - {file = "black-24.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995"}, - {file = "black-24.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7"}, - {file = "black-24.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0"}, - {file = "black-24.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9"}, - {file = "black-24.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597"}, - {file = "black-24.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d"}, - {file = "black-24.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5"}, - {file = "black-24.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f"}, - {file = "black-24.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11"}, - {file = "black-24.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4"}, - {file = "black-24.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5"}, - {file = "black-24.3.0-py3-none-any.whl", hash = "sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93"}, - {file = "black-24.3.0.tar.gz", hash = "sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f"}, + {file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"}, + {file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"}, + {file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"}, + {file = 
"black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"}, + {file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"}, + {file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"}, + {file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"}, + {file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"}, + {file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"}, + {file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"}, + {file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"}, + {file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"}, + {file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"}, + {file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"}, ] [[package]] @@ -597,7 +597,7 @@ files = [ [[package]] name = "h5py" -version = "3.10.0" +version = "3.11.0" requires_python = ">=3.8" summary = "Read and write HDF5 files from Python" groups = ["all", "vision"] @@ -605,26 +605,24 @@ dependencies = [ "numpy>=1.17.3", ] files = [ - {file = "h5py-3.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b963fb772964fc1d1563c57e4e2e874022ce11f75ddc6df1a626f42bd49ab99f"}, - {file = "h5py-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:012ab448590e3c4f5a8dd0f3533255bc57f80629bf7c5054cf4c87b30085063c"}, - {file = "h5py-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:781a24263c1270a62cd67be59f293e62b76acfcc207afa6384961762bb88ea03"}, - {file = "h5py-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f42e6c30698b520f0295d70157c4e202a9e402406f50dc08f5a7bc416b24e52d"}, - {file = "h5py-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:93dd840bd675787fc0b016f7a05fc6efe37312a08849d9dd4053fd0377b1357f"}, - {file = "h5py-3.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2381e98af081b6df7f6db300cd88f88e740649d77736e4b53db522d8874bf2dc"}, - {file = "h5py-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:667fe23ab33d5a8a6b77970b229e14ae3bb84e4ea3382cc08567a02e1499eedd"}, - {file = "h5py-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90286b79abd085e4e65e07c1bd7ee65a0f15818ea107f44b175d2dfe1a4674b7"}, - {file = "h5py-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c013d2e79c00f28ffd0cc24e68665ea03ae9069e167087b2adb5727d2736a52"}, - {file = "h5py-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:92273ce69ae4983dadb898fd4d3bea5eb90820df953b401282ee69ad648df684"}, - {file = "h5py-3.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c97d03f87f215e7759a354460fb4b0d0f27001450b18b23e556e7856a0b21c3"}, - {file = "h5py-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:86df4c2de68257b8539a18646ceccdcf2c1ce6b1768ada16c8dcfb489eafae20"}, - {file = "h5py-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba9ab36be991119a3ff32d0c7cbe5faf9b8d2375b5278b2aea64effbeba66039"}, - {file = "h5py-3.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:2c8e4fda19eb769e9a678592e67eaec3a2f069f7570c82d2da909c077aa94339"}, - {file = "h5py-3.10.0.tar.gz", hash = "sha256:d93adc48ceeb33347eb24a634fb787efc7ae4644e6ea4ba733d099605045c049"}, + {file = "h5py-3.11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1625fd24ad6cfc9c1ccd44a66dac2396e7ee74940776792772819fc69f3a3731"}, + {file = "h5py-3.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c072655ad1d5fe9ef462445d3e77a8166cbfa5e599045f8aa3c19b75315f10e5"}, + {file = "h5py-3.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77b19a40788e3e362b54af4dcf9e6fde59ca016db2c61360aa30b47c7b7cef00"}, + {file = "h5py-3.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:ef4e2f338fc763f50a8113890f455e1a70acd42a4d083370ceb80c463d803972"}, + {file = "h5py-3.11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bbd732a08187a9e2a6ecf9e8af713f1d68256ee0f7c8b652a32795670fb481ba"}, + {file = "h5py-3.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75bd7b3d93fbeee40860fd70cdc88df4464e06b70a5ad9ce1446f5f32eb84007"}, + {file = "h5py-3.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c416f8eb0daae39dabe71415cb531f95dce2d81e1f61a74537a50c63b28ab3"}, + {file = "h5py-3.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:083e0329ae534a264940d6513f47f5ada617da536d8dccbafc3026aefc33c90e"}, + {file = "h5py-3.11.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a76cae64080210389a571c7d13c94a1a6cf8cb75153044fd1f822a962c97aeab"}, + {file = "h5py-3.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3736fe21da2b7d8a13fe8fe415f1272d2a1ccdeff4849c1421d2fb30fd533bc"}, + {file = "h5py-3.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa6ae84a14103e8dc19266ef4c3e5d7c00b68f21d07f2966f0ca7bdb6c2761fb"}, + {file = "h5py-3.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:21dbdc5343f53b2e25404673c4f00a3335aef25521bd5fa8c707ec3833934892"}, + {file = "h5py-3.11.0.tar.gz", hash = "sha256:7b7e8f78072a2edec87c9836f25f34203fd492a4475709a18b417a33cfb21fa9"}, ] [[package]] name = "huggingface-hub" -version = "0.21.4" +version = "0.23.2" requires_python = ">=3.8.0" summary = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" groups = ["all", "default", "vision"] @@ -638,8 +636,8 @@ dependencies = [ "typing-extensions>=3.7.4.3", ] files = [ - {file = "huggingface_hub-0.21.4-py3-none-any.whl", hash = "sha256:df37c2c37fc6c82163cdd8a67ede261687d80d1e262526d6c0ce73b6b3630a7b"}, - {file = "huggingface_hub-0.21.4.tar.gz", hash = "sha256:e1f4968c93726565a80edf6dc309763c7b546d0cfe79aa221206034d50155531"}, + {file = "huggingface_hub-0.23.2-py3-none-any.whl", hash = "sha256:48727a16e704d409c4bb5913613308499664f22a99743435dc3a13b23c485827"}, + {file = "huggingface_hub-0.23.2.tar.gz", hash = "sha256:f6829b62d5fdecb452a76fdbec620cba4c1573655a8d710c1df71735fd9edbd2"}, ] [[package]] @@ -703,6 +701,20 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "intel-openmp" +version = "2021.4.0" +summary = "Intel® OpenMP* Runtime Library" +groups = ["all", "default", "vision"] +marker = "platform_system 
== \"Windows\"" +files = [ + {file = "intel_openmp-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:41c01e266a7fdb631a7609191709322da2bbf24b252ba763f125dd651bcc7675"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:3b921236a38384e2016f0f3d65af6732cf2c12918087128a9163225451e776f2"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:e2240ab8d01472fed04f3544a878cda5da16c26232b7ea1b59132dbfb48b186e"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:6e863d8fd3d7e8ef389d52cf97a50fe2afe1a19247e8c0d168ce021546f96fc9"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:eef4c8bcc8acefd7f5cd3b9384dbf73d59e2c99fc56545712ded913f43c4a94f"}, +] + [[package]] name = "isort" version = "5.13.2" @@ -730,7 +742,7 @@ files = [ [[package]] name = "jsonargparse" -version = "4.27.6" +version = "4.28.0" requires_python = ">=3.7" summary = "Implement minimal boilerplate CLIs derived from type hints and parse from command line, config files and environment variables." groups = ["default"] @@ -738,47 +750,47 @@ dependencies = [ "PyYAML>=3.13", ] files = [ - {file = "jsonargparse-4.27.6-py3-none-any.whl", hash = "sha256:f429b4a1b1fe92ef2e3e531615f53e81720a424f3f3181eca7a28c994515fc15"}, - {file = "jsonargparse-4.27.6.tar.gz", hash = "sha256:ebd2e0a4faef85a075bb6ef79c6b2f03f57a5f8e3db26c911b55518a1bca68ad"}, + {file = "jsonargparse-4.28.0-py3-none-any.whl", hash = "sha256:9dcda241349547e8035c630d51de73b8b4ba67bdc2b014d7f76734d404e82518"}, + {file = "jsonargparse-4.28.0.tar.gz", hash = "sha256:ac835a290ef18cc2a5309e6bfa8ada9c5d63f46ff18701583fc8f3e95314679c"}, ] [[package]] name = "jsonargparse" -version = "4.27.6" +version = "4.28.0" extras = ["omegaconf"] requires_python = ">=3.7" summary = "Implement minimal boilerplate CLIs derived from type hints and parse from command line, config files and environment variables." groups = ["default"] dependencies = [ - "jsonargparse==4.27.6", + "jsonargparse==4.28", "omegaconf>=2.1.1", ] files = [ - {file = "jsonargparse-4.27.6-py3-none-any.whl", hash = "sha256:f429b4a1b1fe92ef2e3e531615f53e81720a424f3f3181eca7a28c994515fc15"}, - {file = "jsonargparse-4.27.6.tar.gz", hash = "sha256:ebd2e0a4faef85a075bb6ef79c6b2f03f57a5f8e3db26c911b55518a1bca68ad"}, + {file = "jsonargparse-4.28.0-py3-none-any.whl", hash = "sha256:9dcda241349547e8035c630d51de73b8b4ba67bdc2b014d7f76734d404e82518"}, + {file = "jsonargparse-4.28.0.tar.gz", hash = "sha256:ac835a290ef18cc2a5309e6bfa8ada9c5d63f46ff18701583fc8f3e95314679c"}, ] [[package]] name = "lightning" -version = "2.2.4" +version = "2.3.0.dev20240609" requires_python = ">=3.8" summary = "The Deep Learning framework to train, deploy, and ship AI products Lightning fast." 
groups = ["default"] dependencies = [ "PyYAML<8.0,>=5.4", - "fsspec[http]<2025.0,>=2022.5.0", + "fsspec[http]<2026.0,>=2022.5.0", "lightning-utilities<2.0,>=0.8.0", "numpy<3.0,>=1.17.2", "packaging<25.0,>=20.0", "pytorch-lightning", - "torch<4.0,>=1.13.0", + "torch<4.0,>=2.0.0", "torchmetrics<3.0,>=0.7.0", "tqdm<6.0,>=4.57.0", "typing-extensions<6.0,>=4.4.0", ] files = [ - {file = "lightning-2.2.4-py3-none-any.whl", hash = "sha256:b44cb8692253f2719b2f84237e94ff84451fe219922c7f04447b52524471379e"}, - {file = "lightning-2.2.4.tar.gz", hash = "sha256:4cc3fb3edf04fcd63c0ecf75087d2fa06163759fc8c1fc500b16404ac1854f77"}, + {file = "lightning-2.3.0.dev20240609-py3-none-any.whl", hash = "sha256:15fb839cba66463239870bfba13b33b081b4b99930e79b0914cac20d4e9d6d42"}, + {file = "lightning-2.3.0.dev20240609.tar.gz", hash = "sha256:e3a3bd45058eede98202b52a13f339bf730ae3368b935eccad7e19a604ac5572"}, ] [[package]] @@ -825,7 +837,7 @@ files = [ [[package]] name = "markdown-exec" -version = "1.8.0" +version = "1.8.3" requires_python = ">=3.8" summary = "Utilities to execute code blocks in Markdown files." groups = ["dev", "docs"] @@ -833,8 +845,8 @@ dependencies = [ "pymdown-extensions>=9", ] files = [ - {file = "markdown_exec-1.8.0-py3-none-any.whl", hash = "sha256:e80cb766eff8d0bcd1cdd133dba58223b42edbd1b7b9672481c2189572401bff"}, - {file = "markdown_exec-1.8.0.tar.gz", hash = "sha256:0a932312f0ca89b82150e1638e84febb90eadd410dfd2417f05759c06deed727"}, + {file = "markdown_exec-1.8.3-py3-none-any.whl", hash = "sha256:77ebbaa4a20abb167fad0fa8a0037567121b9cf262349dbed84900ce96058af5"}, + {file = "markdown_exec-1.8.3.tar.gz", hash = "sha256:5e16a70f9f2c97738f128a88db2951fe3d8bb2bdc2b4809fae7fca0123ef3ae4"}, ] [[package]] @@ -915,7 +927,7 @@ files = [ [[package]] name = "mike" -version = "2.0.0" +version = "2.1.1" summary = "Manage multiple versions of your MkDocs-powered documentation" groups = ["dev", "docs"] dependencies = [ @@ -924,18 +936,19 @@ dependencies = [ "jinja2>=2.7", "mkdocs>=1.0", "pyparsing>=3.0", + "pyyaml-env-tag", "pyyaml>=5.1", "verspec", ] files = [ - {file = "mike-2.0.0-py3-none-any.whl", hash = "sha256:87f496a65900f93ba92d72940242b65c86f3f2f82871bc60ebdcffc91fad1d9e"}, - {file = "mike-2.0.0.tar.gz", hash = "sha256:566f1cab1a58cc50b106fb79ea2f1f56e7bfc8b25a051e95e6eaee9fba0922de"}, + {file = "mike-2.1.1-py3-none-any.whl", hash = "sha256:0b1d01a397a423284593eeb1b5f3194e37169488f929b860c9bfe95c0d5efb79"}, + {file = "mike-2.1.1.tar.gz", hash = "sha256:f39ed39f3737da83ad0adc33e9f885092ed27f8c9e7ff0523add0480352a2c22"}, ] [[package]] name = "mkdocs" -version = "1.5.3" -requires_python = ">=3.7" +version = "1.6.0" +requires_python = ">=3.8" summary = "Project documentation with Markdown." 
groups = ["dev", "docs"] dependencies = [ @@ -943,19 +956,19 @@ dependencies = [ "colorama>=0.4; platform_system == \"Windows\"", "ghp-import>=1.0", "jinja2>=2.11.1", - "markdown>=3.2.1", + "markdown>=3.3.6", "markupsafe>=2.0.1", "mergedeep>=1.3.4", + "mkdocs-get-deps>=0.2.0", "packaging>=20.5", "pathspec>=0.11.1", - "platformdirs>=2.2.0", "pyyaml-env-tag>=0.1", "pyyaml>=5.1", "watchdog>=2.0", ] files = [ - {file = "mkdocs-1.5.3-py3-none-any.whl", hash = "sha256:3b3a78e736b31158d64dbb2f8ba29bd46a379d0c6e324c2246c3bc3d2189cfc1"}, - {file = "mkdocs-1.5.3.tar.gz", hash = "sha256:eb7c99214dcb945313ba30426c2451b735992c73c2e10838f76d09e39ff4d0e2"}, + {file = "mkdocs-1.6.0-py3-none-any.whl", hash = "sha256:1eb5cb7676b7d89323e62b56235010216319217d4af5ddc543a91beb8d125ea7"}, + {file = "mkdocs-1.6.0.tar.gz", hash = "sha256:a73f735824ef83a4f3bcb7a231dcab23f5a838f88b7efc54a0eef5fbdbc3c512"}, ] [[package]] @@ -974,9 +987,25 @@ files = [ {file = "mkdocs_autorefs-1.0.1.tar.gz", hash = "sha256:f684edf847eced40b570b57846b15f0bf57fb93ac2c510450775dcf16accb971"}, ] +[[package]] +name = "mkdocs-get-deps" +version = "0.2.0" +requires_python = ">=3.8" +summary = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" +groups = ["dev", "docs"] +dependencies = [ + "mergedeep>=1.3.4", + "platformdirs>=2.2.0", + "pyyaml>=5.1", +] +files = [ + {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, + {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, +] + [[package]] name = "mkdocs-material" -version = "9.5.14" +version = "9.5.26" requires_python = ">=3.8" summary = "Documentation that simply works" groups = ["dev", "docs"] @@ -986,7 +1015,7 @@ dependencies = [ "jinja2~=3.0", "markdown~=3.2", "mkdocs-material-extensions~=1.3", - "mkdocs~=1.5.3", + "mkdocs~=1.6", "paginate~=0.5", "pygments~=2.16", "pymdown-extensions~=10.2", @@ -994,8 +1023,8 @@ dependencies = [ "requests~=2.26", ] files = [ - {file = "mkdocs_material-9.5.14-py3-none-any.whl", hash = "sha256:a45244ac221fda46ecf8337f00ec0e5cb5348ab9ffb203ca2a0c313b0d4dbc27"}, - {file = "mkdocs_material-9.5.14.tar.gz", hash = "sha256:2a1f8e67cda2587ab93ecea9ba42d0ca61d1d7b5fad8cf690eeaeb39dcd4b9af"}, + {file = "mkdocs_material-9.5.26-py3-none-any.whl", hash = "sha256:5d01fb0aa1c7946a1e3ae8689aa2b11a030621ecb54894e35aabb74c21016312"}, + {file = "mkdocs_material-9.5.26.tar.gz", hash = "sha256:56aeb91d94cffa43b6296fa4fbf0eb7c840136e563eecfd12c2d9e92e50ba326"}, ] [[package]] @@ -1020,6 +1049,7 @@ dependencies = [ ] files = [ {file = "mkdocs-redirects-1.2.1.tar.gz", hash = "sha256:9420066d70e2a6bb357adf86e67023dcdca1857f97f07c7fe450f8f1fb42f861"}, + {file = "mkdocs_redirects-1.2.1-py3-none-any.whl", hash = "sha256:497089f9e0219e7389304cffefccdfa1cac5ff9509f2cb706f4c9b221726dffb"}, ] [[package]] @@ -1035,7 +1065,7 @@ files = [ [[package]] name = "mkdocstrings" -version = "0.24.1" +version = "0.25.1" requires_python = ">=3.8" summary = "Automatic documentation from sources, for MkDocs." 
groups = ["dev", "docs"] @@ -1050,8 +1080,8 @@ dependencies = [ "pymdown-extensions>=6.3", ] files = [ - {file = "mkdocstrings-0.24.1-py3-none-any.whl", hash = "sha256:b4206f9a2ca8a648e222d5a0ca1d36ba7dee53c88732818de183b536f9042b5d"}, - {file = "mkdocstrings-0.24.1.tar.gz", hash = "sha256:cc83f9a1c8724fc1be3c2fa071dd73d91ce902ef6a79710249ec8d0ee1064401"}, + {file = "mkdocstrings-0.25.1-py3-none-any.whl", hash = "sha256:da01fcc2670ad61888e8fe5b60afe9fee5781017d67431996832d63e887c2e51"}, + {file = "mkdocstrings-0.25.1.tar.gz", hash = "sha256:c3a2515f31577f311a9ee58d089e4c51fc6046dbd9e9b4c3de4c3194667fe9bf"}, ] [[package]] @@ -1072,18 +1102,36 @@ files = [ [[package]] name = "mkdocstrings" -version = "0.24.1" +version = "0.25.1" extras = ["python"] requires_python = ">=3.8" summary = "Automatic documentation from sources, for MkDocs." groups = ["dev", "docs"] dependencies = [ "mkdocstrings-python>=0.5.2", - "mkdocstrings==0.24.1", + "mkdocstrings==0.25.1", +] +files = [ + {file = "mkdocstrings-0.25.1-py3-none-any.whl", hash = "sha256:da01fcc2670ad61888e8fe5b60afe9fee5781017d67431996832d63e887c2e51"}, + {file = "mkdocstrings-0.25.1.tar.gz", hash = "sha256:c3a2515f31577f311a9ee58d089e4c51fc6046dbd9e9b4c3de4c3194667fe9bf"}, +] + +[[package]] +name = "mkl" +version = "2021.4.0" +summary = "Intel® oneAPI Math Kernel Library" +groups = ["all", "default", "vision"] +marker = "platform_system == \"Windows\"" +dependencies = [ + "intel-openmp==2021.*", + "tbb==2021.*", ] files = [ - {file = "mkdocstrings-0.24.1-py3-none-any.whl", hash = "sha256:b4206f9a2ca8a648e222d5a0ca1d36ba7dee53c88732818de183b536f9042b5d"}, - {file = "mkdocstrings-0.24.1.tar.gz", hash = "sha256:cc83f9a1c8724fc1be3c2fa071dd73d91ce902ef6a79710249ec8d0ee1064401"}, + {file = "mkl-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:67460f5cd7e30e405b54d70d1ed3ca78118370b65f7327d495e9c8847705e2fb"}, + {file = "mkl-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:636d07d90e68ccc9630c654d47ce9fdeb036bb46e2b193b3a9ac8cfea683cce5"}, + {file = "mkl-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:398dbf2b0d12acaf54117a5210e8f191827f373d362d796091d161f610c1ebfb"}, + {file = "mkl-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:439c640b269a5668134e3dcbcea4350459c4a8bc46469669b2d67e07e3d330e8"}, + {file = "mkl-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:ceef3cafce4c009dd25f65d7ad0d833a0fbadc3d8903991ec92351fe5de1e718"}, ] [[package]] @@ -1205,7 +1253,7 @@ files = [ [[package]] name = "nox" -version = "2024.3.2" +version = "2024.4.15" requires_python = ">=3.7" summary = "Flexible test automation." 
groups = ["dev", "typecheck"] @@ -1213,11 +1261,12 @@ dependencies = [ "argcomplete<4.0,>=1.9.4", "colorlog<7.0.0,>=2.6.1", "packaging>=20.9", + "tomli>=1; python_version < \"3.11\"", "virtualenv>=20.14.1", ] files = [ - {file = "nox-2024.3.2-py3-none-any.whl", hash = "sha256:e53514173ac0b98dd47585096a55572fe504fecede58ced708979184d05440be"}, - {file = "nox-2024.3.2.tar.gz", hash = "sha256:f521ae08a15adbf5e11f16cb34e8d0e6ea521e0b92868f684e91677deb974553"}, + {file = "nox-2024.4.15-py3-none-any.whl", hash = "sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565"}, + {file = "nox-2024.4.15.tar.gz", hash = "sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f"}, ] [[package]] @@ -1377,13 +1426,14 @@ files = [ [[package]] name = "nvidia-nccl-cu12" -version = "2.19.3" +version = "2.20.5" requires_python = ">=3" summary = "NVIDIA Collective Communication Library (NCCL) Runtime" groups = ["all", "default", "vision"] marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ - {file = "nvidia_nccl_cu12-2.19.3-py3-none-manylinux1_x86_64.whl", hash = "sha256:a9734707a2c96443331c1e48c717024aa6678a0e2a4cb66b2c364d18cee6b48d"}, + {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01"}, + {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56"}, ] [[package]] @@ -1428,7 +1478,7 @@ files = [ [[package]] name = "onnx" -version = "1.16.0" +version = "1.16.1" requires_python = ">=3.8" summary = "Open Neural Network Exchange" groups = ["default"] @@ -1437,61 +1487,58 @@ dependencies = [ "protobuf>=3.20.2", ] files = [ - {file = "onnx-1.16.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9eadbdce25b19d6216f426d6d99b8bc877a65ed92cbef9707751c6669190ba4f"}, - {file = "onnx-1.16.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:034ae21a2aaa2e9c14119a840d2926d213c27aad29e5e3edaa30145a745048e1"}, - {file = "onnx-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec22a43d74eb1f2303373e2fbe7fbcaa45fb225f4eb146edfed1356ada7a9aea"}, - {file = "onnx-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:298f28a2b5ac09145fa958513d3d1e6b349ccf86a877dbdcccad57713fe360b3"}, - {file = "onnx-1.16.0-cp310-cp310-win32.whl", hash = "sha256:66300197b52beca08bc6262d43c103289c5d45fde43fb51922ed1eb83658cf0c"}, - {file = "onnx-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:ae0029f5e47bf70a1a62e7f88c80bca4ef39b844a89910039184221775df5e43"}, - {file = "onnx-1.16.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:f51179d4af3372b4f3800c558d204b592c61e4b4a18b8f61e0eea7f46211221a"}, - {file = "onnx-1.16.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:5202559070afec5144332db216c20f2fff8323cf7f6512b0ca11b215eacc5bf3"}, - {file = "onnx-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77579e7c15b4df39d29465b216639a5f9b74026bdd9e4b6306cd19a32dcfe67c"}, - {file = "onnx-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e60ca76ac24b65c25860d0f2d2cdd96d6320d062a01dd8ce87c5743603789b8"}, - {file = "onnx-1.16.0-cp311-cp311-win32.whl", hash = "sha256:81b4ee01bc554e8a2b11ac6439882508a5377a1c6b452acd69a1eebb83571117"}, - {file = "onnx-1.16.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:7449241e70b847b9c3eb8dae622df8c1b456d11032a9d7e26e0ee8a698d5bf86"}, - {file = "onnx-1.16.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:03a627488b1a9975d95d6a55582af3e14c7f3bb87444725b999935ddd271d352"}, - {file = "onnx-1.16.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:c392faeabd9283ee344ccb4b067d1fea9dfc614fa1f0de7c47589efd79e15e78"}, - {file = "onnx-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0efeb46985de08f0efe758cb54ad3457e821a05c2eaf5ba2ccb8cd1602c08084"}, - {file = "onnx-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddf14a3d32234f23e44abb73a755cb96a423fac7f004e8f046f36b10214151ee"}, - {file = "onnx-1.16.0-cp312-cp312-win32.whl", hash = "sha256:62a2e27ae8ba5fc9b4a2620301446a517b5ffaaf8566611de7a7c2160f5bcf4c"}, - {file = "onnx-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:3e0860fea94efde777e81a6f68f65761ed5e5f3adea2e050d7fbe373a9ae05b3"}, - {file = "onnx-1.16.0.tar.gz", hash = "sha256:237c6987c6c59d9f44b6136f5819af79574f8d96a760a1fa843bede11f3822f7"}, + {file = "onnx-1.16.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:bb2d392e5b7060082c2fb38eb5c44f67eb34ff5f0681bd6f45beff9abc6f7094"}, + {file = "onnx-1.16.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15abf94a7868eed6db15a8b5024ba570c891cae77ca4d0e7258dabdad76980df"}, + {file = "onnx-1.16.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6251910e554f811fdd070164b0bc76d76b067b95576cb9dad4d52ae64fe014b5"}, + {file = "onnx-1.16.1-cp310-cp310-win32.whl", hash = "sha256:c11e3b15eee46cd20767e505cc3ba97457ef5ac93c3e459cdfb77943ff8fe9a7"}, + {file = "onnx-1.16.1-cp310-cp310-win_amd64.whl", hash = "sha256:b3d10405706807ec2ef493b2a78519fa0264cf190363e89478585aac1179b596"}, + {file = "onnx-1.16.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:006ba5059c85ce43e89a1486cc0276d0f1a8ec9c6efd1a9334fd3fa0f6e33b64"}, + {file = "onnx-1.16.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1521ea7cd3497ecaf57d3b5e72d637ca5ebca632122a0806a9df99bedbeecdf8"}, + {file = "onnx-1.16.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45cf20421aeac03872bea5fd6ebf92abe15c4d1461a2572eb839add5059e2a09"}, + {file = "onnx-1.16.1-cp311-cp311-win32.whl", hash = "sha256:f98e275b4f46a617a9c527e60c02531eae03cf67a04c26db8a1c20acee539533"}, + {file = "onnx-1.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:95aa20aa65a9035d7543e81713e8b0f611e213fc02171959ef4ee09311d1bf28"}, + {file = "onnx-1.16.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:32e11d39bee04f927fab09f74c46cf76584094462311bab1aca9ccdae6ed3366"}, + {file = "onnx-1.16.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8884bf53b552873c0c9b072cb8625e7d4e8f3cc0529191632d24e3de58a3b93a"}, + {file = "onnx-1.16.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:595b2830093f81361961295f7b0ebb6000423bcd04123d516d081c306002e387"}, + {file = "onnx-1.16.1-cp312-cp312-win32.whl", hash = "sha256:2fde4dd5bc278b3fc8148f460bce8807b2874c66f48529df9444cdbc9ecf456b"}, + {file = "onnx-1.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:e69ad8c110d8c37d759cad019d498fdf3fd24e0bfaeb960e52fed0469a5d2974"}, + {file = "onnx-1.16.1.tar.gz", hash = "sha256:8299193f0f2a3849bfc069641aa8e4f93696602da8d165632af8ee48ec7556b6"}, ] [[package]] name = "onnxruntime" -version = "1.17.1" +version = "1.18.0" summary = "ONNX Runtime is a runtime 
accelerator for Machine Learning models" groups = ["default"] dependencies = [ "coloredlogs", "flatbuffers", - "numpy>=1.21.6", + "numpy>=1.26.0", "packaging", "protobuf", "sympy", ] files = [ - {file = "onnxruntime-1.17.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:d43ac17ac4fa3c9096ad3c0e5255bb41fd134560212dc124e7f52c3159af5d21"}, - {file = "onnxruntime-1.17.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55b5e92a4c76a23981c998078b9bf6145e4fb0b016321a8274b1607bd3c6bd35"}, - {file = "onnxruntime-1.17.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ebbcd2bc3a066cf54e6f18c75708eb4d309ef42be54606d22e5bdd78afc5b0d7"}, - {file = "onnxruntime-1.17.1-cp310-cp310-win32.whl", hash = "sha256:5e3716b5eec9092e29a8d17aab55e737480487deabfca7eac3cd3ed952b6ada9"}, - {file = "onnxruntime-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:fbb98cced6782ae1bb799cc74ddcbbeeae8819f3ad1d942a74d88e72b6511337"}, - {file = "onnxruntime-1.17.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:36fd6f87a1ecad87e9c652e42407a50fb305374f9a31d71293eb231caae18784"}, - {file = "onnxruntime-1.17.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99a8bddeb538edabc524d468edb60ad4722cff8a49d66f4e280c39eace70500b"}, - {file = "onnxruntime-1.17.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd7fddb4311deb5a7d3390cd8e9b3912d4d963efbe4dfe075edbaf18d01c024e"}, - {file = "onnxruntime-1.17.1-cp311-cp311-win32.whl", hash = "sha256:606a7cbfb6680202b0e4f1890881041ffc3ac6e41760a25763bd9fe146f0b335"}, - {file = "onnxruntime-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:53e4e06c0a541696ebdf96085fd9390304b7b04b748a19e02cf3b35c869a1e76"}, - {file = "onnxruntime-1.17.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:40f08e378e0f85929712a2b2c9b9a9cc400a90c8a8ca741d1d92c00abec60843"}, - {file = "onnxruntime-1.17.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ac79da6d3e1bb4590f1dad4bb3c2979d7228555f92bb39820889af8b8e6bd472"}, - {file = "onnxruntime-1.17.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ae9ba47dc099004e3781f2d0814ad710a13c868c739ab086fc697524061695ea"}, - {file = "onnxruntime-1.17.1-cp312-cp312-win32.whl", hash = "sha256:2dff1a24354220ac30e4a4ce2fb1df38cb1ea59f7dac2c116238d63fe7f4c5ff"}, - {file = "onnxruntime-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:6226a5201ab8cafb15e12e72ff2a4fc8f50654e8fa5737c6f0bd57c5ff66827e"}, + {file = "onnxruntime-1.18.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:5a3b7993a5ecf4a90f35542a4757e29b2d653da3efe06cdd3164b91167bbe10d"}, + {file = "onnxruntime-1.18.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:15b944623b2cdfe7f7945690bfb71c10a4531b51997c8320b84e7b0bb59af902"}, + {file = "onnxruntime-1.18.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2e61ce5005118064b1a0ed73ebe936bc773a102f067db34108ea6c64dd62a179"}, + {file = "onnxruntime-1.18.0-cp310-cp310-win32.whl", hash = "sha256:a4fc8a2a526eb442317d280610936a9f73deece06c7d5a91e51570860802b93f"}, + {file = "onnxruntime-1.18.0-cp310-cp310-win_amd64.whl", hash = "sha256:71ed219b768cab004e5cd83e702590734f968679bf93aa488c1a7ffbe6e220c3"}, + {file = "onnxruntime-1.18.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:3d24bd623872a72a7fe2f51c103e20fcca2acfa35d48f2accd6be1ec8633d960"}, + {file = 
"onnxruntime-1.18.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f15e41ca9b307a12550bfd2ec93f88905d9fba12bab7e578f05138ad0ae10d7b"}, + {file = "onnxruntime-1.18.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f45ca2887f62a7b847d526965686b2923efa72538c89b7703c7b3fe970afd59"}, + {file = "onnxruntime-1.18.0-cp311-cp311-win32.whl", hash = "sha256:9e24d9ecc8781323d9e2eeda019b4b24babc4d624e7d53f61b1fe1a929b0511a"}, + {file = "onnxruntime-1.18.0-cp311-cp311-win_amd64.whl", hash = "sha256:f8608398976ed18aef450d83777ff6f77d0b64eced1ed07a985e1a7db8ea3771"}, + {file = "onnxruntime-1.18.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:f1d79941f15fc40b1ee67738b2ca26b23e0181bf0070b5fb2984f0988734698f"}, + {file = "onnxruntime-1.18.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99e8caf3a8565c853a22d323a3eebc2a81e3de7591981f085a4f74f7a60aab2d"}, + {file = "onnxruntime-1.18.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:498d2b8380635f5e6ebc50ec1b45f181588927280f32390fb910301d234f97b8"}, + {file = "onnxruntime-1.18.0-cp312-cp312-win32.whl", hash = "sha256:ba7cc0ce2798a386c082aaa6289ff7e9bedc3dee622eef10e74830cff200a72e"}, + {file = "onnxruntime-1.18.0-cp312-cp312-win_amd64.whl", hash = "sha256:1fa175bd43f610465d5787ae06050c81f7ce09da2bf3e914eb282cb8eab363ef"}, ] [[package]] name = "opencv-python-headless" -version = "4.9.0.80" +version = "4.10.0.82" requires_python = ">=3.6" summary = "Wrapper package for OpenCV python bindings." groups = ["all", "vision"] @@ -1506,13 +1553,13 @@ dependencies = [ "numpy>=1.26.0; python_version >= \"3.12\"", ] files = [ - {file = "opencv-python-headless-4.9.0.80.tar.gz", hash = "sha256:71a4cd8cf7c37122901d8e81295db7fb188730e33a0e40039a4e59c1030b0958"}, - {file = "opencv_python_headless-4.9.0.80-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:2ea8a2edc4db87841991b2fbab55fc07b97ecb602e0f47d5d485bd75cee17c1a"}, - {file = "opencv_python_headless-4.9.0.80-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:e0ee54e27be493e8f7850847edae3128e18b540dac1d7b2e4001b8944e11e1c6"}, - {file = "opencv_python_headless-4.9.0.80-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57ce2865e8fec431c6f97a81e9faaf23fa5be61011d0a75ccf47a3c0d65fa73d"}, - {file = "opencv_python_headless-4.9.0.80-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:976656362d68d9f40a5c66f83901430538002465f7db59142784f3893918f3df"}, - {file = "opencv_python_headless-4.9.0.80-cp37-abi3-win32.whl", hash = "sha256:11e3849d83e6651d4e7699aadda9ec7ed7c38957cbbcb99db074f2a2d2de9670"}, - {file = "opencv_python_headless-4.9.0.80-cp37-abi3-win_amd64.whl", hash = "sha256:a8056c2cb37cd65dfcdf4153ca16f7362afcf3a50d600d6bb69c660fc61ee29c"}, + {file = "opencv-python-headless-4.10.0.82.tar.gz", hash = "sha256:de9e742c1b9540816fbd115b0b03841d41ed0c65566b0d7a5371f98b131b7e6d"}, + {file = "opencv_python_headless-4.10.0.82-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:a09ed50ba21cc5bf5d436cb0e784ad09c692d6b1d1454252772f6c8f2c7b4088"}, + {file = "opencv_python_headless-4.10.0.82-cp37-abi3-macosx_12_0_x86_64.whl", hash = "sha256:977a5fd21e1fe0d3d2134887db4441f8725abeae95150126302f31fcd9f548fa"}, + {file = "opencv_python_headless-4.10.0.82-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db4ec6755838b0be12510bfc9ffb014779c612418f11f4f7e6f505c36124a3aa"}, + {file = 
"opencv_python_headless-4.10.0.82-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10a37fa5276967ecf6eb297295b16b28b7a2eb3b568ca0ee469fb1a5954de298"}, + {file = "opencv_python_headless-4.10.0.82-cp37-abi3-win32.whl", hash = "sha256:94736e9b322d13db4768fd35588ad5e8995e78e207263076bfbee18aac835ad5"}, + {file = "opencv_python_headless-4.10.0.82-cp37-abi3-win_amd64.whl", hash = "sha256:c1822fa23d1641c0249ed5eb906f4c385f7959ff1bd601a776d56b0c18914af4"}, ] [[package]] @@ -1556,41 +1603,41 @@ files = [ [[package]] name = "pandas" -version = "2.2.1" +version = "2.2.2" requires_python = ">=3.9" summary = "Powerful data structures for data analysis, time series, and statistics" groups = ["default"] dependencies = [ - "numpy<2,>=1.22.4; python_version < \"3.11\"", - "numpy<2,>=1.23.2; python_version == \"3.11\"", - "numpy<2,>=1.26.0; python_version >= \"3.12\"", + "numpy>=1.22.4; python_version < \"3.11\"", + "numpy>=1.23.2; python_version == \"3.11\"", + "numpy>=1.26.0; python_version >= \"3.12\"", "python-dateutil>=2.8.2", "pytz>=2020.1", "tzdata>=2022.7", ] files = [ - {file = "pandas-2.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8df8612be9cd1c7797c93e1c5df861b2ddda0b48b08f2c3eaa0702cf88fb5f88"}, - {file = "pandas-2.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0f573ab277252ed9aaf38240f3b54cfc90fff8e5cab70411ee1d03f5d51f3944"}, - {file = "pandas-2.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f02a3a6c83df4026e55b63c1f06476c9aa3ed6af3d89b4f04ea656ccdaaaa359"}, - {file = "pandas-2.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c38ce92cb22a4bea4e3929429aa1067a454dcc9c335799af93ba9be21b6beb51"}, - {file = "pandas-2.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c2ce852e1cf2509a69e98358e8458775f89599566ac3775e70419b98615f4b06"}, - {file = "pandas-2.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53680dc9b2519cbf609c62db3ed7c0b499077c7fefda564e330286e619ff0dd9"}, - {file = "pandas-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:94e714a1cca63e4f5939cdce5f29ba8d415d85166be3441165edd427dc9f6bc0"}, - {file = "pandas-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f821213d48f4ab353d20ebc24e4faf94ba40d76680642fb7ce2ea31a3ad94f9b"}, - {file = "pandas-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c70e00c2d894cb230e5c15e4b1e1e6b2b478e09cf27cc593a11ef955b9ecc81a"}, - {file = "pandas-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e97fbb5387c69209f134893abc788a6486dbf2f9e511070ca05eed4b930b1b02"}, - {file = "pandas-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101d0eb9c5361aa0146f500773395a03839a5e6ecde4d4b6ced88b7e5a1a6403"}, - {file = "pandas-2.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7d2ed41c319c9fb4fd454fe25372028dfa417aacb9790f68171b2e3f06eae8cd"}, - {file = "pandas-2.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:af5d3c00557d657c8773ef9ee702c61dd13b9d7426794c9dfeb1dc4a0bf0ebc7"}, - {file = "pandas-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:06cf591dbaefb6da9de8472535b185cba556d0ce2e6ed28e21d919704fef1a9e"}, - {file = "pandas-2.2.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:88ecb5c01bb9ca927ebc4098136038519aa5d66b44671861ffab754cae75102c"}, - {file = "pandas-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:04f6ec3baec203c13e3f8b139fb0f9f86cd8c0b94603ae3ae8ce9a422e9f5bee"}, - {file = 
"pandas-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a935a90a76c44fe170d01e90a3594beef9e9a6220021acfb26053d01426f7dc2"}, - {file = "pandas-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c391f594aae2fd9f679d419e9a4d5ba4bce5bb13f6a989195656e7dc4b95c8f0"}, - {file = "pandas-2.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9d1265545f579edf3f8f0cb6f89f234f5e44ba725a34d86535b1a1d38decbccc"}, - {file = "pandas-2.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:11940e9e3056576ac3244baef2fedade891977bcc1cb7e5cc8f8cc7d603edc89"}, - {file = "pandas-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:4acf681325ee1c7f950d058b05a820441075b0dd9a2adf5c4835b9bc056bf4fb"}, - {file = "pandas-2.2.1.tar.gz", hash = "sha256:0ab90f87093c13f3e8fa45b48ba9f39181046e8f3317d3aadb2fffbb1b978572"}, + {file = "pandas-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce"}, + {file = "pandas-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238"}, + {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08"}, + {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0"}, + {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51"}, + {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99"}, + {file = "pandas-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772"}, + {file = "pandas-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288"}, + {file = "pandas-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151"}, + {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b"}, + {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee"}, + {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db"}, + {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1"}, + {file = "pandas-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24"}, + {file = "pandas-2.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef"}, + {file = "pandas-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce"}, + {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad"}, + {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad"}, + {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76"}, + {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32"}, + {file = "pandas-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23"}, + {file = "pandas-2.2.2.tar.gz", hash = "sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54"}, ] [[package]] @@ -1684,40 +1731,40 @@ files = [ [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" requires_python = ">=3.8" summary = "plugin and hook calling mechanisms for python" groups = ["dev", "test", "typecheck"] files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [[package]] name = "protobuf" -version = "5.26.0" +version = "4.25.3" requires_python = ">=3.8" summary = "" groups = ["default"] files = [ - {file = "protobuf-5.26.0-cp310-abi3-win32.whl", hash = "sha256:f9ecc8eb6f18037e0cbf43256db0325d4723f429bca7ef5cd358b7c29d65f628"}, - {file = "protobuf-5.26.0-cp310-abi3-win_amd64.whl", hash = "sha256:dfd29f6eb34107dccf289a93d44fb6b131e68888d090b784b691775ac84e8213"}, - {file = "protobuf-5.26.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:7e47c57303466c867374a17b2b5e99c5a7c8b72a94118e2f28efb599f19b4069"}, - {file = "protobuf-5.26.0-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e184175276edc222e2d5e314a72521e10049938a9a4961fe4bea9b25d073c03f"}, - {file = "protobuf-5.26.0-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:6ee9d1aa02f951c5ce10bf8c6cfb7604133773038e33f913183c8b5201350600"}, - {file = "protobuf-5.26.0-py3-none-any.whl", hash = "sha256:a49b6c5359bf34fb7bf965bf21abfab4476e4527d822ab5289ee3bf73f291159"}, - {file = "protobuf-5.26.0.tar.gz", hash = "sha256:82f5870d74c99addfe4152777bdf8168244b9cf0ac65f8eccf045ddfa9d80d9b"}, + {file = "protobuf-4.25.3-cp310-abi3-win32.whl", hash = "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa"}, + {file = "protobuf-4.25.3-cp310-abi3-win_amd64.whl", hash = "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8"}, + {file = "protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d"}, + {file = "protobuf-4.25.3-py3-none-any.whl", hash = "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9"}, + {file = "protobuf-4.25.3.tar.gz", hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"}, ] [[package]] name = "pygments" -version = "2.17.2" -requires_python = ">=3.7" +version = "2.18.0" +requires_python = ">=3.8" summary = 
"Pygments is a syntax highlighting package written in Python." groups = ["default", "dev", "docs", "lint", "test"] files = [ - {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, - {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, + {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, + {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, ] [[package]] @@ -1759,7 +1806,7 @@ files = [ [[package]] name = "pyright" -version = "1.1.355" +version = "1.1.366" requires_python = ">=3.7" summary = "Command line wrapper for pyright" groups = ["dev", "typecheck"] @@ -1767,13 +1814,13 @@ dependencies = [ "nodeenv>=1.6.0", ] files = [ - {file = "pyright-1.1.355-py3-none-any.whl", hash = "sha256:bf30b6728fd68ae7d09c98292b67152858dd89738569836896df786e52b5fe48"}, - {file = "pyright-1.1.355.tar.gz", hash = "sha256:dca4104cd53d6484e6b1b50b7a239ad2d16d2ffd20030bcf3111b56f44c263bf"}, + {file = "pyright-1.1.366-py3-none-any.whl", hash = "sha256:c09e73ccc894976bcd6d6a5784aa84d724dbd9ceb7b873b39d475ca61c2de071"}, + {file = "pyright-1.1.366.tar.gz", hash = "sha256:10e4d60be411f6d960cd39b0b58bf2ff76f2c83b9aeb102ffa9d9fda2e1303cb"}, ] [[package]] name = "pytest" -version = "8.1.1" +version = "8.2.2" requires_python = ">=3.8" summary = "pytest: simple powerful testing with Python" groups = ["dev", "test", "typecheck"] @@ -1782,18 +1829,18 @@ dependencies = [ "exceptiongroup>=1.0.0rc8; python_version < \"3.11\"", "iniconfig", "packaging", - "pluggy<2.0,>=1.4", + "pluggy<2.0,>=1.5", "tomli>=1; python_version < \"3.11\"", ] files = [ - {file = "pytest-8.1.1-py3-none-any.whl", hash = "sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7"}, - {file = "pytest-8.1.1.tar.gz", hash = "sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044"}, + {file = "pytest-8.2.2-py3-none-any.whl", hash = "sha256:c434598117762e2bd304e526244f67bf66bbd7b5d6cf22138be51ff661980343"}, + {file = "pytest-8.2.2.tar.gz", hash = "sha256:de4bb8104e201939ccdc688b27a89a7be2079b22e2bd2b07f806b6ba71117977"}, ] [[package]] name = "pytest-cov" -version = "4.1.0" -requires_python = ">=3.7" +version = "5.0.0" +requires_python = ">=3.8" summary = "Pytest plugin for measuring coverage." groups = ["dev", "test"] dependencies = [ @@ -1801,8 +1848,8 @@ dependencies = [ "pytest>=4.6", ] files = [ - {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, - {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, + {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, + {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, ] [[package]] @@ -1988,28 +2035,28 @@ files = [ [[package]] name = "ruff" -version = "0.3.3" +version = "0.4.8" requires_python = ">=3.7" summary = "An extremely fast Python linter and code formatter, written in Rust." 
groups = ["dev", "lint"] files = [ - {file = "ruff-0.3.3-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:973a0e388b7bc2e9148c7f9be8b8c6ae7471b9be37e1cc732f8f44a6f6d7720d"}, - {file = "ruff-0.3.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:cfa60d23269d6e2031129b053fdb4e5a7b0637fc6c9c0586737b962b2f834493"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1eca7ff7a47043cf6ce5c7f45f603b09121a7cc047447744b029d1b719278eb5"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7d3f6762217c1da954de24b4a1a70515630d29f71e268ec5000afe81377642d"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b24c19e8598916d9c6f5a5437671f55ee93c212a2c4c569605dc3842b6820386"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5a6cbf216b69c7090f0fe4669501a27326c34e119068c1494f35aaf4cc683778"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:352e95ead6964974b234e16ba8a66dad102ec7bf8ac064a23f95371d8b198aab"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d6ab88c81c4040a817aa432484e838aaddf8bfd7ca70e4e615482757acb64f8"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79bca3a03a759cc773fca69e0bdeac8abd1c13c31b798d5bb3c9da4a03144a9f"}, - {file = "ruff-0.3.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2700a804d5336bcffe063fd789ca2c7b02b552d2e323a336700abb8ae9e6a3f8"}, - {file = "ruff-0.3.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fd66469f1a18fdb9d32e22b79f486223052ddf057dc56dea0caaf1a47bdfaf4e"}, - {file = "ruff-0.3.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:45817af234605525cdf6317005923bf532514e1ea3d9270acf61ca2440691376"}, - {file = "ruff-0.3.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:0da458989ce0159555ef224d5b7c24d3d2e4bf4c300b85467b08c3261c6bc6a8"}, - {file = "ruff-0.3.3-py3-none-win32.whl", hash = "sha256:f2831ec6a580a97f1ea82ea1eda0401c3cdf512cf2045fa3c85e8ef109e87de0"}, - {file = "ruff-0.3.3-py3-none-win_amd64.whl", hash = "sha256:be90bcae57c24d9f9d023b12d627e958eb55f595428bafcb7fec0791ad25ddfc"}, - {file = "ruff-0.3.3-py3-none-win_arm64.whl", hash = "sha256:0171aab5fecdc54383993389710a3d1227f2da124d76a2784a7098e818f92d61"}, - {file = "ruff-0.3.3.tar.gz", hash = "sha256:38671be06f57a2f8aba957d9f701ea889aa5736be806f18c0cd03d6ff0cbca8d"}, + {file = "ruff-0.4.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7663a6d78f6adb0eab270fa9cf1ff2d28618ca3a652b60f2a234d92b9ec89066"}, + {file = "ruff-0.4.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eeceb78da8afb6de0ddada93112869852d04f1cd0f6b80fe464fd4e35c330913"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aad360893e92486662ef3be0a339c5ca3c1b109e0134fcd37d534d4be9fb8de3"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:284c2e3f3396fb05f5f803c9fffb53ebbe09a3ebe7dda2929ed8d73ded736deb"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7354f921e3fbe04d2a62d46707e569f9315e1a613307f7311a935743c51a764"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:72584676164e15a68a15778fd1b17c28a519e7a0622161eb2debdcdabdc71883"}, + {file = 
"ruff-0.4.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9678d5c9b43315f323af2233a04d747409d1e3aa6789620083a82d1066a35199"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704977a658131651a22b5ebeb28b717ef42ac6ee3b11e91dc87b633b5d83142b"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d05f8d6f0c3cce5026cecd83b7a143dcad503045857bc49662f736437380ad45"}, + {file = "ruff-0.4.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6ea874950daca5697309d976c9afba830d3bf0ed66887481d6bca1673fc5b66a"}, + {file = "ruff-0.4.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fc95aac2943ddf360376be9aa3107c8cf9640083940a8c5bd824be692d2216dc"}, + {file = "ruff-0.4.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:384154a1c3f4bf537bac69f33720957ee49ac8d484bfc91720cc94172026ceed"}, + {file = "ruff-0.4.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e9d5ce97cacc99878aa0d084c626a15cd21e6b3d53fd6f9112b7fc485918e1fa"}, + {file = "ruff-0.4.8-py3-none-win32.whl", hash = "sha256:6d795d7639212c2dfd01991259460101c22aabf420d9b943f153ab9d9706e6a9"}, + {file = "ruff-0.4.8-py3-none-win_amd64.whl", hash = "sha256:e14a3a095d07560a9d6769a72f781d73259655919d9b396c650fc98a8157555d"}, + {file = "ruff-0.4.8-py3-none-win_arm64.whl", hash = "sha256:14019a06dbe29b608f6b7cbcec300e3170a8d86efaddb7b23405cb7f7dcaf780"}, + {file = "ruff-0.4.8.tar.gz", hash = "sha256:16d717b1d57b2e2fd68bd0bf80fb43931b79d05a7131aa477d66fc40fbd86268"}, ] [[package]] @@ -2097,13 +2144,13 @@ files = [ [[package]] name = "setuptools" -version = "69.2.0" +version = "70.0.0" requires_python = ">=3.8" summary = "Easily download, build, install, upgrade, and uninstall Python packages" groups = ["default", "dev", "docs", "typecheck"] files = [ - {file = "setuptools-69.2.0-py3-none-any.whl", hash = "sha256:c21c49fb1042386df081cb5d86759792ab89efca84cf114889191cd09aacc80c"}, - {file = "setuptools-69.2.0.tar.gz", hash = "sha256:0ff4183f8f42cd8fa3acea16c45205521a4ef28f73c6391d8a25e92893134f2e"}, + {file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"}, + {file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"}, ] [[package]] @@ -2145,9 +2192,22 @@ files = [ {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, ] +[[package]] +name = "tbb" +version = "2021.12.0" +summary = "Intel® oneAPI Threading Building Blocks (oneTBB)" +groups = ["all", "default", "vision"] +marker = "platform_system == \"Windows\"" +files = [ + {file = "tbb-2021.12.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:f2cc9a7f8ababaa506cbff796ce97c3bf91062ba521e15054394f773375d81d8"}, + {file = "tbb-2021.12.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:a925e9a7c77d3a46ae31c34b0bb7f801c4118e857d137b68f68a8e458fcf2bd7"}, + {file = "tbb-2021.12.0-py3-none-win32.whl", hash = "sha256:b1725b30c174048edc8be70bd43bb95473f396ce895d91151a474d0fa9f450a8"}, + {file = "tbb-2021.12.0-py3-none-win_amd64.whl", hash = "sha256:fc2772d850229f2f3df85f1109c4844c495a2db7433d38200959ee9265b34789"}, +] + [[package]] name = "tensorboard" -version = "2.16.2" +version = "2.17.0" requires_python = ">=3.9" summary = "TensorBoard lets you watch Tensors Flow" groups = ["default"] @@ -2156,14 +2216,14 @@ dependencies = [ "grpcio>=1.48.2", "markdown>=2.6.8", "numpy>=1.12.0", - "protobuf!=4.24.0,>=3.19.6", 
+ "protobuf!=4.24.0,<5.0.0,>=3.19.6", "setuptools>=41.0.0", "six>1.9", "tensorboard-data-server<0.8.0,>=0.7.0", "werkzeug>=1.0.1", ] files = [ - {file = "tensorboard-2.16.2-py3-none-any.whl", hash = "sha256:9f2b4e7dad86667615c0e5cd072f1ea8403fc032a299f0072d6f74855775cc45"}, + {file = "tensorboard-2.17.0-py3-none-any.whl", hash = "sha256:859a499a9b1fb68a058858964486627100b71fcb21646861c61d31846a6478fb"}, ] [[package]] @@ -2180,7 +2240,7 @@ files = [ [[package]] name = "timm" -version = "0.9.16" +version = "1.0.3" requires_python = ">=3.8" summary = "PyTorch Image Models" groups = ["all", "vision"] @@ -2192,13 +2252,13 @@ dependencies = [ "torchvision", ] files = [ - {file = "timm-0.9.16-py3-none-any.whl", hash = "sha256:bf5704014476ab011589d3c14172ee4c901fd18f9110a928019cac5be2945914"}, - {file = "timm-0.9.16.tar.gz", hash = "sha256:891e54f375d55adf31a71ab0c117761f0e472f9f3971858ecdd1e7376b7071e6"}, + {file = "timm-1.0.3-py3-none-any.whl", hash = "sha256:d1ec86f7765aa79fbc7491508fa6e285d38a38f10bf4fe44ba2e9c70f91f0f5b"}, + {file = "timm-1.0.3.tar.gz", hash = "sha256:83920a7efe2cfd503b2a1257dc8808d6ff7dcd18a4b79f451c283e7d71497329"}, ] [[package]] name = "tokenizers" -version = "0.15.2" +version = "0.19.1" requires_python = ">=3.7" summary = "" groups = ["default"] @@ -2206,80 +2266,70 @@ dependencies = [ "huggingface-hub<1.0,>=0.16.4", ] files = [ - {file = "tokenizers-0.15.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:52f6130c9cbf70544287575a985bf44ae1bda2da7e8c24e97716080593638012"}, - {file = "tokenizers-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:054c1cc9c6d68f7ffa4e810b3d5131e0ba511b6e4be34157aa08ee54c2f8d9ee"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9b9b070fdad06e347563b88c278995735292ded1132f8657084989a4c84a6d5"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea621a7eef4b70e1f7a4e84dd989ae3f0eeb50fc8690254eacc08acb623e82f1"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cf7fd9a5141634fa3aa8d6b7be362e6ae1b4cda60da81388fa533e0b552c98fd"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44f2a832cd0825295f7179eaf173381dc45230f9227ec4b44378322d900447c9"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8b9ec69247a23747669ec4b0ca10f8e3dfb3545d550258129bd62291aabe8605"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b6a4c78da863ff26dbd5ad9a8ecc33d8a8d97b535172601cf00aee9d7ce9ce"}, - {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5ab2a4d21dcf76af60e05af8063138849eb1d6553a0d059f6534357bce8ba364"}, - {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a47acfac7e511f6bbfcf2d3fb8c26979c780a91e06fb5b9a43831b2c0153d024"}, - {file = "tokenizers-0.15.2-cp310-none-win32.whl", hash = "sha256:064ff87bb6acdbd693666de9a4b692add41308a2c0ec0770d6385737117215f2"}, - {file = "tokenizers-0.15.2-cp310-none-win_amd64.whl", hash = "sha256:3b919afe4df7eb6ac7cafd2bd14fb507d3f408db7a68c43117f579c984a73843"}, - {file = "tokenizers-0.15.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:89cd1cb93e4b12ff39bb2d626ad77e35209de9309a71e4d3d4672667b4b256e7"}, - {file = "tokenizers-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:cfed5c64e5be23d7ee0f0e98081a25c2a46b0b77ce99a4f0605b1ec43dd481fa"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a907d76dcfda37023ba203ab4ceeb21bc5683436ebefbd895a0841fd52f6f6f2"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20ea60479de6fc7b8ae756b4b097572372d7e4032e2521c1bbf3d90c90a99ff0"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48e2b9335be2bc0171df9281385c2ed06a15f5cf121c44094338306ab7b33f2c"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:112a1dd436d2cc06e6ffdc0b06d55ac019a35a63afd26475205cb4b1bf0bfbff"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4620cca5c2817177ee8706f860364cc3a8845bc1e291aaf661fb899e5d1c45b0"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccd73a82751c523b3fc31ff8194702e4af4db21dc20e55b30ecc2079c5d43cb7"}, - {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:107089f135b4ae7817affe6264f8c7a5c5b4fd9a90f9439ed495f54fcea56fb4"}, - {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0ff110ecc57b7aa4a594396525a3451ad70988e517237fe91c540997c4e50e29"}, - {file = "tokenizers-0.15.2-cp311-none-win32.whl", hash = "sha256:6d76f00f5c32da36c61f41c58346a4fa7f0a61be02f4301fd30ad59834977cc3"}, - {file = "tokenizers-0.15.2-cp311-none-win_amd64.whl", hash = "sha256:cc90102ed17271cf0a1262babe5939e0134b3890345d11a19c3145184b706055"}, - {file = "tokenizers-0.15.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f86593c18d2e6248e72fb91c77d413a815153b8ea4e31f7cd443bdf28e467670"}, - {file = "tokenizers-0.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0774bccc6608eca23eb9d620196687c8b2360624619623cf4ba9dc9bd53e8b51"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d0222c5b7c9b26c0b4822a82f6a7011de0a9d3060e1da176f66274b70f846b98"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3835738be1de66624fff2f4f6f6684775da4e9c00bde053be7564cbf3545cc66"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0143e7d9dcd811855c1ce1ab9bf5d96d29bf5e528fd6c7824d0465741e8c10fd"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db35825f6d54215f6b6009a7ff3eedee0848c99a6271c870d2826fbbedf31a38"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f5e64b0389a2be47091d8cc53c87859783b837ea1a06edd9d8e04004df55a5c"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e0480c452217edd35eca56fafe2029fb4d368b7c0475f8dfa3c5c9c400a7456"}, - {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a33ab881c8fe70474980577e033d0bc9a27b7ab8272896e500708b212995d834"}, - {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a308a607ca9de2c64c1b9ba79ec9a403969715a1b8ba5f998a676826f1a7039d"}, - {file = "tokenizers-0.15.2-cp312-none-win32.whl", hash = "sha256:b8fcfa81bcb9447df582c5bc96a031e6df4da2a774b8080d4f02c0c16b42be0b"}, - {file = "tokenizers-0.15.2-cp312-none-win_amd64.whl", hash = 
"sha256:38d7ab43c6825abfc0b661d95f39c7f8af2449364f01d331f3b51c94dcff7221"}, - {file = "tokenizers-0.15.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:38bfb0204ff3246ca4d5e726e8cc8403bfc931090151e6eede54d0e0cf162ef0"}, - {file = "tokenizers-0.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c861d35e8286a53e06e9e28d030b5a05bcbf5ac9d7229e561e53c352a85b1fc"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:936bf3842db5b2048eaa53dade907b1160f318e7c90c74bfab86f1e47720bdd6"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:620beacc3373277700d0e27718aa8b25f7b383eb8001fba94ee00aeea1459d89"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2735ecbbf37e52db4ea970e539fd2d450d213517b77745114f92867f3fc246eb"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:473c83c5e2359bb81b0b6fde870b41b2764fcdd36d997485e07e72cc3a62264a"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968fa1fb3c27398b28a4eca1cbd1e19355c4d3a6007f7398d48826bbe3a0f728"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:865c60ae6eaebdde7da66191ee9b7db52e542ed8ee9d2c653b6d190a9351b980"}, - {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7c0d8b52664ab2d4a8d6686eb5effc68b78608a9008f086a122a7b2996befbab"}, - {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f33dfbdec3784093a9aebb3680d1f91336c56d86cc70ddf88708251da1fe9064"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6a9b648a58281c4672212fab04e60648fde574877d0139cd4b4f93fe28ca8944"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7c7d18b733be6bbca8a55084027f7be428c947ddf871c500ee603e375013ffba"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:13ca3611de8d9ddfbc4dc39ef54ab1d2d4aaa114ac8727dfdc6a6ec4be017378"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:237d1bf3361cf2e6463e6c140628e6406766e8b27274f5fcc62c747ae3c6f094"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67a0fe1e49e60c664915e9fb6b0cb19bac082ab1f309188230e4b2920230edb3"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4e022fe65e99230b8fd89ebdfea138c24421f91c1a4f4781a8f5016fd5cdfb4d"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d857be2df69763362ac699f8b251a8cd3fac9d21893de129bc788f8baaef2693"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:708bb3e4283177236309e698da5fcd0879ce8fd37457d7c266d16b550bcbbd18"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c35e09e9899b72a76e762f9854e8750213f67567787d45f37ce06daf57ca78"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1257f4394be0d3b00de8c9e840ca5601d0a4a8438361ce9c2b05c7d25f6057b"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:02272fe48280e0293a04245ca5d919b2c94a48b408b55e858feae9618138aeda"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dc3ad9ebc76eabe8b1d7c04d38be884b8f9d60c0cdc09b0aa4e3bcf746de0388"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:32e16bdeffa7c4f46bf2152172ca511808b952701d13e7c18833c0b73cb5c23f"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fb16ba563d59003028b678d2361a27f7e4ae0ab29c7a80690efa20d829c81fdb"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2277c36d2d6cdb7876c274547921a42425b6810d38354327dd65a8009acf870c"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cf75d32e8d250781940d07f7eece253f2fe9ecdb1dc7ba6e3833fa17b82fcbc"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1b3b31884dc8e9b21508bb76da80ebf7308fdb947a17affce815665d5c4d028"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10122d8d8e30afb43bb1fe21a3619f62c3e2574bff2699cf8af8b0b6c5dc4a3"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d88b96ff0fe8e91f6ef01ba50b0d71db5017fa4e3b1d99681cec89a85faf7bf7"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:37aaec5a52e959892870a7c47cef80c53797c0db9149d458460f4f31e2fb250e"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e2ea752f2b0fe96eb6e2f3adbbf4d72aaa1272079b0dfa1145507bd6a5d537e6"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b19a808d8799fda23504a5cd31d2f58e6f52f140380082b352f877017d6342b"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c86e5e068ac8b19204419ed8ca90f9d25db20578f5881e337d203b314f4104"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de19c4dc503c612847edf833c82e9f73cd79926a384af9d801dcf93f110cea4e"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea09acd2fe3324174063d61ad620dec3bcf042b495515f27f638270a7d466e8b"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cf27fd43472e07b57cf420eee1e814549203d56de00b5af8659cb99885472f1f"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7ca22bd897537a0080521445d91a58886c8c04084a6a19e6c78c586e0cfa92a5"}, - {file = "tokenizers-0.15.2.tar.gz", hash = "sha256:e6e9c6e019dd5484be5beafc775ae6c925f4c69a3487040ed09b45e13df2cb91"}, + {file = "tokenizers-0.19.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:952078130b3d101e05ecfc7fc3640282d74ed26bcf691400f872563fca15ac97"}, + {file = "tokenizers-0.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82c8b8063de6c0468f08e82c4e198763e7b97aabfe573fd4cf7b33930ca4df77"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f03727225feaf340ceeb7e00604825addef622d551cbd46b7b775ac834c1e1c4"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:453e4422efdfc9c6b6bf2eae00d5e323f263fff62b29a8c9cd526c5003f3f642"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:02e81bf089ebf0e7f4df34fa0207519f07e66d8491d963618252f2e0729e0b46"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b07c538ba956843833fee1190cf769c60dc62e1cf934ed50d77d5502194d63b1"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28cab1582e0eec38b1f38c1c1fb2e56bce5dc180acb1724574fc5f47da2a4fe"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b01afb7193d47439f091cd8f070a1ced347ad0f9144952a30a41836902fe09e"}, + {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7fb297edec6c6841ab2e4e8f357209519188e4a59b557ea4fafcf4691d1b4c98"}, + {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2e8a3dd055e515df7054378dc9d6fa8c8c34e1f32777fb9a01fea81496b3f9d3"}, + {file = "tokenizers-0.19.1-cp310-none-win32.whl", hash = "sha256:7ff898780a155ea053f5d934925f3902be2ed1f4d916461e1a93019cc7250837"}, + {file = "tokenizers-0.19.1-cp310-none-win_amd64.whl", hash = "sha256:bea6f9947e9419c2fda21ae6c32871e3d398cba549b93f4a65a2d369662d9403"}, + {file = "tokenizers-0.19.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5c88d1481f1882c2e53e6bb06491e474e420d9ac7bdff172610c4f9ad3898059"}, + {file = "tokenizers-0.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ddf672ed719b4ed82b51499100f5417d7d9f6fb05a65e232249268f35de5ed14"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dadc509cc8a9fe460bd274c0e16ac4184d0958117cf026e0ea8b32b438171594"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfedf31824ca4915b511b03441784ff640378191918264268e6923da48104acc"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac11016d0a04aa6487b1513a3a36e7bee7eec0e5d30057c9c0408067345c48d2"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76951121890fea8330d3a0df9a954b3f2a37e3ec20e5b0530e9a0044ca2e11fe"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b342d2ce8fc8d00f376af068e3274e2e8649562e3bc6ae4a67784ded6b99428d"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d16ff18907f4909dca9b076b9c2d899114dd6abceeb074eca0c93e2353f943aa"}, + {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:706a37cc5332f85f26efbe2bdc9ef8a9b372b77e4645331a405073e4b3a8c1c6"}, + {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16baac68651701364b0289979ecec728546133e8e8fe38f66fe48ad07996b88b"}, + {file = "tokenizers-0.19.1-cp311-none-win32.whl", hash = "sha256:9ed240c56b4403e22b9584ee37d87b8bfa14865134e3e1c3fb4b2c42fafd3256"}, + {file = "tokenizers-0.19.1-cp311-none-win_amd64.whl", hash = "sha256:ad57d59341710b94a7d9dbea13f5c1e7d76fd8d9bcd944a7a6ab0b0da6e0cc66"}, + {file = "tokenizers-0.19.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:621d670e1b1c281a1c9698ed89451395d318802ff88d1fc1accff0867a06f153"}, + {file = "tokenizers-0.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d924204a3dbe50b75630bd16f821ebda6a5f729928df30f582fb5aade90c818a"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f3fefdc0446b1a1e6d81cd4c07088ac015665d2e812f6dbba4a06267d1a2c95"}, + 
{file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9620b78e0b2d52ef07b0d428323fb34e8ea1219c5eac98c2596311f20f1f9266"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04ce49e82d100594715ac1b2ce87d1a36e61891a91de774755f743babcd0dd52"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5c2ff13d157afe413bf7e25789879dd463e5a4abfb529a2d8f8473d8042e28f"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3174c76efd9d08f836bfccaca7cfec3f4d1c0a4cf3acbc7236ad577cc423c840"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c9d5b6c0e7a1e979bec10ff960fae925e947aab95619a6fdb4c1d8ff3708ce3"}, + {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a179856d1caee06577220ebcfa332af046d576fb73454b8f4d4b0ba8324423ea"}, + {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:952b80dac1a6492170f8c2429bd11fcaa14377e097d12a1dbe0ef2fb2241e16c"}, + {file = "tokenizers-0.19.1-cp312-none-win32.whl", hash = "sha256:01d62812454c188306755c94755465505836fd616f75067abcae529c35edeb57"}, + {file = "tokenizers-0.19.1-cp312-none-win_amd64.whl", hash = "sha256:b70bfbe3a82d3e3fb2a5e9b22a39f8d1740c96c68b6ace0086b39074f08ab89a"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3b11853f17b54c2fe47742c56d8a33bf49ce31caf531e87ac0d7d13d327c9334"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d26194ef6c13302f446d39972aaa36a1dda6450bc8949f5eb4c27f51191375bd"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e8d1ed93beda54bbd6131a2cb363a576eac746d5c26ba5b7556bc6f964425594"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca407133536f19bdec44b3da117ef0d12e43f6d4b56ac4c765f37eca501c7bda"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce05fde79d2bc2e46ac08aacbc142bead21614d937aac950be88dc79f9db9022"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:35583cd46d16f07c054efd18b5d46af4a2f070a2dd0a47914e66f3ff5efb2b1e"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:43350270bfc16b06ad3f6f07eab21f089adb835544417afda0f83256a8bf8b75"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b4399b59d1af5645bcee2072a463318114c39b8547437a7c2d6a186a1b5a0e2d"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6852c5b2a853b8b0ddc5993cd4f33bfffdca4fcc5d52f89dd4b8eada99379285"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bcd266ae85c3d39df2f7e7d0e07f6c41a55e9a3123bb11f854412952deacd828"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecb2651956eea2aa0a2d099434134b1b68f1c31f9a5084d6d53f08ed43d45ff2"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b279ab506ec4445166ac476fb4d3cc383accde1ea152998509a94d82547c8e2a"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:89183e55fb86e61d848ff83753f64cded119f5d6e1f553d14ffee3700d0a4a49"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2edbc75744235eea94d595a8b70fe279dd42f3296f76d5a86dde1d46e35f574"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:0e64bfde9a723274e9a71630c3e9494ed7b4c0f76a1faacf7fe294cd26f7ae7c"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0b5ca92bfa717759c052e345770792d02d1f43b06f9e790ca0a1db62838816f3"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f8a20266e695ec9d7a946a019c1d5ca4eddb6613d4f466888eee04f16eedb85"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63c38f45d8f2a2ec0f3a20073cccb335b9f99f73b3c69483cd52ebc75369d8a1"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dd26e3afe8a7b61422df3176e06664503d3f5973b94f45d5c45987e1cb711876"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:eddd5783a4a6309ce23432353cdb36220e25cbb779bfa9122320666508b44b88"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:56ae39d4036b753994476a1b935584071093b55c7a72e3b8288e68c313ca26e7"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f9939ca7e58c2758c01b40324a59c034ce0cebad18e0d4563a9b1beab3018243"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c330c0eb815d212893c67a032e9dc1b38a803eccb32f3e8172c19cc69fbb439"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec11802450a2487cdf0e634b750a04cbdc1c4d066b97d94ce7dd2cb51ebb325b"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2b718f316b596f36e1dae097a7d5b91fc5b85e90bf08b01ff139bd8953b25af"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ed69af290c2b65169f0ba9034d1dc39a5db9459b32f1dd8b5f3f32a3fcf06eab"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f8a9c828277133af13f3859d1b6bf1c3cb6e9e1637df0e45312e6b7c2e622b1f"}, + {file = "tokenizers-0.19.1.tar.gz", hash = "sha256:ee59e6680ed0fdbe6b724cf38bd70400a0c1dd623b07ac729087270caeac88e3"}, ] [[package]] @@ -2307,7 +2357,7 @@ files = [ [[package]] name = "torch" -version = "2.2.1" +version = "2.3.0" requires_python = ">=3.8.0" summary = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" groups = ["all", "default", "vision"] @@ -2315,6 +2365,7 @@ dependencies = [ "filelock", "fsspec", "jinja2", + "mkl<=2021.4.0,>=2021.1.1; platform_system == \"Windows\"", "networkx", "nvidia-cublas-cu12==12.1.3.1; platform_system == \"Linux\" and platform_machine == \"x86_64\"", "nvidia-cuda-cupti-cu12==12.1.105; platform_system == \"Linux\" and platform_machine == \"x86_64\"", @@ -2325,28 +2376,25 @@ dependencies = [ "nvidia-curand-cu12==10.3.2.106; platform_system == \"Linux\" and platform_machine == \"x86_64\"", "nvidia-cusolver-cu12==11.4.5.107; platform_system == \"Linux\" and platform_machine == \"x86_64\"", "nvidia-cusparse-cu12==12.1.0.106; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "nvidia-nccl-cu12==2.19.3; platform_system == \"Linux\" and platform_machine == \"x86_64\"", + 
"nvidia-nccl-cu12==2.20.5; platform_system == \"Linux\" and platform_machine == \"x86_64\"", "nvidia-nvtx-cu12==12.1.105; platform_system == \"Linux\" and platform_machine == \"x86_64\"", "sympy", - "triton==2.2.0; platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\"", + "triton==2.3.0; platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\"", "typing-extensions>=4.8.0", ] files = [ - {file = "torch-2.2.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:8d3bad336dd2c93c6bcb3268e8e9876185bda50ebde325ef211fb565c7d15273"}, - {file = "torch-2.2.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:5297f13370fdaca05959134b26a06a7f232ae254bf2e11a50eddec62525c9006"}, - {file = "torch-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:5f5dee8433798888ca1415055f5e3faf28a3bad660e4c29e1014acd3275ab11a"}, - {file = "torch-2.2.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:b6d78338acabf1fb2e88bf4559d837d30230cf9c3e4337261f4d83200df1fcbe"}, - {file = "torch-2.2.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:6ab3ea2e29d1aac962e905142bbe50943758f55292f1b4fdfb6f4792aae3323e"}, - {file = "torch-2.2.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:d86664ec85902967d902e78272e97d1aff1d331f7619d398d3ffab1c9b8e9157"}, - {file = "torch-2.2.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d6227060f268894f92c61af0a44c0d8212e19cb98d05c20141c73312d923bc0a"}, - {file = "torch-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:77e990af75fb1675490deb374d36e726f84732cd5677d16f19124934b2409ce9"}, - {file = "torch-2.2.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:46085e328d9b738c261f470231e987930f4cc9472d9ffb7087c7a1343826ac51"}, - {file = "torch-2.2.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:2d9e7e5ecbb002257cf98fae13003abbd620196c35f85c9e34c2adfb961321ec"}, - {file = "torch-2.2.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:ada53aebede1c89570e56861b08d12ba4518a1f8b82d467c32665ec4d1f4b3c8"}, - {file = "torch-2.2.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:be21d4c41ecebed9e99430dac87de1439a8c7882faf23bba7fea3fea7b906ac1"}, - {file = "torch-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:79848f46196750367dcdf1d2132b722180b9d889571e14d579ae82d2f50596c5"}, - {file = "torch-2.2.1-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:7ee804847be6be0032fbd2d1e6742fea2814c92bebccb177f0d3b8e92b2d2b18"}, - {file = "torch-2.2.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:84b2fb322ab091039fdfe74e17442ff046b258eb5e513a28093152c5b07325a7"}, + {file = "torch-2.3.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:d8ea5a465dbfd8501f33c937d1f693176c9aef9d1c1b0ca1d44ed7b0a18c52ac"}, + {file = "torch-2.3.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:09c81c5859a5b819956c6925a405ef1cdda393c9d8a01ce3851453f699d3358c"}, + {file = "torch-2.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:1bf023aa20902586f614f7682fedfa463e773e26c58820b74158a72470259459"}, + {file = "torch-2.3.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:758ef938de87a2653bba74b91f703458c15569f1562bf4b6c63c62d9c5a0c1f5"}, + {file = "torch-2.3.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:493d54ee2f9df100b5ce1d18c96dbb8d14908721f76351e908c9d2622773a788"}, + {file = "torch-2.3.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:bce43af735c3da16cc14c7de2be7ad038e2fbf75654c2e274e575c6c05772ace"}, + {file = "torch-2.3.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:729804e97b7cf19ae9ab4181f91f5e612af07956f35c8b2c8e9d9f3596a8e877"}, + {file = "torch-2.3.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:d24e328226d8e2af7cf80fcb1d2f1d108e0de32777fab4aaa2b37b9765d8be73"}, + {file = "torch-2.3.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:b0de2bdc0486ea7b14fc47ff805172df44e421a7318b7c4d92ef589a75d27410"}, + {file = "torch-2.3.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:a306c87a3eead1ed47457822c01dfbd459fe2920f2d38cbdf90de18f23f72542"}, + {file = "torch-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:f9b98bf1a3c8af2d4c41f0bf1433920900896c446d1ddc128290ff146d1eb4bd"}, + {file = "torch-2.3.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:dca986214267b34065a79000cee54232e62b41dff1ec2cab9abc3fc8b3dee0ad"}, ] [[package]] @@ -2368,31 +2416,28 @@ files = [ [[package]] name = "torchvision" -version = "0.17.1" +version = "0.18.0" requires_python = ">=3.8" summary = "image and video datasets and models for torch deep learning" groups = ["all", "vision"] dependencies = [ "numpy", "pillow!=8.3.*,>=5.3.0", - "torch==2.2.1", + "torch==2.3.0", ] files = [ - {file = "torchvision-0.17.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:06418880212b66e45e855dd39f536e7fd48b4e6b034a11dd9fe9e2384afb51ec"}, - {file = "torchvision-0.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:33d65d0c7fdcb3f7bc1dd8ed30ea3cd7e0587b4ad1b104b5677c8191a8bad9f1"}, - {file = "torchvision-0.17.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:aaefef2be6a02f206085ce4bb6c0078b03ebf48cb6ff82bd762ff6248475e08e"}, - {file = "torchvision-0.17.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:ebe5fdb466aff8a8e8e755de84a843418b6f8d500624752c05eaa638d7700f3d"}, - {file = "torchvision-0.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:9d4d45a996f4313e9c5db4da71d31508d44f7ccfbf29d3442bdcc2ad13e0b6f3"}, - {file = "torchvision-0.17.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:ea2ccdbf5974e0bf27fd6644a33b19cb0700297cf397bb0469e762c11c6c4105"}, - {file = "torchvision-0.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9106e32c9f1e70afa8172cf1b064cf9c2998d8dff0769ec69d537b20209ee43d"}, - {file = "torchvision-0.17.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:5966936c669a08870f6547cd0a90d08b157aeda03293f79e2adbb934687175ed"}, - {file = "torchvision-0.17.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:e74f5a26ef8190eab0c38b3f63914fea94e58e3b2f0e5466611c9f63bd91a80b"}, - {file = "torchvision-0.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:a2109c1a1dcf71e8940d43e91f78c4dd5bf0fcefb3a0a42244102752009f5862"}, - {file = "torchvision-0.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5d241d2a5fb4e608677fccf6f80b34a124446d324ee40c7814ce54bce888275b"}, - {file = "torchvision-0.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e0fe98d9d92c23d2262ff82f973242951b9357fb640f8888ac50848bd00f5b45"}, - {file = "torchvision-0.17.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:32dc5de86d2ade399e11087095674ca08a1649fb322cfe69336d28add467edcb"}, - {file = "torchvision-0.17.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:54902877410ffb5458ee52b6d0de4b25cf01496bee736d6825301a5f0398536e"}, - {file = "torchvision-0.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:cc22c1ed0f1aba3f98fd72b6f60021f57aec1d2f6af518522e8a0a83848de3a8"}, + {file = "torchvision-0.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dd61628a3d189c6852a12dc5ed4cd2eece66d2d67f35a866cb16f1dcb06c8c62"}, + {file = 
"torchvision-0.18.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:493c45f9937dad37aa1b64b14da17c7a589c72b91adc4837d431009cfe29bd53"}, + {file = "torchvision-0.18.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:5337f6acfa1fe959d5cb340d01a00614d6b31ce7a4824ccb95435a85c5273b95"}, + {file = "torchvision-0.18.0-cp310-cp310-win_amd64.whl", hash = "sha256:bd8e6f3b5beb49965f15c461302488edfa3d8c2d01d3bb79b150d6fb62711e3a"}, + {file = "torchvision-0.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6896a52168befe1105fb3c9335287390ed227e71d1e4ec4d68b62e8a3099fc09"}, + {file = "torchvision-0.18.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:3d7955398d4ceaad77c487c2c44f6f7813112402c9bab8cd906d346005891048"}, + {file = "torchvision-0.18.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:e5a24d620cea14a4bb89f24aa2b506230c0a16a3ada57fc53ad80cfd256a2128"}, + {file = "torchvision-0.18.0-cp311-cp311-win_amd64.whl", hash = "sha256:6ad70ddfa879bda5ed886b2518fe562640e0059787cbd65cb2bffa7674541410"}, + {file = "torchvision-0.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:eb9d83c0e1dbb54ecb0fb04c87f786333e3a6fb8b9c400aca7c31081f9aa5707"}, + {file = "torchvision-0.18.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:b657d052d146f24cb3b2a78219bfc82ae70a9706671c50f632528907d10cccec"}, + {file = "torchvision-0.18.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:a964afbc7ddf50a46b941477f6c35729b416deedd139756befd488245e2e226d"}, + {file = "torchvision-0.18.0-cp312-cp312-win_amd64.whl", hash = "sha256:7c770f0f748e0b17f57c0297508d7254f686cdf03fc2e2949f422b20574f4c0f"}, ] [[package]] @@ -2411,30 +2456,30 @@ files = [ [[package]] name = "transformers" -version = "4.39.0" +version = "4.41.2" requires_python = ">=3.8.0" summary = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" groups = ["default"] dependencies = [ "filelock", - "huggingface-hub<1.0,>=0.19.3", + "huggingface-hub<1.0,>=0.23.0", "numpy>=1.17", "packaging>=20.0", "pyyaml>=5.1", "regex!=2019.12.17", "requests", "safetensors>=0.4.1", - "tokenizers<0.19,>=0.14", + "tokenizers<0.20,>=0.19", "tqdm>=4.27", ] files = [ - {file = "transformers-4.39.0-py3-none-any.whl", hash = "sha256:7801785b1f016d667467e8c372c1c3653c18fe32ba97952059e3bea79ba22b08"}, - {file = "transformers-4.39.0.tar.gz", hash = "sha256:517a13cd633b10bea01c92ab0b3059762872c7c29da3d223db9d28e926fe330d"}, + {file = "transformers-4.41.2-py3-none-any.whl", hash = "sha256:05555d20e43f808de1ef211ab64803cdb513170cef70d29a888b589caebefc67"}, + {file = "transformers-4.41.2.tar.gz", hash = "sha256:80a4db216533d573e9cc7388646c31ed9480918feb7c55eb211249cb23567f87"}, ] [[package]] name = "triton" -version = "2.2.0" +version = "2.3.0" summary = "A language and compiler for custom Deep Learning operations" groups = ["all", "default", "vision"] marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\"" @@ -2442,9 +2487,9 @@ dependencies = [ "filelock", ] files = [ - {file = "triton-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2294514340cfe4e8f4f9e5c66c702744c4a117d25e618bd08469d0bfed1e2e5"}, - {file = "triton-2.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da58a152bddb62cafa9a857dd2bc1f886dbf9f9c90a2b5da82157cd2b34392b0"}, - {file = "triton-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af58716e721460a61886668b205963dc4d1e4ac20508cc3f623aef0d70283d5"}, + {file = 
"triton-2.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ce4b8ff70c48e47274c66f269cce8861cf1dc347ceeb7a67414ca151b1822d8"}, + {file = "triton-2.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c3d9607f85103afdb279938fc1dd2a66e4f5999a58eb48a346bd42738f986dd"}, + {file = "triton-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:218d742e67480d9581bafb73ed598416cc8a56f6316152e5562ee65e33de01c0"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index d60cf684..97c2dfb3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ build-backend = "pdm.backend" [project] name = "kaiko-eva" -version = "0.0.1" +version = "0.0.2" description = "Evaluation Framework for oncology foundation models." keywords = [ "machine-learning", @@ -34,8 +34,9 @@ maintainers = [ ] requires-python = ">=3.10" dependencies = [ + "torch==2.3.0", "lightning>=2.2.2", - "jsonargparse[omegaconf]>=4.27.4", + "jsonargparse[omegaconf]==4.28", "tensorboard>=2.16.2", "loguru>=0.7.2", "pandas>=2.2.0", diff --git a/src/eva/core/callbacks/__init__.py b/src/eva/core/callbacks/__init__.py index dc14697c..3b36a6db 100644 --- a/src/eva/core/callbacks/__init__.py +++ b/src/eva/core/callbacks/__init__.py @@ -1,5 +1,6 @@ """Callbacks API.""" -from eva.core.callbacks.writers import EmbeddingsWriter +from eva.core.callbacks.config import ConfigurationLogger +from eva.core.callbacks.writers import ClassificationEmbeddingsWriter -__all__ = ["EmbeddingsWriter"] +__all__ = ["ConfigurationLogger", "ClassificationEmbeddingsWriter"] diff --git a/src/eva/core/callbacks/config.py b/src/eva/core/callbacks/config.py new file mode 100644 index 00000000..79cae704 --- /dev/null +++ b/src/eva/core/callbacks/config.py @@ -0,0 +1,143 @@ +"""Configuration logger callback.""" + +import ast +import os +import sys +from types import BuiltinFunctionType +from typing import Any, Dict, List + +import lightning.pytorch as pl +import yaml +from lightning_fabric.utilities import cloud_io +from loguru import logger as cli_logger +from omegaconf import OmegaConf +from typing_extensions import TypeGuard, override + +from eva.core import loggers + + +class ConfigurationLogger(pl.Callback): + """Logs the submitted configuration to the experimental logger.""" + + _save_as: str = "config.yaml" + + def __init__(self, verbose: bool = True) -> None: + """Initializes the callback. + + Args: + verbose: Whether to print the configurations to print the + configuration to the terminal. + """ + super().__init__() + + self._verbose = verbose + + @override + def setup( + self, + trainer: pl.Trainer, + pl_module: pl.LightningModule, + stage: str | None = None, + ) -> None: + log_dir = trainer.log_dir + if not _logdir_exists(log_dir): + return + + configuration = _load_submitted_config() + + if self._verbose: + config_as_text = yaml.dump(configuration, sort_keys=False) + print(f"Configuration:\033[94m\n---\n{config_as_text}\033[0m") + + save_as = os.path.join(log_dir, self._save_as) + fs = cloud_io.get_filesystem(log_dir) + with fs.open(save_as, "w") as output_file: + yaml.dump(configuration, output_file, sort_keys=False) + + loggers.log_parameters(trainer.loggers, tag="configuration", parameters=configuration) + + +def _logdir_exists(logdir: str | None, verbose: bool = True) -> TypeGuard[str]: + """Checks if the trainer has a log directory. + + Args: + logdir: Trainer's logdir. + name: The name to log with. + verbose: Whether to log if it does not exist. 
+
+    Returns:
+        A bool indicating if the log directory exists or not.
+    """
+    exists = isinstance(logdir, str)
+    if not exists and verbose:
+        print("\n")
+        cli_logger.warning("Log directory is `None`. Configuration file will not be logged.\n")
+    return exists
+
+
+def _load_submitted_config() -> Dict[str, Any]:
+    """Retrieves and loads the submitted configuration.
+
+    Returns:
+        The merged configuration as a dictionary.
+    """
+    config_paths = _fetch_submitted_config_path()
+    return _load_yaml_files(config_paths)
+
+
+def _fetch_submitted_config_path() -> List[str]:
+    """Fetches the config paths from the command line arguments.
+
+    Returns:
+        The paths of the submitted configuration files.
+    """
+    return list(filter(lambda f: f.endswith(".yaml"), sys.argv))
+
+
+def _load_yaml_files(paths: List[str]) -> Dict[str, Any]:
+    """Loads yaml files from multiple paths and merges them.
+
+    Args:
+        paths: The paths to the yaml files.
+
+    Returns:
+        The merged configurations as a dictionary.
+    """
+    merged_config = {}
+    for config_path in paths:
+        fs = cloud_io.get_filesystem(config_path)
+        with fs.open(config_path, "r") as file:
+            omegaconf_file = OmegaConf.load(file)  # type: ignore
+            config_dict = OmegaConf.to_object(omegaconf_file)  # type: ignore
+            parsed_config = _type_resolver(config_dict)  # type: ignore
+            merged_config.update(parsed_config)
+    return merged_config
+
+
+def _type_resolver(mapping: Dict[str, Any]) -> Dict[str, Any]:
+    """Parses the string values of a dictionary in-place.
+
+    Args:
+        mapping: A dictionary object.
+
+    Returns:
+        The mapping with the formatted values.
+    """
+    for key, value in mapping.items():
+        if isinstance(value, dict):
+            formatted_value = _type_resolver(value)
+        elif isinstance(value, list) and value and isinstance(value[0], dict):  # guard empty lists
+            formatted_value = [_type_resolver(subvalue) for subvalue in value]
+        else:
+            try:
+                parsed_value = ast.literal_eval(value)  # type: ignore
+                formatted_value = (
+                    value if isinstance(parsed_value, BuiltinFunctionType) else parsed_value
+                )
+
+            except Exception:
+                formatted_value = value
+
+        mapping[key] = formatted_value
+
+    return mapping
diff --git a/src/eva/core/callbacks/writers/__init__.py b/src/eva/core/callbacks/writers/__init__.py
index a731f06f..8d907e66 100644
--- a/src/eva/core/callbacks/writers/__init__.py
+++ b/src/eva/core/callbacks/writers/__init__.py
@@ -1,5 +1,5 @@
 """Callbacks API."""
 
-from eva.core.callbacks.writers.embeddings import EmbeddingsWriter
+from eva.core.callbacks.writers.embeddings import ClassificationEmbeddingsWriter
 
-__all__ = ["EmbeddingsWriter"]
+__all__ = ["ClassificationEmbeddingsWriter"]
diff --git a/src/eva/core/callbacks/writers/embeddings.py b/src/eva/core/callbacks/writers/embeddings.py
deleted file mode 100644
index 4c8d4520..00000000
--- a/src/eva/core/callbacks/writers/embeddings.py
+++ /dev/null
@@ -1,269 +0,0 @@
-"""Embeddings writer."""
-
-import csv
-import io
-import os
-from typing import Any, Dict, List, Sequence, Tuple
-
-import lightning.pytorch as pl
-import torch
-from lightning.pytorch import callbacks
-from loguru import logger
-from torch import multiprocessing, nn
-from typing_extensions import override
-
-from eva.core.callbacks.writers.typings import ITEM_DICT_ENTRY, QUEUE_ITEM
-from eva.core.models.modules.typings import INPUT_BATCH
-from eva.core.utils import multiprocessing as eva_multiprocessing
-
-
-class EmbeddingsWriter(callbacks.BasePredictionWriter):
-    """Callback for writing generated embeddings to disk."""
-
-    def __init__(
-        self,
-        output_dir: str,
-        backbone: nn.Module | None = None,
-        
dataloader_idx_map: Dict[int, str] | None = None, - metadata_keys: List[str] | None = None, - overwrite: bool = True, - save_every_n: int = 100, - ) -> None: - """Initializes a new EmbeddingsWriter instance. - - This callback writes the embedding files in a separate process to avoid blocking the - main process where the model forward pass is executed. - - Args: - output_dir: The directory where the embeddings will be saved. - backbone: A model to be used as feature extractor. If `None`, - it will be expected that the input batch returns the features directly. - dataloader_idx_map: A dictionary mapping dataloader indices to their respective - names (e.g. train, val, test). - metadata_keys: An optional list of keys to extract from the batch metadata and store - as additional columns in the manifest file. - overwrite: Whether to overwrite the output directory. Defaults to True. - save_every_n: Interval for number of iterations to save the embeddings to disk. - During this interval, the embeddings are accumulated in memory. - """ - super().__init__(write_interval="batch") - - self._output_dir = output_dir - self._backbone = backbone - self._dataloader_idx_map = dataloader_idx_map or {} - self._overwrite = overwrite - self._save_every_n = save_every_n - self._metadata_keys = metadata_keys or [] - - self._write_queue: multiprocessing.Queue - self._write_process: eva_multiprocessing.Process - - @override - def on_predict_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: - os.makedirs(self._output_dir, exist_ok=self._overwrite) - self._initialize_write_process() - self._write_process.start() - - if self._backbone is not None: - self._backbone = self._backbone.to(pl_module.device) - self._backbone.eval() - - @override - def write_on_batch_end( - self, - trainer: pl.Trainer, - pl_module: pl.LightningModule, - prediction: Any, - batch_indices: Sequence[int], - batch: INPUT_BATCH, - batch_idx: int, - dataloader_idx: int, - ) -> None: - dataset = trainer.predict_dataloaders[dataloader_idx].dataset # type: ignore - _, targets, metadata = INPUT_BATCH(*batch) - split = self._dataloader_idx_map.get(dataloader_idx) - - embeddings = self._get_embeddings(prediction) - for local_idx, global_idx in enumerate(batch_indices[: len(embeddings)]): - input_name = dataset.filename(global_idx) - save_name = os.path.splitext(input_name)[0] + ".pt" - embeddings_buffer, target_buffer = io.BytesIO(), io.BytesIO() - torch.save(embeddings[local_idx].clone(), embeddings_buffer) - torch.save(targets[local_idx], target_buffer) # type: ignore - item_metadata = self._get_item_metadata(metadata, local_idx) - item = QUEUE_ITEM( - embeddings_buffer, target_buffer, input_name, save_name, split, item_metadata - ) - self._write_queue.put(item) - - self._write_process.check_exceptions() - - @override - def on_predict_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: - self._write_queue.put(None) - self._write_process.join() - logger.info(f"Predictions and manifest saved to {self._output_dir}") - - def _initialize_write_process(self) -> None: - self._write_queue = multiprocessing.Queue() - self._write_process = eva_multiprocessing.Process( - target=_process_write_queue, - args=( - self._write_queue, - self._output_dir, - self._metadata_keys, - self._save_every_n, - self._overwrite, - ), - ) - - def _get_embeddings(self, prediction: torch.Tensor) -> torch.Tensor: - """Returns the embeddings from predictions.""" - if self._backbone is None: - return prediction - - with torch.no_grad(): - return 
self._backbone(prediction) - - def _get_item_metadata( - self, metadata: Dict[str, Any] | None, local_idx: int - ) -> Dict[str, Any] | None: - """Returns the metadata for the item at the given local index.""" - if not metadata: - if self._metadata_keys: - raise ValueError("Metadata keys are provided but the batch metadata is empty.") - else: - return None - - item_metadata = {} - for key in self._metadata_keys: - if key not in metadata: - raise KeyError(f"Metadata key '{key}' not found in the batch metadata.") - item_metadata[key] = metadata[key][local_idx] - - return item_metadata - - -def _process_write_queue( - write_queue: multiprocessing.Queue, - output_dir: str, - metadata_keys: List[str], - save_every_n: int, - overwrite: bool = False, -) -> None: - """This function receives and processes items added by the main process to the queue.""" - manifest_file, manifest_writer = _init_manifest(output_dir, metadata_keys, overwrite) - - name_to_items: Dict[str, ITEM_DICT_ENTRY] = {} - - counter = 0 - while True: - item = write_queue.get() - if item is None: - break - - item = QUEUE_ITEM(*item) - - if item.save_name in name_to_items: - name_to_items[item.save_name].items.append(item) - else: - name_to_items[item.save_name] = ITEM_DICT_ENTRY(items=[item], save_count=0) - - if counter > 0 and counter % save_every_n == 0: - name_to_items = _save_items(name_to_items, metadata_keys, output_dir, manifest_writer) - - counter += 1 - - if len(name_to_items) > 0: - _save_items(name_to_items, metadata_keys, output_dir, manifest_writer) - - manifest_file.close() - - -def _save_items( - name_to_items: Dict[str, ITEM_DICT_ENTRY], - metadata_keys: List[str], - output_dir: str, - manifest_writer: Any, -) -> Dict[str, ITEM_DICT_ENTRY]: - """Saves predictions to disk and updates the manifest file. - - If multiple items share the same filename, the predictions are concatenated and saved - to the same file. Furthermore, the manifest file will only contain one entry for each - filename, which is why this function checks if it's the first time saving to a file. - - Args: - name_to_items: A dictionary mapping save names to the corresponding queue items - holding the prediction tensors and the information for the manifest file. - metadata_keys: A list of keys to extract from the batch metadata. These will be - stored as additional columns in the manifest file. - output_dir: The directory where the embedding tensors & manifest will be saved. - manifest_writer: The CSV writer for the writing to the manifest file. - """ - for save_name, entry in name_to_items.items(): - if len(entry.items) > 0: - save_path = os.path.join(output_dir, save_name) - is_first_save = entry.save_count == 0 - if is_first_save: - _, target, input_name, _, split, metadata = QUEUE_ITEM(*entry.items[0]) - metadata = [metadata[key] for key in metadata_keys] # type: ignore - _update_manifest(target, input_name, save_name, split, metadata, manifest_writer) - prediction_buffers = [item.prediction_buffer for item in entry.items] - _save_predictions(prediction_buffers, save_path, is_first_save) - name_to_items[save_name].save_count += 1 - name_to_items[save_name].items = [] - - return name_to_items - - -def _save_predictions( - prediction_buffers: List[io.BytesIO], save_path: str, is_first_save: bool -) -> None: - """Saves the embedding tensors as list to .pt files. - - If it's not the first save to this save_path, the new predictions are concatenated - with the existing ones and saved to the same file. 
- - Example Usecase: Save all patch embeddings corresponding to the same WSI to a single file. - """ - predictions = [ - torch.load(io.BytesIO(buffer.getbuffer()), map_location="cpu") - for buffer in prediction_buffers - ] - - if not is_first_save: - previous_predictions = torch.load(save_path, map_location="cpu") - if not isinstance(previous_predictions, list): - raise ValueError("Previous predictions should be a list of tensors.") - predictions = predictions + previous_predictions - - os.makedirs(os.path.dirname(save_path), exist_ok=True) - torch.save(predictions, save_path) - - -def _init_manifest( - output_dir: str, metadata_keys: List[str] | None, overwrite: bool = False -) -> Tuple[io.TextIOWrapper, Any]: - manifest_path = os.path.join(output_dir, "manifest.csv") - if os.path.exists(manifest_path) and not overwrite: - raise FileExistsError( - f"Manifest file already exists at {manifest_path}. This likely means that the " - "embeddings have been computed before. Consider using `eva fit` instead " - "of `eva predict_fit` or `eva predict`." - ) - manifest_file = open(manifest_path, "w", newline="") - manifest_writer = csv.writer(manifest_file) - manifest_writer.writerow(["origin", "embeddings", "target", "split"] + (metadata_keys or [])) - return manifest_file, manifest_writer - - -def _update_manifest( - target_buffer: io.BytesIO, - input_name: str, - save_name: str, - split: str | None, - metadata: List[str], - manifest_writer, -) -> None: - target = torch.load(io.BytesIO(target_buffer.getbuffer()), map_location="cpu") - manifest_writer.writerow([input_name, save_name, target.item(), split] + metadata) diff --git a/src/eva/core/callbacks/writers/embeddings/__init__.py b/src/eva/core/callbacks/writers/embeddings/__init__.py new file mode 100644 index 00000000..63cf7099 --- /dev/null +++ b/src/eva/core/callbacks/writers/embeddings/__init__.py @@ -0,0 +1,5 @@ +"""Embedding callback writers.""" + +from eva.core.callbacks.writers.embeddings.classification import ClassificationEmbeddingsWriter + +__all__ = ["ClassificationEmbeddingsWriter"] diff --git a/src/eva/core/callbacks/writers/embeddings/_manifest.py b/src/eva/core/callbacks/writers/embeddings/_manifest.py new file mode 100644 index 00000000..3b1a49f6 --- /dev/null +++ b/src/eva/core/callbacks/writers/embeddings/_manifest.py @@ -0,0 +1,68 @@ +"""Manifest file manager.""" + +import csv +import io +import os +from typing import Any, Dict, List + +import _csv + + +class ManifestManager: + """Class for writing the embedding manifest files.""" + + def __init__( + self, + output_dir: str, + metadata_keys: List[str] | None = None, + overwrite: bool = False, + ) -> None: + """Initializes the writing manager. + + Args: + output_dir: The directory where the embeddings will be saved. + metadata_keys: An optional list of keys to extract from the batch + metadata and store as additional columns in the manifest file. + overwrite: Whether to overwrite the output directory. + """ + self._output_dir = output_dir + self._metadata_keys = metadata_keys or [] + self._overwrite = overwrite + + self._manifest_file: io.TextIOWrapper + self._manifest_writer: _csv.Writer # type: ignore + + self._setup() + + def _setup(self) -> None: + """Initializes the manifest file and sets the file object and writer.""" + manifest_path = os.path.join(self._output_dir, "manifest.csv") + if os.path.exists(manifest_path) and not self._overwrite: + raise FileExistsError( + f"Manifest file already exists at {manifest_path}. 
This likely means that the " + "embeddings have been computed before. Consider using `eva fit` instead " + "of `eva predict_fit` or `eva predict`." + ) + + self._manifest_file = open(manifest_path, "w", newline="") + self._manifest_writer = csv.writer(self._manifest_file) + self._manifest_writer.writerow( + ["origin", "embeddings", "target", "split"] + self._metadata_keys + ) + + def update( + self, + input_name: str, + save_name: str, + target: str, + split: str | None, + metadata: Dict[str, Any] | None = None, + ) -> None: + """Adds a new entry to the manifest file.""" + metadata_entries = list(metadata.values()) if metadata else [] + self._manifest_writer.writerow([input_name, save_name, target, split] + metadata_entries) + + def close(self) -> None: + """Closes the manifest file.""" + if self._manifest_file: + self._manifest_file.close() diff --git a/src/eva/core/callbacks/writers/embeddings/base.py b/src/eva/core/callbacks/writers/embeddings/base.py new file mode 100644 index 00000000..92c12bcc --- /dev/null +++ b/src/eva/core/callbacks/writers/embeddings/base.py @@ -0,0 +1,172 @@ +"""Embeddings writer base class.""" + +import abc +import io +import os +from typing import Any, Dict, List, Sequence + +import lightning.pytorch as pl +import torch +from lightning.pytorch import callbacks +from loguru import logger +from torch import multiprocessing, nn +from typing_extensions import override + +from eva.core.callbacks.writers.embeddings.typings import QUEUE_ITEM +from eva.core.models.modules.typings import INPUT_BATCH +from eva.core.utils import multiprocessing as eva_multiprocessing + + +class EmbeddingsWriter(callbacks.BasePredictionWriter, abc.ABC): + """Callback for writing generated embeddings to disk.""" + + def __init__( + self, + output_dir: str, + backbone: nn.Module | None = None, + dataloader_idx_map: Dict[int, str] | None = None, + metadata_keys: List[str] | None = None, + overwrite: bool = True, + save_every_n: int = 100, + ) -> None: + """Initializes a new EmbeddingsWriter instance. + + This callback writes the embedding files in a separate process to avoid blocking the + main process where the model forward pass is executed. + + Args: + output_dir: The directory where the embeddings will be saved. + backbone: A model to be used as feature extractor. If `None`, + it will be expected that the input batch returns the features directly. + dataloader_idx_map: A dictionary mapping dataloader indices to their respective + names (e.g. train, val, test). + metadata_keys: An optional list of keys to extract from the batch metadata and store + as additional columns in the manifest file. + overwrite: Whether to overwrite the output directory. + save_every_n: Interval for number of iterations to save the embeddings to disk. + During this interval, the embeddings are accumulated in memory. + """ + super().__init__(write_interval="batch") + + self._output_dir = output_dir + self._backbone = backbone + self._dataloader_idx_map = dataloader_idx_map or {} + self._overwrite = overwrite + self._save_every_n = save_every_n + self._metadata_keys = metadata_keys or [] + + self._write_queue: multiprocessing.Queue + self._write_process: eva_multiprocessing.Process + + @staticmethod + @abc.abstractmethod + def _process_write_queue( + write_queue: multiprocessing.Queue, + output_dir: str, + metadata_keys: List[str], + save_every_n: int, + overwrite: bool = False, + ) -> None: + """This function receives and processes items added by the main process to the queue. 
+
+        Queue items contain the embedding tensors, targets and metadata which need to be
+        saved to disk (.pt files and manifest).
+        """
+
+    @override
+    def on_predict_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
+        os.makedirs(self._output_dir, exist_ok=self._overwrite)
+        self._initialize_write_process()
+        self._write_process.start()
+
+        if self._backbone is not None:
+            self._backbone = self._backbone.to(pl_module.device)
+            self._backbone.eval()
+
+    @override
+    def write_on_batch_end(
+        self,
+        trainer: pl.Trainer,
+        pl_module: pl.LightningModule,
+        prediction: Any,
+        batch_indices: Sequence[int],
+        batch: INPUT_BATCH,
+        batch_idx: int,
+        dataloader_idx: int,
+    ) -> None:
+        dataset = trainer.predict_dataloaders[dataloader_idx].dataset  # type: ignore
+        _, targets, metadata = INPUT_BATCH(*batch)
+        split = self._dataloader_idx_map.get(dataloader_idx)
+        if not isinstance(targets, torch.Tensor):
+            raise ValueError(f"Targets ({type(targets)}) should be `torch.Tensor`.")
+
+        embeddings = self._get_embeddings(prediction)
+
+        for local_idx, global_idx in enumerate(batch_indices[: len(embeddings)]):
+            data_name = dataset.filename(global_idx)
+            save_name = os.path.splitext(data_name)[0] + ".pt"
+            embeddings_buffer, target_buffer = _as_io_buffers(
+                embeddings[local_idx], targets[local_idx]
+            )
+            item_metadata = self._get_item_metadata(metadata, local_idx)
+            item = QUEUE_ITEM(
+                prediction_buffer=embeddings_buffer,
+                target_buffer=target_buffer,
+                data_name=data_name,
+                save_name=save_name,
+                split=split,
+                metadata=item_metadata,
+            )
+            self._write_queue.put(item)
+
+        self._write_process.check_exceptions()
+
+    @override
+    def on_predict_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
+        self._write_queue.put(None)
+        self._write_process.join()
+        logger.info(f"Predictions and manifest saved to {self._output_dir}")
+
+    def _initialize_write_process(self) -> None:
+        self._write_queue = multiprocessing.Queue()
+        self._write_process = eva_multiprocessing.Process(
+            target=self._process_write_queue,
+            args=(
+                self._write_queue,
+                self._output_dir,
+                self._metadata_keys,
+                self._save_every_n,
+                self._overwrite,
+            ),
+        )
+
+    @torch.no_grad()
+    def _get_embeddings(self, tensor: torch.Tensor) -> torch.Tensor:
+        """Returns the embeddings from predictions."""
+        return self._backbone(tensor) if self._backbone else tensor
+
+    def _get_item_metadata(
+        self, metadata: Dict[str, Any] | None, local_idx: int
+    ) -> Dict[str, Any] | None:
+        """Returns the metadata for the item at the given local index."""
+        if not metadata:
+            if self._metadata_keys:
+                raise ValueError("Metadata keys are provided but the batch metadata is empty.")
+            else:
+                return None
+
+        item_metadata = {}
+        for key in self._metadata_keys:
+            if key not in metadata:
+                raise KeyError(f"Metadata key '{key}' not found in the batch metadata.")
+            item_metadata[key] = metadata[key][local_idx]
+
+        return item_metadata
+
+
+def _as_io_buffers(*items: torch.Tensor) -> Sequence[io.BytesIO]:
+    """Serializes torch tensors into IO buffers."""
+    buffers = [io.BytesIO() for _ in range(len(items))]
+    for tensor, buffer in zip(items, buffers, strict=False):
+        torch.save(tensor.clone(), buffer)
+    return buffers
diff --git a/src/eva/core/callbacks/writers/embeddings/classification.py b/src/eva/core/callbacks/writers/embeddings/classification.py
new file mode 100644
index 00000000..1a3b3cb7
--- /dev/null
+++ b/src/eva/core/callbacks/writers/embeddings/classification.py
@@ -0,0 +1,112 @@
+"""Embeddings writer for classification."""
+
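# Editor's note: the writer above hands serialized tensors from the main
# process to a background process through a multiprocessing queue, with a
# `None` sentinel signalling shutdown. The following minimal, self-contained
# sketch shows that producer/consumer pattern in isolation; the names
# `_drain_queue` and "example.pt" are illustrative assumptions, not part of
# eva's API.
import io
import multiprocessing

import torch


def _drain_queue(queue: multiprocessing.Queue, output_dir: str) -> None:
    # Consume items until the `None` sentinel arrives, writing each
    # received tensor to the .pt file named by the producer.
    while True:
        item = queue.get()
        if item is None:
            break
        save_name, buffer = item
        tensor = torch.load(io.BytesIO(buffer.getbuffer()), map_location="cpu")
        torch.save(tensor, f"{output_dir}/{save_name}")


if __name__ == "__main__":
    queue: multiprocessing.Queue = multiprocessing.Queue()
    writer = multiprocessing.Process(target=_drain_queue, args=(queue, "."))
    writer.start()

    buffer = io.BytesIO()
    torch.save(torch.randn(8), buffer)  # serialize an "embedding" in the main process
    queue.put(("example.pt", buffer))

    queue.put(None)  # sentinel: tell the consumer to finish
    writer.join()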
+import io
+import os
+from typing import Dict, List
+
+import torch
+from torch import multiprocessing
+from typing_extensions import override
+
+from eva.core.callbacks.writers.embeddings import base
+from eva.core.callbacks.writers.embeddings._manifest import ManifestManager
+from eva.core.callbacks.writers.embeddings.typings import ITEM_DICT_ENTRY, QUEUE_ITEM
+
+
+class ClassificationEmbeddingsWriter(base.EmbeddingsWriter):
+    """Callback for writing generated embeddings to disk for classification tasks."""
+
+    @staticmethod
+    @override
+    def _process_write_queue(
+        write_queue: multiprocessing.Queue,
+        output_dir: str,
+        metadata_keys: List[str],
+        save_every_n: int,
+        overwrite: bool = False,
+    ) -> None:
+        """Processes the write queue and saves the predictions to disk.
+
+        Note that in Multiple Instance Learning (MIL) scenarios, we can have multiple
+        embeddings per input data point. In that case, this function will save all
+        embeddings that correspond to the same data point as a list of tensors to
+        the same .pt file.
+        """
+        manifest_manager = ManifestManager(output_dir, metadata_keys, overwrite)
+        name_to_items: Dict[str, ITEM_DICT_ENTRY] = {}
+
+        counter = 0
+        while True:
+            item = write_queue.get()
+            if item is None:
+                break
+            item = QUEUE_ITEM(*item)
+
+            if item.save_name in name_to_items:
+                name_to_items[item.save_name].items.append(item)
+            else:
+                name_to_items[item.save_name] = ITEM_DICT_ENTRY(items=[item], save_count=0)
+
+            if counter > 0 and counter % save_every_n == 0:
+                name_to_items = _save_items(name_to_items, output_dir, manifest_manager)
+            counter += 1
+
+        if len(name_to_items) > 0:
+            _save_items(name_to_items, output_dir, manifest_manager)
+
+        manifest_manager.close()
+
+
+def _save_items(
+    name_to_items: Dict[str, ITEM_DICT_ENTRY],
+    output_dir: str,
+    manifest_manager: ManifestManager,
+) -> Dict[str, ITEM_DICT_ENTRY]:
+    """Saves predictions to disk and updates the manifest file.
+
+    Args:
+        name_to_items: A dictionary mapping data point save names to the corresponding queue
+            items holding the prediction tensors and the information for the manifest file.
+        output_dir: The directory where the embedding tensors & manifest will be saved.
+        manifest_manager: The manifest manager instance to update the manifest file.
+    """
+    for save_name, entry in name_to_items.items():
+        if len(entry.items) > 0:
+            save_path = os.path.join(output_dir, save_name)
+            is_first_save = entry.save_count == 0
+            if is_first_save:
+                _, target, input_name, _, split, metadata = QUEUE_ITEM(*entry.items[0])
+                target = torch.load(io.BytesIO(target.getbuffer()), map_location="cpu").item()
+                manifest_manager.update(input_name, save_name, target, split, metadata)
+
+            prediction_buffers = [item.prediction_buffer for item in entry.items]
+            _save_predictions(prediction_buffers, save_path, is_first_save)
+            name_to_items[save_name].save_count += 1
+            name_to_items[save_name].items = []
+
+    return name_to_items
+
+
+def _save_predictions(
+    prediction_buffers: List[io.BytesIO], save_path: str, is_first_save: bool
+) -> None:
+    """Saves the embedding tensors as list to .pt files.
+
+    If it's not the first save to this save_path, the new predictions are appended to
+    the existing ones and saved to the same file.
+
+    Example use-case: Save all patch embeddings corresponding to the same WSI to a single file.
+    """
+    predictions = [
+        torch.load(io.BytesIO(buffer.getbuffer()), map_location="cpu")
+        for buffer in prediction_buffers
+    ]
+
+    if not is_first_save:
+        previous_predictions = torch.load(save_path, map_location="cpu")
+        if not isinstance(previous_predictions, list):
+            raise ValueError("Previous predictions should be a list of tensors.")
+        predictions = predictions + previous_predictions
+
+    os.makedirs(os.path.dirname(save_path), exist_ok=True)
+    torch.save(predictions, save_path)
diff --git a/src/eva/core/callbacks/writers/typings.py b/src/eva/core/callbacks/writers/embeddings/typings.py
similarity index 89%
rename from src/eva/core/callbacks/writers/typings.py
rename to src/eva/core/callbacks/writers/embeddings/typings.py
index 9e061de8..377a57e4 100644
--- a/src/eva/core/callbacks/writers/typings.py
+++ b/src/eva/core/callbacks/writers/embeddings/typings.py
@@ -14,8 +14,8 @@ class QUEUE_ITEM(NamedTuple):
     target_buffer: io.BytesIO
     """IO buffer containing the target tensor."""
 
-    input_name: str
-    """Name of the original input file that was used to generate the embedding."""
+    data_name: str
+    """Name of the input data that was used to generate the embedding."""
 
     save_name: str
     """Name to store the generated embedding."""
diff --git a/src/eva/core/data/datasets/__init__.py b/src/eva/core/data/datasets/__init__.py
index b9826b5b..ba4da0cf 100644
--- a/src/eva/core/data/datasets/__init__.py
+++ b/src/eva/core/data/datasets/__init__.py
@@ -1,11 +1,11 @@
 """Datasets API."""
 
 from eva.core.data.datasets.base import Dataset
-from eva.core.data.datasets.dataset import TorchDataset
-from eva.core.data.datasets.embeddings import (
+from eva.core.data.datasets.classification import (
     EmbeddingsClassificationDataset,
     MultiEmbeddingsClassificationDataset,
 )
+from eva.core.data.datasets.dataset import TorchDataset
 
 __all__ = [
     "Dataset",
diff --git a/src/eva/core/data/datasets/classification/__init__.py b/src/eva/core/data/datasets/classification/__init__.py
new file mode 100644
index 00000000..442bbe19
--- /dev/null
+++ b/src/eva/core/data/datasets/classification/__init__.py
@@ -0,0 +1,8 @@
+"""Embedding classification datasets API."""
+
+from eva.core.data.datasets.classification.embeddings import EmbeddingsClassificationDataset
+from eva.core.data.datasets.classification.multi_embeddings import (
+    MultiEmbeddingsClassificationDataset,
+)
+
+__all__ = ["EmbeddingsClassificationDataset", "MultiEmbeddingsClassificationDataset"]
diff --git a/src/eva/core/data/datasets/embeddings/classification/embeddings.py b/src/eva/core/data/datasets/classification/embeddings.py
similarity index 87%
rename from src/eva/core/data/datasets/embeddings/classification/embeddings.py
rename to src/eva/core/data/datasets/classification/embeddings.py
index 8904394d..7b4cce07 100644
--- a/src/eva/core/data/datasets/embeddings/classification/embeddings.py
+++ b/src/eva/core/data/datasets/classification/embeddings.py
@@ -3,14 +3,13 @@
 import os
 from typing import Callable, Dict, Literal
 
-import numpy as np
 import torch
 from typing_extensions import override
 
-from eva.core.data.datasets.embeddings import base
+from eva.core.data.datasets import embeddings as embeddings_base
 
 
-class EmbeddingsClassificationDataset(base.EmbeddingsDataset):
+class EmbeddingsClassificationDataset(embeddings_base.EmbeddingsDataset[torch.Tensor]):
     """Embeddings dataset class for classification tasks."""
 
     def __init__(
@@ -18,7 +17,7 @@ def __init__(
         root: str,
         manifest_file: str,
         split: Literal["train", "val", "test"] | None = None,
-        column_mapping: 
Dict[str, str] = base.default_column_mapping, + column_mapping: Dict[str, str] = embeddings_base.default_column_mapping, embeddings_transforms: Callable | None = None, target_transforms: Callable | None = None, ) -> None: @@ -63,9 +62,9 @@ def _load_embeddings(self, index: int) -> torch.Tensor: return tensor.squeeze(0) @override - def _load_target(self, index: int) -> np.ndarray: + def _load_target(self, index: int) -> torch.Tensor: target = self._data.at[index, self._column_mapping["target"]] - return np.asarray(target, dtype=np.int64) + return torch.tensor(target, dtype=torch.int64) @override def __len__(self) -> int: diff --git a/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py b/src/eva/core/data/datasets/classification/multi_embeddings.py similarity index 94% rename from src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py rename to src/eva/core/data/datasets/classification/multi_embeddings.py index 4cb031da..130c17b7 100644 --- a/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py +++ b/src/eva/core/data/datasets/classification/multi_embeddings.py @@ -7,10 +7,10 @@ import torch from typing_extensions import override -from eva.core.data.datasets.embeddings import base +from eva.core.data.datasets import embeddings as embeddings_base -class MultiEmbeddingsClassificationDataset(base.EmbeddingsDataset): +class MultiEmbeddingsClassificationDataset(embeddings_base.EmbeddingsDataset[torch.Tensor]): """Dataset class for where a sample corresponds to multiple embeddings. Example use case: Slide level dataset where each slide has multiple patch embeddings. @@ -21,7 +21,7 @@ def __init__( root: str, manifest_file: str, split: Literal["train", "val", "test"], - column_mapping: Dict[str, str] = base.default_column_mapping, + column_mapping: Dict[str, str] = embeddings_base.default_column_mapping, embeddings_transforms: Callable | None = None, target_transforms: Callable | None = None, ): diff --git a/src/eva/core/data/datasets/embeddings/base.py b/src/eva/core/data/datasets/embeddings.py similarity index 91% rename from src/eva/core/data/datasets/embeddings/base.py rename to src/eva/core/data/datasets/embeddings.py index 37b78138..81b22ad1 100644 --- a/src/eva/core/data/datasets/embeddings/base.py +++ b/src/eva/core/data/datasets/embeddings.py @@ -2,9 +2,8 @@ import abc import os -from typing import Callable, Dict, Literal, Tuple +from typing import Callable, Dict, Generic, Literal, Tuple, TypeVar -import numpy as np import pandas as pd import torch from typing_extensions import override @@ -12,6 +11,10 @@ from eva.core.data.datasets import base from eva.core.utils import io +TargetType = TypeVar("TargetType") +"""The target data type.""" + + default_column_mapping: Dict[str, str] = { "path": "embeddings", "target": "target", @@ -21,7 +24,7 @@ """The default column mapping of the variables to the manifest columns.""" -class EmbeddingsDataset(base.Dataset): +class EmbeddingsDataset(base.Dataset, Generic[TargetType]): """Abstract base class for embedding datasets.""" def __init__( @@ -62,32 +65,6 @@ def __init__( self._data: pd.DataFrame - @abc.abstractmethod - def _load_embeddings(self, index: int) -> torch.Tensor: - """Returns the `index`'th embedding sample. - - Args: - index: The index of the data sample to load. - - Returns: - The embedding sample as a tensor. - """ - - @abc.abstractmethod - def _load_target(self, index: int) -> np.ndarray: - """Returns the `index`'th target sample. 
- - Args: - index: The index of the data sample to load. - - Returns: - The sample target as an array. - """ - - @abc.abstractmethod - def __len__(self) -> int: - """Returns the total length of the data.""" - def filename(self, index: int) -> str: """Returns the filename of the `index`'th data sample. @@ -105,7 +82,11 @@ def filename(self, index: int) -> str: def setup(self): self._data = self._load_manifest() - def __getitem__(self, index) -> Tuple[torch.Tensor, np.ndarray]: + @abc.abstractmethod + def __len__(self) -> int: + """Returns the total length of the data.""" + + def __getitem__(self, index) -> Tuple[torch.Tensor, TargetType]: """Returns the `index`'th data sample. Args: @@ -118,6 +99,28 @@ def __getitem__(self, index) -> Tuple[torch.Tensor, np.ndarray]: target = self._load_target(index) return self._apply_transforms(embeddings, target) + @abc.abstractmethod + def _load_embeddings(self, index: int) -> torch.Tensor: + """Returns the `index`'th embedding sample. + + Args: + index: The index of the data sample to load. + + Returns: + The embedding sample as a tensor. + """ + + @abc.abstractmethod + def _load_target(self, index: int) -> TargetType: + """Returns the `index`'th target sample. + + Args: + index: The index of the data sample to load. + + Returns: + The sample target as an array. + """ + def _load_manifest(self) -> pd.DataFrame: """Loads manifest file and filters the data based on the split column. @@ -132,8 +135,8 @@ def _load_manifest(self) -> pd.DataFrame: return data def _apply_transforms( - self, embeddings: torch.Tensor, target: np.ndarray - ) -> Tuple[torch.Tensor, np.ndarray]: + self, embeddings: torch.Tensor, target: TargetType + ) -> Tuple[torch.Tensor, TargetType]: """Applies the transforms to the provided data and returns them. 
         Args:
diff --git a/src/eva/core/data/datasets/embeddings/__init__.py b/src/eva/core/data/datasets/embeddings/__init__.py
deleted file mode 100644
index efd0eae4..00000000
--- a/src/eva/core/data/datasets/embeddings/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-"""Datasets API."""
-
-from eva.core.data.datasets.embeddings.base import EmbeddingsDataset
-from eva.core.data.datasets.embeddings.classification import (
-    EmbeddingsClassificationDataset,
-    MultiEmbeddingsClassificationDataset,
-)
-
-__all__ = [
-    "EmbeddingsDataset",
-    "EmbeddingsClassificationDataset",
-    "MultiEmbeddingsClassificationDataset",
-]
diff --git a/src/eva/core/data/datasets/embeddings/classification/__init__.py b/src/eva/core/data/datasets/embeddings/classification/__init__.py
deleted file mode 100644
index 3f5d8ee7..00000000
--- a/src/eva/core/data/datasets/embeddings/classification/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""Embedding cllassification datasets API."""
-
-from eva.core.data.datasets.embeddings.classification.embeddings import (
-    EmbeddingsClassificationDataset,
-)
-from eva.core.data.datasets.embeddings.classification.multi_embeddings import (
-    MultiEmbeddingsClassificationDataset,
-)
-
-__all__ = ["EmbeddingsClassificationDataset", "MultiEmbeddingsClassificationDataset"]
diff --git a/src/eva/core/loggers/__init__.py b/src/eva/core/loggers/__init__.py
index f5ccfaa2..61ecbc70 100644
--- a/src/eva/core/loggers/__init__.py
+++ b/src/eva/core/loggers/__init__.py
@@ -1,5 +1,7 @@
-"""Loggers API."""
+"""Experimental loggers API."""
 
 from eva.core.loggers.dummy import DummyLogger
+from eva.core.loggers.experimental_loggers import ExperimentalLoggers
+from eva.core.loggers.log import log_parameters
 
-__all__ = ["DummyLogger"]
+__all__ = ["DummyLogger", "ExperimentalLoggers", "log_parameters"]
diff --git a/src/eva/core/loggers/experimental_loggers.py b/src/eva/core/loggers/experimental_loggers.py
new file mode 100644
index 00000000..53b24138
--- /dev/null
+++ b/src/eva/core/loggers/experimental_loggers.py
@@ -0,0 +1,8 @@
+"""Experiment loggers."""
+
+from typing import Union
+
+from lightning.pytorch.loggers import CSVLogger, TensorBoardLogger
+
+ExperimentalLoggers = Union[CSVLogger, TensorBoardLogger]
+"""Supported loggers."""
diff --git a/src/eva/core/loggers/log/__init__.py b/src/eva/core/loggers/log/__init__.py
new file mode 100644
index 00000000..a6b65238
--- /dev/null
+++ b/src/eva/core/loggers/log/__init__.py
@@ -0,0 +1,5 @@
+"""Experiment loggers actions."""
+
+from eva.core.loggers.log.parameters import log_parameters
+
+__all__ = ["log_parameters"]
diff --git a/src/eva/core/loggers/log/parameters.py b/src/eva/core/loggers/log/parameters.py
new file mode 100644
index 00000000..53bd2c97
--- /dev/null
+++ b/src/eva/core/loggers/log/parameters.py
@@ -0,0 +1,64 @@
+"""Text log functionality."""
+
+import functools
+from typing import Any, Dict
+
+import yaml
+
+from eva.core.loggers import experimental_loggers as loggers_lib
+from eva.core.loggers.log import utils
+
+
+@functools.singledispatch
+def log_parameters(
+    logger,
+    tag: str,
+    parameters: Dict[str, Any],
+) -> None:
+    """Adds parameters to the logger.
+
+    Args:
+        logger: The desired logger.
+        tag: The log tag.
+        parameters: The parameters to log.
+ """ + utils.raise_not_supported(logger, "parameters") + + +@log_parameters.register +def _( + loggers: list, + tag: str, + parameters: Dict[str, Any], +) -> None: + """Adds parameters to a list of supported loggers.""" + for logger in loggers: + log_parameters(logger, tag=tag, parameters=parameters) + + +@log_parameters.register +def _( + logger: loggers_lib.TensorBoardLogger, + tag: str, + parameters: Dict[str, Any], +) -> None: + """Adds parameters to a TensorBoard logger.""" + as_markdown_text = _yaml_to_markdown(parameters) + logger.experiment.add_text( + tag=tag, + text_string=as_markdown_text, + global_step=0, + ) + + +def _yaml_to_markdown(data: Dict[str, Any]) -> str: + """Casts yaml data to markdown. + + Args: + data: The yaml data. + + Returns: + A string markdown friendly formatted. + """ + text = yaml.dump(data, sort_keys=False) + return f"```yaml\n{text}```" diff --git a/src/eva/core/loggers/log/utils.py b/src/eva/core/loggers/log/utils.py new file mode 100644 index 00000000..47ff3895 --- /dev/null +++ b/src/eva/core/loggers/log/utils.py @@ -0,0 +1,13 @@ +"""Logging related utilities.""" + +from loguru import logger as cli_logger + +from eva.core.loggers import ExperimentalLoggers + + +def raise_not_supported(logger: ExperimentalLoggers, data_type: str) -> None: + """Raises a warning for not supported tasks from the given logger.""" + print("\n") + cli_logger.debug( + f"Logger '{logger.__class__.__name__}' is not supported for " f"'{data_type}' data." + ) diff --git a/src/eva/core/models/modules/module.py b/src/eva/core/models/modules/module.py index cb5e222a..d1e2ab64 100644 --- a/src/eva/core/models/modules/module.py +++ b/src/eva/core/models/modules/module.py @@ -4,6 +4,7 @@ import lightning.pytorch as pl import torch +from lightning.pytorch.strategies.single_device import SingleDeviceStrategy from lightning.pytorch.utilities import memory from lightning.pytorch.utilities.types import STEP_OUTPUT from typing_extensions import override @@ -46,6 +47,21 @@ def default_postprocess(self) -> batch_postprocess.BatchPostProcess: """The default post-processes.""" return batch_postprocess.BatchPostProcess() + @property + def metrics_device(self) -> torch.device: + """Returns the device by which the metrics should be calculated. + + We allocate the metrics to CPU when operating on single device, as + it is much faster, but to GPU when employing multiple ones, as DDP + strategy requires the metrics to be allocated to the module's GPU. + """ + move_to_cpu = isinstance(self.trainer.strategy, SingleDeviceStrategy) + return torch.device("cpu") if move_to_cpu else self.device + + @override + def on_fit_start(self) -> None: + self.metrics.to(device=self.metrics_device) + @override def on_train_batch_end( self, @@ -59,6 +75,10 @@ def on_train_batch_end( batch_outputs=outputs, ) + @override + def on_validation_start(self) -> None: + self.metrics.to(device=self.metrics_device) + @override def on_validation_batch_end( self, @@ -78,6 +98,10 @@ def on_validation_batch_end( def on_validation_epoch_end(self) -> None: self._compute_and_log_metrics(self.metrics.validation_metrics) + @override + def on_test_start(self) -> None: + self.metrics.to(device=self.metrics_device) + @override def on_test_batch_end( self, @@ -110,7 +134,7 @@ def _common_batch_end(self, outputs: STEP_OUTPUT) -> STEP_OUTPUT: The updated outputs. 
""" self._postprocess(outputs) - return memory.recursive_detach(outputs, to_cpu=self.device.type == "cpu") + return memory.recursive_detach(outputs, to_cpu=self.metrics_device.type == "cpu") def _forward_and_log_metrics( self, diff --git a/src/eva/core/models/modules/typings.py b/src/eva/core/models/modules/typings.py index fa476bd1..e9c56675 100644 --- a/src/eva/core/models/modules/typings.py +++ b/src/eva/core/models/modules/typings.py @@ -16,7 +16,7 @@ class INPUT_BATCH(NamedTuple): data: torch.Tensor """The data batch.""" - targets: torch.Tensor | Dict[str, Any] | None = None + targets: torch.Tensor | None = None """The target batch.""" metadata: Dict[str, Any] | None = None diff --git a/src/eva/vision/data/datasets/__init__.py b/src/eva/vision/data/datasets/__init__.py index 864d5a4a..7d05c16e 100644 --- a/src/eva/vision/data/datasets/__init__.py +++ b/src/eva/vision/data/datasets/__init__.py @@ -7,7 +7,6 @@ PANDA, Camelyon16, PatchCamelyon, - TotalSegmentatorClassification, WsiClassificationDataset, ) from eva.vision.data.datasets.segmentation import ImageSegmentation, TotalSegmentator2D @@ -22,7 +21,6 @@ "PatchCamelyon", "PANDA", "Camelyon16", - "TotalSegmentatorClassification", "TotalSegmentator2D", "VisionDataset", "WsiDataset", diff --git a/src/eva/vision/data/datasets/_utils.py b/src/eva/vision/data/datasets/_utils.py index 2d2fe30b..1a17d7e9 100644 --- a/src/eva/vision/data/datasets/_utils.py +++ b/src/eva/vision/data/datasets/_utils.py @@ -1,6 +1,6 @@ """Dataset related function and helper functions.""" -from typing import List, Tuple +from typing import List, Sequence, Tuple def indices_to_ranges(indices: List[int]) -> List[Tuple[int, int]]: @@ -33,11 +33,11 @@ def indices_to_ranges(indices: List[int]) -> List[Tuple[int, int]]: return ranges -def ranges_to_indices(ranges: List[Tuple[int, int]]) -> List[int]: +def ranges_to_indices(ranges: Sequence[Tuple[int, int]]) -> List[int]: """Unpacks a list of ranges to individual indices. Args: - ranges: The list of ranges to produce the indices from. + ranges: A sequence of ranges to produce the indices from. Return: A list of the indices. 
diff --git a/src/eva/vision/data/datasets/classification/__init__.py b/src/eva/vision/data/datasets/classification/__init__.py index 0b86ee5c..c9daabbe 100644 --- a/src/eva/vision/data/datasets/classification/__init__.py +++ b/src/eva/vision/data/datasets/classification/__init__.py @@ -6,7 +6,6 @@ from eva.vision.data.datasets.classification.mhist import MHIST from eva.vision.data.datasets.classification.panda import PANDA from eva.vision.data.datasets.classification.patch_camelyon import PatchCamelyon -from eva.vision.data.datasets.classification.total_segmentator import TotalSegmentatorClassification from eva.vision.data.datasets.classification.wsi import WsiClassificationDataset __all__ = [ @@ -14,7 +13,6 @@ "CRC", "MHIST", "PatchCamelyon", - "TotalSegmentatorClassification", "WsiClassificationDataset", "PANDA", "Camelyon16", diff --git a/src/eva/vision/data/datasets/classification/bach.py b/src/eva/vision/data/datasets/classification/bach.py index 935ab609..b8009701 100644 --- a/src/eva/vision/data/datasets/classification/bach.py +++ b/src/eva/vision/data/datasets/classification/bach.py @@ -3,7 +3,8 @@ import os from typing import Callable, Dict, List, Literal, Tuple -import numpy as np +import torch +from torchvision import tv_tensors from torchvision.datasets import folder, utils from typing_extensions import override @@ -52,8 +53,7 @@ def __init__( root: str, split: Literal["train", "val"] | None = None, download: bool = False, - image_transforms: Callable | None = None, - target_transforms: Callable | None = None, + transforms: Callable | None = None, ) -> None: """Initialize the dataset. @@ -68,15 +68,10 @@ def __init__( Note that the download will be executed only by additionally calling the :meth:`prepare_data` method and if the data does not yet exist on disk. - image_transforms: A function/transform that takes in an image - and returns a transformed version. - target_transforms: A function/transform that takes in the target - and transforms it. + transforms: A function/transform which returns a transformed + version of the raw data samples. 
""" - super().__init__( - image_transforms=image_transforms, - target_transforms=target_transforms, - ) + super().__init__(transforms=transforms) self._root = root self._split = split @@ -130,14 +125,14 @@ def validate(self) -> None: ) @override - def load_image(self, index: int) -> np.ndarray: + def load_image(self, index: int) -> tv_tensors.Image: image_path, _ = self._samples[self._indices[index]] - return io.read_image(image_path) + return io.read_image_as_tensor(image_path) @override - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: _, target = self._samples[self._indices[index]] - return np.asarray(target, dtype=np.int64) + return torch.tensor(target, dtype=torch.long) @override def __len__(self) -> int: diff --git a/src/eva/vision/data/datasets/classification/base.py b/src/eva/vision/data/datasets/classification/base.py index 56f95082..1127f6db 100644 --- a/src/eva/vision/data/datasets/classification/base.py +++ b/src/eva/vision/data/datasets/classification/base.py @@ -3,32 +3,29 @@ import abc from typing import Any, Callable, Dict, List, Tuple -import numpy as np +import torch +from torchvision import tv_tensors from typing_extensions import override from eva.vision.data.datasets import vision -class ImageClassification(vision.VisionDataset[Tuple[np.ndarray, np.ndarray]], abc.ABC): +class ImageClassification(vision.VisionDataset[Tuple[tv_tensors.Image, torch.Tensor]], abc.ABC): """Image classification abstract dataset.""" def __init__( self, - image_transforms: Callable | None = None, - target_transforms: Callable | None = None, + transforms: Callable | None = None, ) -> None: """Initializes the image classification dataset. Args: - image_transforms: A function/transform that takes in an image - and returns a transformed version. - target_transforms: A function/transform that takes in the target - and transforms it. + transforms: A function/transform which returns a transformed + version of the raw data samples. """ super().__init__() - self._image_transforms = image_transforms - self._target_transforms = target_transforms + self._transforms = transforms @property def classes(self) -> List[str] | None: @@ -49,7 +46,7 @@ def load_metadata(self, index: int) -> Dict[str, Any] | None: """ @abc.abstractmethod - def load_image(self, index: int) -> np.ndarray: + def load_image(self, index: int) -> tv_tensors.Image: """Returns the `index`'th image sample. Args: @@ -60,7 +57,7 @@ def load_image(self, index: int) -> np.ndarray: """ @abc.abstractmethod - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: """Returns the `index`'th target sample. Args: @@ -76,15 +73,15 @@ def __len__(self) -> int: raise NotImplementedError @override - def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]: + def __getitem__(self, index: int) -> Tuple[tv_tensors.Image, torch.Tensor, Dict[str, Any]]: image = self.load_image(index) target = self.load_target(index) image, target = self._apply_transforms(image, target) return image, target, self.load_metadata(index) or {} def _apply_transforms( - self, image: np.ndarray, target: np.ndarray - ) -> Tuple[np.ndarray, np.ndarray]: + self, image: tv_tensors.Image, target: torch.Tensor + ) -> Tuple[tv_tensors.Image, torch.Tensor]: """Applies the transforms to the provided data and returns them. Args: @@ -94,10 +91,6 @@ def _apply_transforms( Returns: A tuple with the image and the target transformed. 
""" - if self._image_transforms is not None: - image = self._image_transforms(image) - - if self._target_transforms is not None: - target = self._target_transforms(target) - + if self._transforms is not None: + image, target = self._transforms(image, target) return image, target diff --git a/src/eva/vision/data/datasets/classification/camelyon16.py b/src/eva/vision/data/datasets/classification/camelyon16.py index e0072906..10846440 100644 --- a/src/eva/vision/data/datasets/classification/camelyon16.py +++ b/src/eva/vision/data/datasets/classification/camelyon16.py @@ -5,9 +5,10 @@ import os from typing import Any, Callable, Dict, List, Literal, Tuple -import numpy as np import pandas as pd import torch +from torchvision import tv_tensors +from torchvision.transforms.v2 import functional from typing_extensions import override from eva.vision.data.datasets import _validators, wsi @@ -193,18 +194,19 @@ def filename(self, index: int) -> str: return os.path.basename(self._file_paths[self._get_dataset_idx(index)]) @override - def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]: + def __getitem__(self, index: int) -> Tuple[tv_tensors.Image, torch.Tensor, Dict[str, Any]]: return base.ImageClassification.__getitem__(self, index) @override - def load_image(self, index: int) -> torch.Tensor: - return wsi.MultiWsiDataset.__getitem__(self, index) + def load_image(self, index: int) -> tv_tensors.Image: + image_array = wsi.MultiWsiDataset.__getitem__(self, index) + return functional.to_image(image_array) @override - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: file_path = self._file_paths[self._get_dataset_idx(index)] class_name = self.annotations[self._get_id_from_path(file_path)] - return np.asarray(self.class_to_idx[class_name], dtype=np.int64) + return torch.tensor(self.class_to_idx[class_name], dtype=torch.int64) @override def load_metadata(self, index: int) -> Dict[str, Any]: diff --git a/src/eva/vision/data/datasets/classification/crc.py b/src/eva/vision/data/datasets/classification/crc.py index 5c661d45..618aa208 100644 --- a/src/eva/vision/data/datasets/classification/crc.py +++ b/src/eva/vision/data/datasets/classification/crc.py @@ -3,7 +3,8 @@ import os from typing import Callable, Dict, List, Literal, Tuple -import numpy as np +import torch +from torchvision import tv_tensors from torchvision.datasets import folder, utils from typing_extensions import override @@ -37,8 +38,7 @@ def __init__( root: str, split: Literal["train", "val"], download: bool = False, - image_transforms: Callable | None = None, - target_transforms: Callable | None = None, + transforms: Callable | None = None, ) -> None: """Initializes the dataset. @@ -56,15 +56,10 @@ def __init__( Note that the download will be executed only by additionally calling the :meth:`prepare_data` method and if the data does not yet exist on disk. - image_transforms: A function/transform that takes in an image - and returns a transformed version. - target_transforms: A function/transform that takes in the target - and transforms it. + transforms: A function/transform which returns a transformed + version of the raw data samples. 
""" - super().__init__( - image_transforms=image_transforms, - target_transforms=target_transforms, - ) + super().__init__(transforms=transforms) self._root = root self._split = split @@ -122,14 +117,14 @@ def validate(self) -> None: ) @override - def load_image(self, index: int) -> np.ndarray: + def load_image(self, index: int) -> tv_tensors.Image: image_path, _ = self._samples[index] - return io.read_image(image_path) + return io.read_image_as_tensor(image_path) @override - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: _, target = self._samples[index] - return np.asarray(target, dtype=np.int64) + return torch.tensor(target, dtype=torch.long) @override def __len__(self) -> int: diff --git a/src/eva/vision/data/datasets/classification/mhist.py b/src/eva/vision/data/datasets/classification/mhist.py index 75297183..7453e75c 100644 --- a/src/eva/vision/data/datasets/classification/mhist.py +++ b/src/eva/vision/data/datasets/classification/mhist.py @@ -3,7 +3,8 @@ import os from typing import Callable, Dict, List, Literal, Tuple -import numpy as np +import torch +from torchvision import tv_tensors from typing_extensions import override from eva.vision.data.datasets import _validators @@ -18,23 +19,17 @@ def __init__( self, root: str, split: Literal["train", "test"], - image_transforms: Callable | None = None, - target_transforms: Callable | None = None, + transforms: Callable | None = None, ) -> None: """Initialize the dataset. Args: root: Path to the root directory of the dataset. split: Dataset split to use. - image_transforms: A function/transform that takes in an image - and returns a transformed version. - target_transforms: A function/transform that takes in the target - and transforms it. + transforms: A function/transform which returns a transformed + version of the raw data samples. 
""" - super().__init__( - image_transforms=image_transforms, - target_transforms=target_transforms, - ) + super().__init__(transforms=transforms) self._root = root self._split = split @@ -74,16 +69,16 @@ def validate(self) -> None: ) @override - def load_image(self, index: int) -> np.ndarray: + def load_image(self, index: int) -> tv_tensors.Image: image_filename, _ = self._samples[index] image_path = os.path.join(self._dataset_path, image_filename) - return io.read_image(image_path) + return io.read_image_as_tensor(image_path) @override - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: _, label = self._samples[index] target = self.class_to_idx[label] - return np.asarray(target, dtype=np.int64) + return torch.tensor(target, dtype=torch.float32) @override def __len__(self) -> int: diff --git a/src/eva/vision/data/datasets/classification/panda.py b/src/eva/vision/data/datasets/classification/panda.py index 000099e0..b8d2f49c 100644 --- a/src/eva/vision/data/datasets/classification/panda.py +++ b/src/eva/vision/data/datasets/classification/panda.py @@ -5,10 +5,11 @@ import os from typing import Any, Callable, Dict, List, Literal, Tuple -import numpy as np import pandas as pd import torch +from torchvision import tv_tensors from torchvision.datasets import utils +from torchvision.transforms.v2 import functional from typing_extensions import override from eva.core.data import splitting @@ -120,17 +121,18 @@ def filename(self, index: int) -> str: return os.path.basename(self._file_paths[self._get_dataset_idx(index)]) @override - def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]: + def __getitem__(self, index: int) -> Tuple[tv_tensors.Image, torch.Tensor, Dict[str, Any]]: return base.ImageClassification.__getitem__(self, index) @override - def load_image(self, index: int) -> torch.Tensor: - return wsi.MultiWsiDataset.__getitem__(self, index) + def load_image(self, index: int) -> tv_tensors.Image: + image_array = wsi.MultiWsiDataset.__getitem__(self, index) + return functional.to_image(image_array) @override - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: file_path = self._file_paths[self._get_dataset_idx(index)] - return np.asarray(self._get_target_from_path(file_path)) + return torch.tensor(self._get_target_from_path(file_path), dtype=torch.int64) @override def load_metadata(self, index: int) -> Dict[str, Any]: diff --git a/src/eva/vision/data/datasets/classification/patch_camelyon.py b/src/eva/vision/data/datasets/classification/patch_camelyon.py index e9eaa5f5..5891bc41 100644 --- a/src/eva/vision/data/datasets/classification/patch_camelyon.py +++ b/src/eva/vision/data/datasets/classification/patch_camelyon.py @@ -4,8 +4,10 @@ from typing import Callable, Dict, List, Literal import h5py -import numpy as np +import torch +from torchvision import tv_tensors from torchvision.datasets import utils +from torchvision.transforms.v2 import functional from typing_extensions import override from eva.vision.data.datasets import _validators, structs @@ -70,8 +72,7 @@ def __init__( root: str, split: Literal["train", "val", "test"], download: bool = False, - image_transforms: Callable | None = None, - target_transforms: Callable | None = None, + transforms: Callable | None = None, ) -> None: """Initializes the dataset. @@ -82,15 +83,10 @@ def __init__( download: Whether to download the data for the specified split. 
Note that the download will be executed only by additionally calling the :meth:`prepare_data` method. - image_transforms: A function/transform that takes in an image - and returns a transformed version. - target_transforms: A function/transform that takes in the target - and transforms it. + transforms: A function/transform which returns a transformed + version of the raw data samples. """ - super().__init__( - image_transforms=image_transforms, - target_transforms=target_transforms, - ) + super().__init__(transforms=transforms) self._root = root self._split = split @@ -131,13 +127,13 @@ def validate(self) -> None: ) @override - def load_image(self, index: int) -> np.ndarray: + def load_image(self, index: int) -> tv_tensors.Image: return self._load_from_h5("x", index) @override - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: target = self._load_from_h5("y", index).squeeze() - return np.asarray(target, dtype=np.int64) + return torch.tensor(target, dtype=torch.float32) @override def __len__(self) -> int: @@ -162,7 +158,7 @@ def _load_from_h5( self, data_key: Literal["x", "y"], index: int | None = None, - ) -> np.ndarray: + ) -> tv_tensors.Image: """Load data or targets from an HDF5 file. Args: @@ -176,7 +172,8 @@ def _load_from_h5( h5_file = self._h5_file(data_key) with h5py.File(h5_file, "r") as file: data = file[data_key] - return data[:] if index is None else data[index] # type: ignore + image_array = data[:] if index is None else data[index] # type: ignore + return functional.to_image(image_array) # type: ignore def _fetch_dataset_length(self) -> int: """Fetches the dataset split length from its HDF5 file.""" diff --git a/src/eva/vision/data/datasets/classification/total_segmentator.py b/src/eva/vision/data/datasets/classification/total_segmentator.py deleted file mode 100644 index c7c0c88d..00000000 --- a/src/eva/vision/data/datasets/classification/total_segmentator.py +++ /dev/null @@ -1,213 +0,0 @@ -"""TotalSegmentator 2D segmentation dataset class.""" - -import functools -import os -from glob import glob -from typing import Callable, Dict, List, Literal, Tuple - -import numpy as np -from torchvision.datasets import utils -from typing_extensions import override - -from eva.vision.data.datasets import _utils, _validators, structs -from eva.vision.data.datasets.classification import base -from eva.vision.utils import io - - -class TotalSegmentatorClassification(base.ImageClassification): - """TotalSegmentator multi-label classification dataset.""" - - _train_index_ranges: List[Tuple[int, int]] = [(0, 83)] - """Train range indices.""" - - _val_index_ranges: List[Tuple[int, int]] = [(83, 103)] - """Validation range indices.""" - - _n_slices_per_image: int = 20 - """The amount of slices to sample per 3D CT scan image.""" - - _resources_full: List[structs.DownloadResource] = [ - structs.DownloadResource( - filename="Totalsegmentator_dataset_v201.zip", - url="https://zenodo.org/records/10047292/files/Totalsegmentator_dataset_v201.zip", - md5="fe250e5718e0a3b5df4c4ea9d58a62fe", - ), - ] - """Resources for the full dataset version.""" - - _resources_small: List[structs.DownloadResource] = [ - structs.DownloadResource( - filename="Totalsegmentator_dataset_small_v201.zip", - url="https://zenodo.org/records/10047263/files/Totalsegmentator_dataset_small_v201.zip", - md5="6b5524af4b15e6ba06ef2d700c0c73e0", - ), - ] - """Resources for the small dataset version.""" - - def __init__( - self, - root: str, - split: Literal["train", "val"] | None, - 
version: Literal["small", "full"] = "small", - download: bool = False, - image_transforms: Callable | None = None, - target_transforms: Callable | None = None, - ) -> None: - """Initialize dataset. - - Args: - root: Path to the root directory of the dataset. The dataset will - be downloaded and extracted here, if it does not already exist. - split: Dataset split to use. If None, the entire dataset is used. - version: The version of the dataset to initialize. - download: Whether to download the data for the specified split. - Note that the download will be executed only by additionally - calling the :meth:`prepare_data` method and if the data does not - exist yet on disk. - image_transforms: A function/transform that takes in an image - and returns a transformed version. - target_transforms: A function/transform that takes in the target - and transforms it. - """ - super().__init__( - image_transforms=image_transforms, - target_transforms=target_transforms, - ) - - self._root = root - self._split = split - self._version = version - self._download = download - - self._samples_dirs: List[str] = [] - self._indices: List[int] = [] - - @functools.cached_property - @override - def classes(self) -> List[str]: - def get_filename(path: str) -> str: - """Returns the filename from the full path.""" - return os.path.basename(path).split(".")[0] - - first_sample_labels = os.path.join( - self._root, self._samples_dirs[0], "segmentations", "*.nii.gz" - ) - return sorted(map(get_filename, glob(first_sample_labels))) - - @property - @override - def class_to_idx(self) -> Dict[str, int]: - return {label: index for index, label in enumerate(self.classes)} - - @override - def filename(self, index: int) -> str: - sample_dir = self._samples_dirs[self._indices[index]] - return os.path.join(sample_dir, "ct.nii.gz") - - @override - def prepare_data(self) -> None: - if self._download: - self._download_dataset() - _validators.check_dataset_exists(self._root, True) - - @override - def configure(self) -> None: - self._samples_dirs = self._fetch_samples_dirs() - self._indices = self._create_indices() - - @override - def validate(self) -> None: - _validators.check_dataset_integrity( - self, - length=1660 if self._split == "train" else 400, - n_classes=117, - first_and_last_labels=("adrenal_gland_left", "vertebrae_T9"), - ) - - @override - def __len__(self) -> int: - return len(self._indices) * self._n_slices_per_image - - @override - def load_image(self, index: int) -> np.ndarray: - image_path = self._get_image_path(index) - slice_index = self._get_sample_slice_index(index) - image_array = io.read_nifti_slice(image_path, slice_index) - return image_array.repeat(3, axis=2) - - @override - def load_target(self, index: int) -> np.ndarray: - masks = self._load_masks(index) - targets = [1 in masks[..., mask_index] for mask_index in range(masks.shape[-1])] - return np.asarray(targets, dtype=np.int64) - - def _load_masks(self, index: int) -> np.ndarray: - """Returns the `index`'th target mask sample.""" - masks_dir = self._get_masks_dir(index) - slice_index = self._get_sample_slice_index(index) - mask_paths = (os.path.join(masks_dir, label + ".nii.gz") for label in self.classes) - masks = [io.read_nifti_slice(path, slice_index) for path in mask_paths] - return np.concatenate(masks, axis=-1) - - def _get_masks_dir(self, index: int) -> str: - """Returns the directory of the corresponding masks.""" - sample_dir = self._get_sample_dir(index) - return os.path.join(self._root, sample_dir, "segmentations") - - def _get_image_path(self, 
index: int) -> str: - """Returns the corresponding image path.""" - sample_dir = self._get_sample_dir(index) - return os.path.join(self._root, sample_dir, "ct.nii.gz") - - def _get_sample_dir(self, index: int) -> str: - """Returns the corresponding sample directory.""" - sample_index = self._indices[index // self._n_slices_per_image] - return self._samples_dirs[sample_index] - - def _get_sample_slice_index(self, index: int) -> int: - """Returns the corresponding slice index.""" - image_path = self._get_image_path(index) - total_slices = io.fetch_total_nifti_slices(image_path) - slice_indices = np.linspace(0, total_slices - 1, num=self._n_slices_per_image, dtype=int) - return slice_indices[index % self._n_slices_per_image] - - def _fetch_samples_dirs(self) -> List[str]: - """Returns the name of all the samples of all the splits of the dataset.""" - sample_filenames = [ - filename - for filename in os.listdir(self._root) - if os.path.isdir(os.path.join(self._root, filename)) - ] - return sorted(sample_filenames) - - def _create_indices(self) -> List[int]: - """Builds the dataset indices for the specified split.""" - split_index_ranges = { - "train": self._train_index_ranges, - "val": self._val_index_ranges, - None: [(0, 103)], - } - index_ranges = split_index_ranges.get(self._split) - if index_ranges is None: - raise ValueError("Invalid data split. Use 'train', 'val' or `None`.") - - return _utils.ranges_to_indices(index_ranges) - - def _download_dataset(self) -> None: - """Downloads the dataset.""" - dataset_resources = { - "small": self._resources_small, - "full": self._resources_full, - None: (0, 103), - } - resources = dataset_resources.get(self._version) - if resources is None: - raise ValueError("Invalid data version. Use 'small' or 'full'.") - - for resource in resources: - utils.download_and_extract_archive( - resource.url, - download_root=self._root, - filename=resource.filename, - remove_finished=True, - ) diff --git a/src/eva/vision/data/datasets/classification/wsi.py b/src/eva/vision/data/datasets/classification/wsi.py index d34cde8b..3889be1e 100644 --- a/src/eva/vision/data/datasets/classification/wsi.py +++ b/src/eva/vision/data/datasets/classification/wsi.py @@ -5,6 +5,8 @@ import numpy as np import pandas as pd +import torch +from torchvision import tv_tensors from typing_extensions import override from eva.vision.data.datasets import wsi @@ -72,7 +74,7 @@ def filename(self, index: int) -> str: return os.path.basename(path) if os.path.isabs(path) else path @override - def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]: + def __getitem__(self, index: int) -> Tuple[tv_tensors.Image, torch.Tensor, Dict[str, Any]]: return base.ImageClassification.__getitem__(self, index) @override diff --git a/src/eva/vision/data/datasets/segmentation/total_segmentator.py b/src/eva/vision/data/datasets/segmentation/total_segmentator.py index 4892e6b6..92bb8992 100644 --- a/src/eva/vision/data/datasets/segmentation/total_segmentator.py +++ b/src/eva/vision/data/datasets/segmentation/total_segmentator.py @@ -18,14 +18,14 @@ class TotalSegmentator2D(base.ImageSegmentation): """TotalSegmentator 2D segmentation dataset.""" - _train_index_ranges: List[Tuple[int, int]] = [(0, 83)] - """Train range indices.""" + _expected_dataset_lengths: Dict[str, int] = { + "train_small": 29892, + "val_small": 6480, + } + """Dataset version and split to the expected size.""" - _val_index_ranges: List[Tuple[int, int]] = [(83, 103)] - """Validation range indices.""" - - 
_n_slices_per_image: int = 20
-    """The amount of slices to sample per 3D CT scan image."""
+    _sample_every_n_slices: int | None = None
+    """If set, only every n-th slice per 3D CT scan image is sampled."""
 
     _resources_full: List[structs.DownloadResource] = [
         structs.DownloadResource(
@@ -49,7 +49,7 @@ def __init__(
         self,
         root: str,
         split: Literal["train", "val"] | None,
-        version: Literal["small", "full"] = "small",
+        version: Literal["small", "full"] | None = "small",
         download: bool = False,
         as_uint8: bool = True,
         transforms: Callable | None = None,
@@ -60,7 +60,8 @@ def __init__(
             root: Path to the root directory of the dataset. The dataset will
                 be downloaded and extracted here, if it does not already exist.
             split: Dataset split to use. If `None`, the entire dataset is used.
-            version: The version of the dataset to initialize.
+            version: The version of the dataset to initialize. If `None`, it will
+                use the files located at root as is and won't perform any checks.
             download: Whether to download the data for the specified split.
                 Note that the download will be executed only by additionally
                 calling the :meth:`prepare_data` method and if the data does not
@@ -78,7 +79,7 @@ def __init__(
         self._as_uint8 = as_uint8
 
         self._samples_dirs: List[str] = []
-        self._indices: List[int] = []
+        self._indices: List[Tuple[int, int]] = []
 
     @functools.cached_property
     @override
@@ -99,7 +100,8 @@ def class_to_idx(self) -> Dict[str, int]:
 
     @override
     def filename(self, index: int) -> str:
-        sample_dir = self._samples_dirs[self._indices[index]]
+        sample_idx, _ = self._indices[index]
+        sample_dir = self._samples_dirs[sample_idx]
         return os.path.join(sample_dir, "ct.nii.gz")
 
     @override
@@ -114,21 +116,24 @@ def configure(self) -> None:
 
     @override
     def validate(self) -> None:
+        if self._version is None:
+            return
+
         _validators.check_dataset_integrity(
             self,
-            length=1660 if self._split == "train" else 400,
+            length=self._expected_dataset_lengths.get(f"{self._split}_{self._version}", 0),
             n_classes=117,
             first_and_last_labels=("adrenal_gland_left", "vertebrae_T9"),
         )
 
     @override
     def __len__(self) -> int:
-        return len(self._indices) * self._n_slices_per_image
+        return len(self._indices)
 
     @override
     def load_image(self, index: int) -> tv_tensors.Image:
-        image_path = self._get_image_path(index)
-        slice_index = self._get_sample_slice_index(index)
+        sample_index, slice_index = self._indices[index]
+        image_path = self._get_image_path(sample_index)
         image_array = io.read_nifti_slice(image_path, slice_index)
         if self._as_uint8:
             image_array = convert.to_8bit(image_array)
@@ -137,8 +142,8 @@ def load_image(self, index: int) -> tv_tensors.Image:
 
     @override
     def load_mask(self, index: int) -> tv_tensors.Mask:
-        masks_dir = self._get_masks_dir(index)
-        slice_index = self._get_sample_slice_index(index)
+        sample_index, slice_index = self._indices[index]
+        masks_dir = self._get_masks_dir(sample_index)
         mask_paths = (os.path.join(masks_dir, label + ".nii.gz") for label in self.classes)
         one_hot_encoded = np.concatenate(
             [io.read_nifti_slice(path, slice_index) for path in mask_paths],
@@ -149,27 +154,20 @@ def load_mask(self, index: int) -> tv_tensors.Mask:
         segmentation_label = np.argmax(one_hot_encoded_with_bg, axis=2)
         return tv_tensors.Mask(segmentation_label)
 
-    def _get_masks_dir(self, index: int) -> str:
-        """Returns the directory of the corresponding masks."""
-        sample_dir = self._get_sample_dir(index)
-        return os.path.join(self._root, sample_dir, "segmentations")
-
-    def _get_image_path(self, index: int) -> str:
+    def _get_image_path(self, sample_index: int) -> str:
         """Returns the corresponding image path."""
-        sample_dir = self._get_sample_dir(index)
+        sample_dir = self._samples_dirs[sample_index]
         return os.path.join(self._root, sample_dir, "ct.nii.gz")
 
-    def _get_sample_dir(self, index: int) -> str:
-        """Returns the corresponding sample directory."""
-        sample_index = self._indices[index // self._n_slices_per_image]
-        return self._samples_dirs[sample_index]
+    def _get_masks_dir(self, sample_index: int) -> str:
+        """Returns the directory of the corresponding masks."""
+        sample_dir = self._samples_dirs[sample_index]
+        return os.path.join(self._root, sample_dir, "segmentations")
 
-    def _get_sample_slice_index(self, index: int) -> int:
-        """Returns the corresponding slice index."""
-        image_path = self._get_image_path(index)
-        total_slices = io.fetch_total_nifti_slices(image_path)
-        slice_indices = np.linspace(0, total_slices - 1, num=self._n_slices_per_image, dtype=int)
-        return slice_indices[index % self._n_slices_per_image]
+    def _get_number_of_slices_per_sample(self, sample_index: int) -> int:
+        """Returns the total number of slices of a sample."""
+        image_path = self._get_image_path(sample_index)
+        return io.fetch_total_nifti_slices(image_path)
 
     def _fetch_samples_dirs(self) -> List[str]:
         """Returns the name of all the samples of all the splits of the dataset."""
@@ -180,29 +178,46 @@ def _fetch_samples_dirs(self) -> List[str]:
         ]
         return sorted(sample_filenames)
 
-    def _create_indices(self) -> List[int]:
-        """Builds the dataset indices for the specified split."""
-        split_index_ranges = {
-            "train": self._train_index_ranges,
-            "val": self._val_index_ranges,
-            None: [(0, 103)],
-        }
-        index_ranges = split_index_ranges.get(self._split)
-        if index_ranges is None:
-            raise ValueError("Invalid data split. Use 'train', 'val' or `None`.")
+    def _get_split_indices(self) -> List[int]:
+        """Returns the sample indices corresponding to the dataset split and version."""
+        key = f"{self._split}_{self._version}"
+        match key:
+            case "train_small":
+                index_ranges = [(0, 83)]
+            case "val_small":
+                index_ranges = [(83, 102)]
+            case _:
+                index_ranges = [(0, len(self._samples_dirs))]
 
         return _utils.ranges_to_indices(index_ranges)
 
+    def _create_indices(self) -> List[Tuple[int, int]]:
+        """Builds the dataset indices for the specified split.
+
+        Returns:
+            A list of tuples, where the first value indicates the
+            sample index and the second its corresponding slice
+            index.
+        """
+        indices = [
+            (sample_idx, slice_idx)
+            for sample_idx in self._get_split_indices()
+            for slice_idx in range(self._get_number_of_slices_per_sample(sample_idx))
+            if slice_idx % (self._sample_every_n_slices or 1) == 0
+        ]
+        return indices
+
     def _download_dataset(self) -> None:
        """Downloads the dataset."""
         dataset_resources = {
             "small": self._resources_small,
             "full": self._resources_full,
-            None: (0, 103),
         }
-        resources = dataset_resources.get(self._version)
+        resources = dataset_resources.get(self._version or "")
         if resources is None:
-            raise ValueError("Invalid data version. Use 'small' or 'full'.")
+            raise ValueError(
+                f"Can't download data version '{self._version}'. Use 'small' or 'full'."
+ ) for resource in resources: if os.path.isdir(self._root): diff --git a/src/eva/vision/data/datasets/wsi.py b/src/eva/vision/data/datasets/wsi.py index 07bd5542..3557bfc5 100644 --- a/src/eva/vision/data/datasets/wsi.py +++ b/src/eva/vision/data/datasets/wsi.py @@ -4,9 +4,10 @@ import os from typing import Callable, List -import numpy as np from loguru import logger from torch.utils.data import dataset as torch_datasets +from torchvision import tv_tensors +from torchvision.transforms.v2 import functional from typing_extensions import override from eva.core.data.datasets import base @@ -71,14 +72,15 @@ def _coords(self) -> wsi.PatchCoordinates: ) @override - def __getitem__(self, index: int) -> np.ndarray: + def __getitem__(self, index: int) -> tv_tensors.Image: x, y = self._coords.x_y[index] width, height, level_idx = self._coords.width, self._coords.height, self._coords.level_idx patch = self._wsi.read_region((x, y), level_idx, (width, height)) + patch = functional.to_image(patch) patch = self._apply_transforms(patch) return patch - def _apply_transforms(self, image: np.ndarray) -> np.ndarray: + def _apply_transforms(self, image: tv_tensors.Image) -> tv_tensors.Image: if self._image_transforms is not None: image = self._image_transforms(image) return image diff --git a/src/eva/vision/data/transforms/common/resize_and_crop.py b/src/eva/vision/data/transforms/common/resize_and_crop.py index f1956a66..46b5aa67 100644 --- a/src/eva/vision/data/transforms/common/resize_and_crop.py +++ b/src/eva/vision/data/transforms/common/resize_and_crop.py @@ -3,10 +3,10 @@ from typing import Callable, Sequence import torch -import torchvision.transforms.v2 as torch_transforms +from torchvision.transforms import v2 -class ResizeAndCrop(torch_transforms.Compose): +class ResizeAndCrop(v2.Compose): """Resizes, crops and normalizes an input image while preserving its aspect ratio.""" def __init__( @@ -32,11 +32,10 @@ def __init__( def _build_transforms(self) -> Sequence[Callable]: """Builds and returns the list of transforms.""" transforms = [ - torch_transforms.ToImage(), - torch_transforms.Resize(size=self._size), - torch_transforms.CenterCrop(size=self._size), - torch_transforms.ToDtype(torch.float32, scale=True), - torch_transforms.Normalize( + v2.Resize(size=self._size), + v2.CenterCrop(size=self._size), + v2.ToDtype(torch.float32, scale=True), + v2.Normalize( mean=self._mean, std=self._std, ), diff --git a/src/eva/vision/utils/io/__init__.py b/src/eva/vision/utils/io/__init__.py index 85d669b1..8fe1177b 100644 --- a/src/eva/vision/utils/io/__init__.py +++ b/src/eva/vision/utils/io/__init__.py @@ -1,11 +1,12 @@ """Vision I/O utilities.""" -from eva.vision.utils.io.image import read_image +from eva.vision.utils.io.image import read_image, read_image_as_tensor from eva.vision.utils.io.nifti import fetch_total_nifti_slices, read_nifti_slice from eva.vision.utils.io.text import read_csv __all__ = [ "read_image", + "read_image_as_tensor", "fetch_total_nifti_slices", "read_nifti_slice", "read_csv", diff --git a/src/eva/vision/utils/io/image.py b/src/eva/vision/utils/io/image.py index b20137d1..13f62187 100644 --- a/src/eva/vision/utils/io/image.py +++ b/src/eva/vision/utils/io/image.py @@ -3,6 +3,8 @@ import cv2 import numpy as np import numpy.typing as npt +from torchvision import tv_tensors +from torchvision.transforms.v2 import functional from eva.vision.utils.io import _utils @@ -14,7 +16,7 @@ def read_image(path: str) -> npt.NDArray[np.uint8]: path: The path of the image file. 
Returns: - The RGB image as a numpy array. + The RGB image as a numpy array (HxWxC). Raises: FileExistsError: If the path does not exist or it is unreachable. @@ -23,6 +25,23 @@ def read_image(path: str) -> npt.NDArray[np.uint8]: return read_image_as_array(path, cv2.IMREAD_COLOR) +def read_image_as_tensor(path: str) -> tv_tensors.Image: + """Reads and loads the image from a file path as a RGB torch tensor. + + Args: + path: The path of the image file. + + Returns: + The RGB image as a torch tensor (CxHxW). + + Raises: + FileExistsError: If the path does not exist or it is unreachable. + IOError: If the image could not be loaded. + """ + image_array = read_image(path) + return functional.to_image(image_array) + + def read_image_as_array(path: str, flags: int = cv2.IMREAD_UNCHANGED) -> npt.NDArray[np.uint8]: """Reads and loads an image file as a numpy array. @@ -51,4 +70,4 @@ def read_image_as_array(path: str, flags: int = cv2.IMREAD_UNCHANGED) -> npt.NDA if image.ndim == 2 and flags == cv2.IMREAD_COLOR: image = image[:, :, np.newaxis] - return np.asarray(image).astype(np.uint8) + return np.asarray(image, dtype=np.uint8) diff --git a/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/semantic_labels/masks.nii.gz b/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/semantic_labels/masks.nii.gz new file mode 100644 index 0000000000000000000000000000000000000000..05b28f407391fec935fe6d1f2ac66f82753a0bbf GIT binary patch literal 79 zcmb2|=3oE;mjB&}DGFQ$#vBZa#>~nhi?WiE9x!m5+%PtHa9Dbd;-?k6*!zyR9M76y ie}Qph5)V)1t+p4l+~nhi?WiE9x!m5+%PtHa9Dbd;-?k6*!zyR9M76y ie}Qph5)V)1t+p4l+~nhi?WiE9x!m5+%PtHa9Dbd;-?k6*!zyR9M76y ie}Qph5)V)1t+p4l+ None: ) def test_sample(bach_dataset: datasets.BACH, index: int) -> None: """Tests the format of a dataset sample.""" - sample = bach_dataset[index] # assert data sample is a tuple + sample = bach_dataset[index] assert isinstance(sample, tuple) assert len(sample) == 3 # assert the format of the `image` and `target` image, target, _ = sample - assert isinstance(image, np.ndarray) - assert image.shape == (16, 16, 3) - assert isinstance(target, np.ndarray) + assert isinstance(image, tv_tensors.Image) + assert image.shape == (3, 16, 16) + assert isinstance(target, torch.Tensor) assert target in [0, 1, 2, 3] diff --git a/tests/eva/vision/data/datasets/classification/test_camelyon16.py b/tests/eva/vision/data/datasets/classification/test_camelyon16.py index 26125254..e198dc87 100644 --- a/tests/eva/vision/data/datasets/classification/test_camelyon16.py +++ b/tests/eva/vision/data/datasets/classification/test_camelyon16.py @@ -3,10 +3,10 @@ import os from typing import Any, Literal -import numpy as np import pytest import torch import torchvision.transforms.v2 as torch_transforms +from torchvision import tv_tensors from eva.vision.data import datasets from eva.vision.data import transforms as eva_transforms @@ -62,12 +62,10 @@ def _check_batch_shape(batch: Any): assert len(batch) == 3 image, target, metadata = batch - assert isinstance(image, torch.Tensor) + assert isinstance(image, tv_tensors.Image) assert image.shape == (3, TARGET_SIZE, TARGET_SIZE) - assert isinstance(target, np.ndarray) - assert target.size == 1 - + assert isinstance(target, torch.Tensor) assert isinstance(metadata, dict) assert "wsi_id" in metadata diff --git a/tests/eva/vision/data/datasets/classification/test_crc.py b/tests/eva/vision/data/datasets/classification/test_crc.py index 1fb276bd..c3f5ba09 100644 --- 
a/tests/eva/vision/data/datasets/classification/test_crc.py +++ b/tests/eva/vision/data/datasets/classification/test_crc.py @@ -3,8 +3,9 @@ import os from typing import Literal -import numpy as np import pytest +import torch +from torchvision import tv_tensors from eva.vision.data import datasets @@ -26,9 +27,9 @@ def test_sample(crc_dataset: datasets.CRC, index: int) -> None: assert len(sample) == 3 # assert the format of the `image` and `target` image, target, _ = sample - assert isinstance(image, np.ndarray) - assert image.shape == (16, 16, 3) - assert isinstance(target, np.ndarray) + assert isinstance(image, tv_tensors.Image) + assert image.shape == (3, 16, 16) + assert isinstance(target, torch.Tensor) assert target in [0, 1, 2, 3, 4, 5, 6, 7, 8] diff --git a/tests/eva/vision/data/datasets/classification/test_mhist.py b/tests/eva/vision/data/datasets/classification/test_mhist.py index f9e70105..5249e52e 100644 --- a/tests/eva/vision/data/datasets/classification/test_mhist.py +++ b/tests/eva/vision/data/datasets/classification/test_mhist.py @@ -3,8 +3,9 @@ import os from typing import Literal -import numpy as np import pytest +import torch +from torchvision import tv_tensors from eva.vision.data import datasets @@ -35,9 +36,9 @@ def test_sample(mhist_dataset: datasets.MHIST, index: int) -> None: assert len(sample) == 3 # assert the format of the `image` and `target` image, target, _ = sample - assert isinstance(image, np.ndarray) - assert image.shape == (224, 224, 3) - assert isinstance(target, np.ndarray) + assert isinstance(image, tv_tensors.Image) + assert image.shape == (3, 224, 224) + assert isinstance(target, torch.Tensor) assert target in [0, 1] diff --git a/tests/eva/vision/data/datasets/classification/test_panda.py b/tests/eva/vision/data/datasets/classification/test_panda.py index 8f523aeb..6b901344 100644 --- a/tests/eva/vision/data/datasets/classification/test_panda.py +++ b/tests/eva/vision/data/datasets/classification/test_panda.py @@ -8,6 +8,7 @@ import pytest import torch import torchvision.transforms.v2 as torch_transforms +from torchvision import tv_tensors from eva.vision.data import datasets from eva.vision.data import transforms as eva_transforms @@ -85,12 +86,10 @@ def _check_batch_shape(batch: Any): assert len(batch) == 3 image, target, metadata = batch - assert isinstance(image, torch.Tensor) + assert isinstance(image, tv_tensors.Image) assert image.shape == (3, TARGET_SIZE, TARGET_SIZE) - assert isinstance(target, np.ndarray) - assert target.size == 1 - + assert isinstance(target, torch.Tensor) assert isinstance(metadata, dict) assert "wsi_id" in metadata diff --git a/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py b/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py index 9f9270f3..30ecb73a 100644 --- a/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py +++ b/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py @@ -3,8 +3,9 @@ import os from typing import Literal -import numpy as np import pytest +import torch +from torchvision import tv_tensors from eva.vision.data import datasets @@ -30,9 +31,9 @@ def test_sample(patch_camelyon_dataset: datasets.PatchCamelyon) -> None: assert len(sample) == 3 # assert the format of the `image` and `target` image, target, _ = sample - assert isinstance(image, np.ndarray) - assert image.shape == (96, 96, 3) - assert isinstance(target, np.ndarray) + assert isinstance(image, tv_tensors.Image) + assert image.shape == (3, 96, 96) + assert isinstance(target, 
torch.Tensor) assert target in [0, 1] diff --git a/tests/eva/vision/data/datasets/classification/test_total_segmentator.py b/tests/eva/vision/data/datasets/classification/test_total_segmentator.py deleted file mode 100644 index 1c694f7b..00000000 --- a/tests/eva/vision/data/datasets/classification/test_total_segmentator.py +++ /dev/null @@ -1,63 +0,0 @@ -"""TotalSegmentator dataset tests.""" - -import os -from typing import Literal - -import numpy as np -import pytest - -from eva.vision.data import datasets - - -@pytest.mark.parametrize( - "split, expected_length", - [("train", 1660), ("val", 400), (None, 2060)], -) -def test_length( - total_segmentator_dataset: datasets.TotalSegmentatorClassification, expected_length: int -) -> None: - """Tests the length of the dataset.""" - assert len(total_segmentator_dataset) == expected_length - - -@pytest.mark.parametrize( - "split", - [ - None, - "train", - ], -) -def test_sample(total_segmentator_dataset: datasets.TotalSegmentatorClassification) -> None: - """Tests the format of a dataset sample.""" - sample = total_segmentator_dataset[0] - # assert data sample is a tuple - assert isinstance(sample, tuple) - assert len(sample) == 3 - # assert the format of the `image` and `target` - image, target, _ = sample - assert isinstance(image, np.ndarray) - assert image.shape == (16, 16, 3) - assert isinstance(target, np.ndarray) - assert all(target == [0, 0, 0]) - - -@pytest.fixture(scope="function") -def total_segmentator_dataset( - split: Literal["train", "val"], - assets_path: str, -) -> datasets.TotalSegmentatorClassification: - """TotalSegmentator dataset fixture.""" - dataset = datasets.TotalSegmentatorClassification( - root=os.path.join( - assets_path, - "vision", - "datasets", - "total_segmentator", - "Totalsegmentator_dataset_v201", - ), - split=split, - download=False, - ) - dataset.prepare_data() - dataset.configure() - return dataset diff --git a/tests/eva/vision/data/datasets/segmentation/test_total_segmentator.py b/tests/eva/vision/data/datasets/segmentation/test_total_segmentator.py index 3e7f09e6..9607a2a8 100644 --- a/tests/eva/vision/data/datasets/segmentation/test_total_segmentator.py +++ b/tests/eva/vision/data/datasets/segmentation/test_total_segmentator.py @@ -11,7 +11,7 @@ @pytest.mark.parametrize( "split, expected_length", - [("train", 1660), ("val", 400), (None, 2060)], + [("train", 9), ("val", 9), (None, 9)], ) def test_length( total_segmentator_dataset: datasets.TotalSegmentator2D, expected_length: int @@ -25,6 +25,7 @@ def test_length( [ (None, 0), ("train", 0), + ("val", 0), ], ) def test_sample(total_segmentator_dataset: datasets.TotalSegmentator2D, index: int) -> None: @@ -43,7 +44,7 @@ def test_sample(total_segmentator_dataset: datasets.TotalSegmentator2D, index: i @pytest.fixture(scope="function") def total_segmentator_dataset( - split: Literal["train", "val"], assets_path: str + split: Literal["train", "val"] | None, assets_path: str ) -> datasets.TotalSegmentator2D: """TotalSegmentator2D dataset fixture.""" dataset = datasets.TotalSegmentator2D( @@ -55,6 +56,7 @@ def total_segmentator_dataset( "Totalsegmentator_dataset_v201", ), split=split, + version=None, ) dataset.prepare_data() dataset.configure() diff --git a/tests/eva/vision/data/datasets/test_wsi.py b/tests/eva/vision/data/datasets/test_wsi.py index de816475..87959a60 100644 --- a/tests/eva/vision/data/datasets/test_wsi.py +++ b/tests/eva/vision/data/datasets/test_wsi.py @@ -66,7 +66,7 @@ def test_patch_shape(width: int, height: int, target_mpp: float, root: str, 
back dataset._wsi.mpp * dataset._wsi.level_downsamples[dataset._coords.level_idx] ) scaled_width, scaled_height = int(mpp_ratio * width), int(mpp_ratio * height) - assert dataset[0].shape == (scaled_width, scaled_height, 3) + assert dataset[0].shape == (3, scaled_width, scaled_height) def test_multi_dataset(root: str): diff --git a/tests/eva/vision/data/transforms/common/test_resize_and_crop.py b/tests/eva/vision/data/transforms/common/test_resize_and_crop.py index 4f3bd2ba..4399b057 100644 --- a/tests/eva/vision/data/transforms/common/test_resize_and_crop.py +++ b/tests/eva/vision/data/transforms/common/test_resize_and_crop.py @@ -2,11 +2,10 @@ from typing import Tuple -import numpy as np -import numpy.typing as npt import pytest import torch from torch import testing +from torchvision import tv_tensors from eva.vision.data.transforms import common @@ -14,23 +13,23 @@ @pytest.mark.parametrize( "image_size, target_size, expected_size, expected_mean", [ - ((512, 224, 3), [112, 224], (3, 112, 224), -0.00392), - ((224, 512, 3), [112, 224], (3, 112, 224), -0.00392), - ((512, 224, 3), [112, 97], (3, 112, 97), -0.00392), - ((512, 512, 3), 224, (3, 224, 224), -0.00392), - ((512, 224, 3), 224, (3, 224, 224), -0.00392), - ((224, 224, 3), 224, (3, 224, 224), -0.00392), - ((97, 97, 3), 224, (3, 224, 224), -0.00392), + ((3, 512, 224), [112, 224], (3, 112, 224), -0.00392), + ((3, 224, 512), [112, 224], (3, 112, 224), -0.00392), + ((3, 512, 224), [112, 97], (3, 112, 97), -0.00392), + ((3, 512, 512), 224, (3, 224, 224), -0.00392), + ((3, 512, 224), 224, (3, 224, 224), -0.00392), + ((3, 224, 224), 224, (3, 224, 224), -0.00392), + ((3, 97, 97), 224, (3, 224, 224), -0.00392), ], ) def test_resize_and_crop( - image_array: npt.NDArray, + image_tensor: tv_tensors.Image, resize_and_crop: common.ResizeAndCrop, expected_size: Tuple[int, int, int], expected_mean: float, ) -> None: """Tests the ResizeAndCrop transform.""" - output = resize_and_crop(image_array) + output = resize_and_crop(image_tensor) assert output.shape == expected_size testing.assert_close(output.mean(), torch.tensor(expected_mean)) @@ -42,6 +41,7 @@ def resize_and_crop(target_size: Tuple[int, int, int]) -> common.ResizeAndCrop: @pytest.fixture(scope="function") -def image_array(image_size: Tuple[int, int, int]) -> npt.NDArray: - """Image array fixture.""" - return 127 * np.ones(image_size, np.uint8) +def image_tensor(image_size: Tuple[int, int, int]) -> tv_tensors.Image: + """Image tensor fixture.""" + image_tensor = 127 * torch.ones(image_size, dtype=torch.uint8) + return tv_tensors.Image(image_tensor) From ecfd9663023191ea953187c3e489b65221338558 Mon Sep 17 00:00:00 2001 From: roman807 Date: Wed, 12 Jun 2024 11:51:27 +0200 Subject: [PATCH 26/29] Updated documentation with new datasets and leaderboard (#531) * updated layout * updated layout * addressed comment --- docs/datasets/camelyon16.md | 9 +-- docs/datasets/index.md | 14 ++-- docs/datasets/panda.md | 4 +- docs/index.md | 68 +++--------------- docs/leaderboards.md | 69 +++++++++++++++++++ .../advanced/replicate_evaluations.md | 2 +- mkdocs.yml | 1 + 7 files changed, 94 insertions(+), 73 deletions(-) create mode 100644 docs/leaderboards.md diff --git a/docs/datasets/camelyon16.md b/docs/datasets/camelyon16.md index 96584406..56bb69ff 100644 --- a/docs/datasets/camelyon16.md +++ b/docs/datasets/camelyon16.md @@ -2,7 +2,7 @@ The Camelyon16 dataset consists of 400 WSIs of lymph nodes for breast cancer metastasis classification. 
The dataset is a combination of two independent datasets, collected from two separate medical centers in the Netherlands (Radboud University Medical Center and University Medical Center Utrecht). The dataset contains the slides from which [PatchCamelyon](patch_camelyon.md)-patches were extracted.
 
-The dataset is divided in a train set (270 slides) and test set (130 slides), both containing images from both centers.
+The dataset is divided into a train set (270 slides) and a test set (130 slides), both containing images from both centers. Note that one test set slide was a duplicate and has been removed (see [here](https://github.com/DIDSR/dldp?tab=readme-ov-file#04-data-description-important)).
 
 The task was part of [Grand Challenge](https://grand-challenge.org/) in 2016 and has later been replaced by Camelyon17.
 
@@ -14,14 +14,14 @@ Source: https://camelyon16.grand-challenge.org
 
 |                           |                                                          |
 |---------------------------|----------------------------------------------------------|
-| **Modality**              | Vision (Slide-level)                                     |
+| **Modality**              | Vision (WSI)                                             |
 | **Task**                  | Binary classification                                    |
 | **Cancer type**           | Breast                                                   |
 | **Data size**             | ~700 GB                                                  |
 | **Image dimension**       | ~100-250k x ~100-250k x 3                                |
 | **Magnification (μm/px)** | 40x (0.25) - Level 0                                     |
 | **Files format**          | `.tif`                                                   |
-| **Number of images**      | 400 (270 train, 130 test)                                |
+| **Number of images**      | 399 (270 train, 129 test)                                |
 
 ### Organization
 
@@ -55,13 +55,14 @@ The dataset is split into train / test. Additionally, we split the train set int
 
 | Splits   | Train       | Validation  | Test        |
 |----------|-------------|-------------|-------------|
-| #Samples | 216 (54%)   | 54 (13.5%)  | 130 (32.5%) |
+| #Samples | 216 (54.1%) | 54 (13.5%)  | 129 (32.3%) |
 
 ## Relevant links
 
 * [Grand Challenge dataset description](https://camelyon16.grand-challenge.org/Data/)
 * [Download links](https://camelyon17.grand-challenge.org/Data/)
+* [GitHub with dataset description by DIDSR](https://github.com/DIDSR/dldp)
 
 ## References
diff --git a/docs/datasets/index.md b/docs/datasets/index.md
index a2a46920..cc947e80 100644
--- a/docs/datasets/index.md
+++ b/docs/datasets/index.md
@@ -6,13 +6,6 @@
 
 ### Whole Slide (WSI) and microscopy image datasets
 
-#### Slide-level
-| Dataset                            | #Slides  | Slide Size                | Magnification (μm/px)  | Task                       | Cancer Type      |
-|------------------------------------|----------|---------------------------|------------------------|----------------------------|------------------|
-| [Camelyon16](camelyon16.md)        | 400      | ~100-250k x ~100-250k x 3 | 40x (0.25)             | Classification (2 classes) | Breast           |
-| [PANDA](panda.md)                  | 10,616   | ~20k x 20k x 3            | 20x (0.5)              | Classification (6 classes) | Prostate         |
-
-
 #### Patch-level
 | Dataset                            | #Patches | Patch Size | Magnification (μm/px)  | Task                       | Cancer Type      |
 |------------------------------------|----------|------------|------------------------|----------------------------|------------------|
 
 \* Downsampled from 40x (0.25 μm/px) to increase the field of view. 
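To make the downsampling footnote concrete: halving the magnification doubles the microns per pixel, so a fixed-size patch covers twice the tissue extent per axis (four times the area). A small illustrative calculation, assuming a hypothetical 224 px patch (this patch size is not taken from the tables above):

    patch_px = 224             # hypothetical patch size in pixels
    mpp_40x = 0.25             # 40x scans at 0.25 μm/px
    mpp_20x = 2 * mpp_40x      # downsampling 40x -> 20x gives 0.5 μm/px
    print(patch_px * mpp_40x)  # 56.0 μm of tissue per axis at 40x
    print(patch_px * mpp_20x)  # 112.0 μm per axis at 20x, i.e. 4x the area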
+#### Slide-level
+| Dataset | #Slides | Slide Size | Magnification (μm/px) | Task | Cancer Type |
+|------------------------------------|----------|---------------------------|------------------------|----------------------------|------------------|
+| [Camelyon16](camelyon16.md) | 400 | ~100-250k x ~100-250k x 3 | 40x (0.25) | Classification (2 classes) | Breast |
+| [PANDA](panda.md) | 10,616 | ~20k x 20k x 3 | 20x (0.5) | Classification (6 classes) | Prostate |
+
+
 ### Radiology datasets
 
 | Dataset | #Images | Image Size | Task | Download provided
diff --git a/docs/datasets/panda.md b/docs/datasets/panda.md
index 16e7da8d..cf29488e 100644
--- a/docs/datasets/panda.md
+++ b/docs/datasets/panda.md
@@ -1,6 +1,6 @@
 # PANDA (Prostate cANcer graDe Assessment)
 
-The PANDA datasets consists of 10616 whole-slide images of digitized H&E-stained prostate tissue biopsies originating from two medical centers. After the biopsy, the slides were classified into Gleason patterns (3, 4 or 5) based on the architectural growth patterns of the tumor, which are then converted into an ISUP grade on a 0-5 scale.
+The PANDA dataset consists of 10,616 whole-slide images of digitized H&E-stained prostate tissue biopsies originating from two medical centers. After the biopsy, the slides were classified into Gleason patterns (3, 4 or 5) based on the architectural growth patterns of the tumor, which are then converted into an ISUP grade on a 0-5 scale.
 
 The Gleason grading system is the most important prognostic marker for prostate cancer and the ISUP grade has a crucial role when deciding how a patient should be treated. However, the system suffers from significant inter-observer variability between pathologists, leading to imperfect and noisy labels.
 
@@ -20,7 +20,7 @@ Source: https://www.kaggle.com/competitions/prostate-cancer-grade-assessment
 
 | **Image dimension**       | ~20k x 20k x 3      |
 | **Magnification (μm/px)** | 20x (0.5) - Level 0 |
 | **Files format**          | `.tiff`             |
-| **Number of images**      | 10616 (9555 after removing noisy labels) |
+| **Number of images**      | 10,616 (9,555 after removing noisy labels) |
 
 ### Organization
 
diff --git a/docs/index.md b/docs/index.md
index cee477de..3a0448ec 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -31,17 +31,17 @@ hide:
 
 _Oncology FM Evaluation Framework by [kaiko.ai](https://www.kaiko.ai/)_
 
-With the first release, *eva* supports performance evaluation for vision Foundation Models ("FMs") and supervised machine learning models on WSI-patch-level image classification task. Support for radiology (CT-scans) segmentation tasks will be added soon.
+*eva* currently supports performance evaluation for vision Foundation Models ("FMs") and supervised machine learning models on WSI (patch- and slide-level) as well as radiology image classification tasks.
 
 With *eva* we provide the open-source community with an easy-to-use framework that follows industry best practices to deliver a robust, reproducible and fair evaluation benchmark across FMs of different sizes and architectures.
 
-Support for additional modalities and tasks will be added in future releases.
+Support for additional modalities and tasks will be added soon.
 
 ## Use cases
 
 ### 1. Evaluate your own FMs on public benchmark datasets
 
-With a specified FM as input, you can run *eva* on several publicly available datasets & tasks. One evaluation run will download and preprocess the relevant data, compute embeddings, fit and evaluate a downstream head and report the mean and standard deviation of the relevant performance metrics.
+With a specified FM as input, you can run *eva* on several publicly available datasets & tasks. One evaluation run will download (if supported) and preprocess the relevant data, compute embeddings, fit and evaluate a downstream head and report the mean and standard deviation of the relevant performance metrics.
 
 Supported datasets & tasks include:
 
@@ -52,6 +52,11 @@ Supported datasets & tasks include:
 - **[CRC](datasets/crc.md)**: multiclass colorectal cancer classification
 - **[MHIST](datasets/mhist.md)**: binary colorectal polyp cancer classification
 
+*WSI slide-level pathology datasets*
+
+- **[Camelyon16](datasets/camelyon16.md)**: binary breast cancer classification
+- **[PANDA](datasets/panda.md)**: multiclass prostate cancer classification
+
 *Radiology datasets*
 
 - **[TotalSegmentator](datasets/total_segmentator.md)**: radiology/CT-scan for segmentation of anatomical structures (*support coming soon*)
@@ -65,62 +70,7 @@ If you have your own labeled dataset, all that is needed is to implement a datas
 
 ## Evaluation results
 
-We evaluated the following FMs on the 4 supported WSI-patch-level image classification tasks. On the table below we report *Balanced Accuracy* for binary & multiclass tasks and show the average performance & standard deviation over 5 runs.
-
-
-| FM-backbone | pretraining | BACH | CRC | MHIST | PCam/val | PCam/test |
-|-----------------------------|-------------|------------------|-----------------|-----------------|-----------------|--------------|
-| DINO ViT-S16 | N/A | 0.410 (±0.009) | 0.617 (±0.008) | 0.501 (±0.004) | 0.753 (±0.002) | 0.728 (±0.003) |
-| DINO ViT-S16 | ImageNet | 0.695 (±0.004) | 0.935 (±0.003) | 0.831 (±0.002) | 0.864 (±0.007) | 0.849 (±0.007) |
-| DINO ViT-B8 | ImageNet | 0.710 (±0.007) | 0.939 (±0.001) | 0.814 (±0.003) | 0.870 (±0.003) | 0.856 (±0.004) |
-| DINOv2 ViT-L14 | ImageNet | 0.707 (±0.008) | 0.916 (±0.002) | 0.832 (±0.003) | 0.873 (±0.001) | 0.888 (±0.001) |
-| Lunit - ViT-S16 | TCGA | 0.801 (±0.005) | 0.934 (±0.001) | 0.768 (±0.004) | 0.889 (±0.002) | 0.895 (±0.006) |
-| Owkin - iBOT ViT-B16 | TCGA | 0.725 (±0.004) | 0.935 (±0.001) | 0.777 (±0.005) | 0.912 (±0.002) | 0.915 (±0.003) |
-| UNI - DINOv2 ViT-L16 | Mass-100k | 0.814 (±0.008) | 0.950 (±0.001) | **0.837 (±0.001)** | **0.936 (±0.001)** | **0.938 (±0.001)** |
-| kaiko.ai - DINO ViT-S16 | TCGA | 0.797 (±0.003) | 0.943 (±0.001) | 0.828 (±0.003) | 0.903 (±0.001) | 0.893 (±0.005) |
-| kaiko.ai - DINO ViT-S8 | TCGA | 0.834 (±0.012) | 0.946 (±0.002) | 0.832 (±0.006) | 0.897 (±0.001) | 0.887 (±0.002) |
-| kaiko.ai - DINO ViT-B16 | TCGA | 0.810 (±0.008) | **0.960 (±0.001)** | 0.826 (±0.003) | 0.900 (±0.002) | 0.898 (±0.003) |
-| kaiko.ai - DINO ViT-B8 | TCGA | 0.865 (±0.019) | 0.956 (±0.001) | 0.809 (±0.021) | 0.913 (±0.001) | 0.921 (±0.002) |
-| kaiko.ai - DINOv2 ViT-L14 | TCGA | **0.870 (±0.005)** | 0.930 (±0.001) | 0.809 (±0.001) | 0.908 (±0.001) | 0.898 (±0.002) |
-
-The runs use the default setup described in the section below.
-
-*eva* trains the decoder on the "train" split and uses the "validation" split for monitoring, early stopping and checkpoint selection. Evaluation results are reported on the "validation" split and, if available, on the "test" split.
-
-For more details on the FM-backbones and instructions to replicate the results, check out [Replicate evaluations](user-guide/advanced/replicate_evaluations.md).
-
-## Evaluation setup
-
-*Note that the current version of eva implements the task- & model-independent and fixed default set up following the standard evaluation protocol proposed by [1] and described in the table below. We selected this approach to prioritize reliable, robust and fair FM-evaluation while being in line with common literature. Additionally, with future versions we are planning to allow the use of cross-validation and hyper-parameter tuning to find the optimal setup to achieve best possible performance on the implemented downstream tasks.*
-
-With a provided FM, *eva* computes embeddings for all input images (WSI patches) which are then used to train a downstream head consisting of a single linear layer in a supervised setup for each of the benchmark datasets. We use early stopping with a patience of 5% of the maximal number of epochs.
-
-| | |
-|-------------------------|---------------------------|
-| **Backbone** | frozen |
-| **Hidden layers** | none |
-| **Dropout** | 0.0 |
-| **Activation function** | none |
-| **Number of steps** | 12,500 |
-| **Base Batch size** | 4,096 |
-| **Batch size** | dataset specific* |
-| **Base learning rate** | 0.01 |
-| **Learning Rate** | [Base learning rate] * [Batch size] / [Base batch size] |
-| **Max epochs** | [Number of samples] * [Number of steps] / [Batch size] |
-| **Early stopping** | 5% * [Max epochs] |
-| **Optimizer** | SGD |
-| **Momentum** | 0.9 |
-| **Weight Decay** | 0.0 |
-| **Nesterov momentum** | true |
-| **LR Schedule** | Cosine without warmup |
-
-\* For smaller datasets (e.g. BACH with 400 samples) we reduce the batch size to 256 and scale the learning rate accordingly.
-
-- [1]: [Virchow: A Million-Slide Digital Pathology Foundation Model, 2024](https://arxiv.org/pdf/2309.07778.pdf)
 
 ## License
 
diff --git a/docs/leaderboards.md b/docs/leaderboards.md
new file mode 100644
index 00000000..69e7c836
--- /dev/null
+++ b/docs/leaderboards.md
@@ -0,0 +1,69 @@
+---
+hide:
+  - navigation
+---
+
+# Leaderboards
+
+We evaluated the following FMs on the 6 supported WSI-classification tasks. We report *Balanced Accuracy* for binary & multiclass tasks. The score shows the average performance over 5 runs.
+
+| Vision FM | pretraining | BACH | CRC | MHIST | PCam | Camelyon16 | PANDA |
+|-----------------------------|-------------|----------|-----------|-----------|----------|----------|----------|
+| [DINO ViT-S16](https://arxiv.org/abs/2104.14294) | N/A | 0.410 | 0.617 | 0.501 | 0.728 | TBD | TBD |
+| [DINO ViT-S16](https://arxiv.org/abs/2104.14294) | ImageNet | 0.695 | 0.935 | 0.831 | 0.849 | TBD | TBD |
+| [Lunit - ViT-S16](https://github.com/lunit-io/benchmark-ssl-pathology/releases/) | TCGA | 0.801 | 0.934 | 0.768 | 0.895 | TBD | TBD |
+| [Owkin (Phikon) - iBOT ViT-B16](https://huggingface.co/owkin/phikon) | TCGA | 0.725 | 0.935 | 0.777 | 0.915 | TBD | TBD |
+| [UNI - DINOv2 ViT-L16](https://huggingface.co/MahmoodLab/UNI) | Mass-100k | 0.814 | 0.950 | **0.837** | **0.938** | TBD | TBD |
+| [kaiko.ai - DINO ViT-S16](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.797 | 0.943 | 0.828 | 0.893 | TBD | TBD |
+| [kaiko.ai - DINO ViT-S8](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.834 | 0.946 | 0.832 | 0.887 | TBD | TBD |
+| [kaiko.ai - DINO ViT-B16](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.810 | **0.960** | 0.826 | 0.898 | TBD | TBD |
+| [kaiko.ai - DINO ViT-B8](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.865 | 0.956 | 0.809 | 0.921 | TBD | TBD |
+| [kaiko.ai - DINOv2 ViT-L14](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | **0.870** | 0.930 | 0.809 | 0.898 | TBD | TBD |
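A hedged aside on the protocol arithmetic: the removed "Evaluation setup" table above and footnote (1) of the protocol table below fix a base batch size of 4,096, a base learning rate of 0.01 and a budget of 12,500 steps, from which the dataset-specific values follow. A small sketch of that derivation in plain Python (assumption: "max epochs" is read as the number of epochs needed to exhaust the step budget):

```python
import math

# Derivation of dataset-specific hyper-parameters from the fixed protocol:
# the LR scales linearly with batch size, and the epoch budget is whatever
# is needed to reach the fixed number of optimization steps.
BASE_LR, BASE_BATCH_SIZE, NUM_STEPS = 0.01, 4096, 12_500

def derived_hyperparams(num_samples: int, batch_size: int,
                        patience_fraction: float = 0.05) -> dict:
    """Returns the scaled LR, max epochs and early-stopping patience."""
    learning_rate = BASE_LR * batch_size / BASE_BATCH_SIZE
    steps_per_epoch = math.ceil(num_samples / batch_size)
    max_epochs = math.ceil(NUM_STEPS / steps_per_epoch)
    patience = math.ceil(patience_fraction * max_epochs)
    return {"lr": learning_rate, "max_epochs": max_epochs, "patience": patience}

# BACH-sized dataset per footnote (1): 400 samples, batch size reduced to 256.
print(derived_hyperparams(num_samples=400, batch_size=256))
# -> {'lr': 0.000625, 'max_epochs': 6250, 'patience': 313}
```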
+
+The runs use the default setup described in the section below.
+
+*eva* trains the decoder on the "train" split and uses the "validation" split for monitoring, early stopping and checkpoint selection. Evaluation results are reported on the "test" split if available and otherwise on the "validation" split.
+
+For details on the FM-backbones and instructions to replicate the results, check out [Replicate evaluations](user-guide/advanced/replicate_evaluations.md).
+
+## Evaluation protocol
+
+*eva* uses a task- & model-independent and fixed default setup which closely follows the standard evaluation protocol proposed by [1] (with adjustments for slide-level tasks to ensure convergence and computational efficiency).
+
+We selected this approach to prioritize reliable, robust and fair FM-evaluation while being in line with common literature.
+
+| | WSI patch-level tasks | WSI slide-level tasks |
+|--------------------------------|---------------------------|---------------------------|
+| **Backbone** | frozen | frozen |
+| **Head** | single layer MLP | ABMIL |
+| **Dropout** | 0.0 | 0.0 |
+| **Hidden activation function** | n/a | ReLU |
+| **Output activation function** | none | none |
+| **Number of steps** | 12,500 | 12,500 (2) |
+| **Base batch size** | 4,096 (1) | 32 |
+| **Base learning rate** | 0.01 (1) | 0.001 |
+| **Early stopping** | 5% * [Max epochs] | 10% * [Max epochs] (3) |
+| **Optimizer** | SGD | AdamW |
+| **Momentum** | 0.9 | n/a |
+| **Weight Decay** | 0.0 | n/a |
+| **betas** | n/a | [0.9, 0.999] |
+| **LR Schedule** | Cosine without warmup | Cosine without warmup |
+| **Number of patches per slide** | 1 | dataset specific (4) |
+
+(1) For smaller datasets (e.g. BACH with 400 samples) we reduce the batch size to 256 and scale the learning rate accordingly.
+
+(2) Upper cap at a maximum of 100 epochs.
+
+(3) Lower cap at a minimum of 8 epochs.
+
+(4) The number of patches per slide depends on task and slide size. For PANDA and Camelyon16 we use a maximum of 1,000 and 10,000 random patches per slide respectively.
+
+- [1]: [Virchow: A Million-Slide Digital Pathology Foundation Model, 2024](https://arxiv.org/pdf/2309.07778.pdf)
diff --git a/docs/user-guide/advanced/replicate_evaluations.md b/docs/user-guide/advanced/replicate_evaluations.md
index d3770586..407cc0b4 100644
--- a/docs/user-guide/advanced/replicate_evaluations.md
+++ b/docs/user-guide/advanced/replicate_evaluations.md
@@ -4,7 +4,7 @@ To produce the evaluation results presented [here](../../index.md#evaluation-res
 
 Make sure to replace `` in the commands below with `bach`, `crc`, `mhist` or `patch_camelyon`.
 
-Note that to run the commands below you will need to first download the data. [BACH](../../datasets/bach.md), [CRC](../../datasets/crc.md) and [PatchCamelyon](../../datasets/patch_camelyon.md) provide automatic download by setting the argument `download: true` (either modify the config-files or set the environment variable `DOWNLOAD=true`). In the case of MHIST you will need to download the data manually by following the instructions provided [here](../../datasets/mhist.md#download-and-preprocessing).*
+*Note that to run the commands below you will need to first download the data. [BACH](../../datasets/bach.md), [CRC](../../datasets/crc.md) and [PatchCamelyon](../../datasets/patch_camelyon.md) provide automatic download by setting the argument `download: true` (either modify the config-files or set the environment variable `DOWNLOAD=true`).
In the case of MHIST you will need to download the data manually by following the instructions provided [here](../../datasets/mhist.md#download-and-preprocessing).*
 
 ## DINO ViT-S16 (random weights)
 
diff --git a/mkdocs.yml b/mkdocs.yml
index 4acc4511..e19b1bef 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -65,6 +65,7 @@ markdown_extensions:
   - pymdownx.superfences
 nav:
   - Introduction: index.md
+  - Leaderboards: leaderboards.md
   - User Guide:
     - user-guide/index.md
     - Getting started:

From 4e714cb38665baee39dc47e87af7a87d0da81a61 Mon Sep 17 00:00:00 2001
From: roman807
Date: Mon, 17 Jun 2024 16:00:12 +0200
Subject: [PATCH 27/29] 532 update leaderboard results with slide level tasks
 (#538)

* updated docs
* update leaderboard
* update docs and links
---
 README.md                                     | 30 +++++-----
 docs/leaderboards.md                          | 24 ++++----
 .../advanced/replicate_evaluations.md         | 58 ++++++-------------
 mkdocs.yml                                    |  5 +-
 4 files changed, 47 insertions(+), 70 deletions(-)

diff --git a/README.md b/README.md
index 4cbbdf5f..5f86678b 100644
--- a/README.md
+++ b/README.md
@@ -104,29 +104,27 @@ and [tutorials](https://kaiko-ai.github.io/eva/dev/user-guide/advanced/replicate
 
 In this section you will find model benchmarks which were generated with _`eva`_.
 
-### Table I: WSI patch-level benchmark
+### Table I: WSI classification tasks
-| Model | BACH | CRC | MHIST | PCam/val | PCam/test |
-|--------------------------------------------------|-------|-------|-------|----------|-----------|
-| ViT-S/16 _(random)_ [1] | 0.410 | 0.617 | 0.501 | 0.753 | 0.728 |
-| ViT-S/16 _(ImageNet)_ [1] | 0.695 | 0.935 | 0.831 | 0.864 | 0.849 |
-| ViT-B/8 _(ImageNet)_ [1] | 0.710 | 0.939 | 0.814 | 0.870 | 0.856 |
-| ViT-L/14 _(ImageNet)_ [1] | 0.707 | 0.916 | 0.832 | 0.873 | 0.888 |
-| DINO(p=16) [2] | 0.801 | 0.934 | 0.768 | 0.889 | 0.895 |
-| Phikon [3] | 0.725 | 0.935 | 0.777 | 0.912 | 0.915 |
-| UNI [4] | 0.814 | 0.950 | 0.837 | 0.936 | 0.938 |
-| ViT-S/16 _(kaiko.ai)_ [5] | 0.797 | 0.943 | 0.828 | 0.903 | 0.893 |
-| ViT-S/8 _(kaiko.ai)_ [5] | 0.834 | 0.946 | 0.832 | 0.897 | 0.887 |
-| ViT-B/16 _(kaiko.ai)_ [5] | 0.810 | 0.960 | 0.826 | 0.900 | 0.898 |
-| ViT-B/8 _(kaiko.ai)_ [5] | 0.865 | 0.956 | 0.809 | 0.913 | 0.921 |
-| ViT-L/14 _(kaiko.ai)_ [5] | 0.870 | 0.930 | 0.809 | 0.908 | 0.898 |
+| Model | BACH | CRC | MHIST | PCam | Camelyon16 | PANDA |
+|--------------------------------------------------|-------|-------|-------|--------|------------|-------|
+| ViT-S/16 _(random)_ [1] | 0.410 | 0.617 | 0.501 | 0.728 | 0.532 | 0.350 |
+| ViT-S/16 _(ImageNet)_ [1] | 0.695 | 0.935 | 0.831 | 0.849 | 0.759 | 0.678 |
+| DINO(p=16) [2] | 0.801 | 0.934 | 0.768 | 0.895 | 0.890 | 0.753 |
+| Phikon [3] | 0.725 | 0.935 | 0.777 | 0.915 | 0.916 | 0.771 |
+| UNI [4] | 0.814 | 0.950 | 0.837 | 0.938 | 0.942 | 0.775 |
+| ViT-S/16 _(kaiko.ai)_ [5] | 0.797 | 0.943 | 0.828 | 0.893 | 0.915 | 0.770 |
+| ViT-S/8 _(kaiko.ai)_ [5] | 0.834 | 0.946 | 0.832 | 0.887 | 0.903 | 0.744 |
+| ViT-B/16 _(kaiko.ai)_ [5] | 0.810 | 0.960 | 0.826 | 0.898 | 0.889 | 0.753 |
+| ViT-B/8 _(kaiko.ai)_ [5] | 0.865 | 0.956 | 0.809 | 0.921 | 0.922 | 0.759 |
+| ViT-L/14 _(kaiko.ai)_ [5] | 0.870 | 0.930 | 0.809 | 0.898 | 0.931 | 0.774 |
 
 _Table I: Linear probing evaluation of FMs on patch-level downstream datasets. We report averaged balanced accuracy
-over 5 runs, with an average standard deviation of ±0.003._
+over 5 runs. Results are reported on the "test" split if available and otherwise on the "validation" split._
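For orientation, "linear probing" in Table I means: freeze the FM, embed each image, and train only a single linear layer on top. A minimal PyTorch sketch of that setup, assuming a generic frozen feature extractor (illustrative only, not eva's actual module code; `nn.Identity()` stands in for a real backbone, and the LR and epoch values echo the BACH-scale protocol numbers):

```python
import torch
from torch import nn

class LinearProbe(nn.Module):
    """Frozen feature extractor with a single trainable linear layer on top."""

    def __init__(self, backbone: nn.Module, in_features: int, num_classes: int):
        super().__init__()
        self.backbone = backbone.eval()
        for parameter in self.backbone.parameters():
            parameter.requires_grad = False  # the FM stays frozen
        self.head = nn.Linear(in_features, num_classes)

    def forward(self, images: torch.Tensor) -> torch.Tensor:
        with torch.no_grad():
            embeddings = self.backbone(images)
        return self.head(embeddings)

# SGD with Nesterov momentum and a cosine schedule, as in the protocol tables.
model = LinearProbe(backbone=nn.Identity(), in_features=384, num_classes=2)
optimizer = torch.optim.SGD(model.head.parameters(), lr=0.000625,
                            momentum=0.9, nesterov=True, weight_decay=0.0)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=6250)
```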
diff --git a/docs/leaderboards.md b/docs/leaderboards.md
index 69e7c836..bfa6f9bf 100644
--- a/docs/leaderboards.md
+++ b/docs/leaderboards.md
@@ -11,18 +11,18 @@ We evaluated the following FMs on the 6 supported WSI-classification tasks. We r
-| Vision FM | pretraining | BACH | CRC | MHIST | PCam |Camelyon16| PANDA |
+| Vision FM | pretraining | [BACH](datasets/bach.md) | [CRC](datasets/crc.md) | [MHIST](datasets/mhist.md) | [PCam](datasets/patch_camelyon.md) | [Camelyon16](datasets/camelyon16.md) | [PANDA](datasets/panda.md) |
 |-----------------------------|-------------|----------|-----------|-----------|----------|----------|----------|
+| [DINO ViT-S16](https://arxiv.org/abs/2104.14294) | N/A | 0.410 | 0.617 | 0.501 | 0.728 | 0.532 | 0.350 |
+| [DINO ViT-S16](https://arxiv.org/abs/2104.14294) | ImageNet | 0.695 | 0.935 | 0.831 | 0.849 | 0.759 | 0.678 |
+| [Lunit - ViT-S16](https://github.com/lunit-io/benchmark-ssl-pathology/releases/) | TCGA | 0.801 | 0.934 | 0.768 | 0.895 | 0.890 | 0.753 |
+| [Owkin (Phikon) - iBOT ViT-B16](https://huggingface.co/owkin/phikon) | TCGA | 0.725 | 0.935 | 0.777 | 0.915 | 0.916 | 0.771 |
+| [UNI - DINOv2 ViT-L16](https://huggingface.co/MahmoodLab/UNI) | Mass-100k | 0.814 | 0.950 | **0.837** | **0.938** | **0.942** | **0.775** |
+| [kaiko.ai - DINO ViT-S16](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.797 | 0.943 | 0.828 | 0.893 | 0.915 | 0.770 |
+| [kaiko.ai - DINO ViT-S8](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.834 | 0.946 | 0.832 | 0.887 | 0.903 | 0.744 |
+| [kaiko.ai - DINO ViT-B16](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.810 | **0.960** | 0.826 | 0.898 | 0.889 | 0.753 |
+| [kaiko.ai - DINO ViT-B8](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.865 | 0.956 | 0.809 | 0.921 | 0.922 | 0.759 |
+| [kaiko.ai - DINOv2 ViT-L14](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | **0.870** | 0.930 | 0.809 | 0.898 | 0.931 | 0.774 |
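The Camelyon16 and PANDA columns use the ABMIL head from the evaluation protocol, which pools a bag of patch embeddings into one slide embedding via learned attention before classifying. A minimal sketch of that pooling (hedged: this follows the standard attention-based MIL formulation with a Tanh attention network rather than eva's exact implementation; the embedding size 384 matches a ViT-S backbone):

```python
import torch
from torch import nn

class ABMIL(nn.Module):
    """Attention-based MIL pooling: bag of patch embeddings -> slide logits."""

    def __init__(self, in_features: int, hidden_features: int, num_classes: int):
        super().__init__()
        self.attention = nn.Sequential(
            nn.Linear(in_features, hidden_features),
            nn.Tanh(),
            nn.Linear(hidden_features, 1),
        )
        self.classifier = nn.Linear(in_features, num_classes)

    def forward(self, bag: torch.Tensor) -> torch.Tensor:
        # bag: (num_patches, in_features) embeddings from a single slide.
        weights = torch.softmax(self.attention(bag), dim=0)  # (num_patches, 1)
        slide_embedding = (weights * bag).sum(dim=0)         # (in_features,)
        return self.classifier(slide_embedding)

head = ABMIL(in_features=384, hidden_features=128, num_classes=2)
logits = head(torch.randn(1_000, 384))  # e.g. 1,000 sampled patches per slide
```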
 
 The runs use the default setup described in the section below.
 
 *eva* trains the decoder on the "train" split and uses the "validation" split for monitoring, early stopping and checkpoint selection. Evaluation results are reported on the "test" split if available and otherwise on the "validation" split.
 
-For details on the FM-backbones and instructions to replicate the results, check out [Replicate evaluations](user-guide/advanced/replicate_evaluations.md).
+For details on the FM-backbones and instructions to replicate the results, check out [Replicate evaluations](user-guide/advanced/replicate_evaluations.md). For information on the tasks, check out [Datasets](datasets/index.md).
 
 ## Evaluation protocol
 
diff --git a/docs/user-guide/advanced/replicate_evaluations.md b/docs/user-guide/advanced/replicate_evaluations.md
index 407cc0b4..964fa711 100644
--- a/docs/user-guide/advanced/replicate_evaluations.md
+++ b/docs/user-guide/advanced/replicate_evaluations.md
@@ -25,29 +25,6 @@ EMBEDDINGS_ROOT="./data/embeddings/dino_vits16_imagenet" \
 eva predict_fit --config configs/vision/dino_vit/offline/.yaml
 ```
 
-## DINO ViT-B8 (ImageNet)
-
-To evaluate performance on the larger ViT-B8 backbone pretrained on ImageNet, run:
-```
-EMBEDDINGS_ROOT="./data/embeddings/dino_vitb8_imagenet" \
-DINO_BACKBONE=dino_vitb8 \
-IN_FEATURES=768 \
-eva predict_fit --config configs/vision/dino_vit/offline/.yaml
-```
-
-## DINOv2 ViT-L14 (ImageNet)
-
-To evaluate performance on Dino v2 ViT-L14 backbone pretrained on ImageNet, run:
-```
-PRETRAINED=true \
-EMBEDDINGS_ROOT="./data/embeddings/dinov2_vitl14_kaiko" \
-REPO_OR_DIR=facebookresearch/dinov2:main \
-DINO_BACKBONE=dinov2_vitl14_reg \
-FORCE_RELOAD=true \
-IN_FEATURES=1024 \
-eva predict_fit --config configs/vision/dino_vit/offline/.yaml
-```
-
 ## Lunit - DINO ViT-S16 (TCGA)
 
 [Lunit](https://www.lunit.io/en), released the weights for a DINO ViT-S16 backbone, pretrained on TCGA data
@@ -110,12 +87,13 @@ eva predict_fit --config path/to/.yaml
 
 ## kaiko.ai - DINO ViT-S16 (TCGA)
 
 To evaluate [kaiko.ai's](https://www.kaiko.ai/) FM with DINO ViT-S16 backbone, pretrained on TCGA data
-on [GitHub](https://github.com/lunit-io/benchmark-ssl-pathology/releases/), run:
+and available on [GitHub](https://github.com/kaiko-ai/towards_large_pathology_fms), run:
 
 ```
 PRETRAINED=false \
 EMBEDDINGS_ROOT="./data/embeddings/dino_vits16_kaiko" \
-CHECKPOINT_PATH=[TBD*] \
+REPO_OR_DIR="kaiko-ai/towards_large_pathology_fms" \
+DINO_BACKBONE="vits16" \
 NORMALIZE_MEAN=[0.5,0.5,0.5] \
 NORMALIZE_STD=[0.5,0.5,0.5] \
 eva predict_fit --config configs/vision/dino_vit/offline/.yaml
 ```
 
@@ -126,13 +104,13 @@ eva predict_fit --config configs/vision/dino_vit/offline/.yaml
 
 ## kaiko.ai - DINO ViT-S8 (TCGA)
 
 To evaluate [kaiko.ai's](https://www.kaiko.ai/) FM with DINO ViT-S8 backbone, pretrained on TCGA data
-on [GitHub](https://github.com/lunit-io/benchmark-ssl-pathology/releases/), run:
+and available on [GitHub](https://github.com/kaiko-ai/towards_large_pathology_fms), run:
 
 ```
 PRETRAINED=false \
 EMBEDDINGS_ROOT="./data/embeddings/dino_vits8_kaiko" \
-DINO_BACKBONE=dino_vits8 \
-CHECKPOINT_PATH=[TBD*] \
+REPO_OR_DIR="kaiko-ai/towards_large_pathology_fms" \
+DINO_BACKBONE="vits8" \
 NORMALIZE_MEAN=[0.5,0.5,0.5] \
 NORMALIZE_STD=[0.5,0.5,0.5] \
 eva predict_fit --config configs/vision/dino_vit/offline/.yaml
 ```
 
@@ -142,14 +120,14 @@
 
 ## kaiko.ai - DINO ViT-B16 (TCGA)
 
-To evaluate [kaiko.ai's](https://www.kaiko.ai/) FM with the larger DINO ViT-B16 backbone, pretrained on TCGA data,
-run:
+To evaluate [kaiko.ai's](https://www.kaiko.ai/) FM with DINO ViT-B16 backbone, pretrained on TCGA data
+and available on [GitHub](https://github.com/kaiko-ai/towards_large_pathology_fms), run:
 
 ```
 PRETRAINED=false \
 EMBEDDINGS_ROOT="./data/embeddings/dino_vitb16_kaiko" \
-DINO_BACKBONE=dino_vitb16 \
-CHECKPOINT_PATH=[TBD*] \
+REPO_OR_DIR="kaiko-ai/towards_large_pathology_fms" \
+DINO_BACKBONE="vitb16" \
 IN_FEATURES=768 \
 NORMALIZE_MEAN=[0.5,0.5,0.5] \
 NORMALIZE_STD=[0.5,0.5,0.5] \
 eva predict_fit --config configs/vision/dino_vit/offline/.yaml
 ```
 
@@ -160,14 +138,14 @@
 
 ## kaiko.ai - DINO ViT-B8 (TCGA)
 
-To evaluate [kaiko.ai's](https://www.kaiko.ai/) FM with the larger DINO ViT-B8 backbone, pretrained on TCGA data,
-run:
+To evaluate [kaiko.ai's](https://www.kaiko.ai/) FM with DINO ViT-B8 backbone, pretrained on TCGA data
+and available on [GitHub](https://github.com/kaiko-ai/towards_large_pathology_fms), run:
 
 ```
 PRETRAINED=false \
 EMBEDDINGS_ROOT="./data/embeddings/dino_vitb8_kaiko" \
-DINO_BACKBONE=dino_vitb8 \
-CHECKPOINT_PATH=[TBD*] \
+REPO_OR_DIR="kaiko-ai/towards_large_pathology_fms" \
+DINO_BACKBONE="vitb8" \
 IN_FEATURES=768 \
 NORMALIZE_MEAN=[0.5,0.5,0.5] \
 NORMALIZE_STD=[0.5,0.5,0.5] \
 eva predict_fit --config configs/vision/dino_vit/offline/.yaml
 ```
 
@@ -178,14 +156,14 @@
 
 ## kaiko.ai - DINOv2 ViT-L14 (TCGA)
 
-To evaluate [kaiko.ai's](https://www.kaiko.ai/) FM with the larger DINOv2 ViT-L14 backbone, pretrained on TCGA data,
-run:
+To evaluate [kaiko.ai's](https://www.kaiko.ai/) FM with DINOv2 ViT-L14 backbone, pretrained on TCGA data
+and available on [GitHub](https://github.com/kaiko-ai/towards_large_pathology_fms), run:
 
 ```
 PRETRAINED=false \
 EMBEDDINGS_ROOT="./data/embeddings/dinov2_vitl14_kaiko" \
-REPO_OR_DIR=facebookresearch/dinov2:main \
-DINO_BACKBONE=dinov2_vitl14_reg \
+REPO_OR_DIR="kaiko-ai/towards_large_pathology_fms" \
+DINO_BACKBONE="vitl14" \
 FORCE_RELOAD=true \
 CHECKPOINT_PATH=[TBD*] \
 IN_FEATURES=1024 \
diff --git a/mkdocs.yml b/mkdocs.yml
index e19b1bef..6584e7e6 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -80,13 +80,14 @@ nav:
   - Datasets:
     - datasets/index.md
     - WSI:
-      - Slide-level:
-        - PANDA: datasets/panda.md
       - Patch-level:
         - BACH: datasets/bach.md
         - CRC: datasets/crc.md
        - MHIST: datasets/mhist.md
         - PatchCamelyon: datasets/patch_camelyon.md
+      - Slide-level:
+        - Camelyon16: datasets/camelyon16.md
+        - PANDA: datasets/panda.md
     - Radiology:
       - TotalSegmentator: datasets/total_segmentator.md
   - Reference API:

From 0656202af368f551107dbc6e442a6427c750137d Mon Sep 17 00:00:00 2001
From: roman807
Date: Tue, 18 Jun 2024 09:06:14 +0200
Subject: [PATCH 28/29] updated configs (#539)

---
 configs/vision/dino_vit/offline/camelyon16.yaml     | 1 +
 configs/vision/owkin/phikon/offline/camelyon16.yaml | 1 +
 2 files changed, 2 insertions(+)

diff --git a/configs/vision/dino_vit/offline/camelyon16.yaml b/configs/vision/dino_vit/offline/camelyon16.yaml
index c165b37e..19886da4 100644
--- a/configs/vision/dino_vit/offline/camelyon16.yaml
+++ b/configs/vision/dino_vit/offline/camelyon16.yaml
@@ -25,6 +25,7 @@ trainer:
       - class_path: eva.callbacks.ClassificationEmbeddingsWriter
         init_args:
           output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/${oc.env:DINO_BACKBONE, dino_vits16}/camelyon16}
+          save_every_n: 10_000
           dataloader_idx_map:
             0: train
             1: val
diff --git a/configs/vision/owkin/phikon/offline/camelyon16.yaml b/configs/vision/owkin/phikon/offline/camelyon16.yaml
index d44bbc58..b3bf1dca 100644
--- a/configs/vision/owkin/phikon/offline/camelyon16.yaml
+++ b/configs/vision/owkin/phikon/offline/camelyon16.yaml
@@ -25,6 +25,7 @@ trainer:
       - class_path: eva.callbacks.ClassificationEmbeddingsWriter
         init_args:
           output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/owkin/phikon/camelyon16}
+          save_every_n: 10_000
           dataloader_idx_map:
             0: train
             1: val

From 6de261ff5bc2013a7101a2dfa7fe49528b9e599f Mon Sep 17 00:00:00 2001
From: roman807
Date: Fri, 21 Jun 2024 09:10:48 +0200
Subject: [PATCH 29/29] updated leaderboard (#543)

---
 README.md                |  24 ++++++++++++------------
 docs/images/starplot.png | Bin 0 -> 302767 bytes
 docs/leaderboards.md     |  30 ++++++++++++++++++------------
 3 files changed, 30 insertions(+), 24 deletions(-)
 create mode 100644 docs/images/starplot.png

diff --git a/README.md b/README.md
index 5f86678b..2e150690 100644
--- a/README.md
+++ b/README.md
@@ -110,18 +110,18 @@ In this section you will find model benchmarks which were generated with _`eva`_
 
-| Model | BACH | CRC | MHIST | PCam | Camelyon16 | PANDA |
-|--------------------------------------------------|-------|-------|-------|--------|------------|-------|
-| ViT-S/16 _(random)_ [1] | 0.410 | 0.617 | 0.501 | 0.728 | 0.532 | 0.350 |
-| ViT-S/16 _(ImageNet)_ [1] | 0.695 | 0.935 | 0.831 | 0.849 | 0.759 | 0.678 |
-| DINO(p=16) [2] | 0.801 | 0.934 | 0.768 | 0.895 | 0.890 | 0.753 |
-| Phikon [3] | 0.725 | 0.935 | 0.777 | 0.915 | 0.916 | 0.771 |
-| UNI [4] | 0.814 | 0.950 | 0.837 | 0.938 | 0.942 | 0.775 |
-| ViT-S/16 _(kaiko.ai)_ [5] | 0.797 | 0.943 | 0.828 | 0.893 | 0.915 | 0.770 |
-| ViT-S/8 _(kaiko.ai)_ [5] | 0.834 | 0.946 | 0.832 | 0.887 | 0.903 | 0.744 |
-| ViT-B/16 _(kaiko.ai)_ [5] | 0.810 | 0.960 | 0.826 | 0.898 | 0.889 | 0.753 |
-| ViT-B/8 _(kaiko.ai)_ [5] | 0.865 | 0.956 | 0.809 | 0.921 | 0.922 | 0.759 |
-| ViT-L/14 _(kaiko.ai)_ [5] | 0.870 | 0.930 | 0.809 | 0.898 | 0.931 | 0.774 |
+| Model | BACH | CRC | MHIST | PCam | Camelyon16 | PANDA |
+|---------|-------|-------|-------|--------|------------|-------|
+| ViT-S/16 _(random)_ [1] | 0.411 | 0.613 | 0.5 | 0.752 | 0.551 | 0.347 |
+| ViT-S/16 _(ImageNet)_ [1] | 0.675 | 0.936 | 0.827 | 0.861 | 0.751 | 0.676 |
+| DINO(p=16) [2] | 0.77 | 0.936 | 0.751 | 0.905 | 0.869 | 0.737 |
+| Phikon [3] | 0.715 | 0.942 | 0.766 | 0.925 | 0.879 | 0.784 |
+| UNI [4] | 0.797 | 0.95 | 0.835 | 0.939 | 0.933 | 0.774 |
+| ViT-S/16 _(kaiko.ai)_ [5] | 0.8 | 0.949 | 0.831 | 0.902 | 0.897 | 0.77 |
+| ViT-S/8 _(kaiko.ai)_ [5] | 0.825 | 0.948 | 0.826 | 0.887 | 0.879 | 0.741 |
+| ViT-B/16 _(kaiko.ai)_ [5] | 0.846 | 0.959 | 0.839 | 0.906 | 0.891 | 0.753 |
+| ViT-B/8 _(kaiko.ai)_ [5] | 0.867 | 0.952 | 0.814 | 0.921 | 0.939 | 0.761 |
+| ViT-L/14 _(kaiko.ai)_ [5] | 0.862 | 0.935 | 0.822 | 0.907 | 0.941 | 0.769 |
 
 _Table I: Linear probing evaluation of FMs on patch-level downstream datasets.
We report averaged balanced accuracy
 over 5 runs. Results are reported on the "test" split if available and otherwise on the "validation" split._

diff --git a/docs/images/starplot.png b/docs/images/starplot.png
new file mode 100644
index 0000000000000000000000000000000000000000..50d20d970efcb5ee27f0a65ba111f7257098ae83
GIT binary patch
literal 302767
[302,767 bytes of base85-encoded binary data for docs/images/starplot.png omitted; the payload carries no readable content]
zdedlBy@JAl4v`L+9JJ~sr#hV8Bj}^w%k{eqtV*ur(LmnjCU&uqswxHej;jgE1fAG> zynjO<$OA$il;q)kwr}UR z!I)KUlYhv#hJon&@DmXGC>EX$;7^&HyD^1F!vLKkc{JmyPFh=%&0G|m+4abYm)6tc zJeg@Id*}Ec^T+OEd*LD@{~#{&RI#Cy!RpGwo)ahH@kNnnM*Lviqwd5O_0A>o-A~jE z3eGFrvwZ&|ljk3*-6m0p^*%QjC&H13HVr8AytZ}_yavbw>%$N?rpnaz*ARsT_lbfY z)9eoGLpK)fU|4x<7jF?w0@Q9Ee%H}BY=^AB$B{MBBQzkgy(E1Z{~eoSEFC^YGJ zd0v@idn;4dg4GSGq@#f(xyMO;adGj#q zXlb>GO|xfXxhc|H*)jg(A>mAVFD@oFgJFv`{nt>Px11;=PK-$_UX>+6jkQ#(=uMr)@JvQG#uwQzxSX#`mnIWGv3qNnVEYX z3W6gyd7PMaqU($zOj3S+IS1 z6l!8Dd2Wo!!0TGmc|6+8bixpeR=N74?z)34elA3sP>nREB!U;L+8xHH63bHR1yd8* zJOP+t8s)|w!M6+TBI0t$--wGQR!ca@jPg5(cc1(=QxSRl&o&b7ABY8lLvxky&ivx7 zeIU(mev#9`NBV4nXjf&}NpYtZ_0R(zBH9;CyS^Uv7PP*St3q3~r+qPdmv`bBjmN&cjc3zV&VLn9@6}+LxEcSzEWuwl#gEC~a8L91%Ap)1*N*V#KhAyH3a!aW zP3ys7Gq{_uulV#`g%s<&J0g-&4*dZJezb_!m}B&Ir}b<9eXn1+?T{SXF16rihkKj! zB$VD;+>uG#ET^xdpi#4v(q)&H4IQ)mE-9%P@G8WKn~`CP*M?koEX}h0augsoo}Cbw zH&~JzKM>IkW&}o#{Uiv7#OC9BHN`+nQh;MIt9oc*LYNLNrrNaGFT==C+vmbnHs6nw z`#qCa9zMFN5jIrw#BR{N<$xmdJf%#cRzzoL4%>Zm13e{k1XWKEYYn}>4?gTI zfpW+>ZR<0;KaWJwBV?2(r+5gsvu5{Q9%IFwAf-4QYIczo1LO@pwrzg+bNuFaHNQ@TyiLf> zy^pSfaC0PmiXx+EEKQ|u6KUgsHPW`SVFAh@#=`-!VF6`-G&ki0@P@yLZeXJKFPyGo zC*f#}5Bz0zJ%-`TO^@C5C5p&2i(eU-f@+8kK9Y>Mo0Nf5@3AUkkK8Avh)Q6S#MMX& z5Bh*J$(i{)N@T$+I?QR@*ZJa4f1E)uN+O`oTfQrzJM6YY$@`yl1}Yw1iK~ ztY-67wO-$=K%#XS#AZ{Ch5F8VI_}`7e5r+&G3q?D=nN$rO~JM?ECtg zb*=+GL*=epwGB(6i+(RLSuuj;i;X1;p~Uk6nB288bd}W97@?qmFz)3(4s2+ThH|8e zW-ElrghFMK^QaK+{~@c~m{)V}yrw3goIuyH7Wtq=_Lkcv(WeC%&+cyH`?c1*ZqU72 zSl^Dfd>Jxvg zeuRcr@>#*9^h$2Mh&w{A6t3T#+Z7WN6AEwv=1<36#Fr@2ho?bznE*!7v*~$s%^XfV zGK;?5=82p48Bd-HKXZRljEy0tHO`jbxFHrcHVT@}LU732mD7@|M~XSR)mZ}SoVn-; z@Wt6(<^08qH6X2uJxcuPIoUqDP1zpZan8Ri<~+X~qBRlRqjBMaBZ-iJO_@hd zo1HJ(s;{ROls)U=aSZV1nR1F=tW&BWMz0?Q11V_ppsnc8@Vdn^dEG#sJ6!qk7Hbyv z(vnsDR>jtDmQh(8K~7Yzz8rVI@pI%^q{`UOE_?LBBodeeqv0uVpX(~#ixDnM9jY8W z@pQbryt+WRV)0=>?dWnlV;t-&FT`e&aE{25X&vBQg-H*8)RjP5QW*UFS zQ}fx@v48mcj{*_NNfG7H1Xbjue3Nu~J-l6x{r!5GXY@*z@3x58MSqZ}Tchzuzu}Bp z*%688yC*i?i!(C)^vf6domc|{BtTV95A>WRnmDMDBBG;lkI#SY?2OH- zN6p_6|NX_dd-C?`nBY3oamd0_9{O$UYIqs!p|q~q^1Z6T6DFDGf=1$dj?q5XGBi&- zJH3;B%t+aVvaX>amm$LXn6hAKT+p9HhEFjx?7u1s3ojG)3!37Cm}4SfkZ6H001L zo}=+`TY^X79JY&6F;j|QIb@2hA)3%8=T*#Hto-6v`PjBrp!vuv$IO>Q8dG=B>i)!) zbS|=*{FY0CU`)}D5ilFft5mmn9&#d}tgYS)Ia1|`V4cQE4^UbM@zRm_A)BJnB)6uo zFi*=O;s)J!hlWP)|JbkpNH=N)^RrMTpzZpE$?#2kQs6_b$I;Q>3@A3Hw_YWAs(73K z8|~Ng_fUI7L)M2wi5;Z}I*NaF49vSTvF)o4ct37cgR6haIrs4SuH)`|sz?+Kl&iq# zFyh9+Hff=Lx`!X{%)~v?@RLkb4*SY2oYCX!i;(6)*3W2Ynhe&2OV!_~=!I_h!)PkZsV?>|CsAwDd%+IHbJk_j zd#}~!MfUW`B24$q#?a7qW7C#{4ud!Y{wDE^}mrxu3 zn{SvxiY6#h(%3#*XTE&-JSXQK##}%EO6Sh?Jn8rJuVEjtBZlU?kt&jbXdhmq$01R8 zLd6r=$6vk}*Y()!XLHi(RrI#4^DlK*emyCv{c<*K(4gk$wyzI(gZ@yIh24F|LsOE} zEEl3{6?fyfB-6anJk~=6p;J@U(5QsKB)_QE>iTz}xyGQ?$^5%Kwfgq}h7cpKC?19h z1~;o>rl#69k=<^_NAv0|8`zBIM!t~c5^%;tz|Ghh+x6x6p*L?Mf1}+ZO+KmgXJaME z{0(_7nXH1I5Dg4b3LJzSiNtIar1WUcsR*+eV=OnoiNqvHYPKB`9T)!C|9Wn}PwZV! 
zBy}#bkL~=;qq`eg4;274`G;C#L?fs*$=E=1bG=;hc>HGlr!wYef8Ldk%rP?(>aDMT zWmwtmbBxv89d&l#7^@iMP&;<*-*5SJsZGO=p7SGmOHy4_kdOwVRunVaTE;`5g0FNB zR6Tpue)pYLo#_d{|JbViyr?J|$pf#!N3FH~A_a8*&o*mqT_hVO9*_=^-F)mGWD@Ds zOKiX>3FuMH>B<2-m{Ux9y5x!ETn610m*XH`&a$!D?}*|i=9w*>!z!~b8&lqeE(DWF5{UQ2UgLV+5-E7 zr#ROQKi+uZGObxWo&S!jCTFKcyh|*`sOBBmDo&hdbqoFY?u@8k>F$*63%3$%y<67b z%9V(|5cXA9_RfjhZ+@Ovt1FXG>N+ZYMVWf)a@hR#n)nT@)jrpK8Rqh3F$F>U1~m>^ zmI~};AaQ??JM9f)R~k_$ z{;Xd-k`j<}%kjWPnG9Dhp{UA$GI}faxy`il>Cq;qMC$tLm#(B=*}2ny^5zqMkkxA- zg~ap^vb5_spEP^W6EbNCyZzKBcrw##nc+9!3!G`p3=(rf1@m-v*#Vn6mC zw~W|W$^PyXcX+|OZl>d_%Z?N-UCyn(xDe;Ry_i+jgUduv*PDNnrKtbe4^h=};6Sp{BwK7B6W zrRD2(#(t$dQJ3Ol-A7$Wu;tMAzi=9bnLI)qhf(m36U-Z&e@kZQ>zD4m)I)_KI} zUGVVmc$0Y=blAUWTEo|Rm;t3k_x;%K%d3>7J`ME25%-EL&QL#hEVohAWpX1hf1o5} z!I=KLn{oSm3eq-wJmYkx;qG7lmS+!h6ug$AD?C3odRB6s{gA!Jsq6G@l?@$XXU#J{ zaqI28nfA@xMkP5xpYGBn72OQcg9rc0ss4gpsSA$yM$ze|6V;P8GMbGI^Nc(n=y^&m z(8(r0fA9Z!xMDyRp-;vNA&T4w&T)%xcZt|WLpx$>DXNzIFhMoQLV++yBF+tL=S6qc zXWdbE?c>_`=r~(jwwaG(!B9=U?Ge{I310XVA%sR)AE=jv?Cc2i-PP=qzn;t90P0AZ zYUBnYPy{rg6xhaq1FpqgNc{nl%W}|oJ$l2o$zR7;k4=2k;~(9z>1o%x{OC0io04u- zQ9mi!!X-6opx!5TPGE z_6AB6>+&!A6b3w|O)IOk`Q!>4F-SD&JxoA6RlHTf7WmyRtR#p9ay_s3w z0IkSB*E*26A%N#0r)Hhx!Qc@#NPj43A@AD!cgQUMp=-14X7Tn2p(d(3OkPB*fZj)E zt_@t1KD0R=MW5?0M=6^i{ETS-$i7>B;kMD6>H>LJjWQj*{slY1Q}7cg=t1oA2(6NN z-+dR!&ooN!{&cT`G3GyTU>KlCKY;0PgV7KqDDyB%YifeS@ewj>MH#0*GRS~IouCCr zfTW23eyF!g;G=h(ANmA>2%c>m1n$6zq%xK>yHMsr9dG^HDG$hV``0$$| z*^z6<=O4ZmI}^LiZWz8^`;;}U|LVmH1yG*=xjZF0_M#uqKLbtOKb8li{;Ui@0_~5p< z#x#CoN9`h93jtnns3cH2NVy?W!maz$ryhckk0Ed(kpmDk3>JhJpG#YdUB<49xus|G zPRO28-KnS?Y0tVZb(GQPwRi!Gh;n(E77wfF^(_WZXFsp>zi;>;up|+|BLbl+_H(1R zm%p*0RDS@dzAuJH7UjkQXhHz>k4;5TttnF}had|+76PK6+P&@Q`2ybC?Q9!zKWng* zp~Zj%)vdWn6lnMYF;ef`-n((*Mi5#=L^bW^m+{0^Y_MnbulVIm@f{V7S1Jt!kVd@p z(yxve$w(;MJF=p+gf734D6xWenQYBcZtQ>3?RNuRGIV}iN@`KB1;p;R&3-#BWUTWX z;1(wPHP``G;A;}4>p%-N3lGD3J47=(ORZ;KPelSWNTMWKqhJ$1*J?DD<#i2qzDYS6Xsn}RCt^J`u;g1ov+TN0x<|P8?unB+>TAC6 z@oN`zcrfuleb{o(q)98eQNw~(#YL|y@dcGmtgq#4jmxD*mQx`tey!uuny&9BAD>qH zdS8M=tL=5;ku93JGvAn>Q{*pkcm7CaWJuVcEssoOoj|1iK0f7-V5a;J z+Ud`z%tw!yQk3RJ{An;t-~8zr-y*$W_>};bIPo{5hZQJ}XoPlLP;xgq{JVrFTXvL( z{YYEeS+o1bYg^YVo}k~qe%#{f)qm7sQ`21FAv1^y zO%R!`3ndi?6Wu*T@Eaaw%1%hQNetK6aemO`<2}KC<1r!iE3M)IJJi%D8^l6-DAz|t z418Rx-)T-mzt>(^kU6x=KB-x)tEn)7Bk z@WYf8SOj94UqQ9Y@p?i1b?@ZM3tvwJV9{p~FV?XwK#fBE5ND{IG7oVJ_ zCdSU#tWJnhfY;z(Qh{u*fBCtPOfYimI_VAL+`;|wdn1U&ib)sZ$*X#LJx}b_!?^z@ zf`KR7akuH)Wklo+MO$V_w>kq^(p}%pfyVNYr1L%sV$dTVi{(S)Y!i*mfdhMhz@y4s zCsB|@ffFM}ht81jfk1eFBT59IcR zPs0v$d%$QV@HY$bn^EjH+|!b?fPXk(bIF|X&k^i>2weT{Bvz1J7_i7$Mt|2Lx)bx- zGA>vwFz^ z>Fnv#A60k<{X^OItDPcI%m!~6Jp#JDl^gAE@0QrDU*WSgp^1jy#%`Medl&s0rR9kY zjEDdu4Ff1=Vya6^;|8A#*l_ZXV|Q-KRF$yU;-(iC@3fs{9NyI$%Qi~Z={ZWT*DNc1tY zc=D%ltbz$jS?syGfy#T*hv=?|y0`E<8&ShT6DbQX#kFhK@Wet|GhjGKoGDnD(`;f~ z4>|G&G#+^^2ZJhS=8QP(-ce5f-G$KPDa}KT#K%Uq_JAuOT4zwwjl++Aun(Y}=|c`n zKfW$54tm?ytl!w-!X-<_3AO~7izs_nf!0zyJZQ23Xekj-6Dp20BPIaEG_f-(O!(a4!M?@9=8c84DhsfUGy5GNFitR;FWcX$f~^ za?Y8Q9}MIVlLBF<1Y>VcdCieToZLJz`?c2(vtU^X90{d3!L&V0VdVd0P6#MW0T%3*5Y-egOCS7t#t*=rLj(gk3^5 zUcqC7vvgL)EPBdAXxZ=qANULEY2Nh+3@R4H^4BUY&^B7T#0^yT#5bCwb~^yObdS(SD|QV0rm2+NCIynvdC) zOUnpeId{fV$0nK&5{`5|ojVz1K}F=_$0h0kJbtWv&!QL`1b6iaFXi3Z^H5QHzY1M! 
zqOgjdd>Q4Os!e#bX&?--`ky-rg817;!+8Rs-G?a*YTlH&cQ2#PSkU`K8y9+Us5_@yXGZ1|7P z!Z(0B$mxK}H3aF-3H%$dXUPz3E}N_9~XqU5dYk{i{0IYJv!U7 zc$A1ojbK1P0Z1eWfbu(+CygADhBHWS2*VGM&yAcnl!>jkb$_!Vh7L z2xN57+rC2s&uRPY8!JLb-{1@rbc^JdlYM45ya;)MnvL9V4xCgDkGU^#LCHFigM-FI zib8}`H|sv0?umekNA<#mD7*MN)7|jxL&W?VGpq=#eCR%~Dk)olLR7{t2|S@UFzpg68~H(qT?{xh5DZj9;zz(-y*n}LqL~B-y-mq) zQzF$SYpe-lfxQ_7S9U3m!`O(+RsqplHS7ov^=;^G%aG>p=3%#{#u9Ush<+5lWZ*D( zuoBMoJdJ4IKKo{!xAi@;lov~bV{xy@AN%#&izIIqAglk0R&f31cdJi$c~Q^z=egi_ zwt=TSrP4p#dH>^Ky2dv3_+R@iqrMMhzE&~r+`{gn{M<%Q?5adu;@)*|6yloY(}!B9 zNYND?6vf;Mjvn4#fI{K-?{Bq@e*KhDgl+xUd#ZV7-?`K8+D63QmN%VuUVEXkl68Z= z_=LxB6;0N#!tLcP`s4Ab$=;ka6~f@Nld`iJ^E;HKrOUKj=d6WZzI;inadASglMisa z_EN`}w(v5f@>a0i3Z^?Q`+??XV?VD(G@V>I-`%(BvX*ke2UHyR2Ih@@X!|s2M0hM0 zRPGiJKZfq`nAavex@aCD)>J`W2g9%HM9e~TfB@~t5->s)ujoaPl!PZ_MJ{<`iPH$$ zkKHSJ_4K!rEI2^hG_|(QLS1ClNOx+TcS$b^VIdT`QvFI0aOczCT z!@H?+oJD<0Ox~{Yrme~R!uVdlW?*biZEG8yL9d3X!df|WT47a1m)iy*489lbjv`(v?V8mso6Qo*!&cFs|O z3nw&WY{nQYOI-rLR4^uM_~(p>G5T7`n`;l#IYtTGN=gcC-GPSd5OQVFMC{$ae>~EY zMoJQV2odh!^nhC6`yz1xA9mg@1biCf#UeY*+RH=)+C;I>!V)F>k%avd{7h@gw9Km2 zgQ(VUF$UTJGW*im`H9H9-5gI!w?0f#g3JUXFv{5m|EG7KDCQkHeLBIWk8Qu#p3~Qg zlz1dxWHoP6iTmquQASBoc85TG2QvB?t_cnUB8e>uGI}Sl539+NVos)Po>hK(bU|_&##aEULJ;a zK^+DB_|Zw$HwHYDibqqlQx2(~Xx3IRt8>_2^MQ58>hR^Kj4@i4+ALjJzBewY9BZZ< zirXR1;dgCTOl^@`g5!?cXCK_;T@wp83DtAgYuN{CLYsm1A5gpB{l-7uG~@i*XAIvF-n=Wbcmue5g@0YDlQ` z3thJk1>Jg8OgR!XI@Xc}C0P&^GU1G4Y78efo>kwI67_}U@2!vTUT9EXP7+x@CjFMG zabkb?sVW0EMcTp-*0(zSwyAk2OH?FQn#RNg^;3TS@!)2>*Ae5`^*>Y}ygR^P#S6u^ z56xzTmWaSyNtU@N!{H$)T$|lF^zK<|p-2dw=^(!nmsEqK<5&!J2D4&w`mLsPW~tx5 z1DEKjJt$H#)=O)-{_b2}+&$-VwzAJo7#e8YRQ|nt+mqrKtZ#pa+~WwK>J|FG9e6bU z3jGUDRaH4M#7rJTB9}&U=N0u@kF+43D_DO3$23lLQ_m2bIdKfT#x}Z05G5xO>r&Hm z)BrO0QygoJyvIxVHk>GQIKfwwx6U1=^IR>Vb$lPR=s_u^@jNe3d}!=y^qcqh{)XS$ zvh{XsG~J=zgc2So0H3~#@u?lspBw3;{ayRvgRnh!rKk`dM1XHFJDOq;fHLj}!|UBn z6{jqXS)I$yrK~@C%Gyq>q{h}GnX zk|Ls_s)JyEvsq1*HkNo*@(7=Ho^I*w3_7L0bw*BLSK9)27bL(h!r=Dk-O^<$pfG+Ldp+WZRyIz+Wy&7(4 zlYE?c<~TK%@z_yGvtwmRBMZp_k#S;Fmm)+Cv%WRILOnFuJo>8ou7B-;o}6lCdViuV z!lo?5xd(qQWbcEV#|&x=gnS&l{>uu$^Pgg#|D~ns`|B@hMo@Xv!RNwmQ*eva4fG$o zMr^5ejpVDUCcXY!dt$)*_(^&9n1r1N54Z4n$vPWd)oqAUrgd#o@KD;OeV(`R(dme; zx_~Vej8q;snN)6m`jnT#3B3a77K~XP&l56ZD3`yT9+QdBKK}FQ;WAf?z~yhr-X#Ks zc4vGPTd5;@qGen>j&vvGws~*$*gbM)U%`f5D;L4_v=85O^+x&cpvx9O7S3k^Pw_Ce z^X*9lhp&`>o6r+_}a3curpVOuKwT6LV?iaoXr79vpOMPe*K1ePQ0T+|4Ib zxZ5Rq|5w^v3HyEY4sRX}4w*zoMMY+<`fT>=)ZOXz_Y$_x;ne|?dhg{-+n)`{xMMkt z6hNeQ89coj%uy{g+$$PsuOMnwrv2<^P^fJayYU_g|4TMEh8eTp{r%#- zExU6jF>7;j#W$7u^rX~uj{NHy{ws?&vg{JK2Ii&SwFHw+Q0@MH!#>Zi# zscPh`$WOAE2K>Lgp(Xyz63*eREk=a)p2aA@s~l!vi%2I((e%dM;__76#<(-6$} ztoGIhy>gWGcBji5;bR|eg8jLDG=q(;ccJI{41WiPkrYk_}{l?k4{K<@XC2xyCx3CK>R>?5VO?t{Sy`z8^ZRDJ z<2hPmt5(-DVt3KdSvrb{vTCZ;%$j0JwEow|Mqy)vR!v!i>RU9fJA^csj` zUjc8MGNw@_CtzXclc6rc@8Jrs4Xv`jcwPI6@qLZ3gif`v&nZ}=}HDOPA!jHX4 zQf|6S6;Rh2IbmS+p-xn+CF1epMuQ6`CR&Z1eHGW|vm~)sjOB;}&wj1+lMy@sN_;PN zt%Hkrh!iJ`xPutN1|gpx|Nl={g!MUl#`W%4`1bcB@P?X?AN#Sxgn4^IR~CRFe4n=y zHb3d2F3n8XSv!~kg{jhmQ`R01+>A7!e7Nwei?gfBA6;BSaOZ_eHigo9hF<$ zye%^F5Mzt>lM?O^n`#|YA0+l=syECD7o{dD?o&)iYHchs^+<6ldP2#^TK$(mGc{!>MUhn|AUMutHn1IL0 zL0Ik;l~ae}DGN~~IZ}Ni<*bEFuYUOOHbv!Bty;@D&V`OVJg>OnwR^)(X0J>`IVM95 zSw7up`K)#apX9X54hOTLSfs+l4eVD}Xisw7gSyMt=T}?POc#L(APa{7{X}C5`$qX0 zZJQe=6n6pwIWE%Nia$9uuzu&^t-8D4X|DgZTvz*nVZDptpNia`xRjlNa_5Ic%m4hj zvr%*Zg;I6Z6B^zxdRf$>UWiJjPvudbKKQ8O1%ImgJ7HQs9VLfn6mXOd*yZ)k9`2id z#xrrcvG?o?!V+cT0D0)7uBUVIAB|#kVR4APQnHEHYaesGm_2WHs1+T`KX$^7ixk zS4VG{d5Fg~eJ;CvLFO&RYyJLnpA!tqj@J0x(3RISFBmVCHoYL4H3oyV%swdRs)vIe 
z->Xjg^-ZPaO547nproMCQd2g7Wv(O32to9Rw8q_!P!jNrM1c_Cl;TUGaO@6&11_R+}%+mlWot=*{;ES)GDS=KjvHk-vtg%rdKnL^iAxw<}v&LWj$%p#wJ8l)HibDR6*gNA+l_%V_sXqY$kkFKx83}#a^+}yF0 z?`hro)VFPXw~go_>x&!HGjfLG!fWrXe7G}v&#>h5LIiws*=1 zzqftWX1p)*_wQtW*VsE$HunKOFaDWz(L}kyyM;Me`(4aLz}vUlI&$N}SYCvMs?e6? zRUQ6+DUQYKuoC9r=CSQ%IaeS-MZ9_`Y*W{9A3(1UbQOSD-IcW#T+tOVqGP;l(P<_0 z%khs2J;p9~dhv&^TMkY8WPVp*bH^`v%t~>kCK3p3)z2vwAJ5;ClC2 zHS5K@PNp*)T6?1ucc=-T81rb@B-GTD=YFXxxX+-Vun>BIG{h7%`iCVZVh<7_Igsp1 z2zMz)C;tl6Uias_}|N zxi{@AD`(@hR6GtH+C17MzVo(cOU3B(;|@#rN4*|kI&s)>g{FMwfs?$mV$KyY z=#Fe<#}+f$L{;-=zYoE-Lw*0!D{i}wFYZC|>iDP@UraFlf8u4ylDD}YS8&jk3-z@3 zCo39rq2@f0>kSMGmK=UPk@pi z<>%)&tU4>?G$rhFGg*WvkOg@%L%yX9xO19QWcmr711NBo+hl$$O=<}pOdQ+ zoQKEDRUe3QSC_q9O2QULA!(WJas>%r=$HVo-SK7qCLIu$SeeLxL3a6tdDFXhpP`>3 zAN~xqTO?pjbJNvpp`oFtOIBbue2$HWB&CMXs>sk7hIqy;Vsz`w#lN_87?(?5t*JR~ z?qm#7vGKOZVLC=eBADVIchV4%jJ*ke6(bGRu3fuQhWLhhi-HHLWJY58?3v@8bw6y` z|7=(L+Xn?}ctZPjp3X^OnOskE?&Z{FR#EP_T_f%RfqgfNo8u@~U-iz^bD$C*Fgk|8$W z)3&1<*j!7f!fK=g`qFHLC0;7KIB-{2vzp~ZtzVz#Ew84@{8iKw>DFh?p2ZtQyn^7Z zQguV_$A=%fU58%q&5Nr)e*q6}f-q^Tojc@9TWk>fL?{ok+C_-}tzW*eL|uLT(w`Ws zd+>jO*y$;A5jQPy_yR@8D9N;g7LU(B0JId&2O-{*4VVt>2OM}8x~yoBEL$3PYd^l- z-EE9i6hNrM&=eD`OYphc4)8*&?v)xl{y>kN>C+3Fk(n~_T`y4(a zJLogC+S6Ceafm^O-^C(uqMIdRlmjLCCfP(VNkL-X28!Eki^y3si^0%|ME3Cs)6_QS z3s*$*d+dl|0AUDt0sD&W38{l$X2Uz z@QhACHb1$R)2~}6BqkOL%O8KLZteE28bsZ)_eov9gc)roUGxw$8 z+8-`Fq zn3A-lkz2|E*`l-u>fWYXo`j6h_hddk`&H}5aF9X%SEIJN!0e0z*740e%`$VB7BZz;TWYv{xr0{}}WKrU%NeVpQ!ey10?_<;0;6tcu$}N#1Sx zGdZiUn(?$RfT#br#`5uFODV2W)QwW;1*H<25|_z{O7w1I+(xJY1V&4RK zyo-yG$W6(nN;%A>NGQnRb z*`*YuNk%Hl@?2PCWTb=5Q+r$zQ22BDVchT^OKv{MMS)!cggQ5~c6Qc;qmBGF7|Pxy z%E#jJX1w!e?Ms{)cf^4#LFBL>J7MGYIev5s4%dEMGw%EY9}Jy&tF33T$8QjlPO?F= zv-TW#x!j>>bw|Y9vYjdRM!-54gm6k&K1ev0%42=Pq#=FpH3Y^agbwiyN7Mb&(OHnS zdn7nWagK%PIgkhl&(^~~bK8m?2Hs-qX~d@q`l%aXTsKf)(f$*fuYB(nSA=o(jfQ+P zR*SL1a1MVL`Cfx9{JvU?9q2!pUu5A$_h@W)lS|Ct`1+tL%Tuq*Uc~RF)7eqlKGC~MuXH!c-Qi6rEW&8*vccb9*R-!l zC!}_mb(rzqi`SE3;le0L3f9f{-+;}#wm#zEX4%h&ZeQ!L zwSu))vwgn@ypENVCue5Yr}^A)lk&iIk92otu^4j1q5gPhEsZ*{30L8jZOQ^exbx2- z6qTf+y)JlHg&xnxie)kA!}w=h-)l&E)g1lu(Ec3Uw*f%yDgB5Z(|qMV)Yd#UzMjZ0pxn8C ztd*7|7mx*>6vWD$SLeBz^X7F3v7Z`>qaaTfWiKiw%3k`EOlPn5x}}8}v}abSiSCt)+{K0wQ{jC%X*m3P*QO2o&< z>Cci=qyvZIrwwxh1=;3PcW*2CeA{m9Jj#~&S1F)q^o%oYifuoQRj%@qY3q0CZqLP% ziG5Efq&BVF607n#+u|N=&^cYk59i6t^E?9Cz&Cf1ty3_QhTW+=#w^R|erp4Rbz(?t$?V^t+Vbq1VdyTKd5n zZhf8L~}UAGgNilU`-()ox@Q&4^+%exys{QiBQ0#WL_dy4~b|ggubN+W(XGfI{m$wZfr_k*VMET zqPRmxj!e2$1k{E>gBFW{EnVG6IwmC@#@g2bf1psS^9k?BV=NijQ+gjdPbUvR}{Bq1vZ$;*KYiU;(aa#!LN zulmS-S>O$}<~LX_DaqH!>W(Q4e#H=RP^5grGi!|@yZLiN;&;U)zEOlLdWg{kZu@yE zbB=EL&mYEhu5Kr))_NBSa@_fPu4c}N#j-T$V8@NIDW*qd^SaBs4SXn$Rs|N$B((V? 
z(_isvx0{xMm^UP$&%EHt{SD=HYNBQ3UeBmFZ=fBXj zE}ZeV5I-moi>2t^-=eEjRQ*(LZhv({`%6lb!_>1IW~LGKYdSj23g>xw`|RC=)`e-P z!i$bp+rV(aVuwr+I<()|7C~Arl#%aAYJ7n7xajcKZT&ggLaXrSJq$TQd~L1p9~hW- z&O$CIKQy)a`!})*woybpZo&PtH)#rTCVL80MIx?RvCGvJCDw9^V;Zx-p6Omfn5U+# z9lEozT%L%V(3g{U9FFa+XlIBhn_Q09f`VMjk2dshukz=84~_0Cah!44>@okX1Km6u zJG=bVuL8!RZZd_qAMh2D8--lcoHrkLEqE?WCIePaO4^L0(P^K6#{~lNt51h8m)MJx z%C3NPd7=cluVWKIb?`K?L!jX!ks<&3ehe&=4kM|OIArFH}nl8*N%k5;=d`yDoK)Z2?Y?97q!CV#|qx>UNluu;~v=Y z9AquVH_EZ%Y1M`=XdQo^+&DE*zA>RGNLb6;p#&4XPyK0K-IoyeplM}*po5V3-j2>| zg%swgQ)7t0I8ok>cxhqqP<%gpOYT2Xn6qdZE0(Lnwii2yxOq~|H!lc^H&c1dB`Rwa z7p)#TQPy;}TuAk>E8Vol9)`zk*M*03r6S`a51l)=;?sP9Hz>>X=75>HOW(Hx)ZP`l z{WnkX3HQ)vpKL8-?fs$1ouwMIw)gxoXn=2TlQ)+-_qqUqK=>|TE0T;|SAtu1Y9ae) z@kEGQe2TCZE-IDegP8`d%Y)@Qo4IUX%$wNS+Cm5{A|n%x&=eoFl%eH@W zZ8>%_D25Yx5$@}fC!FOVDgpwplKz3;peaBoij)B4(cH$h5l%`1!Z58Lsj|^A?YMqD z&qX{snsKl_S@niKy`G+wJx|@MaA%o{A0<_*VMi18>YX@I|Ec~aQl*zzt?WJEB+7T2 z`RKEh?ACyJ`yIk+Ug6bviEM^@&=_JC@^;wPSdiO7jAVnWiE!AjUAwjw(X{ZIzC>@? zOug~czatNYFm!_?Myt{F^hyL&zrUV3yf`_pINa3F#Kbi1w!RAVHn~=jVvBWu`;oY4 zS~3M=Ikoe{Os~@Kh-QfDDYO|GY+=OG4n_+6(iE?oo4=BXj}5gKoAwup>K^?w#>(KS^uW6^k<=_@Wo06u6XI{qNJX)N z2=UXWPhJ~u>^kOiplHxRD7R?!mCh@nG5N*8n~gp+1xL9#yaun;j&mzIY~MSQvn$0w zEJ~QZNJahBAz}8dkAAJqg2=!jw3M0N)ouRe+STGy8K%rRG0E#x^gS5{zTG+aaA>x1 zok&y1*uuL8Sfx%Ij7FP#0oArI2;_BK}A3-!a9bhbQ0Ap3{S zLu?>&r|o!dUwSFEyJ};?Ia+&OWn~Xr+qd-0GZIpJWDZ{1WpG0I(4pL5S6!|!4KkAB z&4RG|ol+#a+)ut9jPoBJ$&imN7r)(SP*nNJ_YQn*Tyu?2>K##jev3>h6 zmP*iaMJC=b$=(V%HY(x3Ha1O7%`3>u&b#noD+vRJp|9%#TprWxV=EUYkYYSuc*-Qt zLb+-tVgm#9x{Z>bu~qwCESRk_I=<LlZZmVPzDdVXH}bE=zy^P9Mt7zYO9bqljO#&N8{^CFO7KgHxSARN1QmifMCrx zQGm80wk-lZK^6p^qgUr?GNhrE5fT**AzP9vXVAQS!r2=E*FJ(p62mZn2`MP>`9aXh zkk#>aPuHnhOhq%|2TgHEkuG`A$L9b^zG^w0RRt{R;=!p3={2`B&ZY~N@{f-_h0a&g z&Q1ba5Q>VuL`Kt;XW4Wy7#t=Ks!@ERiP=z z@`9b0nZ%(_ms|M@Bu1R&E?m5rnw<^ji3S|buH^&UdZ`9NM?ESw-PQiW-p%|jMnRP1V#FX8@0nP9ZJwmzrT=|e6}CcLZVKvPP%GVN6LqB-An_OxsA@sxn0
S1&dOfx){HeTd%BxUxb5oW&Kz&G|9ic7)r_0N zHh@FUcHTa&rZ;b2U)%P+v(GL}NZp$|XLXmIhv&LV+W{%dB9G9o)7_M{idwB_^0BN& z8zlnS6Dg89v=W@_P=%$4J?6iDn+4XpJH%P$UUz0|#v~20#;)`E>uqpzc_@FF%qI|~ zlHeUd#RI0+JON3`4F#Da*#>OdG&F#!=Ao`6cFpnd=b zO|+%h$YOrqgrA?%5|ExcQ4Q-5a%I25=`((51v*K<>=b0$mp>_o*eG%_0sDB1F3BrU zj{@He1>co-yAo+iP{{Dvq&*j>)w4R@x3&(k&sX%Fa&A*kTsw*BYcQ!dIc{pGsC2w( zOUlSkzmipp68T6}bfQf}(?U|`$mGW|warylkr`SM|4AX%6hxr3d)NWRIz>6v$d{QY-TH} z5gMafn@JTHeOtvMiTnGx3lFY@H`w@QT-Z3*=V70I{MFgEX61r3ffq@+yGzzB#lIfj zpI6HP;Wwg&w+(+>sp^~3NNxL7=iuw>`^+T!*bAdf1EH;ZU7n3_(>av%ZTlt9$7Hvc z7wf8kx%BpuL_krFP#r;c@?L!k!-WXx|$ENXZ<9z~LJR3T$JrR_;cfY;M zh@FF@^Fr*=qj`=3_g#4yOUlg*3^>s6kqn#YXknW7a!!bh{sETr2Xg<2z0nC){csF! z)!&cn?RwA3xbw>wA%5kmU$BC`f*Rpl;V{W{hzRj~7*0LI{MF;id#!CtcJ6;tBFF$$?Ts5bC zxlgtfR2q`YHgHN9x4zjsinzqFf?T`>Xm3cM%X2_`_x9Sy&&bQ*H>9-bXO8jT`oO)) zGjJeB2-F`LC@}F~!5BJ={3K@En6{|5!@gE*e0u2KNs`%_eB*0VI{WY+jX24ML_}<4 z{xh`e;OWAk8vP9?T<5NuR$3F#qyL9}y~w87_iYUJS9hK7p9-ivoosc+ zi!CaO_0a7#fs~S^6hWzf@@mbb4=O92{T@)cmG4OU!C&(%v%VGw_@C*WHED$dAB45; z$&*^RK=7Vfp>4vnjppWEcl+q5bCbS~EUB2ZfkZJE65q{Ym~3M@WB zikSN0!#72(_tU^9G&Xh~C=QsOWE6+PJLtZ9b$XP|E;NhjZ9g)QL+eyFq1<=aFdf0G z+}7a}&t$c~%zAaumg&Wte&;H<>NKR5T*$`Jex>?UFxA4GRJc?$EY@CL3VRcpjQozY zHlrEw@3H%eLpHxuz_3s|MgGMuNy7J_Lblo#ks+=@<$?F`hDl`OHR>$Z@$R)tLMZj?*N338j#i#paZ=Ci zTJiadh~{p4#2r@g`|-=WI>+C*B>J86-Mq;jN?g&T+l7l~%tu7`o3k8h+%FVfcg&4f zu^WTHTR46^=#gp1=B^U&5ndzY5d7%B(t^9hhO87Iw>f+s!4B&d$&Mh6$NKqKMs;$k z{&=Jvd)N$(THW72i%Qti#r)&Mm=aAOWm3bZn%1inwJU#^UCT*SsL+99e}Smf{^;?P zWv%56&ue5M6j9#IG;{b| zeq`t9jpFCgJOv6d$(vA7G7s6?p5{_g4x2gYVx5blBG07QOH74b{)Avyd7Z9o1A* zTZ_*grXfVrxR9zVD#gWE$-8T3fyByFFTj0>LPPaL>}u=8d}UXAdA7S^gcojUlkCpX zLIHphU#v|Z>(Bqnn{krW&3vagJ-EHgKPNV2YdDWn5er*X=x*PgIjd$DMK)~FraB=h zuAJW_V^K3Dl?Mb*uTPLuhyN;KaRmr%3G8*wI| z(J?rf6$3=&tC=@Kbyq>L{8&2~+03Z1P z^A)LG4-a{6zI5pl7b6WG1RN_B*2)5}E4^1J$~fxG&;#J3P@)$dAzD42upr`wbdrrb z;O&u1eXtAB%IG_fp(7*EX4T)tzj9V9%+VMJ&+bit`1>4KV7L^h&Oh9f=QLbnS>YO1 zXOp?}MW6=5^^}H_d>*3H0oWuI2n;8nvJ#&>UlE&TW~xa^(uC zsDO$Or+)$9df`Kw-pP~Wh0U`m`kC(VUX#lO(Gka9zk4SP(veb5;}B$74$6;xhkBF#Yo-)qs+Li6_U6^Huzi7oHy0MRtgM1cRYdC_)q3 zwYTx&Xz#%Dyws+c#fmwHu0D$gZt`gKrj8KKkbo>C0zRByp70OEu}$MCs>z24uUkAM z$;x%Aw;sKFBiO|KfJVenNRWDs^|+ZvN(H;rHE42#s%v4pmaBKgqOZmIvnl|a#H1lyWxuyEkFWW4dw;G*47(2u| z>oS|fRsN6_&_{>ue}|BAk!^U>Fl&C`>DB8#Gf6j$#$v6w zQ`PRebl0z`LURIOhblV;VgjMSg!dw<^yjO1=(sBc_gnu54&-}My_+YkdyR=%f6Wo6IqWH3Ll z^G;-9U>MRh-VTK1)5wT7rzgL6M1veQxG)i}9t+7VmwtR9Lx>Rn?SoQYb+0@^-~9Pg zo^HDswRu|nE(`w@>s)Z_F(rUDzkhp~Q#O3ecsj>%yz9ISRe{m|)}T?a?z-ve&z4j{LE4v)g@*Q9_*~3hXl>qBvp!+U2Ul}ZO z$vVcUFmg*;E?b^NWISpMZk3R5OzJRqFAiV95%>(?z2WkCf+DmFyel5HMjKMAtkKa|YmhhwRGY5-6X%kmFYKwB_Brlh7a zd&GZcRkafFn%^+q$@Mgc>G}y7UCWQTofFTx16RBA<}aGW+m)I**qx|;?LsL!FjaFV zQqr&e*IE^ofGA<=1_5UFZA{*;S2xa$EUa6;x5_?uG~Ms7OysMtlj~2$K>af={7IaH z0ky!o@E+y_XSqa{JHb)mDpmVfq&lXzw=FOYOcpkoNM-aMAGd`A*ZGN^g0f4quZ- z6eGvR#&pzWf)mi9E=fqd=-i`PjiTIwcq26k_epwvg!C2?3P9jxv{QumeAu&$rOk*+ zqhcbB{EByr8ekKL7HhPxuQWzBo6A_IFv11N~Zpe9=CwjK43hG7SCCOXIq1O)@W ze|I9zVe$twy<7h!T5@k7W{(>?32MGv);o3T2!J}0q-R?ckK1Awyvq&<#u&eesDWgO*G(S1mR@PWf#4~!l@SjFQl3W*XFSic zBJI-Eb+2{d)z=QPOrB2-jf+ArU!-_kys$3k@Q+_(jTB-cmyNlUYYgV>%UM-PdJiqd5pYO1V3(0ps~q*E8u!PbZJ6v}@M=wvP5(Jx~{MB)y< zV?fZsEeo}Fdx3AfMR2axkt4z~G8`4BcU~J|g`9lOLlI;ID*I*F{Gx8(4tR4ed6usk}a1@GKOElvq@R>UMjD-;PGtQw1BGE0(@_ zs)gpYHs5o#)zXT4e`nU=mYtfROU`O*$^s*sv4rmN56|6G8>x5fh`clMrdP=T6bc&) z3+|Tij1z5y4nctO7QBQc<9FBlVF)1fUXsNj5XPL%4X{SRBUR|&L+;XnT^Bngnw9=~ zQ6TqkSI8mF2e^A5c>U0#Ac35js=r>26b)l|@LpL!R$jP6-%%*F5L?LT2Yj%!r8*A# zjlQGxNzoN`M?_m$(@+H{5nZvn5T;khK1Sm83(1A3q5S??hGn8AgXvG;{pbWe`&KEq 
z;K5e@g3`eo4*%?fIk6}t5A*&)UEtjJ$~5Z}gj+mOpVf17|G@7&hcedX(^W< zzyt^CRcNpV7hiT*7IzQ{0)gxCPLVCAxR|XLd6KEB)k{CJ6SGOFkN0Uo! z<{l^^sJ~Wn0+vs_6>Aml&7YRx33H@rL5~RC4YJ*_NT<)Ld>X$7S?6-_u|B5iwWRY{ zL-rI9pAylP4mRC;Dcy~gViEe89NZU9pOXCb`^wXK*M(x|2inGL!e@=vFwoc3YGw6m zoHpcUxV|R+#NjK-qCBzhjkt`qY>(qhxYonx*JHO;maAFOZLH!B?anc`9kM<2A2L~_ z+-DkU=}~GO9MVcl5sqZ}!9I9b7Td*H=M9S3+$> z9gG+RRt%Mx%@8_fU|=vO_6=YMg~9FT4SRLmsLrSQZ>sET)^O@fjX3=3+*JQ7Qz@ex z{C{OV(_Q9sr8*o+4HL5!de(h1+~~9>iJaO>Q9Jxlz>?{ z*Jo>)m2Nr9f?mG1A2%O%AfCmM{yq42Re;cD8!azl;{83$?3;vD0i~s-%6)FD(4C41 z3xCd&-&Q{0K+s&Ir;B04pZ)n05Byg{)!& zl4#muyB-xRbI5qMp}23Vy|Z&+S(<}>pD(}@9Ctz|5ls%(=f3>;+zU7i*&aa}Y!k-X zqeqV1a@-i!|0e>pC$^z`%;2ox-YEPe3u$r#cYph*s#`BQvWk$Glli10|jq2;`d z$YoLKwSOoxkRlRw1d%b|p)ouvM#q+HOX{9-CEjBgacBaQ`xt*Ee?=^d2)^WO(~=+1 z%vqFpPG@1KB9TeJyf28|y$I5C>z&3(qMU6=_GOVIiNUL7X=y#!OXGx{td!ewmfNyQh)WL>3JI6d&FUa?r^>B~gW&27v+jaQ-Ie{i(QR;8Qq0or zFoWdt)jgT>ff~6NRh;NVIxiPbci+&+PAjTcKkguFn_hM&!sn?-KuDx6W;`Ds1_F%q zjwhM5I!+ZO>Trv%U8RB-7s5GUj07hoW8!wm!qoD|YWZ?sJ4$ik7OMSndFkT>UTa~k znsd7?_a@9PE^eGzv<&I7vj*G&(aVTyd6d=jzKbswlQpxYhMkOD%Xg-qiZJA+jCab5 zbs4lY-4d6^wuQF!VEI9o9GMign(MdBPf<1PpDL!1Jl9{Q5uI)Q&Uznsq-)on9gA{w zph;(UoeAQWPTIIJd5ii#VGfzppaj^gpzyoZS>Y{C1JNWB@jbo(DKwsXHitHI)#9Q; zKnFGKXp-atL}n%mzMz;GgVt?;XtPWC$B<%zK!U5S#n#Ya;M(+3V+dae4&&W-xqgGu zuRjaD#}QhMylO^w>wpS;DTOk1vAwWE0}~j_#*JOsrAtsgE+1IO6r9>3m;j*oQ&Pb%lA;L4 z4oYAzDtER1w)2k{3EYT33%%A58xao=ZAS}?)9z5(F5$&#CToGf6+-?5Y~=Xy}WLGf60ntFsBdZVtMg(qZYHF~t9I>?Af>;!U9_ozVBg^y9KNngxayH9NNT9K8 zo0unpp6>1td=BhbmKRnJ+QGT*?{ZhwKLHwEEkdZawY5Q0*#_Ou!GjcJCW$@w05T-9 z&wLh%tKX5J_yb5gm<4S#54hqpGDIK1k>JyC3G5hojnNkwzi{7$-s5AOk0A#{mTHh8 z8)5742+;)Q3reQ`TbA3{2%LC)Pwk~6`B%10_Nu#H~lfdi_<+Bc43<+s?SCY&r*gvu1d^fZlYYCukjhI;Ub1Iz((rQ)u60%{=OmZUx zeW*(B+uJ{jTgOC3`n8?s2=vjE!qkHM9Z%OwTAHW1~c{ z;$hLH@lF?bq%<}4&HE2*DAPI_czXBQ{DY@Y6NPZ%89RrCkR@(zT-_Aret`$G4e!Lo z9RlR&QBi5meDAD39x-W(vEt(IEDBecvFk&KO8Bo`zYe|8<<4Alg8$<3MkYB0pn4D^ zgtQ|{^Fo`J17kK%u$|^I*~nqr@g$kvhYk?exW1HeRtFmgM;+OyjdSxHvLKuS#mV6z za$A6@KZn7r!g>O;6Irft1**y1DJ9aq1fk>MdC%afpvNCyp#K9&mr=52;%ne6rmuC^FL zZl@sJTF^k5n_VaMV3T>6NMJ$d451(6d!ni^8Tq-^p6T9D$QI;D;?g;r zZ;aW*EJEGo6_35W{bXNB1iXPup*rDLLvIeJMr0Io_k3*kX}hcy$-DLJasQ`XZG1y5 zxt_jBW%-=xjYsX%=r!#rcx=k2!ZjX$ZNJel>^LX1d-v_DKclg>_1Un;+SL=@49g}~ zjMcqY4`ui076$#HU-mwBSpGhOGI5h2miaKmhv1g*LcFssl-)*$2;PLIue_7cLE}ru zO+-mac_wL8D*4HiP{?cF>~sHcY2l=7P(bV?Y_k&N)2@3k=)V#goZ{GM14RIVvTcBN+ z&H3rmW(7WUFiEF_l=bkbP{LrvWjQ%ca^5ZBJ%Ui5XuDCrH0A!Ao)GMJ?7-4BqJAf0 z4^Vw)iQ_|qZ+LtQ7poj+!zxIgt##$A5Z)jw3Tj8rfXdic8i*oDIIe7R*em_Sm#?b< zmD*Z2+YT9O$#sOeygsNJMd_LCyG_8`Z~N>>+k~^RL^Yr93zj*}HI7?d@uA9Hr&2O9 zFzqy6qh}`y?cw3!Pxm)dD7fhy?K4fFZzF8$(U`nfTCf;^(%`bqoWN}%nE)@*6iw6 z{$~R`%ZX$FjcdqlT?rj|;L>@L$Uu@Y0%+Y12@T~L*VUk9qJ3&|-Ok?X*kJ`%xAM5- zj(&1Vks{}37t?bx!{5~$iA;K~5>_NbS7h4rMpNm4V0_3+7T^}_+rIUf2U?HSOr?Km zHr_DT*V~J0+Y&k}v>8H;yKmgMfn#_Njeun$n}qCcz}sq?n$lKpIe-k|0D{7aiiy#H zUdGFIN5fPQRHfJ`aZk!JK=jrGV3wpO zp=(UJioz)d&m=HtQ(5&F@Mif^Fmc$m^aPm>*K}q)+R_s6Hhb)Isv{Gfg^Ix%{0?<> z2b&c)2K`9wk{mPjw8#IB&K+-6T1ui`#W(_p*AF>=kR}rjz0w7Xy@>&&vI=5O2+#YG zWv;5<{RWFpA&h`6561|vBnT)vIy#nzSE`dB$^kX|dv)~^-ewsYqVEJihmtNNEWEa` zN5S?07Dn*%t#_|b7SR%fYy)!+6m@tLG>WR0pQ`AmM*enc+%Yg&9d_73Lu4s%Z|B+C z5B&DjL%w`lhDY=aKM#HTzMz@8M$~EVCkOlLoyr;&Cu%Hz-uLC@Yr4kEFFVm;Eb+i@ zwY%Fjt0ZmfF-~6%8YEZqeVf(9*NAZeQ%fztt6vi%OA?CE$X2F2$4HQpx9xu}xm(*| zl9T;RqLHwG!2r)4hZmY|Kj4kCy5FpRTEBkxsCrz}_2SAq>XNnUoZP{aDJOC@cP|&( z%y7o@A1$3^+cU0wNK)hplWn+3>eueN8|W$pg#t@|oe@!8+6@FcVtVtKuh!qStL)5T z#c1Zq{ORywRs);`MgU2q&;d)M9GaPFK?AzR-R=@eXv1yyIgpbbN1EiJm6h?_)mF#3 z#F0Kgd2BP67zYD2?uE59G(et_^OJ@gZ#W)NE`dP_d7S3YoEbW=l4*cxuRuabiwzJT 
z5buY1GiEu4ae2?n*ypRacO};dIA3c0eteO?Csf%rHT0OUuxUzvyHEIf!?8APZ9AQ9 zhBk^(+pQT~i&`f0E=>681_W$Ytl$Y2Vk+wWxk)zBqA1{@z~smEA2gE;pX;Ox?cEy# z6cZK5UCsLd&0*|HB9{=^5D64n4hp%xL;t^@ns2v;CPRY7yGX>k0DaJLcQ1AS)f<86 zu`gd(IXHy9<|;maetAr3*h#!+;I^McNqT#1f|W_MWUqmkjAb$h$5*{}UBAhoZ*!YZ zxo+TzO&QQMza4j5YEz&|a8;GRc+U4)yG>=f{L{;nbjzcB`pNrvWaI5)v;%>{?sK)&=q7OK^9WMb4h!fdiYM zxk0ib?0-1=w?Y>GB>!d#w|>#HJoKi3iH`2&x!v$f;FVh68cTg_^$-7b|CC^5o*v2t z)FC<}YBv)REjB%EUyAsrd(_fw^_=6fPfR>f&!sKmIkNR$e0(#_a#+}dzi!jlqQr(R z@{M6t!GpOW$F}kuAUcu$k~3Rc>i*AVi25OQO9E?oAvM$!Hrm`B0ZciV@npXEi?OyV})B6E(;DiHnDTw z@3%S;{r2a9ZqtO9E$WB7R^}hKT~Hsfp?!JAKR*6)|MWtO))nc}flZf`_LYoI2PXic z!g_ZgdfxjsQGwFo+%E(AdMkls&nBu@~c2WPOj>UK=+-PbhA{9ja+8 z=wJImfrE%`2T&#_pnzf;BX&ntR@MOq_chgrlG@IP+`4rRRUFiZzL8Nwj#;@VMv7r4 zM$f;sOZ*D=$1bFSOWg)C1|V*HdAQ53?^fRe8;q*9*=^D@SCZtuQ}lC!?@{~N$oAGw zjXPQQ#cNHVJ5~`T2VRErE4nH5!;pp>6!+?wtJ8UjPdnnv zk)oj|LxTLs^OLe>5lb10Y|WRQ2CgKr+ijLE1w;bCCuqzxr_~*M}?iopzIm?m{~A8-7&gRb7v193rp)`Vt<7WW4!AC=?IXshc6JQ3=-Fm zp)&%P{Hc5gR}soIMld34fg}=$sq4(gqv(~eyRv(kKYV6r_>d!&w@1Z;YuDDk4dpqX zzc7?JsMp>#FZiwRaP^d5tFEA(nax^{!ubS~C$r3kmJId+b{WN8XJkBln3)zh-c5xV zskXEPmz6zG<#GV945KMtVE)_wnQcJg6hHvrF%p<|(skwklto5)fRub}goB1~lQ_4r zx}F!7RleMaO6W<>(Xke;%czfA zw)mrdgM|g2H3@Fw^XHsm4tNN>efDJ=io>G^FW|H1f^ra|DAkW$x<0hbHXiKr{7kgKPwIYUws&>7iBFb)-g>U*xvveU?t;_D zr~!+elv;xQ3OS9oxfz4+d;Qoqwz2mW&YPaH&tQD;>AAIZ_mp9L0Tjf z1Ma`)S65fBSN-vUZ-JNr@GTEvrW5!-FR-O1B*>OzdoaR0!cf8W_}rB6*2^a9oC$hA zwCZF%bNbh+8%2Rh>IK(g8>3FqDTA@Z^8tZ+)YSTsddjY=4yTxIl)Yo&W7@}4!lq6u zuo~BWC}n+eMu!7d9timQ9*vcxRsAbHCzOA_MZGOv353I=EjMNlq9M8mQ5|sdQQ~#g zG&bhKv$2330{g6g^@47+-0h;2kPI3ceo&tDGY9axFmPcB5bVWJH zKn5rcUr6xNh~!^Po9O2ZMf(!$9d=W*@!TI`T`(5=Zu9w8&XWrHiz54&@O}Pw(Pd-|t3%Z9jTsOe>l67BzO{Kxw%c4~T!3!i* zOij=4$7T1w-cHXm3KF{8?teDzm@K%nVN#9tyb3q_QG0dv1h>B`*{77vJh-ygiwURS zN(-C{-`-w#Z!vy~Yk%!&)v{EY<7$Gt zf{a5Ko<;pku!#veoxDZ)!4Wh5Zo9}R%I(`5k~%zcx@`AMGT-efwEWgyT`8yG+{wq5q}HF!Bl>HC}c(^sLkzXq-SW4^HId#h^fi^3bXI(~BMNHiHzMosKeb=VgD6230(c)ZYM?f3ho;_3d zzBADpXFnK(D`@{URq{pQDgxtqUuX1Cu=J;Q50xH5+a6Dp2KJDZ5gvjB@WPayw*?6@ zc`%Yg3ob!%bK}lUKdEJ`PNo5SLIV*H7^q2ZlksaES;>#7PX-O_pqE`-HwOw`zk)wq zD`NL(5bL@w(SC7?ta}&fnv~M?r)jpEB9o=3t*|X(*QSa$B zV>WTq8c%u;OvEM$JXOp!YmFVKn2{Y)lS=ezon0>ij1iw2?<=sYBlmV+NB%>!w$&i- zy0w}tP)k6g2Ey%@VU`u>ag_S?7nKKu69071P-F;4P)F&c|D<2(Tov=c(k(GxUFwDY z>$h>pWwD-eTIe6YN@K1Mgn&c&SiqRdjExC{tdvW&kfq=l|;^H;2S!Pm;or*a* zL7zT7P?a^fcI_HiXi*6XK8(+NTDOtHKuh6{KpFK4%Ktq()|+TxYF|OG(1ebFFxH6D z{X`x(!6X5zB7rSVV2HwE)k9RAYIDhs0FU5?y)UPobOXcEr1;;4IJI7Q-E%@CUokMo zV?&d=^(o1HVzKShsXMjQUR`I;ViuBk_v-0MmQ9;{TDKIhTRvJ*EZ|ZlpzIvI+%}d!Yb>QH68CQ^Gm-w!B9BkmZ+l{DH4LY z_i47_eSYbAO6Q7m6MYJ6O2oF@h)m1r>OX&1soPFQUSV`GgNgb|mYvJmweEEmzP`Gd z{q3EG%bol726+Cx;PYtRSC#TAA8acGLI|`W1Ul&kC!}u|!nKc~SBlW7|2<>10puwK zd>B6mDS3v07}6#xA#$!mU4iA2m6uoin*Mi=`lE-blA0}rOLnk*r&~U{SG;JKM>OX; zu2s>N+|(qwz@QjIp34E3%B@h+-VXRIGu)E8b0%Eq*Qbo<43v5lP9ODB7{NzBbfp1czqJP6F3K`|H%2NUo`k{6X&ERNZvgX zTM`KehWbV`HM_~yU-1Gt?)GkAedrh`>%}ad>grOgNRRS4h?JXtQl<%RyiwGlZFWnb zphqAe;QW(|BGN($+D6wD^S$y>SoZojd(MTw!8fH!atiWt5R8I@jG7 zpEt}}EpI*1C3|tU&pdUXC>`v%VPQp;^Xpf(tuGzOyJ*y6$)~-BiV7pkKmV<$kPiAB zcq_L_OW(nEA+P_!`P)*X;Fe zO{|9s!)w2G4mm&1D}H9NUb>YtBd}G9k7=7{$ydG1caI$&QpX;>(fCDUO8=MFT!-AA zOS|;0WvqcA#3UtUPEE~uN2n|d&DODp6RdL=4=CBKT?M27kQ~$;xyN_kyT%`VP>S>* zfSZZ460iW2rZ>XE>RYnbtSd{>6@|@Xfb)t=`9SF@Ntg$23n$+`BKtXB#~yJgk|ha~ zFA|R&dN9m=&ET6d`?u}4`uK}gbdRbC#NOI&FTXyhIdEs2n6Ir#)`iQaZDL2(s%Oqh zi(kle)3eIjCb$_sKWecJaZS*(-FdUdJ@ktlUy@-hgq=0?NnFDQ+yMQ;!iaMM2;{DZ zhnyhr$#b1DAv9d`{!zYN>yB=g4fqAcB4KWTY}@shUNTR1Il$K zUd()$8~;FDRG!|I<6Ix$4?T=RRWb#GB=js+@XnEG3v?F(5b!C;dH`}webAB5?ix{w 
zggZk=Y1J8HByp>Nin~%(>h|C0o_fj?XKl4vua1MQr#rxprC z%~U0{BDAcZ36=Jf+7g%{W>hGJbh@Xzr!X2nE27kG=Tx~naI0}D4V#3G{K9*6*Ix@3 z61NykW3%0`WH8E;x9>uf758H*4u_WY@sA8xXi!LpBDA{5U>|^Cheq6rAg-Q z-E_;)3n6+FfjEjzgAXOcZ_ z*iPHB|E+1X>0_x8r!MCUBaSjDSDw6i`xZ*B4x#t9XFWOR`xst2vZ-mZ=buy$OfKAU z^xE5pW((T!CKu@zesYY3J$NKCVk558bNy&Xh>GQ_DVm$q1%4uf9o{&# zMDMU#u>Mq)k0MOF`=Cw!&lHG5@EZba@`r4=>4!38RW98B)6C4^)J0P;E3irnxM_bS60EUkHWMvvmh|$Mn4|f=T!d*HP`;VbRf?K*N#05Xh827*HwbWf4&McaC+#99mS+RbU3m zD2T0HRgV2@*C-GELxzR$>)9L?rRPPFB(F+boX-?q;u-T|Jed%NC!s#qFXvQ^JwE6-+m) z?gi0EaUINEJU7wsYAe-)fal7}54@+kcgd-FfA9WIwwV|kA3F1?x+`w~{pSdc`vscw zDhT@LphxQh5yi$ud`Fa+r7m9+;^s-yN&F_CrkZ%|J8@iNl~Qu!Wvl{20PQQH4u_hu zj(Ac5;2s#`HY9C2Oloqwc2#uG&DRj|H~wFZix-7bZ^g0jW#88*)4e;8`OKh|Z{7hcHb z;jy|BIN&e}j`Iy-Wgpm}Pn43lbZ&U!=`_IpfZ1LU3W zaf6l{it3H1F2wbOe{^HT=?FMAV0QfkO@%drS|BRSgiXgqx1oy{*UIEV(R_2a|Frqa zbodB-C26ow;eGtyuE@d7VfgFt{*jby5=a$+LLwi`&F; z0II>97oQXXJVZ^}jmJV3hM|04$dx;YP7$#iHFkTYvEsvZvb!1;qM0mc%k9#V+8rWu zv^uf?i=h#W@Em)|8h!`f>2kHn+sCfSQT2OGPY*@uIqGr~uwBV0!H@N|?4y}9e@`Tv-D4{)ycH+;OZ8di}#BeX<`jI2Z?BZVjwNfIg{J1QZwqC#dy zlD)E$N|IF~GE>*aISz?zhYy zL%lofb-0gAg&*h?_c49m-zxdxhyc9npj*N4c{OdD;$V+s^S5>M z!35#7`8%iBLP1QQk561#8HaBry|Ym4NQ|2* z6x9Z^Y%~1tFISQZiq&-&Cszf|0r=Ui1Y>~cgu~ZA^n&Xy8b==8yyPjD(y_;?o3IuI zAK*4f>9`ogtA`Y5>b$+`VVOqh&@4IY90*3oO(bg2-CTfA7YVy21vQ#lv1Ecsg78y< z3R;arF`&bC%zzg(;ir28Tv3SK^Jgswv}5Eu3^K$}pul3sxlZGJ+s7QLXT(D>v9njB zJawrz(A2w+e={(x#mhnMJp2Mzw78=Myb9u%f@?Qs&_z;M@lYa0djcUKE&4gt3bcQ&w>LZ&Gq&~*bHG}_VKTflk zO&=RGwtdKQbH)(^C2KDd^Voq;GG~lvojDVgdG6!q+t8?bS{j;LT^+E>GCCx6HqPa# z@_jQ%1Hez4V2GOj+@QR~Av zWe{ww)HUF+kfcaWO-(rcASmrOMH);4sAa^q5U?Uv9&nm+$&F&aJS8;f6y?%koK*Jp zY_OoZpr9a}*^B0nd>k~fPR-rAhHtyzmga^Ve^d&;+zg3%2;@p^#-rWRzp%cb*p&{_0Qu!9y*K;& zC7_dAGc)p|3i_Uf8~43KiRS}zyqBf;*KD0p^uJPJ zn6BTRegCaZ?7lPDRYJe;MRIFnis*B}4zC|YU9w&Li*8eotha|B+@yY2tnOebeR}h8 z2ZyQH06nltv={mNed%->6g=KhF5^81kbvEiCRlpFVtS(YGIE_S375lu?yYUX0|3$P z?#oAg5B!|EP~LkaU2OJy-HsNDXDtWC#W!Plce9lG2@lV`QWiN?fu2vVJPNGUl?uNE zCp8;&$>(}(RQ`ErR{YVG8_rcKTQ>$5cpN`2ZCkKud~VP+=KbslMPzza1A{bnu-G7B zjHY>%?wlCgGE@ZhH(Uuzpk5T&!d|Zov*OhnrxYR2*|vt5KtgI3JVJ&S z%;k9bh%rjI0z8;V1fYlm!-39s_hER0JyeT%G0tEZqP#~mJIT$6n1(NqFjNY}y&iK# zmiA&h#2N4<29M=@@O!u9|D=@?FODpekM0E~*-TB+0O|`d_LRb4HuB4B0-VU7_6HlR zB--&7E)On3BHRb#hhCUZ4^DX4!wv`?B;bsL&U*+7}#Qy9+iv=9w9qOI(OU z;qO=MF4@EHe%q?1ib2YER7*?fqA5GIn)&ShFq92x zv|+8}()iqhn)a6yp_h}JI~C(wH+*|GyIW-a&Rx;}Y1i-8{NC~KvV@DM)1^xd$J28+ zS6r^l!J?Z?RexF5u=x<88cB66^DcVJp)2+@HU5^(r_{I&PTCaVzMD4+?w)&~a*&^P z>~kZ7{G1og5w1JR6=W*hkt2;o3+ZN1KOnrvK@VVA7mRkS$k9-{p~FO&$hC$IhfAj+ z7bOXhXjkmkXm73sO_@Y(fe}Z37l9q;&eO3RXFj3hQ=zD-Nt?Y}@64Icof* zbWsp5(|aok@NPfj2tWAy;jI>-MWU#OIRaL+=Rt!-*)1_u*Mf#9a&L8a?$?D?IVmXA zz@gm)Gz3aa?Q*w{-rio~05u0)5HdAy1e?ru4iA7A7n~9ZA;h!6$O*^l?Gc%FWRlof;(`B7 zVhZ-Js_oe3t|ul)RBCuCSKzueYZ&Oho6p8+rq{8uFJ{qgL;JpATlLuIwSFC=Z|duV zv(2bxQtWkh?AV)g2S9c7A%WQQGhacTDUU zto_aw$C|D?5EE#m)D;=Mw`Lz9I>3YvHr5p)kSl;)g8uj2fI~Jsmc%3Ty1F#o*F3Tq zElO{Hrk;v5G_QhBQn=*P*ORt4;H5m!deLnQ|M9c+f#)<7PF`_*+CFlCamnY@W93tY zx2O#$I#u=sQm zU2%mbq=w)fcubsuoal-$1E!x}s_}ijO2$w$jZy$(2o{BgPni659&k7(jMRh^sdK17 zRZvitdrY@rqJ&=M9;Oa56oVm7=5R7@FQFRAF9s1)57YNY)tEXH({V;AU9pplyop9~ z?RR+Sk|5#o{7ILRv~PnRtff@i{HOMsY<{ia9MDNhddI?q!KqX8miO)_PSd|p7mofF zxfZUdVympDYF!D+>aNZdPPVzf<6I#Z%%&c4mZO~9LvR9SR)svWDcsiK6O&lo?nA||=`_$&s)vpSR zK{$zRuZfHm_wX6!S#Kp?cIe@VqVud}kb!~0;=4@L#S*=%x)@mro`*yUF)`iz^5uw# zh)Ez5iwiNbPOt2uL4GHe-rAlEOx;XdLBT}537s`Q74UDA>nd*q13w zGGBpc{^y*W*R{wk`V2!Q)UYO9i{M9sx>SPBkRUl^E6zOc`9`bknhkqYaJCLuQuDXh0XYNO=1c8)?voqkjzpodPal 
z$s9DW4h3Sn;f>SE{~jn2i_@5KpRF}RjW(B&EzHknN&B`l$ziPRxB97e?exl+mwy<<1qdcHp6j)bn-X_G}-hUdp_{hC_Ourr$DdhQc*0nN_`JIx;@ zv<$gUH4V{46hHaov2lU_^*lGFyFtdtA@7v|il9ETamzlaltM{|CC| zuE8f4j73-#zcH9D2lZ?2prob(CHcnzfsUcHz3R6kt8Nt6TRm5pX;!t#wY%)6moTZP zbJ0%O+`jwJdyc@^Z;o}%c7xr&B%f~lJU1&Z)r9pjm_OMvV z{d1m-6*}V(w`76$Pr6*f$s@}+>^9KT1z*arSfioQ{lm`Pv#n#NqvTD|Y@>AgJF3S$ z?(;|=To29Gtg-l>e4*?dF8ShhcU~%HyNViyolw%TVchU%O`UYd-ImrJ;^J4|NNPW$ zU9MliEHTVFbxiGf?AWy;;Gcb@k+&GH?4~YV>s#)FRe*BkTOF62VVmcCvPvi7Ajo?dspS(ldDtoQi&s+mQz4_q zpEn;HhzQ0DSy#`eKjLuZURjBgPvZ`t(l~ZGtJSE{dRjsf7D*S++PwR&tt|NVXUXiyS<7Er2SOeLi!`EZ6Kq@Ww$knfvDiLMm>4n*|cG#}m$P zZs!e9)H6nI$Z*5AjBGN1cw$Ec>?OwN!h7!fd>XLU?GB0j0wo7;dyvy z%>GN(=o>}LYg`bTsmAGaI~Ic#JDMM;n$Yvz)?ap{H7D6PAQ?Xc4m7HUbl+8IX#w2g zQglM#NHRnF=cpu1c&4fIYhcXsRx zTCkLbe;~F^>W?{$Pl_DXDa?2}=ue&P|Mcoe%QZn}k&zLPcefgEQO$zVW-cB3LX{T>{G_|xA!dSt!LfF`sLt-b&#Cs#F*P71 zBSdi91@2Q8fOIlbI0z0X)q$uv_E!ta?d0jZl z#X4qhH9zFk_u$ZnX^-Zt#mZ4|3wXNY14PebLk?A#GVT3Ms$11>>mBLce5oQqQE6Mx z;n&)}mA8!2EeT$RCW;u?lc?~Hx~MvMIpfxkhEdr`a1h%az=jn0e+KPVYZotm6P`oSgI=!RasHhj6R`Y3iM7`F{gruiji?xo=t{}N6FLk?j}VgW0CiC))sP4gy~i`4l*e#rN-vR zKWWe1=)cn#n`VXFl(QQxBKqvi?+=(><}(k!4h%o!P7QOJ zY_e09&#dzkyd}!A!7ug=-x*t{i)ri`RKZ$mkNM+&EUTt2UCMIt*K^Umy!E#5B`f<} zzDqXF!kj55pM73}7vLGudM_2V4vX1Q$C%|t?|*vb6S}(MX0Cn$ek6Ri?NV~ql^>Gm zlVk{oBYJ(sJK7ISLF`*KnAP4WY}I%xzTao!cEe8dXbg@}!VbYWDH;1Vg<_;@NU5`_F)f|5g#BmdOlcNeGHr`~ zvjAu$)(zwVArfGykpt>^F7Q#QyT(HviAs25@oUQj?cKA*j`Nmm4iQ)7*ywY61_tDZ z@NCZGc`Z6PvoH>0S{_ zM3QR?0c5Ei3=0L`2Y71(9o@*uD67vC(=9r>J&QSwm-y28nSHDOr?^@g^MPo9Xq+i;&I5JWX> z%yD$$!LPE}_ZF&lHA&m{k67Hi1>&plzeiBw*U#Bd4IP~j?O!fH%!>f^l7%J;#rTGR z?rPQa)YJ+a$((DyKiTd%u~SB_hW5rPIyx`m$$Vkg5U@8)<{a(RS-#I*h`E&@ViQBEVV>Jfrfgl)u2co`%$Oa7^Kdda^0zl{NN%1`6 zX9LuBWs>D93%f2xT~*B(DRtaWqsQD_qpQYEr3hphyE6=7j+c7#3id_Ty4T%h-%$sj zVj}+su?k5Z;dYKn+BhZFou2tDA1!m^h216U4E-ld>)u4LcyQeUax?NQ2QaQ^BgTC0p5~}g_Ga3)gB9y4_ z7^$NjWMb-wlFoBaJhXfF=f3OTdCy$ibn43T<|&^;-eu`-O&b=F7E(44MHGPEWs3H< zdm+vN=^2_aP;xW6zK$^nQ~ylMI58ETZrXJt?v=YxgvRr2AJ50dmEDqzq#OR-Jwq#W zbc5gR0}fkrAbt9hnpE9-ldr<;#Cbn`y-R7R1{q&(#qc&YJp{Q?>tgbT%L&_* zk8e7&o~V*>CsGLzR{QiohwUoHjBW=RkQSF;%=R2gYHDh-eeevx?C;`NJepVQ4wF5Y z+mlME4u`KH&7Q<48#_k%?#-I6Klggh6GhQUef?`->P*Sxq+{EOsf7va3HTlq4J1WU zZ;gC^{u}@@3^ha^jA}}C|Jk}C@ZE_)=^8mO59MW3M{)5$a$t$oo+wv{0v!m7;4+Y-1yQa~wx0M!9D1fL z0}JeOouS&J171=?^4qsRuV)uB2}lA-1n=ngi!t7ZeL++4c^mnB74pohSY}I?yuX@| zkWgh)A^rDncNMSfp0=-SM~>%gD%A?sNR5{h^~yI3-WXPz7WE=q-+!l({RnIQc4nNL z?V_wlwN0LNsqCq((~t6(l+pT}V+b$b#_#<% z4F?U4Ga$y?-v(|M0bWm@l(3o@Jo?7iehlv%ch6CfX?4ho8pr*Q_taI9J7N$^2Vlo- z1oRN}S?%_fbnJ(o#1*OWImnZ-bC3 zDk@6Lr>oO^0k;ACyoo&ptu}~Vm6b!DSlNYyj%(@9pWh*N?l3N6hh_lYVlGoR71&lF z^M`ok%aHpcSVEws1b#7zneQs-Q-9IvIleY&8*wB7Nmx~Cf+0oB!@;tmx#09GubVjN z8iI0y85jtxeZwNBHIeT?-GX+7Y<9^gob?VLs3-r2Z7=Yziy)9iTc**n0I9~VjK-O| zjA$!N%fW!bOcjRa?>qv5;7v#Wy1WN@I1@Oj!B(k9u22yof{1mbjQcD2%%Y%o1E7XW z=YDv#1DJd!y7(IPlo;ruY~!qrM8ay9!Ekc`PIK^Xf8!&VL)ebk9|&JyMMf>RpVy;3 zqn8l1>gNc7(6l21pV6}+!^fla{k4S|ZoxbyRv`Fc|k;mf<;f0pr z9Hp<9rz9A4e{47Q)ox&jYRzDyrv^jU#N^qSawo6$@mq@a7X0d4AR?BReFlFP7|#XY zt0+BJ+e8yg6?{E-OgkZFGrr))aXS9BH$|p-?3zPDC?Ae;7t$ZK6W?{C|H>Z2 zlY1EaPxM~YKhdavEY?%dq_)c9@7$1qLBzRzr>$&fngV9)G&-{={?v_3h^f+oq;=!Q z^kdPdt~jrV6%!sr>XVGRe2n)^po&DzJ4h^lggK)~uo7asbr7+DLqZ*HyV$rreDp{c zE*6+K7N!PpJY3L`lyjeE;L**EkM|~CbqdP^+Qd%@4m!HJq4h$2xQBnh4;jv_2ifE< zDhtIkkKuAf~zflq8lKNk^^}iWD|@r{WG^yJ23$uHjCJj!M7D4HmFhZ3X`B} zO~Sik!L5p>21^DF%sM1j#S9wrgcsdIr0}D#eFpTWh?x*{7+F}&fw_nagW0KUtPTg! 
zKY*onpDA3hAO*p_jaudhQgW7I7{&SiL$r-TFoAGU6a_& zZuoDvf}t9e_Lw8Tfiu4Y4%o-^MV44clF&$WBf#?Z5%)0i8xm5;%fW{Pq)^6Qj&(IL zxF&OqQ+)x@Li~VuE8(#+#a`TK^{*R&^B4G=SSe>`jp&Q}Gd@}HUYi55O6;M^OS|{| z3FvLJ0a4I_E!xh}dahK4@{aN?t>mg#26|x}8#ns>S&*E$vLh~DIjN%4_HwLJ;IRFz z-3d~h>({qj?_|HC!v5pVFd$+NhA3zF3@mS%@+l3MO*q<6V3ZCYA0YORp`B)&qVg6G z!smghA54be3q!xt)jn4&1W7&deb>wNUX6L&$seMK25gY3qobEy8xfuCuf44>b#cfk z_qh6l_Pja6DZxFDuH?-vy1GqPOqRc22Qpr6zNA1+Lzb7At>x47WnIOe{^F_Ocn~ZuLp$CPNt@(Z{M|RQ$piM5Ubx|AvupTYOQnj z{IO%ld~-d=Kg+Gud#+RhyT^7*4+Agy4u(>bdK8@+7(vvMQF!cP4T0T2Gs*&^bDMy` z4nSB~F@o1kMisK^A{O6k2x&!Ed}#0f{W*I4&HquRk3bwvob5bU7kP1$|7IEm;rV+2 z0zz!PBaQ=eFa~!6WX3J#gg6tnPUy*@>b{LXLBgh@f7e9t=EgF_p4=&WK{J?TC^`rb ziVq^5{Kc6~15Fi=3xITlR>WnGH8DN9!H$^Obz`@-Q zOE3!TCHY|gztbo^JSvI{uLA7rh`ODK+1+4W=B_rH&er|tHt==b zd?r*CI@_ufdWJ<0+CZT26R)Wd+GA{U&TDRL%ifJpLBE)on7w-EHTi46jL$3&Ve&E# zpNrcTBoq1JI#W`}$4HjLX)zmy{^Dy8&2}@baN}v3Fk^gq}%Q-T%0OZY!E+UG%8xC*!Vx{PYw1wwx=>AKGZJ>ETMj z(D{PMPyQPdQ;atRYf$?7lrWf{zxXiW*bkwC=$Oxa&xhsT1;8F8>oq2I9_$-|T?sKgxZk6*ea1#F}?={m=1% ztBBMiLbt|!cwsuTe_e&u5sSf1)bGIDyM@FCV1?Wd7(Ds>dvV@JBh@hXE~E92lUN|d zg$V8xQf(YeRt~@LrGP~*tT(=*`4`jFcIJMI1N6ld>T!G`lJ^RCY>kR!ZXLM(C?rJe zzPE>h|4)u!c1I=C@C%@BRoB-G^Vm5p>R$(uN7-X(AN@ML4hNOOH{9PKC{Yx;DQLqR?Z#14n=HeH-vS5imCCP` z$Bqr>m8vzwe7m?xgGA$PuxJICjWBnHpmLEhA#U znbR!GnflUYYq_%bbll39ZEm->^DvmJY(8@R(EBcM;BG=-;$GJLe`Q@(M4IVA5(B|v zF2oF2l@wdcwl~OFcde;s$0gmgWAE)wor4;uiGwak(Np8+^4gR%8_TgjJFU~=ihQa{ zskd%qEsWB-U!l6Y_e|8)^Si_c8BIB%&UbTji{UT&rT?N$e|ATI8nY5Nj)nx~1~jL@ ztzc!1Ghv-esfXbr(|SrZ>i^=oAg@LN`YzjqXfC#+N%CGAgYONxI($+xhKh8as{inc ziQ$C}-^YoG+q0PgBEJ4YuI3THjTETXW;OKNj6O^s>!Xq3h5`wwv&{UUkFc?4E-dOn zQAtP&`5A`{10;nm-D+;u2N$U0|4~);vqI}T6@b1A?9^ND&-Vajg$7;qw)k1Dg6Hq` z*a{A_Jl|c}{M6pYb~|+`kAsoF@ZH`pi?)~aj<&m#;tkaUUhuSdL~>GXlt{^v<+57J zHQX}Ab>o_N)#7%?K~b0#1gd_$e3VCNb$`@oo5_zQ4nY#l$2gVJ1r z135jTr+(pO@umO)oAA?d)ZnPJvbE(I2w6A|jaG;M!bZK|5m&B;Z>kg>rpen4n6Byy zVY8HCb!GdRM)#{?KFWc0zu&313{Ur~pWWm9=0W%f3d%^Ydax58fPU zJW~j2lz3!q3GgKtolTYgPp^ij@-5<28uk*RE^t;HGVU?whe9>Lp1=fZzBi%mxA(WS zo;8gk3DwGXPQigm@x+eQC=RuNHa|F#&(_~zw$wP-wC8&i&n;L527l;H$auIuNa}&x z3CW9;+Y`dtE_&dx$LyhON1GArX*qUg%ar zGYtAYVu(KdXq7ioLqVYw=QTKZ!b`MuLDp%8?aMuL*gw$I!x7Rs@i6;l2*b%vkB*Mc z4afBVDHY!5eqr~;Cm?W`xq2(t47o1C00s!OgK_CPPQDxap_euz}OijIqd` zk=&FINy6!pjOk>YO_Vz;k zv0u7cR8voekyZOacm-2zXxyWCDUD;RciHY5(rCjm7F4tt3Rb+m4Xm)Ggx}iv^CTD+ zy4(8>N8R_CEpp!}s+u{}Hl~=tBA2_B7|R35xTCR^f4V~|Aor6*O|xywour+qIZ}?F znhiwwMAo;py_ArWYWTjMyM|}D^=pyWs1@Gth-oD>OL|%3UH?bY z`28^IhCK1ILq-pYlx7S35K7+{f@Kk!QgTQhihMZoww4sb+W zZDhXVoL#$X<526ep1>7JtMI|l3&Bt0RPS(XhjKyhwqN0;Q<`ivI|lo;7I~y>x!Qs* z1=Dng8K>GBcW5iEx;@OdPFcz2^zU=`q#~I#VTbsG{)^6C=`OApXpH-iITgpP^ zQo#A-RYvl5t>Zbb{V25iIu}O2eY@~6yQ;LVCB0V9&LcNoKMmv|&gq!Ao^9}wB*uHt z#{o6T`G4!lR#+{$yzj9e$DEq*b-*y!su623 zC476XWGCje{oc>3D~0xr?F}EAr?)n>h-6+KyjmzXaBm|sj+#u_hWX2W@}IQY-)U$y zJ$KB0hL#H8%`{t(L4#A_WfwYE9q+Q6bCm9|gTqOIM;Xk0%+z<&_u{2?Bz8dmg=7Le zSIrZVW!%K`Po~#p5p<)?=5GiT?bzv8vE*o4OZWj4z|6n|4e^@#s84T6TcA{}${H~RHAsR~F)HlNSQ`(ppb!n^T|7%Ly-K;Uy$-ntdMnb20zRAEC} z`Fqev8^GCrQct!$`MNu=C%5?f!g2B6R2#)|zxgox_smChXyUprX z4;=Vym>#{y>BWoK#KgW#RSwYm@sr?9NN_Q;Dg@AU4qP*2h-BJ!y#9Zml2Gx)6C0`h zch{86v|Yw2igz*Lmkj zp017#g@~|-KdPzM8-7`{8s?_x^7Yuutmoua{8`C71}ndnzP?ByWe zhsi|kBGvL!n7?`G`JQTUI>-f_W1X1C(wxbW-FSW2cH*Al0s%tAu=sk9yzCd+CIizB za#8TY#v7H8&v^Y z7Db)i_{X-Z^SWUIJl@ROZD-Hvx!p`5aq+NK*p1zfCcVx7(E!ske0Z!>XvRmh_*f7V z86wnMP?jGKf>RpSI*%Ve){+!glgOsM-12s%f_AUy>GRi5QHnG{%Mit+CnA}y|Y$DnPC z>%i`Q&o2r}3ga)&F-={Os1{!cqseH?u>tz5ABd^i7vT+0pB9-vpq6(c?)^^RS zg(ZqA{1;v~0^?#X**;MvVJJBPdtrZ1{0ET}$`NRpSov3+agn=!mtJ{UUywsq!-IhP 
z>1-CPM>QIT)H4NPmO!GGSR*9@HgVWHZ!F{W+E>PI5J%&$O;+HTNOZK%&=}|9Z?rK=qJ5RNZIta_X{w;})k3q2^ zgL|vD2joUzGEh=cZRY0Qqw-CF?R0q7kCEKGylIg(xF}nk20_4Ka|azY#m+~cgdg7! zZgX86(3(QZ54=IJAHxHt7h_w%iSo_7^v(JJ($w;d_Q)rw5e@?EU@%oeq;U~sokMl^ zWSVFt^i01QCwbV#m&wpF8`(RV#|X&2F&WM27k{^%$>gkZR?Y_8!b{!!pLgnw_9+gH z?i{>KA=tE8be+v7t(AEtB^}+h2M%1Iq)}^V7V2@I@38%zx5@Hzzw}h75LWT9E`Raj zg-g2A^+b|rR))hKCUV>;_5ZuRs@u^euUbbFfl>))!Xlz zOR}>;RD8W#^ztUY;tJkUE#G}|QG&m`aPs3r(S*{QljwA4ca5!h+2iu%K4BHRY(pw?2E;V&SaYsBWiaL&st zW4k=jdPo`NALs>{;y!*f!}aZWcAen>#cY$*bbA?2Q1x|J5I*WuUY4HO;iMq{>|+UU zYF0lMOGjYuT}JcbgV4I&o6SZ%q@JqmTG&WueWTd3OoPgDf77>CE1Q?kvl^AF#2-b= zLTM}WW9Gh)pgO#Mi6;^mSPb*hHi?9v0Gc5QZ^Pk=!R7y2J+8YI%S0}64e%~sgtUR{ zgtJujHjaB75^HJn%VkfbzMA!^_LD}Z-UAi>kH;4KF#aacB<9(ai+5Jp1cpo)0gT;x} z{AEknVvkB3h0OO4)BZaRp1+`m!FK=w&eHNv3gZloP5IYjNoB`u6~li&Q;2E%{_Kdm zF@L7QS^3@f?TdNYIoR>sw%%0>-w<+P@M=PQTHzJCmz7;y;JOe0GP#(gc$WL5LBwWe z3x2`Gx<*{;nqyC9=x=e;kh-l+o3R z;`N+pRO8*u#`Y1^OBl;=UBg@k#1qX#kb{Wt-GBUiifn<7eM=^+MFIHC|}fV#uHY;LXViAZs0bjWy=)*PHkf@ zR`{y4)G6KZ`6K-sWeOITkd$CNa^<>dglNWrvCky}ftH=~rS+4pESHoOjwvyU?A|>r z{Fc_FL5luxgTh$hhJ}uf4&WqVPoK(cAJW!z0E()gc0i?jX5I}?J~X8F0s^3ZHE-WI zS9wI}L0roawsjS=Zt}eJz5)lIY-U{eeeI@Bcd(ydg_)R2o=zy6Dc7!H?MTV=7`r7u zi)Ux2DVM zQ6HXZc)_gWycn>sjnS0$Pjf?z_ zWq_1WLPAFG`3_En`(6#6IQswp(*Ct&*%=F_OvByn-~j3UU7M7ZwfRA{M3Q&ahRQx4 zI?nat>*O__910YQe7o18?#bc6Bu-Xa>mY~hi^o{wMHDrq93v>!tYzmo>2;6)`1{Y3 zHKDuQqLL6=%HqOv!P0VO(ak;nI`Y|EmNqOvQlR(6$$8s0e^~Gya&UCSyxl)K3Zjjc z^;*x&j~4sLMIF$q*v~gL8`z(blA@T=f=L&xYww4&v)g3W8fPlLTQThDyK7xev2wY* zhO=|^gMg6^qf(Q>2Ol}=l8)sFKY9CaK0>T(`1i&V+L2wlSN2+Aie zIp>h&#aUQ}`hK&fWBL1tEe3Be0h>UjqxUt}To60C$i3!{nSY5^?)jqbf^I(*?HglV z($a~J^8yp6c5b~^R5H8&T5;nhzAY= zb^$UdAklgPgAS`6Sn+w-6}xRL8~pgD_iDo4o$U0-#tMhb%?sB~4>M9Nw^p5PP`LK%s1DzQ z^}pZTq9CLU=TZ~&M})(*HM`w9j~JJK{r2sS#`B+8w7$347ii5}VzM_nI=Ui)Z#|%J z0Pf%uKf1lRITq?4T;?4+He|FMGcaJqpTXeG=YL+zTLG9w`_}JV41IqLp?13~;&1*Z zOMzV73(ThO!QT~r?4~?U|DEBE>7U{KS?MQ2Ej}v?>HlO}3T<=0;(UHY%V_I5m6YmN z)noiK$8Pe{3CY`Qhhe)^+!LRJ>s`@60k2>}R zhJX!_Ega~wRQ08~ndFR&JIfLIB-x7j7>!{UNB9z82Ts8AW*pqHC@d8oJlbXO7yQ+A zr7;ycelzS`2)g`95>lZv8_5Z^*UO1QEeU;9;N!u-+si zf?O!@WQgr6?cMcHA~qL-cz+Yyi<>9|7}Ir|@^5vG=$uBf99avO&mxkB*JF;Kuze8( zi4F3$_3Q9m{_*rdhM<_)K|&sIZOxGnglIbesW5He$DoZ3T;{dk8pJ?WX?EVubCHwm zQ`h{@sAMv><4b3|q^=cocuxVpa zWGQC7p|MjlMwPqv-aQ-R7ERY2P9>qA`c6F#MQcKMryC=xDgQ>u9Q|ac#{co)aapD! 
z#+^?sEi7&?IMGloho^K*Z(2z673q)hY^NmVP^bd<73S-~hT1}YM9@x1GFaZ_mJ|B= zsxVv0tOaF&BqBUf@RToh8v6qi$zR}RvQ3mlYY9V9P*r7}qE;DiZx4VT6zOa(?|7cP5 zecZu;G3Lo(HMP0rZ*v@oX7Wl(l6>vL)Os)2H=IQ*GBV3>-@SmL=>BWPJ#W@xf7kCT z)=ZK`JXbvc!B>?u{Pp|e+O^1o5H_L=U-~0e&OS6sj#5oS#7lN&gzND=$bbF%DG_Av zuRG{N#A6?S+IoRim<9gw=3;uTgZ5rAOTUES7e@gnFr*Q`N3U;cS`V$pY49V+ZV$^x zzN;bF!}$4O;=PYe!z9WMgGLkUkx)oV*V=<|p-TH$di!THL_=la)5Wg#@tS4!PGVO; zo>U0sHdEfhql@*OALNVwSy~Vg716Hm^_U3os6PvQgOI?XHew#XOo1DT&Y5sL{=!L% zWYBUIcom@#XU6CPt%So&%E=d*Mh#P9R3VU%7Y@BDhKtEsMF1j^kpWe`aj#y9I(vXW z=8bSD!T8_LVa(c6zH$H$`X)=+ixw6kSiynwoShvepcX{dgCy(91L8mWPx6y^FW0Xu zgOG9^EHd@_iU-W)q~terlR|%Z zuo`U+2yn=G4nt~#o7>W3=C(tPeSe&V_w1pIsjmtZP{T%0KJJDCjOkQlS%a&-X6*I1 z3SFQ;ls#?iJqm5giLTR|UMDcv%+DwCd{@qCTs3=sA{E>$q}@4qdzmC``@*bzJ@^UL z^*dTSqtFWg=!sIWk0e_?X zZl5s>-o9ajpL#7gF`L9L4WCAwG^bn=(;=2(7-3YZibCQ-Qc=ii7a<09U_B$hYQFwl z@G&0OVR6vWSpE)6GXve+Pr}JClH>RwG9|2}1?O&3Efm9vpD5F?TYp;)kAo1tP^&X2 zi#DAlFj_x>#8+H~ptcndDH5SDQeq@ahqXj>N*HU(LJEW$JY`&9r>~q??|@wHldkpX@v)IY})?Z7BqTaX6o_DkcY#V?gOaWE>rMlGNS2FVk(COL-gT=(Gqkp+5+#~95%MKnNfJ?>Wh1*yp%Wgq31ScHQ2c=4m7tBZ>> z-o?4{)kOtx*N&>G(U35dh2>6^NDyJT(YJuxI%&E(ZR*x1+qGc>M=r=#SO&-;+mm>U zpaK4iv}Tp9{ZM8e2Tv1$6|ln~@rpQ`Lvl0(g{Z@3y;MSQqA`Jtc zj}yuj`9gqx+1Ts`$h&!YIGE;e zUbpW0EXBFY#Rj?j{>#EI4mZ$#F;EZCbNbp{%-&$oSvyONOE@FvSQ zBBXn@AU&O5Wt>JVGVb-$tJ;tPa&vR*)ft-#J-CWb3kf=SXv>C9YRbw#DfM)86qS@J z601*qvCgY7=hzEWm82Szup0h|npTK;AqySO%-_C#9e8JW4($xhKO?%2L=WNdCRVYi zsTaUy;_nSt^uz_(N@zTnF7-Tf9j&+_@xYxgT=c94y6-o!qq}a2EZ^D0N*&NFEG$gE zH7L}K!S>EX%!gJ0<_>iawB=j1rLfE+(*yBf^6Gl^$uj3o?gp+Z%x+tlrz%!C7r$kA zP|XRSgt2u{Z|ME|=w*o97GpPgs0r(#+;1J!Hgr<$|6E}yl}Il9xyzGs7+lcjGe&Dw zL62A&>@k(#A-V=S8RDTs%+4NZyeFZr@K%YR0cG5j#th_SLKaw40(P{3w2j}@`+|eV z)~qxYfPTfEXH*OK49VsW=^pIeH}mp(Dkm^k-DZ&Y`m>_^+TdYUOSoD=rJwcanRZof zy2k_iT)+R-V3Ozk_(*;@TdPFAygxzNZyme%)t!G{zEVyakAIMQI(B62cLY+6n7=G} zul(>~nmQ0ctJDy4;|!HM8fow|;iC5P<;xvX69<^z(#0-%n8?QjV-pMalR~^%2<4%t z|4aQA9$iqGqh!NhOf61_WJh7YgWSNVrgg(A_%9(S#8oQsfW*=f;Pvf+`EX{r%S3lxk~HsVd1K0{!DRZ zuZ+?^SnO-$vg8tPC5~gTwkOy4gt$EhH^1I@-c*|kiEx1A2r6l7*M}5btv%4PX2ojI zUcR4V0$ugFEJLt#`T)KJatJ;I8xLlz{%1s?621*DCl{SXK<4lmSf*mJIuH~twf@r5FG zPu(%U`ei??K>SalU%N9lG1jr9>_y1u&*H9SQO2{eW22+=&-S8KryE@bAv7s9^%A3+ zh|d9<73Q;F45m?HoVZkcwUFDZOU*QJ%N>on_%S2KlaiF(61AF5$|*VtjaJ^9sSf)$ zw9sx>_GEc?)*jtzOg-t|(OQBchvC=@(Bw<~%j2u4WA9!8Pe?eq(}&_U958fDO?hub z;>8B59-<#sE-Pq?(DT1kXOcefsA0eXc_f7CgzgUlnu>n+;tu)vD-fhYD~U9!1O&aI z7U1ClG%73F_s2q!krKvhn{nv!6}c6Av=w2uFgr`G@K5K0a>kn+E%RM z5d_Zbwu;{5IV6?Paa8%VbsgPs{|B--BJbthS%VcfNofTUbFfdFH#1YldEBggnx0pl zYe^QkoWyIn!GqCt-Sb!BV*!6|>5l?$7(B))RtSv^KY?#kz{WG2II(=fH7KrrJ7sVc z3vB%4PAWE&_@;2zhSrcM;>dMGdJ|@Sbv3Xz-M?q#x?=M_ADGQOw?q0`RF z0hG_|U%W6IYR(Mch@NPY{@LIBNxbgqksPx)8rGPSP9?Q8(;GVdXB`Tcl?~shnKGJH zTrjb?ORcim^2PO|`i~c`l*7lb=uD?tL$Lheb|?<%v>IA!1&c!%-`EQW;X;_|;@fI=zpz)P$?0)h0-k&EPV#OUKm2$u3=Ma@8 z_1l_4&?er(P6US-;$a}mgSO%@T=rEHt~NZd!frjMm897M4C{I2kV}e>*1cN);8Puk4ww5JlDhJY-=9;y zVwr2S2l|TSJh}&MUhgl_omZt1Z^n{6Is$M}sfbHTmD&)sWKzHsWO#dS1Wgyf6K2JU{~l3;TY zTUPdr;Ah=$){;ey2@?5~{E7~Dlw)xK^~+Dpvy^;0cKAWQfewv;fd7C%P|yixLS0%8 zSDdTp`{0b@-TE$GgoHYfS0jU1HyT#$XCVeREkj7if=U0bcS#pA3A=IldzKFMT{&%Z|wreGU@o7iY&VjLM%%t&EETO?R6Y3MRtbScq7IG>O3us_T@&K;za<5#jLT1VHJbSS2<`jL7G>v zj`W(YBQNRal)2`DviIPf))2>2Oa}5oH);igc*A72D`Bw*<5x@A9PwY$)>|7~br@p( zUqN5S`XL|fj~HV3UTRI>nlr$1ggNJ;xxq~_^WQ_kBPY3SXcSYY&rhEo=XOtNh*eRH zbx!rZ9dD$fE2zs;V{^*3d?Xo+1qlvc{(fTwxK?ExeC6$d*gY72LmH*tEb0SmAA8)( z38)Dtk=OVHeJHL1aYpdbWivQ?V=GB4BpxF3_!A`m*=NXAjt9l+d%33q@puhX1^#y! 
z>Ar&f+UM~U*t)iNavtD0q&851BBef%P`g&9bcTt+ZyU|XuwT4PZZSvdL{?mLB<{F=ru7BQhR5Sv_=ge=-VHWqeqEDSq zM;*5K*?ZWxb>kh=?=&f|W{iJKUl=p-9EhCILQe})QSZLDlg|54*wOiT9~l!A45B&E zalAyg;LfK}>ND~TCXG@(rlK*v__xlKAIisPCvrSVh8`J{LaQxESa^Z61tVBL4u%Uy}GEiW%Bf-lfBYK|l2PgTG?p zjD6KM(CtlE{ziC}TlHgNluDV#G`MD!?Ly`eyR`r^*q4g!-pwmgZp&4`TbJpvgiBCC z8=95dxwia~CLcvSz;Dy#K?CBgKxiUMuoT=2Lmqm|9O*$xLN6ham@2x&EY)oO?{D&F z&H!=-C~J?o;4cBxRJiIgvHM!Ta!BWDuNBxg2-;giePFGGA=0mWT5qT6r(@YZR}27P z50II*6~PZQVJUknpzaE)>g_=vH2N@SE4qKBj1~r24)gY2hlNeOcXwF#F;UGp_L*|mNU*~jJsJ|evG?rn7GX^3Ys3>%)MEq7@mK*?yeIQ&H1Rn2v4Nkwm|Iz{0 zElf;Bq&9)DH;;>9hdsr(ODRH+GVQwLfwUn=(E_JEP=1ZgRC?=?yoCqr`k&AIkU3$b zAS!5cb91pf?dYYU_XKQ-VtM%Q8Sc6w=||8jX+}y0qHv)X3i{!3cQBPfO<-D}d-EQO zF$trrc5}DsJ4@?M$Sii5f+&98Rd(O$U+SHrPEwYQZ{v?tFWc(+`K@@?{$bf2uA|%D z<7Ga4v1&Uu_Arybe0j*w>Gy3Qr)2vm8gcncOIrw=EDHQ_|NbR==OetPj<*Zeaqj@S z)C-ZjqG-efXY3S7x~=a zDsDa_6j4+3=jr7+#3QetJJeBJTujs62lWv6(EyXL;*9a7EW`N&-<8hp(iua$$qB9Q zRyjU5dS7%l!XlRPkW682qbt-hd)sU#M!Q3{c+Bvs$b&K{j`$D{3``8tfnJZAmJHM2 zmZ1x{*aBNZ)dENbCa6fom>TmD9MkheG5`#w7WI}G4$lDz6R;K7zkmPZ0RvN0Z9uOW zck_&n12`(opB=hfe}HW$`Y1N9RI6IFw6tos(7aKM)0j@xE&OU@2&^@V*+=z7sZkLeV!!nV##be61*(dj@QJ>}E4sSYnUN0>5LEhKS!3#s2 zfpc#1lxr=->1{+k{hy%qR?>`mlf62cRQa=ObX~2(RQ5{aSHi6QB@fE-mHTbqJiN;? zJQJ1I3oAC*+1Vw-Dh~xX@Y0LklW+Pa8!?HkEH7foL>?KmCctEp+Eo%%_ImR%VlRO) z3~Cr)A=&WOrRR}joBGIWoj9Jj_pZDK)~s-SB8Grox(1aO3}<9@4^86{JNam>^6j{c z0?zi4($)GJfdltb_O4$i6I+xJu5#+v%0q`!S@!2RT-^H?Iq2TK^Q!NA9v{D&Sgj)R zc+(2a2ZaAJ=^NfGAy6u0Gjq5B0$V>nIw&eJo6FE{weIULw&h+|uwqEq^Z2}*95U!g z!B}pCrAW4U6+Ov!_IeRkst6#Onx;Kq2n{3xHveDe1lJ17ELKywUvuDq`otWwR8WjuU zsIN~iFZo`x!!S}Y*@I(sv_MYmmYiYzbZ_7pj2w}6mVl#4DiIoheHPxI`zEs>z5Evi z3oPpYqw77Ox$ghJamgyOGa@^rGD{+4rZS=>DXUN^BP2qREu$q#W~36LWo8pXMr22k zEvwA`WXy=ktEQUeD)a&YfE2Ec!!Dj8&k91~CmNBN>UB zkZL5U6h{?l82eGQGE$V;ZYTCTlSZ#6C_7SDg~56>$76ond@RHv!|LL7w}S58zfO%> zoU@B#RIj(=%x@BGiH_h})k$1ow*i$wjup%D;NE@K&l|pd3)W7G&aYhlmD(Vt^-}86 zxbbrjqfAjT;X}H~henc{`hF%weREJ;T6`l?|wLSU}UnEdYd!J-NLRwt+Z=ttX~q%qR)^ywY{o~vYA(W|b;&t09v>+R&H zRad7~)goUOC@&=6geMSVCxm0a@#Vyl%Jm7a`8<+7k7;s^FI%U7?&^(8m9Sw7Y2O%e zCgsQ!MYk)=8*O)DGKWM^k$5f*yneHh05OgoChWD(9@*ZFIR2u@S`VJLz_*~!{}ROL zBTiE?pz*x%G-pk|7�mDlNVM0N zI^^NAS?h(28XC`^T84z9Fuxf0$cv0m#B7G;fOJWd0%}qh)^dTADFBHQf0r?b5x|`swV4|@U*_TZqEg$(uC>*~`;OpW zZe2&pmb}VuNn@Irvuw|w7licENt=>ig+b}6|LK95tqKjX0b(+duiA{AMR#o|WK_A< zq`m(IHHBYANUUUjV$ZO;FKe`b!$QlMom0oZed8^w+FAQ`CfwOj;{MrlsZBpSrK!|< z4Jv*V;xU5<_LQ2y_SDQ z*Mw)V-QT-s^tHgSokJ_A-j{1is|!49w+W=BM_XHY%ZRj&Riw@I&b=@7l61|uYvLhy zC@LlEiVf}l*dz++P0K3_l2LbuFUU2e_ywDK%&svkQQXO0XMYFN6shWGA74 z(oenfH}+=;fT?bW@%3qA%$YZtRsS}kY0S{fjEOmifqNa$Qyciz@WVYJs1wX*+rGPr znma=CObYvU@4k9?5wM*3MHSPAsdfU*P25IAg}c=~Gxgn3SK-d$(E>-@ao<{m}~RY{!8r z7VskgKF{KufbT(ftD26GA|k5)L#~84`a{7#@7i@AMUsS(!R_rUpBWR#7N zl2UV`KC^q$?6z%-J=>W0=N3o8uTpZHudGQtpa0sSPaVUk8#ou5eDJ3X_eUQdm8{zv zbKO%>BAW7pK+L4neO6W zTzu68J_n*LOky9i5&MGR@si^v$;yaaEvX`RxMfc^@SWSaU6Y50hd9C03jJ{N4(Tc2c|%fH^qNUNMB$>=TY)H|(6slnsd9n}6KTr^Tp z-qK|2eppWINyIw!__3OQ7PhQXOu}%dp`hbI{ zy1Ky7jgM)xhl!qp;kts=0RfXDK-CA&ovQxfR5+V%Pr!m~+;|X_Kh7GzQzjy;B@_`wsS+Fb_zJ$gLEz1Nn%H-HuS30S}GrF=?{bzzk z0A>*jUj{q*Q<)Dh>g#{Lv51t?REQRkTmO)-Xv-BXSaLKu1lJ(cHjHi4u4O!Xlq+_= z)Je|53~iI0;3yG$CYD%m2na8Ub0ppHz-~#k2bVNmCO&`hkNC1%O4kbz5QCJ*{spXR zd+E|sumL!D@B%+n^C3hiZF52}@?i zrrP9IV=}>p0Q-ML#5CA0cO>gLDL_L&|3}V0RWF9)`YN=vKXB|PV|*cpu;yp(k*^t* zW3I?>21B4x)(b0pHKMTrofBrG|8wgBOE(?Ci$@-Q)SX1yxMGF!o-8Vw-NI?cDw0#~ z7;XjC%QREP{9ixWeqGMy%P)uCpeVy56XymiX!i0Iy-#TN$aGg^(c-71G->8yE!e(j zFBQh81o&R$LuB;xm9=^wC@q_q7>ydtlwZM~nvx=#o0qpt?e)~Q+prWei(|*i@oivp zt$m^#gX{c2Llni*_e&lgNXScu_g!zJ&wxDYE4v>>bo 
zH01DMhod*f%{pBa=zN%EOw8EJ14|0kt%Kj!sc8+<%O2i;AZq-Vm4yJEt=zN>dUd_e za?pSH@YngIVyU%^g6Cz4JRYEynwC}?Vlwv*7HweKr=_*J`17mTV)jUd670cP(YABc zko}jHODSNRow%T&j6C^)c`mTHw}P;Z8v_d$Eg@g^d@evsZ|Y-Zysr_>pSkB|zK=*I^o zR>51$^2K#8q+IVJ?%lfu*$7~|Yt6NOn0~1%*S+g>IuzJd_q{yiq+|2Nsoh0?Hz)}k zi_LHO@=$p4LoBc!!!G!STpX5Od(2;9C@+UjE*HhMn^xCP36$ zfKYzsu5EA*!vj23Q2bCBvJ=(>8FF6+x2^dj<^jMBRhOwxpKf{Hbx?##4co#U6iDLi zgGLimBMwfVuM7K3-V4y(iCdvV%_z2>hK2@dOYd%q(y5jcbSZ%&Zr6QBqeKrmEKni3-a#n>JtH{PfOoY46^daLk`6m zPT6%Q0v$z20syEo70iCmzWd+0RTlIE(L>0N%!}KgFJM#%;-kz}om~s@ZJ%)RaOF=Z zX!m+^oL|NINZfokdIG$x_+K1_kOjFPRp`U`mqMnIKaa)(yjl=D1$6SPt(?K1diYFt z%!Z%NibN#)2dn4i>r#)TmH99~TZ6I@(h3yPDT+;|k)IY`H7;&moDH@Ug2Gz}?7eeHl6!7W zI|c+kQ~L25$Zg((a&5~we?*l&7hV9ORw-QieF(9k8n2I$MGAH%q&JYCN6_aro=UPs zE4hyDOFQa>kuo;3f0G2}BIH!l{ntnG9!OGKp{GaHo^#ple=wZXl39d;w2i$&ESFcu zLY=)=DG>Bfgiiy4?1ehYMKz%dq%1;H!7eCzE%o!SUAeOJaL!d^eHL#IF4m>=B&v}M zdyI-Zt8bnylG7r3D86tpg;UDM(0c;;3oyRQ+-*rUyL&!%7qDSY7j!AF7xlrs&lX@h$qiiQ<;o z?L$LDT+VCWJAg>R=>h?iV4{P-Co|Y_`T6;EmEanKitZ4%LJ~=Z$6G+4a#Pd2--BRw z;5OVF#GA&yXM@+_$c0|12R!R+lmq$bnnSDWwz!0ZmY%l|+Ko2VOSiXIMb~Vj-JSS# zWKnD5gN|Xx#l4R`T_s}VclA!ss?7YFjc?p%5U!)8#U^LtKX59uuyDR$!mq8D=#j|jM^r)3#Um~XDP(&R_Tae6#nob(I>I^_X#Gn^( ziR5^6B2kF~DyKchkEdx>|EwXwEEu&(ngHHB1n(nS0G%*cFiD0U?rgl97W_@Nzemhg z6$e!Mrrd^xhXHZRi$9VwD}1cF)W>8fwh|E1Xgh%Z1QN|~``cx>28lBeE{)BE10+W) zcnWK4QNHj1a%sH3(pTa~pe(gWm-hrG7ZDT7#wbid2%amZEHhA58F`+OO zVp9Ou@)XO|!oost@?#{!;&txBpTuCzOTB(TR78ZqHiXLSV#bz34>CbY5QQ3fnUY#A z(3%=pE(t03=^G;9EL>NVCsTIrY!XOKBw&7WP8Dkp@q4TF^iRQCu8qaX(J?Ya>L5edK4&j@yl{enIK<5HabySuU=hl z^C>+3f^-jqTgA(~s64v)Vi%|&G)HKsA8$gytd&*&y6RPb5bKUJ?~uk z*!1DcFN9Uj)YV2W^MCfB|0zGexc9|#bN11Qa3u&PAow)Qwtgf|WFrE$8{yxVQ8CVU zb8LuHH1`E{c@6y0drS(%a0L+;Bzy3|N1b-%MiZf8(ce0I3h$lX@y`Zr$M~1}?-10H z#EKC;0nRGM&5E~_~wH!MV)GXlh{Gnn6ANv7fo_36p z(uz3HRrxrr;sH(FVz=x9F^4w?aY$DH-V*iPWE7yM_xj9ur`n50uI|JkeiE7h{Hv)r zO&!*dJi9$kw;*qYmXCBjtfGd2)YI+XKIFT4=G#>tw|wJ?nIAb#`c_ELS^6DSGyrsd z3&KbsD*_3{Om&kaCK1#=A0-zs&CQ+yP5-an1(*Sm77;zjjvb_segZHEI?mz;SJ4*nvSk1 zmE&I1t}XiF@Y_H382K*l^XC^Z7Y$@vDHc;DPHP!5 zXrI>6y@c#3jA_oU{_{Ni3?Kt|c(CDg-Y^s)GQgxfHa0dwritE#$atGpawxMQ`_^sS zG=O_WMeQCPv4J$WEU*f1j*-#AsaA>{g)@~u=!?$WvEFsz+QLUA`yPrpI=?`OQxs)tBlgHYs6FyIDfe~o*pEES7r%C&G>oq|clmj8w8Q2{TI!tN_UXU9 zy=tpPA)K6dqPZm)9=l602u)t7MZH~LZ?W&)i=n&dUgiHjI5*x!nkiVPt|Qcj{8CUM z-yy+*NMaGbL)({m!h((^(VBgqSbeuh^Xz`eJzT8`MjT3u-+S$HyD~4I)>P}+wtjRh zI_zqz>0ycU!xx_B94KC?f8^s+X!@%(J*TmMYSrxaom)G_4!v6AchLVs(%QJMmA}57 zw4nC9T`V6T&%Y@_H6q^3SsiN+pd}~)KYaLbCtj6O6Cd;-&uwjx-h%5Ejy$|ku8*wo zjT<+jHI?B|!wGSAJXy$Y-skSVa6>4+=0~vj5WNp#I4FTed<1z8FVWTu-nX!s17HWQ z{`T!#3wtq>WF#AS8SZeq{-Ahl?;~o19?K69f~b~wCcX&aII-;tYidH1UwCWA=;ipz zk+ZGa*B&_FA8}5Wo9cr6gQ)p%hOVL>?>znq+`J4lrGcLVf~o*^uxQC}Z56|z^WQJW z#0bsht#HQxAcT=u*}^DBdo8qG&<;FHhSggB@~zg^z>q_mquWwq8DwPIVj1V&wYL>@ z1*JE|)kx6^z2*`-9@X=F-%U!Zm*GvX<`TkB^bR?iXMa;T5NnX1f8rMNlu*T}=HWMy z5zPFKtFmhQ@BFm9$2Z7KwQL{DtX7nl7Xy$Cd_j1KS^v@E%kQq>(@<1Y6qk@dYL}JM z$x{ay9u;yZ;Uz1TgWD_mjj%H-a$4k;2TwEGtV9{DVkl&vDZ3#bI6t)iHxRo0gM}l11lb*$R26a0kP3?EPo+o zy=|0sZ@U{VkpF(^E8JO-EhmCUJP5>8|A}30t#nMC`2A1SS8A26mP87(aK3x5+nln) zr%V%+c}%PJoY$-Pf;5#j?X;^+KgQMBJ*y%t8mb=7kMVs;Ndv%<^X?5yYDvZ|BLRb) zGgogU-bYZ%z2Hfo7_$bPBMsgzrt219sDUXO*s5s#Uk4w2-9vHWT7w3c&~*Xnr5ewD ztMb}n=FB!=$bwQ*Oi7Ij1Jwz(-}+rTgSpQs zjPn)v(&cwr9ZsL)tebUDK-67Nu9>%TLvbh*1V@oIKa96O$Qu8&t`tlT;2QGPr#fk`JU8L|+c~eXqcsZL>eDH6vs9yb|ZB zQ=qSUB>EZEm|=H&jfU7;X-y5@A#9ilma$astrn~!}$sAGV* zOtXO&3e8gM>;ozbr2@>c-uN={B|jZn+8PMVJShpWuIn#cxZr;&3k41bFl{1^4=jS( zsZmW>Y_vj$Ij97m^B0}Tuz?zge0Z1%kV;1EO7`~rUvf*|_OK*g8(x%apz#w8&M2Jb zGF06ba`e=#75BZyd5?m^WfD#d2j1S*(B97NBpLLvPqOI5Dz}Y@a8y9|yrq>|+Dyk- 
z9muTpEVB?}0~8dxadtb0c+ShylcwrxHEIB}umDENhhR#KBLuIl+^Ut%gW55l7#q{^ zc;W&lZj8yk#(N2=u*_h0_xydNBbLF)bBFytqn#a@X(vUc2BX{hx*SJof1E;9SR8+e z2~nSA{~73RYisJ3$eCLfcp=--4<;L9xx#Tfp-?{<@vxEvGfF2=%DMNL`NkvmY%2?&@pV@?x`L$A7kmjCmz{qOYkEZ)ilte= zE6eegT_^=Vq)GMA`;TYd_bn=om%{LJ-|+GAA>dVkXIJ|5D~tfyYC_%zO zZY=B|ba)+sMf2Hg1x*9iO)hqkuTOHnL(@W~-E@`;i~UR^W?#x5!id|hu*HBXRM#B= z^_a*0Gds-7#b`*TX0(z@G&zQt_ub~ahx!BdzG!nzw*xnPcOuU|eU%GXAv;kdWllMu zshhgszEHp0iDh7Y=^G+s)6Xw(bTlxetCJh%0#E3l9w*Q<-~$L>k*S|c<{Tbyy>IQr zi0U!5K58IHZB?U&}QbM-`?)^OV@{`b<_Kx435|-n(yXqz{?y!$u%3I z630K25l`sKfr!NpBGmj4TVLt_k@em2T<-7xQc)6#kXf=qw(LE!_s-rSgzS+d*;&aJ z$;!^&D`bc-wy0yK~Ox`^T^Us&mf6x$oEgx~}WFt}0e#<>u_$X%i3MNqq3u zYj4au&`&;(mr+I%%FJMUy-TNjtX=q0QN*v< zr~WD?2Ff^a76iA=MP|H%DhH@8Q0VG?p9;h@G+<*aG`vfW_26=QklF{^WXk@4p^6gOREK!T7W=O1ssc$)h|Q-Ne_j&26=Www~j!bL=erXc-r2*qpmv{))rH)nV62(P_l|GG=Y+y){Xwf|uDE+; z<#{ntnzT4~Jxz-nJ-28oeZ0#TeOT(u5Zpc)2aJguUtXLtU4yLC$y0IvWO*}PWo-B^ zT)`{^M&50H2GbAGav3HoQ_R28y^@I?3z{wV`=;?t)tr{Fy8oj>XhM0lUo5s6l2G&U z9-B~*{D@w47lQ8~T}VVriwF$si$b7A0`E}5Nz#>&w$}qz)C9RHo|D%gLp3XFNt#Wd zCSINvP@Zj%IwX0q3KJo8T;|W71$otbu&sZ6NC02~ps}FU0RtxBdGP8$#)siQ`_oj4 z#4k4&y!Uo9^*xJ=v_SuX_MWTzRbMptw4rTy5B&O=tvMNY6kNI$@jM(MSE3B%tH0I^ZehyNBl zUR2+kG;X`Ni|B&br}whM6!pjS*#!HPJRcT@wYreaGxM)q@hfO9mvSJURlXXw{*;+{ z8u(|Gp(B&R411z4K85KMw{5@aBci8WYyvO(D;h`88suAh%ij1Ct=l;dj<`E9Do;)v zo!Twa=FBw(+{_vRLWwsrY47~}T-<&Q7L>w$N`6vh10F6Yd>c*g!+uzoQOIE{1^kU7 z009~$y$3DmaUgYuo`7$UGQ!`)AG&6PLL%^Sf;R*}A}dZ0X3Wc}T(SK8o6~h&$H#my z`2$OBQZYfAK7a68-NY5%4+Wn~mNI>j-h(Gn83X;}<3~ku0#OOj05ev%K0+hV$QKeA z-j{s;GB9O{j`Hlzs2mr@71HNklDg!Vp(AGd`K9-uV5L8C^0nKwt7+CDZNN%#;bkga`<<53M8zzSz(7yjd%{ego%`ea zcMP}=EDz?4nF#ZayhZbd*&&+)5>sGCK@nF=z2vW(jKrS^3V!|JfS9PTFRiVGYiHbN zCfMYBK*Pipo`g81DHQxg+ZedjstNAc;C5&fe|cO!zY0~QM5>g#zVrABvGtRVOWlQ& zb6d?@6t(`;duHvHhqX&Oc2lpNkGIQwku^@O^O(M=FRFfDoC;%FJr~q{!AFK;e?ew@ zB}f82UvuJ?`!S`@;eF67E|dSgxUbB1Ku?UPeAJSr;B)>&u9djiv6Mm=1Hb(Bo#Su{$jpW&Y}$y^M(c+AVw2 zR*Uw`DTbt&!S_!mozD4-c}LhWao;|-v!jKEOj@g0GtV5=9XI!`uiN&BhP3Ao=5HZS zNUIZ%D^1;8N_F!A+M9E!*{Zpg*tevkt84Q7c|Nm$46dX7x*`|c=2m68z~Qd#JAxz- zWc-kizp&Vo2;?;7$;qKgOs5%ySk1woNKoN{G#It~6(pFr%Ej3k7+O>?DDyn3>B;YHUBA6Ni6A?FWPdEYP~lmxzP}G#y$VAtxPR%Q zylq*DVNOFFY)o+Rf1snIt8&|!%R#V!k{j-rcM?=LH6|-ZH;I8eoi%;r0ZR_s=Rf7- z1c}^pn9)CsEpUry_ivfyDd(E1atf@N_6UIq^upj^fQjMS* z7#C|-IxXRVaK-nmk&@$6wd^{uI^N4;tY7?LfDobvu}oNrTg?e_QHR=I*8=DB-*kw>@CEzuq48rd{P)VYT&fH zniq(>DJV~4vMp*d*vp}z0Wqa7?~du=qG|#xrCV6J@z3@21nNP=E10IkNV(xjK4G|$ zgb7u9Vb8r}eBFO0*LxOyvhvcJCx^?^#6fI#D*5MVv#gbRALVI5E1+$NdmBqsprFZw zlv=0uX5*Y!kONO&Y&{p18o8OuLwXfX5{{^m>us#rw<`#1i0^i@e=K23RzPF;OxJ~n z{?z@XN$*YKz_S9Lz=KCRAIu@p33A%#G?go~nXqwjK>+V#bRU2tn9AV;7@VemgHI)M z8_Gb#OvKQN?K_-$e`w{Mkem8Yg5jGA6rR0w7aId~$Pg|B(Yq(B%`CC9fC9tFVo-?e zD8All7Dah+*nYaXb0KSf*NzcNo6JmoM^`ldm0$n3Jgc$aWtrE92T9AF zjyZD^ZTo3P!`p`w&F%PQ5t&0xPbf31+?|5`3nHyJw+H2D>yLVxOpkaqRtD{3Qr)2l zL6Gq+@v@8odhq&qEpnvdx&JrcMsjJ!&f#fx4qrf@wN@8{PuIl`uNR_KF?~y@V*?j8 zcTBbzO1?ICW82=mvaz`-@}bLDyRY}Prd{*BErO>))tYIUT(U}PG@-7p*h)vZ<+k15 zSu&UlehFGJiaW{4bZk7wJvCp(h&S0kM9FJGU>}6T01L!DgI^58I0WwwdM3K^@F}p^ z1&H~%3HZQ;QY4Gr2UD(pU$h%6woI1eS8KqCbUr6OoY(> zY8B|s_9pXt9_T!MilRdT8p3dFc^yYk>Yv7bNe1u4zuWBX$DRkfCz58 zNh46DDbqV!r=IL9(-Vx1jiLSkW~dkh!W{m?K+x({I`{!kCAifUc=VC$c+~OHjvHCS zjPGY=4@ngev}ac-3G$~$xA6~in-LAfnK{R>;8<|z5NI^nPFt(?S2doWerj*HlRS&b zG4o{hcMXAogK)2D0a4Y9jDuoMl8*@t+p~fA1R|zY(=XOWX^6X z=1FXEHU|vJrqsWi#{1`L4R6V~Aa(Z7URc#C;iG(sn+!0|FRVD`4bRe{r1=sS^^^}4 z)mZ9|fpxHMtEl*rlma(N&SXw~?dPI;&AGWo5{0KzZ-NstvRg$fhRACx$3&HF7IY>G zgnyQXAVSZ6w5X=IigD*LSE+LHue|;U7hztWi$+umWGA`47 zW^Zo^q@$ORugTJY6(=M-4-O4E&A6JWv!^XBE>avjaYjn72X#OU;YqkTd%WBE(GsX` 
zF1(LVPL3u(nu{4eRYe&^{oHlau+}XyG7^u8@CN8z61nZ!ff$C)Ena?6ug?pd>7ccS z--7mPvew<{Bs>koSCGfH)u~6WitNF!SRrC@$?N~>`x?ouWympJS`*ODtW63Oj>NQsNAI5*% zZ=OEtoNFQyn%GzLncux=%1RZ>Cyg?@J2FBr?QWUieEeI7^z!9x%JY_6IPE*wZedax z%g)u}m}A1Se=(4{=`Nl-R7!^bjE_8u>Dd?W;H5QOoS*C){KzSq*!ZlP6S$S&z0@Mri9*(U!7=3|=w1Lh+T2+aCz`6?x|OAM#x4{%hTcmF%GW{Qrtqp=LvnekWg>_(~6JD$>!eP$=XaVU-*x1 z1fWB-DXh-`?MLOyHA1kKgM|WO8puP&V^6 zTv-sM1o^6z$2@R9AV@Clkc7R3wlq-aTqQHtWhKtk)r|8az2To;b0>n3w&W08g}zty zF6-Lq@8|57W{e({JNy>1kZJcNJL8rQBbCX*oxJ(V3iS#j9e6x`3q60n&EGI2?pl#? z&_n^sW{}8N8K=AM9tl7ej&bnu|NjP+ui=^L@r?$Na>(UmByn(<^c`Tf9-=Zee2v@lmF!i0IS;o$M)$X zdm}|Ha~lee6=#AtrZL6n5&TPk@s@9Fz>oc%j;godaVRKI-{$v!gdfzJ;zSrCIqh@u zJWa3mYwISNb5qmO-lqYpP0t>rT-MO6OAREs{4Q>%mUM7*NP17h;ik3-WF&y@$b}G}pOt;H$ zf{&YXIHg@#J=n{2pQyXQJ2+j__l$)490HU|I^}FP8|>_?^njOyo6B z`bc1>b@uKB60rB%5$&W59(05ua7S(NQ*8)@NM%5f<1W;>5CQLh=K^hk;|59Y_m(N{ zZ`Vc(_jcMTo%UCUJy@1n!N&ypjTYC(LI=c?wVidov%_$cVTzq#3ufh>%3Fw_zCiBml?Y;CkZqJPgWOeDIutmle2YWqQb_2 z8k(tz>gAnnaq_JvK>b(PP5mN54!i?U0yv?dpzEhT7eNhMpY%HH?Csm_D)6se+046P zsG;%r@nh(9kavzy>C~`Olmi-3+UOv-Tz7w}DLuCzwJw{id~q4N+i_b97Z<{~E{!r` zvx*5N^W}7=E=c$jOpVS|4RYpF;7f*2ySS=KhB;0=iW)*0dVF>BVVP(7#m1N1)q@6; zVrfuM9VuyOz$k7}dN;A>?dV#Y?PE$C(K8=m3z$xzW!vkdO9d2PrJ{LP2P%cs<3w{a zvjyp?@f`JB3wa&)?UNnc@p{Af!RGTzRc(g{ws%8x!}Gcyd-Qtzn8j|=Us$J5G28x z*h8EhJz{4k`g^day8VnTm)wUyg7?PPvk%XWLgy?3|p@2k$yu z9x=NMYo`wr8^-r6>NIDoHo2ygHW_exyyvb1WK2#jL`bC7i7+z1T3rL6N9Wulbe{l3 z+igfU_BeY;n}HCROeYb~V0&P*p&S~iqAD- zM-;IW>ZbHyrUuU@KFg|jRicZtbIj8UJIJQPnY4!_r8)x$z}ySUm&_X;{PpVx{E7@Q zj6hFTeB$dX4EO3s`7dgW!P@zn-4-0sE<;y3t%Y<4lonz#VcnE{|Ek^N#IQc!^Sx7q z;W#o#Y0#9rHc8kQ>rV~3 zvx12|QPR|aeJ|`Coz&-vW(=&?+%qcEMGB0p?b8Xl?ZPXcxj09<@AYsOYctiJAFt6> zf7zoZ!-2q^yLa#MI%e&jVYlZm-5Wd`;D^$Mc1KWQne|ip?U1%-C0wzi)}Dla${Ich z<3u=duy=a6Z8mzpeaW0+e~!UXTIRGBB3b#8u|1hTnqGE)>p8|{0c27N?4_RF(5|0% z)D@oS3x?SWOlUULKc9IlCA*?UX0iMuQy`<15$sbteY_Ryz}3$`z0Xy z1;%|sLPD@f@s{-k218KFy<2W+l`LUWrGiAijhVY)E&~O#4PgVLzkXkaEZuqC#mrQd zVSb1qk1v2M;Ccml6*;ex02vMkn+nY@ixAlN0CtBbeC;X(vBc-y6$kZf95qv!TdK!w z^R%&w&!@)c#UHzULKu#&4;sqn?;*Q{&7Yk;F8(7m?5)eCS#RPM{`z>03v-?@gq2HU z?hI)kEvg6n_IM7TAys|!&=gD}auqaG>!2GrirxM^=?CIuIIAsD}Y z{R%BGW$@vDaE zBDS8DFahmB5{8kl2_;-IW5BjSpWshlgV?7=7-fV~w>qwROR8AAtjI-40Cg1E`$9cu zZmQ0n{(Je2V|G6IN@ev}H>U8q&TuB1Mjd_^x&)x z&HK(*)Ul>+B6AtYA37sv`l7nn{2FiIwu|ISdx#r}jVUOH6HsAKdxUY zhR|dMbHxN|G|R#6&PS~ybZ{tI6oQ)wf}5}cnuGE)ZYvbIk9}^znA^odr)z)_tMhLk==oy{omn}ZPj`5rHahTxwmWUx`1}NP zor#ctS2BN3?0@hHohTsZY<+fjvSz%IQ+$OB&TuiyW)2SUx2AgJ>Ph{i1JNAzVIE7#-wPDau+Cv=2jZ3I#Ec0#`M zU6-4;9@&Zs6`etnh#7l*rKJL4$W=I!)VFzm+4zWnXsJKE50W+y4-bR%9YFK&uX2aq z_f$n|rS(y)EVOu|<~O0Eh7KH94OWscp7$K^Sk z0~8FC^Ix5z_d*Rbw8@N&jF{l%u3hCBW6v;$a4E*CKxQ_sE2`8&`}xJfe?|GA|=nmS4pgy6l|p z(M6xtLpLt?vswjs6m<>3QCCgDYyZ2-ldb@A-YRe~W(wtLL--!W%MvEjiirq=-y_zu zv04XpRYQ?!6}%X^SVM^L#&Db&mkJ-&Ye5q3h8bTjhndZ0AiJiHHy5vX7bKfx<`Vv3 z6L?DguIgL7p>6qgZr#;*{kG(8Z2F^(Kw(@TdC~Ee<2V#2@12V$a0fnkkiaLLP)ExE zEu{3-W^%eLJUXNiv|*3>MB|LI%c+I4{gWaF4)4kM3JS zK5v*ABM)hF{Ouyja|05VVLAg2#PqIiO>M2-9=bIQwtUH(jT>3UeXk}>z6m_FkTVyt zqly>F8#dw8Y22rIkK;3=H&NRr?-(HQSeA38u)0sO4%6lvuK#(Pqh0T%$K1xo_uCHL z_U(MpDo9hjN4n!01l*_wv~qKAV+D}p%2ED!!3oM7>`|x5TclXU z!PlIss(1*>iW|Rw=@kc?G%;m4wxj*8|G>&5le?1o^%_{x+P96qpX>mKjFZXXFQ-tr;fn z?(wklH^p<4_kSmsj1k58)E%$pq17HOCIXkd0&oEEKl2YA8g^yfWjC2AY~B`+L|Iu7m65=!ur)EG%|4AZuh^=hlPf)DOO?x7Hz&6xBb3V zIGr<|4NxX1AYj^iFHgHx{bAQ=1qUXzt(y0r=&=C726L?~QH{iUxWhEfZIta0loLYb z{Ib?J+XHsnM&~O@f72gecl7jb-o0XY_dTgnD;qI3@pC~V&(%iL07n&{BA=VDM9fme z>@EU?YHW={nb{dldAuJDZx)K z?u`o_DcrMau^lZ>29$hu@r{;To&G6cqCCuiuw3&nXOvoO7e0THtmVjOrZc@cBFr9l 
zBgM^RCNXtONBlwF`<|vj)nU6c*`YMiAyIZai(R{_rwt8G(S$>aU*F{t(&t|9kF~GB zwXd#>ez{$8mNK!$p@t~%7E#ggAvv__vjCMT51G(F(>LIeI6)FZmXKNxs3g7f_I6Xo zGVIbO#-{L2X?_4e0&15IAReoZbnSKUWC5?NoZ37I*9E@-aAvUBU7N$D9k_ro@>zNK zKZC~(ETp?_awjzITuaIzkRZcpFHTxB-njezX{dMj*gW#&A4qa zj}}^}GNXc-@&vew-dbqc`M`BBXeFw~EbUV?CRx#`SmQJ59xGsGQ9;T4c_dzD9b1em zUS2zY1fMa1QYvfc#cXqcI7cD`T>(+i?&DKB4g`%c?Bi_RJ?YB>QG}NeCAc#!=R9Bi zl}MpTVk=^^d^Z}n3X)#JM+x3OAj)3UOtm|9L%d{>wrKkTI@a2cvs%zw{to`TIrw=NBGXFVP#S=R6Izm7F z5e%};RabYJ+o54iAuQ#N2-!9Nx@AlPtN>_invGbKmfd@AOR50H#K}!bR{Dn?vW>g| z;etgEM`QJv{|qJ9!~*D7h0_hZ_ceBpyjrlk%XIT0jjz;mr+#JMVSlxrTX>=@T=Xde z5rKsztZ3TY?A_Sd+v6|82s8Wge$!}9v5J1h<;(Kk`XgH+S<>uZ79zB9T)TVkIfeNs=%zIM@scH&emCfaHK*32eBKWWdJuf~pMx9RhoN_W&l1 z@M~yuAw>uD9~@5H{!K<{H8qLwx1gJyRmuk3Xjsrd;ucPnVpe=g%J-!`^0l@s5LP=@ ztwypbpH~C<-R@E!7|Srtd_h;r?SlE^vg3_su_igv(I@EI$Qg32osi#jOkaLnJrpsC z_wu;tJ7n}=dGj`MzNeD!Z5~CII)m$SpctEl1x^Z*o#&cX0q4#SM!_<}-*_77-$|Z} z*f9}`XAKGVw@vM>=T}Upt1|@skMEUu7u@k6cNepj)V@|_DK-fQB1=K~4Rf;_j@vBy zp_k{h+0R(Fa+|u-djzi?hzm#kJJmN!x-5*JYc;!bJ_$BGBHSUw-J)<&z@R!o3i0cI3zmg8C2{tn`CYn0*) zY}s_FeWR0GqR%)<0RPg~(SdqHzE(_m6_%zz7YttQaodA!x}@9S>M%5%5r6XL9b5-Y zfCYhj!`@JvpZUluB{ueKfa8vC{s&;rFIS{~*nKrQynA)!^tgWGOH`sZ=i8PQQ5WBp z#&Zh~Qzw`ks5?ZbLboP4{D(S8a4gMkN|LRH|Zli7iuE&qxw$IOQRLm5_ z%bd{zxe2oH`G=dbx4kxf6@={wpWvA+SsJp3K;k!^Rx6Kmdk&VRpZeCftfyYBD=$-|SeX|IP^jrF_02T7JeV z+iCnQ;oSz@SFf8_7ytC)jy~!s2|5=OPV}>@jk@e#KI{}X`3;+_vYYan*psy|@{B?>kUg+Qgwf4Z^{ zsBa(Q=Z0a0IZ2q~{P!?GAk~3F4V9YTd71B7=dd!E!u^bkq0iO6Y(8tKsQUHjT9Ykv zJ2?Rs0$nUXik)pohCe{8io>DkIevw?y>XfHFOlIYw4k1M{yQ-tM2VYKG{O~FZM$m> zximf$@Rp+g3c>)w2|rJWNCTZ2Y|{BpXBNIM77d{oE-tX#u}^*A_$$l}P{&|G26V}i zHaM%<_A%VG{-bMkg`3)_XRHAVT6o6r#-J~5_gDux8HAenP4rmo6;I!RVYp19ny2_q zO{p-gv|$k}O_|Sc%sQfWkmqx_y#|d(N)_%Q*sZ{87krPY?N;J20YgA3n79PckqBctidZ zERu*PPN_+xmYHz%1hDlo(y2=CW^@}<5=-oj4*;2hW|!O(&)(FH?>6`t;x zMU`DfggYxz*-uO&Vt7?z>r8ieEICSbu7=S*lIer*cZ;7)q=}w}HKDC)zZBE+TC!;# zZ79@9zdZ|>b0F5#GJg3oJ}hjzegjSFfnM!3mWE02Ev*oY1569td6<+F5U}q7AIQi) z-8`#eaP`Lerx9p=aSP^wF-xJny8!VDpN#jR6nT)|CQW@C%H2F~TIUs(+M*?es4Fb{ z`5!1_AY3T0eJHfeWZGw_xq0)9=8K(6R9{@f{WsU$jS$@PX|Gw zEp*)NKGT7bKYUeFE~_c@zS8YCTJyO1ZP-_;_U_^PecPkv+Tb77ZiJihZM&7zPlQ;d z?pvm3nzfDns@me|G^2SU+}bR*&wEAp`H@AfTwH?t!T3Qk2|g_QZ^Gb zfGD;P=*!fzbxWB3Q@nONJy@Rnb1K~=P{?z)73XgA4Hj7c;=6wJXQ^!ADEMTq1?IUT z1Y;Prey42i)h`m0hdUMso};M>DB5o+8aHhPPH_#1DYj2;Bpa6~9UQV*BP}k7VieSq zN#B3m_EgcpBZm;$#ho^=eRP<+$V1~AyfFG~=SyVVLoEn((u){1$UcOshFpjBZjQG!i?qM?#2F0ID zAx_YBw9oV6m*Og_@sNGDDBY;hKFJ1Si|O;{VR>4G>7#3Fb!7j-S$H%cT&dJDaKHL0 z^>l#_HDW=vYJUom;bXzMUqm^j<-=Q(NsJe)Q**=G z%hF-DBHSWhxj11)ncHLdmp-I%rest$}?ZL8h7QR1t;K52~ zX>!o?OjRS+foD>g;|jw7f9y=te@GxT3JKhd)iQukZPYXX>d-9Ew1816yrOby{CZ1^ z=k$V&EZf3(@PG`pk#}-RO2Nj)r(5dYrxb*)PO3Exnt&>2&+lM;LgT zn?v~qk+5fJY0r|DYLyc;&RcPk6d2U$Ze{-4+()pYaH^^)y8^wc=B!j7U5;p^3Upei z3y7bU==bWd-EG(CvmHZV*jfN^bjFtWCv(sQ{ z6rcy@2p}ccKL|9rDeqziJS#R%PKkTBAOa9iGG}_2;hI={Iz7zzw}7Ofpb#1e-EOI; zg^lHX@}Y9HyIS80_QZ>2%mr4DVV)K>f z6P`Q9bZ^^b%F~wOnp}h|8;~w%b!-Cm7(x;%cR<(xQrT|jHF5XGXTH9^J>GzIx^7Ji zmQ#5_VhgaSv9YU{ZwHVQh6I580q7*CLG9dTy0<9UpkoKi9b_(pt0%DcCU39=m8caG zH^Nh!=|d@pEtjAu1JZa{AzmIM<7a5bT1B4S#q2THW5j}N*AOONB=P7!#*aTKFU!Boff^2=hK^GzEDvpMZ7p^V2_a6w%otsR z6ZYMR75qUywlAb#Z)bzN5YDP8dpxA5^9@|PqoEPs;7Z>m2(x_O_utE^!D0)k9Wo~k zeTz4)2DedBY}r#idwx>h(BPh!(>i!ydMCDQ;kLEo($c}Z5r2YO`TLgWX}no2lY*5? 
zJfx&Wwb=3UG4b)MTinst0Rove;MXd25&kLXtqrmgW%{4*^^apy8x$0>T(uCNwT(+G%% zkW|-_{h?>tGcvc+wACxmRj_L|ucBe=#&(_AoG5C9&pwlqoa;d_8(8nAMQ)BlxYZGGdnW#efeu_>|VS)Mdj3nOKLLJvjw`(KhOL-?`Y7#ioTKq zBPXwCGY8|XSPhev#Yp8$o}BI8zGj!Oo30C3@n%P2)%8~%Jregc*`C{RTn}fm1o9); zntN%**7xo^;$1DFix4d?sfD2eF>0@9xE751Q;3O89u|gnr#_e>O8gt{*-t<2NG8`plep5yAU(PcTH~9>?`u zS%ZhW2jA&Ls-6lHW2-X6^2mXf5U1zk%>XeV=P0_4B(VGZ|GEn_;V-oH^uV{Qf!%+H zPH` z9V_6B$lm- zU)1h<13FgXuB4xGtGy9y52cTauW7$PdW z^~r9(iPmHy1Yy~I!Vorgc1P>v9hB{pMWUjt3^XoE=8w2yqi&r&yh}yDUU#|Zc8#f{ z{&#CP$G&pChkdS^GgO7^j~WZjN0m+R(Edcd?P|kI_nY3>DENvY*SFofBDdX1XY@EP zG(ss_);yQl$&Hu{=Wtak(4k7PK~3n^HKWIg#k%=fKARpl0>oe&8u)r=`Gu+K&o=BY zZzEWIt9lv#Ra27q!J&NDPU)Kz9**O4Jd_&MW3g73qz0~NP=oTE+nN4FkNXmtPaZpb zbVOi|lPN^9+97*c8xV+Hrm5hG3V-T(60&2WOIF1TD;r}oJK90BoiX2P*R~d7y~UmU zy2B1B^xhYvRJWY!*hRc_KUQIa~Re z0XS0Za-8lDbp4&-8_#odcTq%f)C~U`Z91*=+aFqWG{0Zu#I+t@%SO~tIn!)c9~-UB zr{!+1WTlO^0KJ!(7?~@#zH+IMsK8+3OU0qs zTwW&Gn8gx#YPpuPuncWX9r>4QGqrE4(+B@3vtiW!mO;#g;0eg8e%w!bK$xN-iy)>@ zz<1>q_$X_K5r)JeXA*Ln&h;BP`-1k2%4UMq%Js=NiH8p9 z3k;cSA{nT{0d-xtF=HIXRV_HlM z%w_2Y<@?iYkNGfefHY7K?k0!0$UkFZHjBzn`+J3T2P8#Ko(>Oqiv6mT?eJ?oUiKxv z6~L=4-~FviaP?_pM0;s8T}?Sgy#B*W^Yf%{TAn2x_fg4sgeI(l7TY>F>iat?8+WIw z#JEJ={%L;B3SvL6DkHcnv&@2Z;26O$*evBW8$@1a7{_zp`cw;JX2Ou+&|vVmSZ1tM5eF{!p-Ibl z*8C|QXUa`Cv95ppwb*!Yy>&^xmFSRFpnl82<_Jyp^J#SrCk_*-l4?hMc?CcF?eb}r zv19G-K5~pP#SkgU~vT*tMqAg~pKpi_0udh;#at-m5LIC4Mxo z{XE0`EwO)_#kco^k8gg4pyA?THefiVs8!Eg_Mb%lT}P56?**{mx3cHLG1P8VzFKZR z^b!%?S&4=!M#jdLrMb;;46V!|H{jLP3LwP0JFznf=s;lW;- zw$Slmw-&!T2pF_kho7O&*>tcw`uA_69&dpAQ0=0lb2w0=g@w2cGc0_)l;HjV=apMc zH0Vf1J!M`yFo8P%AekNl2C{_4AeM^I<-#aC9Re&D57h9F@aWyuV*~*@Z=*WWqbRMz z)R}p$!(SqJkNf@9LfAg?fSf@pxOEXLn}K;;!t;*|yUfXfOFnvjdBFz%+-p&7UG!w; zzKsd|7}*mR=7b(ZQ|05t@Iqg2;{0+U*T-M;^FbvH*G=Ho={qPe)X|-9N#55K7lWt3 zyA@waKorK=y0{1beAS%S;1seNp8Oi6kCpYhIGHI#sw##V6ha4?Dqm4g-IQ0j>8_Ub zKs3UJE~>Gl{H?22h0i^-8k-;Z`1i#=5fE;$N-411RvrFSg&jrN;C}mcdyeI`uWXH-cBEx27azztViTm;9ajS=IYGd4|Di%hdAF z>(4EZEX4(ITsKA3)Pne3X*&=HGu~f@nqbu%rz|j)*EDT5svgnNBrDE*M8SRP@i%1TN!c#|o!EWx9XDL+-7x zTKyX6#F3Q^^40T9lz4H04BVf?$W*>{H;AzfZ_cldjaT8W)hflMu(!5^Lg&Ouija2w zfI?}LKZU8KEyReuiKreGtbWG)8ZHz_F2NzkxS(f}6IuqhwR+ zbn=1+y9WRLm#mg6=rWv9R|D&lhAx~vuhl*CHB%N1OJCKSyjfF3Q1^!<4|}#t-e1+| z5x$9lP(7)1eTgarz4SEL)twxE7i`_`I}EECZKbG-x=5Z;pXOMBMF1r*y8FV0pe%WzLnuTE%2HJ3ja%* z-wF?c{wM86Z4y+&Q+gDV!1lM_N$ zrj36qriQEMm#BP#kUU8xPf`7rIs*-sDu$#DVo6TFcJk-z#c3S1vLf=9JOrW&@Tm&DU^>>47Wt}CpQ)QH4Qj(+f4qr5? z_uvid*X#j_E#I?c4tC{oeU7KAv_}sYToTY2E<1)~-mbF6DIE^7r9ccEemNJd(*G$cO@t>7kGWLc2R0lW5+v5g5ZLB=e=psLsf;<b z(Iz>T;c!QOncUox>S|d~S?lHlFDGE4t^u)%kM-itQJpp#f5X#%t#QrIPzUf2%XwQv zBLKK{Ti#pUgl??cbO2ZE6~-= z?Hrza_gxEha!Cs&o{`hL zlT%Y+QfjH9g%iOl@9OH#irDXIsc3*A6eAihe*+zy2<1Bb&DON7iGH|w6upIS9@=4S zcMwjJ_NjJkobim6{9Y!;NDhBmk*1C=DFefBsZnSfZ5?N!0O*nf)hmD$fI1foPYA1K3AQ?SnSyKbVIL-1h# zcBxUaFk+4o_ZCK03EF#<+mQRaSc(|AGS_82_4T1}85Npw&ziiM6WPu4hvmV89p@Ro z5({6yZ)7d=3;MpKaWC`7HpvriypKmO>)7wOGGKz9+UNSEX8)2iA2|w(ap6-`QldOU z5BRH12Sm#A%M9I;-|{b_-gH@4A25%bnSI`zI)dETuNGu_5MZ&XPZYE!rg8tYWn+w- z9=GF>>b+RmPs6LM+BKpzPAWPoo`=(`6Z9JOsk9xpg1#C(l>O6|LX+sYKF*xiPs`=s z-ZH7j`=!^2-r?lnO55D|{S`$z?3;cA*@*qoC#=M)qvVCT3;_Isob1@}T~4$S>$`~! 
zsSl(}N+DoLD6OyWC92;J5KsJfT)8q^$<=E=-AsWh!>^Jj-jbry{Yk>2`j(UyuaJ|o z3e=wUXEW}|$N8QV-%rd`Xi)R&voLl&ZXQ$(MIan4>vbj~WcXk8DW_-fDGTsHM$&ihT@Wv=7j}nP>Mk}-fiJj7k4$L!L?_G|Irx* zh`X{%Kk7z}-mj=eDmcEfrM>*Bb54YP1G_RdQ}f*Y3?qKOG!z2ZKToY#|od@7gQO zf)QGP!oG6U@QA#6m%r>ERolHJTIgbwS!%b@;m4yq{f&83!vSPvFeIY;SxfGk?(TL$ zL`L$Wf8vqz1Oo9O(gO0Z&g8vQl4<{X zX(b9rQ6vCuYTq8Pr`LeG7={n}2W$wyioJnP z*KFZ$nL6c@J)A8tQ#TZDuYBj%=j1tyPiQrXty)_6OiU7J{nE5a$8p6!9E^?Z+ZPV@ zhKaA%`B!LGAAZfh6T~1H&`q69GmT}?zGwxSnxgu8-)mbsKs|!*D8EXHISw)(PNj(+ zVq;u}5EH;~fHkBz=4mznY!=)N5aAA3-EVM+PLSunc1VI7odD|vJnhHa+$$R!=2lkA z-Ofu|1R;`O^u(DgfH$w&q}2YXH4u&e_z=GXkeLbOqZy)c>0)Mt0CEN`uc5#`_1Sao##CJS0$%a#khl9>q9 z;qUk;=s!i8xkafx2?M4S$3|d^Lv2Y)P76$m>?UI4}bcf!(bc+N`sMgfE_@1np0Dr2(lDYy(S?3P*s5&X3-okUJ=O5{FvOMT;GBz84%{V;UB z;E9~{;JM+$I|Q10mvkRsl{a$GXP#9drz_t~@;8?usgo3?Gw0+LdNlQabzI}*67{SJ z>+>$yddKo-8FJ3SGz!)Yj-w+N5)jh@C}3%F_~%*f>89S1zV5$T3&lj}c8c05JVPPu z&F7#rH92H%^M&>E=jNuS^9xi~-*jOy;YMK86|ED^a>_Nc_=CaTWb{l`-nUuJTT1Nw zHTb5#`y1XUmTa5Z?|-VI;Sv|MS*Q~sjac#eh+w#U*(pysbjSFRnfE#D#z5_Ns-pku z;_6Dt#|g@RiUc)pVo0vuMFXuDJ(4N9Tkgg3 zM`s+7*<)z3c1-66-ncDJ88lG5BhYqwo?b|Ut7u8IHB+vf!jUkKk^#XX%{)JMBx>mS zvSedODwUD5RagW~(6?_AVDoLkk$FTIVry&5sTT(>+66eH7Lf2gFfi~x65C1{bwsIU z=E_D59z=I;N2i{Cp6=q+(9FA&G2m8mclGS7$Y#vg&?ywBUGY+>+JjF$n|ks(hJjOT zT`CgMNuMyGnFq>^o7Zb9j+{S$xi>kY+gH;!R^F&RB(Ve$~)Ys01 zpHj=1zz+eTNC_ub(JyIgYFckPeS}vEWgZ%$rMcJb?qIe6^;!rK3)mB3;s6=umsw^e zEL@gkMZhjvAc+M`CGgj%8N;dQK*wPW83U8%aXp5H-!?9)*Um{VxjX&YDm3=S=vy4{ z+jcP0OmDNOsOEPJp1<}^Alp%YKU*fZpcd{uL38I9yMG>e;mO&X@X98pA@QO5aZ0*RvtO zMJ&%ISXXs6_Iab}EBRv12rHjW8G5#?Xs;7DqAhaevw|T)^H7zfB&0iQL|Zy8g8?31 zucTHrQx{6g>Jf4Q&m%HD7Pe7?vk)koRqK6TjOGDQuR#n+rP~e_Kx|;s01HMq=O1mi zZwN=L8Iq)BW}aVNFe&c%@dJR!}3pvmCa?bXFiEu+{DboqZweFr?%`~SZ} zvXz-#gd{6_7D;xpbBv6P?7d|dAxZWQMabTSBxGf8vd3}kJ^ruLz4!b3-^cgyxcBj` z?ma%|{dvE}bJ<*z$MvB&d6n(R$43e5d|nZeQ01JwswucJ(JJ0WP}MS$;X(B#^7Y^7 zfkO%$Yp`xl&qceOJ$HB4A=?kK2G9^p2iC`{0+{9C7pC;QRmV%yF48n8lv$K;=IR3289&>1T?3IAKnkZ zsoTT2>K_4_=KmHz0Ay1hgijq1V$|>E+YSJFnD^J$Mn&NH{N2=BS}f(Y!!`2rHrDwZ zeKNSAqV||IHM@d>cNG=a$V5!qUUqEY8JE*4Sr2eMBHS6v|rA@*%zT7qJJGQ)6o{d~9K zOv~pCgq~w5kC+eg8jfy*g!Al`^VH%yo&wqSUS|)bj`|B#*S4s(Jrqg3)7B$dqAOm07P7A;#Li^pnHt41$oh2N6g-E8%!uE{@r1E zT#@niGdPSfXk2#MLfZP)%v2le6*z>ms(ylUY z_%m8IFMj3zLnTTgBvqvAag=ilJF9wI8@9yd-Hplu(rCm@>a_QPT9<+a7kGA4;N=2GQbh zZ#1@o7A!NIG&0y7ZkeK)BMgVN&r9O{{4M}E@DdC|K|}&#W!TMB9iEWU!(Ie-JZP5( zU>u1gM?jSW$Pl#a&d0T0fXb#X=-GiX35vQJLX~`T|9S(d&WE<=pfmwT0JYyYRry4? zV~kw|9Cdu?$6w#9alKa%D*upkroK1)NiOeKy013omGwOSLpj%D6<}m7vt>Y&-?1{U zjN9BbINO?zXQKz6$<~$wJpZ7rAS#^?H0#PiAeY*&-xmjaFfav_)cwCESD9=V8*rM2 zqr3)mn zdj>UU?jF0EUfwS>RB}G)2smG|DcIRE>boHASb|R$X758^8v#qE=(hg4%-#FI{fCF~ zVjYA+*C5(S`-{uw>=_IxKwbrXoAsxHO#sya8i#KP%{KvN38a9OSLF0rfZ2_TO^bp! 
z=%UaR$W|Ah%wDuf23Rcsi>J7}{0F!*fJIc!<6+51YC=<6hbJkK_CRTX-;iBWLK@Kc zwa%bUl8d1q9!)1o(mC=o4)z)=6vto3x4Cf_|rsQ23-<|SC)q4 zBfK#kq7%d>moLb-trW^7M#Uv9HBR;hK2)3DL)yz{ojO`Slx=*D87~|HkF)buD?!UgYSvQQ2epg)Gdg$mIqk- zK?@h<))z)Cl=r6xG*;OFa8hiI)o!~4p)rP_CO{B$=5Kh^Ik^l*n1HYl-ThiHI>4g= zLKa#R)A<}+C>G^zL=8vBSe1P-h5>>E^wq;21L`nzg9isL*GD2brn}=9{Gi}rfJrNL zcxm}M1FxrL|J6WWjQ0(}fGI$m%ThyE(x&J~;GRiZs>GJtu5z)1IaJhjdzl|#7W^l; zW^Qr69d--lWcO9XaJ5c<>(F%!h3f;RO)r7 zR)acXD;OjumRn27Q!}GXt2rLNlBuZR;*`p&r;l8$^cJl=HmBlGb~5MU5vr8h)fbKl zWa%#3y{=zgt}zktgm&xp30OC~gwO>vej9zdyMA7h3&Ex^jwKueIe^JmACh+9I|r$O z%`4?iO9T9x8eVdlXJ9Y_gom?f;xhn7uhefRqmqU_{`zZk!&L-*H3%F)e1*RFkB&T= zWkQn8YHGG9DJglt%{4rnM@Z<^giYbdspp^s*NdvunaZ-~0ZEky{A};dO2`ys1f*zy zv1z4w8{?K9(VGbj#Mn+r`KnE^C&)%V4a=Xhxn{NV`+G|XCXv0-N911ui>+4=_hl2x zDx^*KmhuYsevep4Mre6(K$Au7BV?1MFtxfnxbIj&1d$79r!TIC6L3PsIZ?j(FL0Xk zsmO9=*5Z-9LQ@+X3=eS0i;H){FF*(@ z015WXzi&b_x%XhL-{FHKmn%MmyUhNgC6e_PP2To-<%@AoPW7^NAUCzr7)pUWrnKtX z+xSi zX|17JhbatJ^iFVc5p%z60RMnZ`xvqSaI1;)lhkw4ms2O5Q(v~_|6NU{E0XO7Sr^uB zf!}{38gw8e<#_`BC>w5K{FzrHg8mYmPdAEfF+)Bhu%d3hwy7Z$a1CYc=y+^_r^2#5 zUVClWQAp?a%w4aP4)ObZM=L?UIOHVW5N*X})6}UW73#6#ZxsaDJn2W+=!VV)VIc3RdJ%5I_!OsPRxY3H><|z-hN70RqjYuSCEIxpYuwcUg z7H&?;UA^T=23p#=KP_QkhXu1Q+Skg#;oUvP9=oZIF1T6&+5?&-*uz)Z&t7UYB*`0q zupUb2O25t6iP`z>hW7_PO>IhDu%>n(KXeLQS6PYa=*U0p2t-evo4`l~#E z7Qz-9*i+!%ol36(rwt5fgTol)v!0;W!#Vl+;gkkmMUx%>?Kpuv5TpmEYzh)CKrVIC zCwZy{rFI3bNQmrXoyzw;bP(pI>iNmewYTmRRj%C`SE4h_{(vIvYia3r|NfLg*5hH_ zM*`-fmFAUiTVs7;dQ!dP&nqX>md$|EZBN*>Q#e(KTPsyGdlET1Y=8^$rF=}R7MRA z_zBTQJ7Ay*R;3EO@!#hi&~FF;o12GTOS?xQ60PnBwL8bB{9G*mh}mJH+J9At8ML8! zUow!RqY}|MhV8v`hdKP|la`B;dt9p3i!YwT*OX_HNk;onhd7}4{Kf4nLq%Q_GAglt z8TXsf#u@u-X58Oe!qNt$)S~^M97w43asrA^MGzdkraW~g>Ji$N7B;CsUO(<~t(O)_ zUKj!^0qS-C;D0t#b}n+cpX@Hzz{x@j*C06g04zhs>?-05^YbB46;0zC8LBV7cz*1Q zIX_Z4RV^XUsLHv86@6>{VYF^z)74@EAVSz|;Z%B*B?5Xi<0 z5BWsP+1(E`C+bIWkJFz%@V}IhvM}UXAF5jp?OfNlZ{JeV-!I;W4+iCxKMV5CT^1Q3q2}}Sqj-kf1If6li zfig(!9JE7CfhEK4>JahC84QQ+C!4Lk%T779#doSSE}uD9r8QiWlFArCJ}fw?jW)qs zFD!oQ7T}9WWR2@s6Ajc6eWUM^8B3JpIuVZo=WhOY0LLl_F)b)&*Zqi)g#RCDzRmcC)uXi$!Q`>wXZ z{&nmxq(+4_?r*i)YX>BC_f^0O1HED?Af2!h0Ls^s?Tj}WY(U&(xRoID!4ysbNFAWJ zA(L@mGR9?<1yILH1W@N0Fce06ANDP4>*|8Ui3Zed-~|IAM;T$~66!|IaBl&D7Tkye zKt%|BHn{YGMiL4>JyeEf@|_FN3|RVfudS_pF}QDV>)t^K4&}~;gcPQmIPRjQnq_GX zp7+)tAIWGo3lCpLjumRZtvvjUW56QU7sSbuy|72Va}$>n!3KY({$#%Ku0{`dKLX(j z#CG6j^@NQK`ZY8Q!W~t&53YXGu*xoGTlyF3GYe%~!9PCPvppBU@ZB@>)dWqU1SqNj zBm`C)tHHGAq3JODx|1;JPGvjgRal<1T7+z{u+9H@%!m=Ao~D@nb9Z(1n8>W>z->?< zG(YdC2k|&Q=bpp%Ri^Chl^U|Yuax_K|Mu4}EbMsOmRUT8Bv5DJq)&UyhPKd8 z=PjMS4vjwg`I5Zp6h&|6;7?6Z7!H+#HU#i_H+A&?df=OVo+YTT)AU@>cPdEOY~~ zbcdWE954Ib#Z5QRu*1{~{)R?Ie;&Cqp`S7iPKD@R_gFGvIB0XgHu4%Z^&fBtGqbZJ z0l(_>#~?oRR!R3YHJy**+|d%*$Rppt^ux${f}7LVHa*RV_IL(kA)xN^dEso^?BAuW zu#xE<$CE$FL=t&djFnWRaan+Y0}Cm`rj7aGw`Qt2%~5%8lM^I!-waZ;-6b)_?p;J1 zfx<0;1X{?mmee<6Ew!EC6X5IrWhT+*J{Rkj1oj31wZO(Qxc)IV30@4>MgBi6;b5`n zvl(W99>BqG2l2n@wTV9C6W1D(+*NQ~Y)}HzA>FAXO>|8K3o)p_Ko5yV)%tA7eI2{7 zp?w#zVRNyjdUqCRpxdbCXPKm?Pr!xl81DdhR1;d?eAkLv0g242si6VG zvKAwYpEeWC&|Jb;1r#4A5#TlCRA%VCzbLOOCofsh^7t z$`jFAibi}R{c?LIw@6e0U*O~dQ?vZ5q{8Ia@zA?ePV@|Eo%t$+1#$Tl?dfUcqazi-=osaN^U_VTJJrW7>fMvG*~25tmzB)Yx>Z@dXeS9avn#7k zybZ@SDn`7>^I^rgsku~1rKpKgE%7XJ@j`cs!)vyrw=_{JtzthK=ufPlhIc~DE@+L= zh}F@mxG7W(VPW^BWXivitx2Zj7p?@|;MoXdhDWWWUGy@lKTUW)_AMiQN31AhRNpe= zCM#EvPC@Rb4oSiC)Av^&u0P9LV3 z6BJi4cMkr{i#&NBBPDsG^ner&Q&pAxeFIeH^cjn9(SbF^J4wk6A(um5t)}x2S%`NB z3wRGQJj-z~S=J|AdM$?2^mrY`bKZ8Y)3w`_G#W?7;LIg-iukNnaQ60_h^0_^D8ix{ zv^Bum6fn8&DcFE^RS|Yl?#yJ1?b!USzy-~@+iHGA&+6{3{=bMB9@JX*3J 
zt*WY5CF-wuD#y%7g;hw2haE&LiI`s*NbwG#9jyCxuu&1`R*0)HavtLLP7GyBuwgVL zb{-cuRzdf2jnB5Aq1krVCmcpo@VA9y3EIwCuU%H~x=lR1 z?W>_tfx7tVq+zSjVZ1&0VlfA*<`vq7P0!r?`}J~G@ZYRk=t07vttQ{uetoTJTk;`v z^$z?ft|f)1>Wh>Ie&^Jy@t-jxnVuPPoGBRz$GCCg^)96#H{()Xi*^p@CziH`j`M#l zOta-{%2Js3ujf$kLcVA=D@cPZH1ITlS2%qpbabF$R{;^?Lz#a2v;iVyaM4u077Qir@&{OLAa#BcL}(E zfV07L2+X86KT0$zudaao0P@z6R_G=UInpmd=G-%EH>2;&X@1*X;&3h+lc@qm1{JZ* z2YU5p&!6xJpw1e&Y2<~HiI{WAp3|kyKUHoOrAfWYTf&@asSz@!Zq<;dH@gY! z80QM`yb(!$Zf^c*QUU$Uwg4Lnhh`B?S*NF-W&O7o++{pFq9p1Ew00333(=znFgews zh*rx37n39nc7E^ow1t!By}ZADyuAf95l^+q65P-hFsi;Ax zZMR6w{99Vio)=6^q@$yQ zo~CDVQW^S8iBwxp$-*H(A@P-SNb?3lVRXel6#?t}Tw&=<=p;C?PhsOQI?>SdT$7k{ ziDoojRlJs_TIN0>gLaRtLZqRJSgXht`J#;!f1h+l$5l1rcPwFA@^2N{zL4+LOEN=4 zIDBhKax)P1i+l&Nmmpa@tmXGx%x9>W?nR&sq}B0uRyLk&#WtA0>YB@I20tV1Umw9F zos5f&{X3ow`<@sl73X_XE4A;Apd*7HZg6OyI7{3m_3)R=!0hWi3{B}nDgS1Z?-so8 zXNf}READW9SUtfIv^kJt&ZS&W<*oHMb35m54t@LHSj_Ep&MzF-Y_oS2?yLu@?ryd! z#2~JRq_+?^zg@F`nK&ovWI?b;2cM3pE{!TSv^7hAzk28O1b@aPN!ZXEa=T5tl#F<7 z-POs3Z8?W>eb5la_eqmN{hnAStEVZ8si#92KVwgh_b<@8GfZyW{>U7oeO)tUxL!te zx~ucKgL@E*0w5VF*JOe{y99+Ub7~!)PfWPz`&1ok>o3|v#)(OZ4t%>byp{|4FwYMo z_r!UO!~agk(9-$+*V1uZmGMr`zO7ZTu~yRUfE?wA>kuZBYG9nf4PoVijx46jrbiLBFLw8CD#RBJUR&kgOKesX>pqP&awSJl>|* z>_gXVCh)F?X>^A&^bqt3X2=Y(X;iS@3{tsBpHi_yTgk!S@FVfIBv!jC7of?!l9C2@ zUH)*qu(8QTQ_dx-5D}2ZX6zo$85;(o4w$(>^dT#e!gzU?952Z7hf@`T6QT`1=R-&y z)Gr+!ooAm+$-~l*;5Cq$YLsl5eSkmTIOA@5zR&#mV7eF zU%#9R?4)l>xK&(BonzNc;vAVHhDm>~aOfWm6usSMr801(z{(2>Bfpi_QJtoi7G!+d z4C`>mOgZ0fIR4L$KnE1wP3bAVXYOD&;RZ&d#7IRi^+PWS$Uj=vlL;r?Udg-XPO`30 zCrWo9UXXuL)tpp4=E-N;X3R7;c2u2Kovwk@ADND@;o}=PDGuQ`qL$QlsE9Z%zXVOq z+L++I1WP=aL!nVvj_*fc=EP!P#nu{x|>CDL{{Pz59&(=r5AXGTCcM39 za`b!>E|?p+i2-tyxBq0t2(BrElRuQ;gG?$0gVe5m#J{w+erEL30+I&$T&a=;NgicN z=I%SJ0P5QVyjfDYU4P;7%nh<|8DngfEh>Hg*%=qLoVnhpXJ=u`Fc~n`gVop@ z0%zm)v*Lv_SAJMu(mne7R;u<3VlIGPb`S zC%q<(aW>?$mYgyV45RFz%)aoCj>%n&>wwDI$_kw;y}W!y_-GA5YiVI|-&f_$lgv*h z%VC`VIYWz)$av4xdjYPT13wA6hqN3%Ac1@#mgwtOg)3)Ihieavs(72~CXSTm&mY`) z%$||UEvV%ajn<8H?9*F}8ISUKQ4(?92)P-we2*(qM+|2@TE5%9^ZYIbtTur`4m34d z_fJ-Jf0tAkaGdz}@v$y``J&!H`se<&@Q3gh2HHL-F`qt>`Et_;q@`N=aP@%IrLD}T zpo3w&r7{0V$9lG1$|8aRaNYT#;;uKJ4>nPZw9L!3dfpusOCxj(^m-_K+tPe@;mX9`D=eUDc#&8NlYAI@_)1BUVg}{X|RjDss6$7 zs#1&DS4$#J2ZX_~nLx?BqaZ*3%&rIA-=YwZY0P^|b z;^IEVciw>~R#aN5em4U8gjMy>?-1(+HS-^fbaMyCw-Rn~-tsXA%1>qGCUlPLFddZQ zDMp)AuFGA$rhfHZx)?3x&qY%k@0Ew?uUaTrVtOkvt;3aVe>!lCod{0sH{Xre`|Ii$LwpeSts>Ccv+=^VCMH4GqN7T=JZ{UtN zgm`7?GE67CejpMvNu4t`{2ufrU;llm93JVaa!M%?y&FN+`#(AO=P!vgCq2l2%yK8O7XYlLZrbbl2tL!YeAOz(_(OeC%%*40$C9lzSW$9k0zw z@^VALE6~R$1?CyRZQj1;c;MqT?I@qyv|p^m98))C;6nT}W}Bi~iYq;a|I@w4A9@}* zK4wc}vs=tgv{G(*POQoi^Gea`#x=#Sd2AAVSY7$=>B3|nDo5gtre`_c_ZV95I{m4y zbjt%k@IY5OAzJj21Aq#R==BT?v`{u2wET~6MzS6T>$rWT!l7*lU+`s2?A%)PJzLz> zSJne9%U>en#vGHGiMywHK}XX3yTWlEi^0{6rxj1HOJBctlVa?Mi(8zIg-TIV{Ntr~ z*6?QZHiM54qj!S}85|Ho%_(EcTXu;1&86Olj~Xl$w!bYVIRlUgi+q#AY1^>Bi`o0^ z&UW8h0xZE+%-sCdKOy5q1oPxrGY516Y-b-!MgX3C_AFhD#MO-oY}F@84sqFV&c!_L z-YLmmt0T35{e0QSEYDd}g7-{Q0ynYC(n9jX_DNm!z8i^H?U6jXd9m+$sRj1YOBBbo)6<@NaNu>(=cCb*ckM>~QVlb>9|M9HGPbs}8nPd^W6tAC zz|DqMOuuM6F8%+KH{l{H6lUALufstB_ZqAqU~8euMizk__GhG>fN-(b5B_{ZqQ#oX zJyKC$Amjjm1%_WhAzhIFq2zIuR}Du3)#ntd+kSSvEapO0E_-kJrdWBtx5K9g%-Bk7 z`0T&B1+MJqeiaDnUBUapCDRjxH|^k8?q#N?fWNb|Mth8JK$TWD;9^yHS_!tTfXFTZ z?736$(bU)#9{kO#NvXf*r76p`ZuoI=CkZi3SRk6{Q6T%7tJ3! 
z8F#%jDy=d_DpT;=AT~iwo!3Ow?`c^1Sigy&7qhC5Vzo5W|5}e~|_J0J(_*|4PUvZpmxj3aTr@C{@^-J^89CfC6 zY1WVOLgYi{Ag`i8mUpaI1-RXZf+pjdOyu6aGQ;JPDVod5*WtR#LW-!>Ru08nLOMej z6RWHJ?6X^;O+Yt=eB{W*H9fmelsvSQfav>fQu^Kpg$ZDX4f_ALs0CS@_l(~N(#2<6 z&^J0foDfZ{8iEagW3IGgss|n|(O+SI`{G^8vV(;3V6q>xZ@Z&98r$U$mGJRZH8qLo zS43V#uHVOh>b*yHFz{i4NuJPiQ;|ej4H7>n!q8hO_R&WPk2AGh}1);i6ogt@wRKHj%`A_J911oQ__rCk6Gdg%_pf8b> zI8@%fyP_VlV9B@9ej5X^XKg4L6IQDi!NT`Wy-R60;eAalLCj9#(~+a6e28Ax-y&Z! zpAtmkVX6MKeKR8vzh_+-r@(oa(#!a`h@%4lVpwm3=QHu1NOv4MzS6N+P(T3I6C!(# ziGQW3fE>b$OBzP!<@b;z@L22C(ArN2jHeBkeqgV=jubHsvCtD@5}kA)8AxA zFE>JS4NF2Es|RHrl4QU4*BtcgdyTfxQa3s!yhK>oAmQI=w|D8y(WwuBJus5;holK6 z{EvYMx!xyjsVBH_zfBz`PyNrf18c_bb%R}C2wK7?SieaKQ_bNFv9C6?DV?8*I+3dZ zllpwc?O}C!7*wUyqiebortuui5NP%IGf&}{?>dM#^lm?05|*tijGQ&NMsseG`;N<3 z?D4YWwEjAAIy@%ZP8^GLKKI>Q7nLZksoy_eRYcvT-qm+48`X2Ap;c=1`!+mXyuP=m=WNmc@NR;>^6}YO zoNBy=j)hjuJC}y;R_Vxzfp}q&yYC+6?H?xX5833@)Fi^NOGo!mmxBUIDJ5kVqUGQ~ z2+n=t*=GLVP&cRw+(JU&a5vJcCHd(0;m1`Gn76>}5DY$rybickBu{sw+$4pBeu9}_ zb<=SSfLluWjyy`yu;PT@WB%fWu71T`+x6?*TKb=kKRdmdttQp{G;!f^8J1FCA*10Y zXDwIyWxS@qc#aSQ%k;-zr-{2Vv*%l#cGPReGcL>*{KP#Mf+OB8iTJDlI z=-vc%0K9Lmt`!I>SKKNgn>I9O5hf=0MbRlaA==56<|t;!5SM%RkuW;F-sy+LPC7B9 z3m(8K%S#mp_DSK3w!Pz84b=7qc&k$%g7uiZ0ht&sC92z;+iGz#wRxaVoZOrjbevkFY<*@c8Q|K|L-~8y&>(r3=vjk_8oUB?6&6Q z=)u4yUn1!ac=dHnWmIWs*zFFm;nf!B^>15MJhg~V9UO9wOKj$QX8viXpskHSS~@SJ z^Mcq8cJAk8yP_wAk-?0)y10u4-;H#|zlo4Y%3H4AYdMB{7ycV?wWRhkuerXssJjbf4oi5}%{8{hIDW2#F4nn61crVYXlbCM&W0oWWVG3#FG>+_(#N?cfZtAPB_$18(u zNmnyPzCxM5H*G+@*7hM!S{lz*M(nU6Nd=c*bBq~him3}rc*SLbXMQV|)YNI2#+&uW z`$P8gF-m4zU%PMV4K1TkIra4^xw*OUfUHe@?8jk73v;nYygWuVq(XI>+$*op>t_}G z_lzbh1t0y^IsDEl;RnxsJN5h|TlwBP-;5nqNVB~UMQ7c*$i|_0o_yaGYq29Iad1i+pHQ5I)77vAY2RYGDC70*eG%agUuOy?DbSZL~lhvTosx1$c&Q1tq%NWI}am#0sXdcJREG zjijHiT1=V!=7(9k^IHMFo4Y(b_d(>Uji?GPyoMBEqP8pdtYWb~5p6+pN!~$abkA~iROV_Q-&-K4@3n0S#G`=I1qB|(! zsS=x?zbs6}vTs=Ml@IUwZgoR~*4^RtY*G$a8X=9`>$>gBA5<10f}*WHRZ{V8U^M1kb8d zLl@E7t(b4O`i|1K`afY9W%(R8Iw&;nIDLp_HO^VL@WZ4zw(jiTW!H|GpZIz46+27$ z61kgLo-1qWuTCD`O=Rf)tx)u!n~)8`Z}nX`RpMZK0RB{iwc*(of1hzMQbR*yZLES= zMis8iuSuh^F3f#=KutkQ(nUnti;P<-nvQ`278T8^a`p8Cn54&k<8i^hAKZKk3L++I zTuXevl%<+GA(z4@sg3`unh% z<;)bX@Z!Gf0>paOxzLji=!57W(aMQ2+s4{$Fz)dOr2)WWgDI8c1m}CUB(FE7dzB-U zLwc2>F*BphAG*J*FS;P6&sp=Oe^gCvcIFh-U^QT{fbB0U^ef7YgE^z*ea4A=XLd~R z-mF-vCqYfsR@U*e#DIO&A^DejZ*ml1wzQ%yZ?dylR#r6izuV`)#AW8>gv?_A$DwL= z&#uAoakL#B^uaL0&-uUoU2dGf0@i1sl9fnh1+UVMdB}4bTQPaxV$_Iq0ogspB-}OG zoGiJy?C=L+;}1$z6@~@OrCV`FW+WXdWM>@L=07VE)E+nLmsr)Pe(mG4XVRbuO0svD zIla=tND&ZQPx+1PlEw^K=nbls)a$8gn-q-(rG_Z0`SuscU+3BtBOJBI6UUzm_mgMt z+O5M($OCmDMcflyQ+2(C#zNsO6?O;&;%S)3&n^?*+6Df?`9}Tclpt$+d1< z5d<3AeJ#EBXU!^Nw$)5)BH6zwt5mHow)vT!d(;mwj-**mZ!-w1k|$I_Xu12Gj2X_c zPof;EhRa@1LWq%@Sp8;eO*k|Nj}01*ni?^nFMuA$_rpN@LvI~4e!S4&8shOcM1#Ecj$4sQvcv`)H#tM6|@sZWl|;_$`Uz7cNNM+Kdf*c7itsiT$tPlPO-kW)M~^7l(CVr z7qJ98S|qO@fZbQlX_a&OF%&^5(EBwroP8zOx%p2oL8qd;qL6qay3MXnNl=x8;;>D{ z$3gYH7}7R{T-K;y^ap0A<8kBJal=dt<)MWyes{!>@%x@;Xgzi^A}&q}}7;ftZ{#WN47+nC*|DjR?~*h^+(p z5Ab+NFMk=IX&{ZuZi9jtpQ!77#fUBcR!@~5AwZ(-bf!ORRaz7f6lKS z)!fJW(pwN=FPRgNjm6_6O+IJ{Mr4*#X zztd7tOG_IjkVGyDdpPY{c)#>6ebT4%ZT_=SR_v=vqpPW(jbm#lJBuA`YgrWOlYM9% zSYxg6F9iUs!o$sNw?4!m6apqNHLj>1i@Khc&Om-TK~y#V>6iF6(;Nc6v<{XABDdj! 
zfSU`}_N*!?b1jqxb8vda_QxI3cMa^r!0r}4u`b0@cgQ|)!2wY!_T0DOY&dZ!sM~vr z{<{lmQVH-|_(h;wY*Q@ZUk5(W%*G7bNN!hUvAd7-t(((Gjbm`v>E`8X?#4P#oiyTa zCbuVy7${n#dgJun_W3Tg5@EF|`*+YZ&-{u~zJ~8tFN+;Ql&qztg>@t@{!D)}ba0p9 z*c}XNp~QcztJAx5bemf#8u|=s8XB&ZdkoXyQFa4<6o`m3i1Dlcr$Rd2e;a-DJ6lyF zMzD5<5CjnP9!u$A)fF0-8=)cr>;)>L)G^|{KVchF=bdVqgeO*CuQl_zTUv!?*t{=4 z_IhlfqC#9+dXIZ)u1mB~+wo~bsvOaSoOrjci07P=xFlp0qvA5k>b^Kky5Ec24;x(v z_J99A%QMuWX94mfdS14*MMV(N06_>^H85r6w*`fw0)(=-i?GAU@g_X{S;+`Y5Tg59 z;g3W|Pv%a4cO834L_VX^^)@F5MR0MKCj=6$4{?vA!Eu9KS+)gKe0grF@nNFCmrD)O zJWn=l)GnVHyS%aI$BB5jvBO!DCqVODYb6G2O+S$PkwrISj~tWRpRbXx7uSS1aBKZ; zmHLOzoRMYU`il2S?iQc3`;xcDY}1*grC9BvIC$rDdD%}pUQg|IJUQ6JH%k8CXliBk z#!5f|eiWHHp>_BSwh+V#fZf9J)OmevsM0_ENHwEX;Z)B@y3QH9jn^t({^((S0UA9q zG@Y`1Afy!CmSkoIKvxRUL|xWt9?Mx9f5H&Yer;}nYn6_G8ld?jfE}RPElD62>Fu0D%(@#&PDvaHHS2D`LPCkiC%)6XViRtQV zZb)C1md4!O-8FM*y1qSL3}ctPSD!W^E?-&V4WIV400YxU2HTqls_n}ZeWBQAdRxw%PV?(arNpI%!k zgsYevw)T)4Aw#!>dD3EVf?Zv(ji#fE?wwQr+X_0d3xDRaW^d+p18I*uSX)kk6&2uL zBP-IQPe5S#ebydYlP)Cl@`2FFaf72>aWDg&Z7Z37&O_E)zf@L5>3V)kdDcjou^yZf ztyizge;FKzv`D~;TsXew`z5qi;6e_^o7Jm$bQEzfjxXF@zD!i)@bx^wP+ZH&=Q4t> zqa61-2n)eHqHk=h28mfy24P+iw8F*Dv0p>C|4xBZ7!`X4fFJZ-uoy;b`_$ANdqc!1 zMU?t92~a~Ko2BHC|6RN>iF83#4qd#C^$(T<2F%cw3DMqeU`hEv5wctu@aY6m_g-n^ zaYpTQ{yy(lEe$I{JQu0Vq26WvwYqL|uJkazkZh#bsTUj?rKD1 zpeF-sTThzfJ>=}z)LHq7$G=lIClzmhtiXhd?~$6sR#c=NnEGv@fmdj1bXRHzH$e9g zCmXR@X10H&qdHE}RES=$_|*$*!ogg|A$@!z9B6cs5%w~(E612bcK(^t>?N!mpPoC7 z?0NLbt2w_hL+sD8v)vxt5W1teZdps_x*hVZSZwa96vU3fH?sy3gs}m0s|V1H9&FCO znEWyCP~>}lc9;013u?NFlaspU7UG_hoWQG2@wWa?tB7Pj#?9)M7{^_<6NstaiRE_2k4W!Wa$LW!Dqol${PW$QzTpjv z`E=PAjC$V}shgg2@shW_eG~tZJnUt5Qrkdm!0PXo)B1I1J%r219$uhQ-`w}tq(GJd zMuzFV8a$nvlBEEK6Gq@vVF54r{1@=AxAG-u|LgJiM9(WMEXJz9wYUmIjqY<%vTid$ zl$9r29Ti7!%YV7&&s1$byItej?1wqK zHU+)|^+ylZd`9CeB_DZYP&3ul2D3ir+5BO-gscoSK5}&1ou4?6>6^O#cIF4%45-S59*LkBfSNH3yHWSlheVeGVA`=we+T5f zQ1B?YBizVn{ia}*oOFNt0=&NH%X`hmQ^s8Kd>{XMt)vEoZNr2Ba5?m=&d$zezE5~h ze-+=8%zq(9lFEjCL_t}}{=zj7<6+In{BRw~5++%=MX@`6q{Ou!9B~J z&+)b2|0b7Czla!2`aa-#*yPPs^TlTj`$+0C%(aw0&)51>Nf)1rB!4IqHC2%~WJW*i zAp_sK|MhqH5lQ{AU9T5YeN$W;jwcZIE#zL?iBq*pzC~iT{9-EN*pkQB&iA~yqCQ40 zJAKH!yV-(l#E`eXGlamTCmwe*7S%GS+at%4y|x&n{l3jCfGivrD`sNs`m^+wEQi{4 zi5;tpA=>V}V`DGIEX)rXg>*H?3s#SsGzj5E7Cm}_hSJ*F+W2iri9#xzLczuy#zClr z{R0CqZrRRqDWjih?Os{oJYg?<%B{rfB;f#9B>K-xZsRK5ti?f2*Zt?X_F?5VQ}nW zG6bq8t%%brC=+)LhSf&j9&ViVWBAs<7csbic%$V?72F622i)7h0)?6y&^kMkl#EaM z=Un3olqGW;OP(Irat{2RuwIhn!`mCx>_f|s!>J<0=gkBxTGPMV3!C+-2W&=>UG~d> ztqA8f^Ji;kF03QJm(hX^Rj#^qmo8tYqM>0*R_?fW&IjG7K%x>SJ;7huEo=j7mz8Ya z{a;)64%#~5zNxA1#Ho|#pqD;?alIm6%0K^YH}uLoY%r!QmIs^Jk<=gS(F=`twwb>& zXK9^TzByAohON|zu*b#u^3sW_smW;yF^a*!tbS!A2(qz z&Q-E)5yO@Yp%LUy2U9j|cv(3qU21}0iE-7^vb_5#4eUHxTY+q|Hd#lL(^m!Um~~d4 z1=_!Rb+vi^>0xM9SWv-gEx@7Ziwf*B-h$jSU*g}BEjQ}qFfJ!z21_4whtcj@zfDK) zHhp38Xt2U&_qz41iZLa?xnR!eh<5;@6=MYVR%>oMZ_ z0s?#rDlew^h&0W|3Qju}oH_I;#$6iX8(HTonjMIHn4S^LnsfOH3tyLBXP_@0e{8d# z=ZKHbhtT4VQ!-8!E+NhJ7XX^V&yh&VstgXwK&uhuMU+3yhej_%I)n&tN3FUESn3gl z1i+xZ?dH_csY1_s6&DT_?ET?6li~%r%x(MD$7h4iLt8VkWN^{vHViA|&7RjjaIyv( z+So)qVg`%$bXXZ0a#tQs5nGZyiPWlnQTcYhxBp#Bo46-Hyud_kPV9`mOUatf{~J;lFH!4i#h5hWxf&Tjen1P=uN4n09%& zaY;tLNF?6Q$EW|7W5xe8mN#~w+m)4k#%mpRx`U@NlDqt?~HGh z7O8-UH7rrD4^~$ECM}uzOj&LQKjN86>uY7x_~aCWIYi@Xv3-f}rHXDGEq&b3rrn_m zhY9C=vmLKwe5uQDGB({1myU9H8eV?2NZ8rgLF`OT!Nxt&wmu#x3g~UrJnOH2RwT^J zh$!^+GcY{-Gud^rZR&JlYE#g6ThMDoEp-&#aKm*A>}>S@zQqVIqohP)bq#DSy_VY! 
z-S&_`7Cxl(ZJAL+U17`_^Qt2Op@y0sVYh^68@U$WAD6=VpQaeZ6CXVAJdG43)WS8~ zZdKrEtH&HA(K)4l%Jmk;0~- zE{>5tvZFoO84NORCUycHn6+Wij|=or-;H=A&Nd@E?iu_$ji$9dHV|3-+nS%B?r#qi z`oxUc)F3k&$UJ=HeL>|_I21NkSDXD@4dF^R$s}8uYns3+OTBy<$IZf_t<>kn{XvwO zW&3^O%*MOjiW~S1U-uTD6L-GRXn7L!7J@Q$ypJ-VFd`%%fb&xI zm7>`+R__OFW+p{@3rP4xQ{D`96FABqmCpd)Wb6&t=YU*5W9`&Z1W-4cyu`$=3 zIak8y#Lvx~x$x>>apzI-$%c1B^F?zT4Y4O2?3 z4M&`URS6`$!xl1cG8-_-wP7AWAVNCJ50bL_mP3PtAwKQ1y5-0zJiV=-OtrIgsNsI& z3T+N}#~f{RzBCRB9CHcQiDbI+%uU~+xE_UX0q0c!PkQrdA zKh& z-pq~Onbp_(Y*X4y`UR4lOrlQdgP%dBiI1lK%=Le3&4$avt4v5Y!3if)!ErlG$@9bR zL)7`1t^R)XkTsO}D!7UPq6Iza;`&NZy#y^+-FIiTXtD7rc{8D(_@oLH2EP{Ax=dd` zs4Xpv7u!`2)T+yM?9?k)>-cIMyjDO~lzIP4Q^<9SfKP93{CrQ@nM@?pc8$mDq2Y)R zy9q>u5w}u&HZkT)_*1lo>S40LwRP>pSRS3$b4^qOPIGQ!7X?yW=Vm zxxk82L88BlDzJ!xmH=zg+~bv@O3;j3f>`j7s(ynYAC16t}1 z3itDE-!m1Ps(FgD4dU~+G&Yqp8L3TbWI+W5oX~dqPoY?8iBc1ZcUH6q ziWR=ZtVpgWe7r)R*M_&Zb+V8z1%~&}JVlaONVI)2OzLCL5%3`)(wUnI4Cm3CwRGu_%Ry40V*v-_u8zfr zeWj+}`&9@cy&)Cf#qFQVPMmq25EBCcow3xB5n~`D{IR=trFr9-sR&@2&hOmO1E@(H;a<)Zj_F1uKnBRJ73-=3bngP}0;Q^yB(Y@h2gC+Io<5!tlb;YzWBW!V)Qc~wXYK_gP*+^4GB zqf&Q!rQSPd-4962{{DWEme#cF>?Y@v`;!DT3uof|aH~WIu_!GknXDGQekq>4x=l*W zOE6cGzoljysq0$IT$15tD@mQvV43-nJDsdF8{=DG62CKMf}igGp`g8m6K2 z!s5}IO(B@^X*f*Tt?c(l0(b(-9598!7yvnPw&u;q&i;{=Wh(Erfo%Q6%s}+48%!yA zTs_0@r%MnDX5#4Q2rh6KAe&x4uLY@#OCtW}s~|a5ibdS`Gra|42~$Vw63-HabiH-b z;$$|qgF_ufrn(U6xQ`9b`$=dhjh+W{@^FdYRZ=MrX8)3!{P0azF-^<%$8F2J>R(?k z&X^xumIV@cf!lp>tyqKDAs(z7@GC=qY7v|LqjVfB!eTIvI5}<;mNwfsRQ;p+@001h zPq8ksGCbul({lq~u>R@MWRpeN)Jw=2{Bxhcdp9%$w4fKDU{_>LmtQ(NhfXzd>hsl$ zmb8hnl#!o*iMscA{zt{{UI}(7+a7MI^xSu264*qZyq6KwYw!DhQ6`k@&5u%lSjk&V zT6tu%%%XL#c@Sgz#ctRlPTBAHFHYCbmE#VRVMY(dJs)Vx$WfNP{dRdJ%qm5IPUnwD zb2qXp*BKTsJfg-!2i17;140T35fLA?gP(yeY{3)#b`m5Jrl+;8fqx84XDb`J3dlG8K2v3dr8Vn0;Ex-LNT-=ygzhRUaB8Ab8Us5nyfu0V}I{3vb z^32kE(%kU%u7g3uT0qOFPI}cV+cV;l*Ce60yr!m#kA^_(oX{^kJ&|^lLckz1RAbeS zMiOCQZT$s$aaNby8A~K8q@p$q5(WEc6bOihAQG_*Tm7uQi@wKrL zGKJMjY0$=@JfE7Xxp!k2TKNWV^tXz3!xKPZgqd_kRu+G#xc#9I0gS@oO$%sS0FN-# zUEsyO2AY)FQ+)LX%d-42C-$bctqu+xcuvSlG_nhrg_*gzEem_*q0G*KL>vb_z8*p# z$5d=iexqWXt;-_byirl;Jbn;~Sk22ElM=)W6-4p79udRkYaO>a_SP?fqJ`@X(bbbG z>9)K_=OU?QYO^`NMOVYw7Y--b831}dNZ_TwB*sE|FIo}+MwywJ$?54vF7d0J+%PkrOZ2zxkVkQQ zgS`-O<3f+?g`G1`@MY&S;aK0c+stUE(seHx1pOnV9v#jfizMZ%pytLC@tX7F$~)zh zI#ZvWIZjL*ng>18R#cQu5De&A8vm-SW)bcv*gH8Hw>h9a-+)fLxR#!{3>`(u|Cd#R zkg{rNGk_ti+z(>mRain7K5h`JGv2O-pM=I9da{)?bzVZg|)Fp@Y zz3+lXNU?kaCot)lPWOBXNUon&;AQ0~ZT#w_hP6Tq(l<3XeF%V|31)H?bMNs>zc;BE6JmkO?OY%x`wYDE(F zQ~6h^^=pL2tcb;j9OtV#$~-#`?btt9`QT!YC)MF*2{e`Y+vNS*I-u>q`VBugq%^Yq^$Nz8~UxbQVu7}P@B z)d$kZw0Q!%$4S@uY5vtI9Q>s~?gJbd_Qh%JZAUI1`J-QWr>zUr@p$qpwwz(Ak8`yZ z0z!XAnPJ=fS!Eg392McMwtuB0{^ezs?3vt)gp3A5OnvebU&;ipyRR^xTOSg}1ve4X zTpto~R>kc@MDv~rJ{Rc_ub-;mD0JqBLj|_hG&iHRh0y4a=4paUBKRr0Itww@07T3#^syNh8Y9Bn&tr11xq;}k_N#efYqU5v+kut zBTCtJAc~xaUmxrRF$VmAFvM+i0HHqn3q7)yRyvJVesl=i>G|0P-_q^KD{kH!;z#wZFk3F zq0=m9^S68tYuTR!`$$!+b06>SxZQ7*dwQQxvL2NbO!n&ZTlU271?vySL!pv0uFF1;#vJcI7$6LRx3K#--2Xov%p@y!y}< z+wR>H#VFh@1PKcUzmZ)f1;?AF&|vAQzHb=ca^&_det)1%2GMF5(tulv9ET9kAEa7cVy8?9w z0_WrT7nB~m=pOY*E-tcT!v@&^@?0Uep9W5BO`l(DUb(lM#a!WP{K=?oBlEy1N_vH~ zjEZ4xfOjB7jre|R$*5swuBOZWR8Wv6SBjm?mv|e|0beQ1q#(KW)O(+p8A@zve5M`* zOi1l7<7gFCruZ2Ze9clLGu9t1`Ert>UU31L_R*LqHm@}zgM>2gQ#pRk1@)L(wFS{+OMpN zI!<}b2wUWteMa=uVZuAl%R>FsXS7Ml&k5{W+EZ;MtE~y?xUQWy;MfJfz1C<#Qf4aa zpekVHI?CL8nw*#m3AS-qn-~q)#lxocUBePJGXBvE_p1?~=tfkFv|rL~EP2ic z-}4^jEt4}ZlPe!&`yZ3!V3!D-6gtQJu{3oH=jPMbw_cT9+u`O;Iz`Y2QM^oi8lc*K zgMm%vYg(xZF&cHuYpGwK1auZG>h_e(pKL@6qZ_@(K$5PYNr!I5{IahQTZ1Y5E@41{ z*(h7Zb{nAtS(_W|gRA+kO;i2l$j&QuX||ZEN)RsLKnAhk_qot+G9BLe(!#lGM)bk} 
zZ&;@Jl6_f_CeIVZBu?3kJ0>n}I=%tSfnvfWpDK$9EjJU0Dd^kiR*5rmVKdv7^gUYU z$1)|Z+tyTSvK38=FuNE|tsy_Jz&|KmnORvWB_(#^X!U-&)~o$=V7!efLw@Ro zg@yBO%)`s(ZM(2BoOPqxp+Y13X8A$T54K!AXu@Ec1||osd}Zlh+XV2c_ZY&J1^W~M zG6X3d=%9hx4f|cMc9fI7`O#Wl@im7&bHZ}J|4Ae`pbACL!X9~(~e-NyPxkWi6?|4JlZ@Q>_8l@Q496V zM#$_KUa_CQ6l1?S0r?g>E^u$aq^EQgrB&b=fTUIjejq)Npyh(O*aAt}hKdsWdiX-e zNt=TJtw$b6Y^FAH+T%77WIY#PPBBA9n{2c3O(sAILq>-(x={W>iS`({7obGNFinJ4WvwnyW5WO zoY5XrK3LTF(9~v`Sg5Ep#c!qK!)GCfkI%Nr9i1Ddist+Qi3g5%d^k0c_Dg_g z-GOBq+S;@*yZEW1;l}T1Y>c6(IKX8=hm?c_Z?CPf;_j^6a4nmtnY*U-mbhAr9gLbF zctIrso=*s0a#6N$;J|$=($6k>xwSAv0Mk|2Isj&Sh){zf0E_Q`WqR@#{**Q%Y+j!c zIH0>!;cJ$$;B96va;_&u5hqe{Gd97O{7~>FSNslmx>4`noB_ zYT|tU<$C_&JnSp}=P@WSJouYxK^NJD%rGE@7mxEcj{>Wi+FYTZ7NQsp;BW%}C$gp9 zMm4+J&>(zrTbO^mv*S14-44km``GY9Dz&xE(*13Ro%9Mm>v7Gr_5%*|fFqaJI45t0 z*_WF|j*4h}?GbMpgd02h`x@G3nMmKit=O5e6t=?S@gBy;t;31ntdeWsz=5m+u>atD z3p=uMdX1Q2j{qJGzfazVx*KCUcU{5OAaZJ8u^o`1*gcBf!g~Fo1pLr2yy9b{W6+i9VP$$Sq9!!jA`8dHi z){(e6XLF?`SlYroq(E}poqW)w;CE6ku3RY>dvK!8`WQLA{6}fq6#^lT%+Z-VZ2kvD z8pD}3zjw5yMUUCcNoS&I4pd<`CzL-hjD`I^kq<_myuv877WoMshPbA#ecscn^MmT6 znw`{=l5q6fj?c7)AO2ej{V~X3x1d70E@+SI(E&NPTzj@1hH#Tug(~-lcxGN+T4tt+ z{E>ywsqN*G)nmytb3tx?-}jvoUfk4+ZToK-L|$;(?M^H(s-QK?zaE#M&I$HUIzggDX9V2+{sAHzu*>c z1@A449u!z+)@}EzbuPCqd(oE zOBz9JnKbFE3l12Q?-nICY_c$76_6ph9& z*wsz@xm}eEmG|b9Zkj_?3z=bKb90cXwQdd?-TB8~*9I-QeRue~6;U{ZBGfH)!H4Lw zQxln;S81A!|9_u49%lcv5Z96&ok=f_< zX<`Jsg05oR*!Me?uTTh-CC>wWTaUTv*Xu&{@*j@w4k-@p!qBG&eVxD106*N+QG>?l z4>^e#)N8jQyILRt1+z{rF0Rc_{SOH+koOj}!|%0(^P1*V9#zj^K1aA3)paeUl$Mg} zB&PuL4CoBe2?==^rz=hFkzM!6<}^JCP&7v)b4NL7XZsQv;XQ(yPp51`rVi2bGd)sa z?Xx$j_QA645`jncKPAwk-bGV#iHGIdQO)=te>r}86@}&8>YK2alwMOFq>D0gjjk#& zsmXt;UW&@)ldOy?CxORs@@_ddDe;dsYXz7R{I?KCb{39LS35O}G5gKpxa zW>5;F0WMUixCORI=~`__r<401t9ncj~m1J_H# zKSzhH1zdeD$mClN(_Hp zC7J!Am4vjY-sg9pbA10%Xo2(m`n6cPF`}N`ZRT%d!sm}!lGP|D9pcKuJXUACsx4r>JK1*QkY#oN~M zi3u=n&v6X6dbTfcW5~lBFL^yRGTsmoNEE*iJR@4R^gO;X{j9L?n9qTcbUSCK(X7OC z2P?z5M^#u!3IU_1W)2*vChWUM$KcBVe{*`>9G3Z$iC<`DWh`H&S|t%07=@8H$9+Yv zH*vt-RIb4W3aNdSTT9s%kPt~ z^Q&$C=e)u_p^7Tr0_#!9r2E;{B%EkK9n+pIgYbMQ7#+j?{BDBq4GJ>S~GxOh$814`>J@&D*H#kn1xGf zZF{?QMc^vvjimx?fUe9V3eKCx7owd`REsT=m)0=n4r+n*##-;^RYw2%CV@uAC8fT) zk?27r0wKEy-*eKSrD6}JK)FY{Pp^(rSEhXlFz7g>h?+PeE}YiSB5uc;#4rZ_+N4La zvZ0kjJyO5ji&Zq_S;y7PgKai_KaGS!+4gg|==o%{&oL>wig(cS+7@-DQg%)|GbhNI zCmhZXH2G`^dD#m;FC&X}(C>>SAP8+*r0*^>tI&|u);>Ms^!AtVbWi>$+JP9MKucAr zyhqS(TAfVyS|UGoaw+$tSKCP0UYpxZ7-m9|sGzJ2NZz{}S-oFOVvvK7qqEG!t7Zx8 zOX#)ZK&?kgOBAbsxk(!%pfj|jA zcA%Y0tYab%yibv!u4Qz~6yN4#!xZ{DEobMf(UQIua)aj2gia|pS&f+C`*ha_>sf9A zAs30?y1Qg6H`+@=qqj|NZ*cpn%*KuFo$Hm*aCg|H1}wdxp@EbUVD(_0vH8iGi82_@ z-~qdu6r$;cz4l#qY)m?(I6q;{U7@R$^U#H1ca}a63nuuyf7Y8mz*4FV)R}M##=bF> zc|ZP`NGI!`&s#!~f63&W6MHXyiS69xi&bt87bo|INVxRF=KQ;hjQ+>=v`@NP^V|Em zqM|(mFr#9;^xnUZZMx3Ns#Da^k@+^^sXc)tk(qNyUc&1*Hkv6(y8gl#FP`W}S~FpE zM|qLDBPZ`w1v1{(m@M=)fyocDAwc2v0`e9py}fH~RbT$`j>EtmIp(90M{}qJ{BIHR zs{t8H>lwGBj&YiQ?|Ob;NIY7Y)%vAFjsTXix;)Mi^v%kG2;z4z?e@g(G7~$l@_4k{ zdb{I&mP(;8T(S>pyd330IH;01OuqkJ+1PNG5`XXRT6uV;QPZ_3kyK+z6clVMnlzH0 zn0d!0j}#SD;s?7F4bX0=RfP$d3cR63c;NbIRq%yI4-T!t`2~Fe(ioSh5J6AcbYVf( zn8+%B!_MaQCt*yzzp`2w&vU8^wp1RqmB-w(qNKViC+*V<4`D@(=={>pI36*y=D4>? 
zCxRzTR~1@M;dgv3TGE8l!-gz96{T?JdKj{$q4oL4>{_&?4*djp&M7V5|x+K$W2*)do)8jvBHy)_vR zXK3Km-}6#K4Q~y`=uLZpIqTz1hjp91{fR2x{q(nLUhaPL9i3ZgI;(4{Syb_Hf)x5k z+XxvG{=y=EQaoSr-6zED(iH`ac;;^iH0e2Rbu%&iefjaUTgVW_zg|mXIxa6`F2Gbw ztDB*GQW4QHG6w|@sVOZum_1#TkFuGEw(X|MpC7?yjVmMQ- zrI*QvpnE;__4%IOWw)qncA223Fzvi@0{Igx`#+z_o@%Z)9|hjwUV3<*G~=(xS41$w zMMtWRvyYI7U@TOVpu0SeMW_z!iYCi0c8Ibk#oo+dlCqO`ylB~Xm+%)+o z&6>OreD~dbJG4*+FMqyvjl%Y0{Spk(>(|9>QHnI05Qb`nI@T$3F64*Q#+Y$2ezEu{Yje zhi(|6RO#(!R${QS1)r#QecPFOhWuCUsI84%+IgStnwE!pj-O=_`>|@ubU^T^ED`u(pbe`^UWr|5-vF-6>{KG8V(5O*tP*V*6s~oC0pC zf|E$ed32_c-vlV1t8J5sX4)P1n4f#u)6)6xJa3Gm z0>z_4`o6tN@8{@$_!OT``E$g0Var6&UBCAI?KD-(g7kqSF1d0iZKrGz?=w8}h@RXg z7#~2~4a?W7+Sw!wBFiwHdJSZjWerUSPuMv)Y173D9M)X`K!tU&*SY>r!ND4>@Oomp zOY89QcKHHcEy$Dyksc?JNZ9=$vOxk=pbQuY=2A}9<_cpW1KZi>BW}bEYUR<|c7B_di5X_*UB5m>1#7qf5MHBl%=%tg4=1}^z z+QH^EuFQo9ZlA<&)JR7!F5by6?u3F>Z{*Knf%QB97*AyCK7{(~#lW}X?+GmE$NH(l zf0rIyMi7c{$ZL(+YxIDS_|THKxh%rVMZKW9JhyJ1iH5Gey|N#18`@6Jg%&GJ*_GW} z#b+1_@ot~2$Wx`yPwogu-l(b}o*o^I^g1tzQTWwDO=Mft-Tb4wx94qtU{{{DwX|Mx?h$g*a{fJByh=&ph1YZzcA%!StX|jb|q?BS=vwF&)&F z9Y;VbX3?^Kt*b($`M}GG-X^rE3YUcy-`1MRE(8A;a0=BL*@?oSjVm z*Y4fnDtv4DZwrg&dcW;sY4U1vgSA~ZZnISwxJ@1M1)fJmwlm~N3Tb8qtEe^UizfFJ z%gGM@;kkSeIE-q6m!76Pw97T7SzT6Iqi|!+_+#bKN1Ib3Y^wV{u-gD46voEJNy*92 zm1JI#(XuM>1y~u3R9El`2WWSPGuzo@n0;8ye^nTM~oS81+Ot<5n z`h^d*kchpod({f^ZFRk!miC73^3Mk-t-DH`KUwL0;|}WnIp5uE6wuT2^|eu?uq3wQ z4N4)8gpUFNP2CZdt`W@_DBhKoyorB;7X~D;cy`2R5CsKpoMi1JBFGJ=u>0-*YL@oV zYN@0wCK!?Z0ADE#&ynniHRLcgL|{7!wn3e3SKQC{0DnYlRn=|Q z6Md20m6<>U36_M%0#w(Wbmma^jXPH-C8`OO@jsK;6LT!K?5$hWoy!k;nG4iL$+Bqm zB4gv7YssVcuW8G6(>>X ztyqaK@e=y0U5V(inzPu8doF{=)UR{YYX5jD&+QpiWZU}WRS3Q`i#ngk(-<>Ubmc1F z8T&q~si+9QTR&GY3<4n_qq-vxA_2tMc!3IETKdH4n*kc+(vh9H0k{%9&dNL%es!@; ztc(6D6#lf30ij%=m3ve}{%z9%a)bmXril=UK>^xgW<~~_5)evPQ|9h9J*;+P?-TlC zJ3e?_`o&6wMJNub$?ebYcW1dGwWrJT+z$KO8H;>eh=|q`L+w^UvGp4)|l)y~l-lEgOH8hV}qC2Il#2$o;dn|rA1Sa`66|+OYE<#k$epn6@iS6Cw5JO%nPujK z;`~4MhzB=mXCX2V^?v`%w*64UoktLGFI7r-^W+3xfcox){Pr%0Rwyy=-J_oW~WT0E*5UFgoJ%P zJ&*YL-vDGgJzX5!orDN3;Gndvedv7Vfs2f}zN9Cd6Qw=<@*iISy-7T>VfX=;&dIuR zT_>CxWypaLg&$IgMZQ>ecJYc|IQ9=7)&BK3b8WO=&I$$xzghi!%`e!mxboicu^7ccE(fK9HgqGP$EGv?H%w+Q_YwzP4&# ze>KAQp)0(=i%(HEg+!Kk>f8=QTtAwZDrARAiZG!`8$E0n{!5(^@cMeNpqNd@;@b}5 zUk39OgL_=RD_GJB&nsj(cAadFpPHP9(A#fx>Ry?$kQy7ymH&FshI_BgdYz$#%05~1 zx7OZU$|(n(r$It)nxhmnH21^b03n>AYMSROffp>4#A)K)AlfARm3Xmf}RQN$Q`7 zRNcZ7-hWV>?-`msGVAI=}8?N!M+##6A21YaC= zZYXA$DXe0tk8Q~rcVnRx`QI|US;Y2iJ2EeiS0}Q>n{qU^cX90+Y5Kv6nXWnHhp2Vh>l_@8jYXZ=138YmzAZ+GZxdqigw*T+Hmf+WeA#^a;6t4u<#xZh17vxAH zZ9!~m$1GO_6(odKSQ;F*!ND8ZZK@sJ9rW5~L)d~(`JYQ`3Jp`?t!g-&-4^nmm1_4kubr< z(baHqPV9awesK6zi^kV(4L?aisV84sxTKx%e6MM?Ep*R0r0-V9mrsrRh4krpwwn4T z+}YgADh@>P{6HG?wTZw<66;zN9sWs~X25P0)3)~3so3PmVYq!wntb{}c2<@FYc~TJ zm_g+R!4vu^@RUw`(}^({QuZ@zxf`hX zTV)t)Lk+ehbl&TaS1*>JCfMNimB%a?vxy}>xF+IV;h%H}3O6s0$XjCQ-~pmQqIuS! 
zvm57O{#s4PfSIgh*igoUO!sjnDLF_vBnOgluRmE~f4_0QA1ikx=LK{k#h|Mcx9wl5^o?4BtPE5koew>3vSpIcn)ElGZv%| z9;q<=H@0BXhK&7$kc75XK$REJq6ZL!nb}U}{7K0Cl!BUcad81xr>vr#VNeHy5%>1V zsysC>-%BM;p|9%>Ce`y(@#h|i=V;U+f8*JQ`kg*Swwv)UgpTTErOukx!Wq+VIo^*O zANCjDOF%OV?_tOschlHIsXS-aC!ne}>ps-JPvCyT@hE;^L6Ary{ZKTS0IDh^!h`3J z!n;rT?ArCtzcrU1>;>_)4pE1h>Uz`Fl9*radzCiGCFLt<&|{=wx@T_O+?YGD-Dp^Y>8MUsJnnp!Yh=PF&B zY6)HYY4aRO+TZJk+apdJR&^nfb=7uLXVi3^2RPrNKiqmstmar}nKsZ(E|lEQm?Nov zSCadpSRqk&xq$cw3Af+?dr5cuykYiQwD+P{IgIi4-P7P;;!gzebZUrwtI1Xw+yUb;3T-oD-DfG6Ub zPL!4PM)TO#Hr3bC-#9 z@fY7$DrvncC!VIlz#&G|D-KzcC{og(9Xi^4KvRf8p&SWMxEV`QQ-SrP1nbBB&#D^} z^>=5e)bH>>-1zZhFAydHnF25SJ;-K(y&4kvn5eyOXp_>&w;9fV*tv+*p*7r3mY@jy zw?Ztw2NA0Ay?>z9$i*1dr^ETn$fapnFc7Y^!Z*MQB+9Y?PzsGs_isQU5(a)f+ z^uJT~@f$O8ZQ?>MoNS|~UiUm_--d9A`*j#K5-(F9P%*~HN2NR~GHEes+?)Nen!Gn? z&mAbrvnQ*@_}i-y=4cobUD!62E=kKHxWcVP+`=WoPoBI~R1Al+e?;b!oLjF5?IkaH z1!7u?Q;MNvFr{>%1=)xn+5}5yRA=NmIDJaARrEVfi2_Wv-`*wPRBydn7s6LCnHe0yL!suRONd)n{s2>Y&%|D+4|XgW**x$Qq) zk3a;n>*?db0Tc&Yg5x~qKS3!^d{-T$ASW=fJ6DgtTz=JuZ*6_Ia{f@Whj04wcWn8b zqUOf5Q5;UF4LP^|y?uk8#>xbdRpQSWcu76Q-#7+8mkNH;Ln!CWnK(FC-qzPGnXI96p>q^ z1>ML=zE4-1uJ=YG3hQNWiEQc*qsg%sOZ2s)%hP}IjY89*>Q?mZL+9kQ*6W|$O?tys zL_SZo)MBozw|et;1_jdS5rW{C(NR(LJ4cA|ai*%NK04^#XOJng12TZ}PC70GpHIzP&_smSzW>jMu0{sc zB$*Fr?73yrZ-9#h`_o9z4&&>D{>HDGi(p83dz|L#pPvtqy(W!dXD}(FoZMBzr@aui zYTDBB=xI8@c`Wk%7*n|8Ys+}t2?)r#_vwjm()f1_)ya+<8x&>~9gE$-O0}M4ointr zxJcb1b*S)bt4VGvj|R&w$WQ;oYYZjSqLa7sc`nD zg0RQOMN&PWkd%MkS4l=z+EZSr`(P2NgJAx4TcK}0Iw2K3&thsPK}AQ$roOE}L5_H} zaS-%7Pa5Mno%|i5zAw=dVG_$K$J)UJI;*Ka zU;HH3DfE9RMNFd0wwvbp?p0BOV46TG`Y>&=g)}CT{qR4f4iaD5;0RkdD-L=EK&PCC zBt-p+y7tU&%ewighqiNNdzt%@Q8h1r#r)x649?CuEsxCt)lr^Hq8Qihc`=11gY!bl z&w5T-B^piMjDxyr57+r0th;|~iDj`+PJh43GPH}DV=tsK8sO%R(vLtNwi@qWm@rQ> zfwfdEpiaP+J$Ua`J8U0uaLe99N~7!apN7-BS4`4g8|V^V;fT0qrwayX@t-#p3xa=g*F3Y5j@3wK2T^jq?FqBhh{%9h6HAdQyO&)81 zfdcqtT(x=Z(XH%yMQg|B6xE(MTH2H?a#%Yne8q!{ZYC(&ZJ9CM!s}{@(k|u1i=$_M z-XV?~7g&bZzNhbr%}UDLRx3~y8DrJmd_(d?g1jmfE0*xZ+Yz3rd=pBb)xr5fNlBTJ zpC2jTzcS{Uu)|dueUGr?j}ZkS99rDC49jGtiXhIL((Ez+I(^_zAuQ zi-;4OPl*hPz3kEpR5y*$eAG}iMWYxjW$>)-1Pgy@S<-TK`xS!{X&Pbn0cSGH)9!U! 
zZ5Nv{v&=%7pMqez~ydUGBUA65NgK>NLj>-Q2ITSU)$ef&x^coo3g_v@QcI;V|9 z)`!;T>Q3FR_KJeg5xC!Gs;bEQ`!sRZuEFjejYxudqk=Uw1dU_WXl2 zg}BiIH4aMARFm^k4#V6;lCFcur({pe-La0hI(BIhd;ZO%f9GY zb^#&c@#iw3PkQVQmf7;4z?fdE)D)UloBHKww3A9uYckdLJ`dh+npZNc((~C1NiCyx zW%Cc|F1!6iT(QScQg^2*nJzBAzaf=p_B~b`4@%h6A*SXfW!RITt2GLPd6wOy<|CgE z-8z0h?{`sC7zX3W-@-P8l+naCTik)}X+sN=D$Qq3D0FviR*-;%KW!ANd*IY{qrM*iaw zM(J><=38oPx1&Sf&NwD>(ks8jzF<%o!N))t;FB}X*S9?S<15YShV5eet!cpDz}CHr zo!T=<4*rj>i0uc(3-+Ll`#zD-=|CuWZyMZ=ru-*r?66a}iH!)ABqi&W-4FN@T;_HI zC(PWOrVqNdCjShKTCJ)i%+nJ!M^o*7CD+I)XB^&@l(k|)^=W)Pacf_yFOp|nmKx^e z--TwZ@-34QB|q=}IE4`im>A+1ftgc60u!`NM1hJ=gsNd_r@K+%sMq;*&@0$rKD< zAD9jt1R;J^MmKQj0|{h5rnRGzR`>7yH&nq~fs{?`l!ZEU2DZBmm3TP^$r-E|e`&9E z?ax|f-w%5Y$P7Q-{-L24ywnkHXDeX@No`CUA3lfIrO7CA;ND##sN)K#JMRove0bPN zFf*@td!CFM`+9A}d;Ki?IFQX%v?fRNZF8%yI1S1oO+QR3GQDAxXh>wp{LZi!ysuZ< zkrBN1`~kw7ZJvnPLk3g#PCtjQpGlTM7`*JDGgAA0R&&PsaD$6Tj7OF$?4-+^z3rQA9`^6I?^mRqLi`H;ig;c)$uj%u%RC>wOG^yg=>(u$PYMJA?-oA zog)dvq#`g;gtbEThDDIYxSMU?TXd5B`-z&oLm{EaE-+BP(VowuPr{4*Z@TE;E0kW- z8a!5-1ps)2otW%kF#cyl-bt$VX(}8oG`d1#RVLfVIid%Uh|4j~8KeTi!CUD`Dl z4;@e)n~a_up8qqTXz$^VcCXz>^){7U;0YSS!q#sk-aAZ8VqA)RY==^1*`S^`S3xJ+ zRdTuBB5;=HlcfybmtHbDRA}j7>k)7s0EC0;{hVSu5xbGaZkDXAlg|BTasL)Sv84B& zO?Yp4%#6w>tM}i{J}is6;>YcuaPOxQXiK;ZAZ|-eRT9kiv0H9TFA~hmeS8+~nadQK z(fQ>l?KLCCenwDRs_2IyRlNQ9MMThpi8pgKfe$}OD)_)72~<+(=p|`y#Wq;Mit-E4 z!|2B)9tNJ7#3j|dO@>01{1CtVkji7n;!b4Y|4?S35B+3VOMkCiBV=&C)I#@91k;Ol zZ^b=UrDev0up>H_&Sjx=BMA&(Z|msk0S6HNQIP8I81+8blpKw(h;oC->`Lh6YH0a> zC-VtJOxd!R{ypN?sJ}oV`J9TDc76Xt?asC-Oj--3H#3JM43%yU1-FmYOKF7*+q^Xkcf0K3k1Zq^0yd^^^rqR-cAXf;Fu>*%umjB7K-AK*b>$%b zGGuVw=lc54x7X*d*390~?EAr^U`IPK4arc_xN0jYudg(CtDc>{Uw}&Pv@+jV8ZGx} z=ND{b%tZX&B$!zvEYDJ4QB?gb>leo(7C8AUCdqZY@z0mA%>zAg25Ck26i!T<<~n}9PnE$!uqjfE%D zO~Aka?&b((@f<{$j8F$aa9%4alE6W9Aq*8Lc-a2)K)mRYXOzQi{dJ^^3UUreW}30- zc&+ZXJ%oB743rM=ZwT9Pr;Bn@9M98!K4?Co1>pZ^@?LE$j^#-aK5Oh7tuEB}a4|SH zGy+Ugu*4U=ph+^{S$p2Z z5tWc&=A(jma5}Hvn+aEX!A+9>MOy_y6WQqTI7TdN5hdbHGIFyYi$B`Yb5kQhBjr zzyfCPVZDUI<6Kf>wa3BQOAkHRSV@gdeMY2Nww{i~*y|RJ>d1?zeBV+ovf~Gr_A7c3 zhOp%1WY5Y<{+tm6I~R)u8yzZ?|G+N*R;|{~iKeh_I$VVdTUH(dFabnHCy_+D|LQs) z8j$>zx5vvobsa$X*nu+0vi5JTRfo<8Wa1t$z`$G>1h~Cn!PP-xlkio8sHAwB(qwHq zTd&pC9&=`eyMEoGaOPG&Dk_q9LX1cy5Jv}ewNEs&##H?$2MOw;?xk5l*Bky|R}?NC z8!V6`ocWqTVE&VXuFF);((C*1{7X}4SEW;;3=vVba+6$mOHOB!?>A!2iX|Rm6nSDs z(gR6+W=Xv9=-W23?N~}!J7kAeQHx<>ziiPS+oBESC~Y^fvs;^e=ijlYtWTpF!+jpk zEgsA)H5S&m!IJYkK$v~?iE+N1N%o*I-a4-ZTTUQ)Y96M<8C%Yl+nCx>3U8^hMQ=1P zOu(Ok#K!(JGzq2_c0aGK?R|A$1OvS9z>EXCCw8RSiicY~KAc?uD+&gO^yQ+=rH=!! 
zX8z~-Jx4ao1IHILS8lZ@JyteA>fTFbcYA$Em>>2JI+if9OAJ=%WRVcG+;v@(Sr9zU zMZd-TL8oeyf2W46W1{vCb1KS?-J7o~x_EjqUhQGr#=l$Eoau0I^u6rm>2fo1hR(^& z?r4o0n@a}cwtAQLMpU_H8g`4a%xA5{oN}F>F7rC!#QA=DAJj)Ts=<$ zW-*q@W#sb&d0GQ#3e>#TA79Zo0zLH**DWqQj`EzFi%X|$r`lvuN_R0*axT=Hzi7K^ z8OKc*3moh-&pzdU9Ii9aozO8!p1ALmoMbcwU%;RQJ31dP7rJJ4R?Dhry4t{ymPCJ> z@DQc&XYW}mn4U+G-wl1ZPkz_`;r>rTe#8yw6w&AN3qq2%Tce`f`K*8DR|wI3vLaDj zd!7wQ<@nWGMvd8dO7XnM^8O+q8vZ~uT)-P)f0}hKR;-_zYc)q{6=x#DzN%1dk}>&Z z&Q=X0O?>}27FqvmRYk?4x)-O-^Tz$%Rmjz$MX>ypD>lN1w57?@W&YHRbfb4hL2 zo>|^@TD&E6+Y}}v@EuH zQ2W*P9%?zOv?;Z_h|o-$-|PyDcaZ;IU8C#ysWXl*Vafn)Va?o!PS6WxDr`nD6}MS3@(_j^EN5RE!qI z{se}8)wkLtSVPkde=p$8mPBZu7Bl`#^s2Yp5Q3e7!83I#Hx`f1AUaP{<^(y(9wZtC zY;?Xbd;~AT#`Kjl04~6t52h?YF1$b9)fGub238ye6A(K(5BdS?R6>LPv|_u;5GQy~ zuU0Ui4z9|7gAR2p%{+Epdg0Qo-d}C$u7wVZK51I0LWe=1L#l5<5xE~8pKV#0Lkt4M|KpZ>S>*@aXA))=sNrEig znzuW$?)=^K-#iDT`85VyG)Qi>3Jk!K{H4|!lx?44RQMffWCc)gt`O48DMq@~^&^WJ0PD)a8@MHMtkWiY}EtX!9 zlU_SxJ?i0ZuibrPrcSDCN-~E8XTr&<`r6t%?^9cU$!EuHSM3NP*-!9WGyzTU7->0& zV`W<^-_@pS3Oq@JLqp9VUxJ_N9sve$hd%bLObmtENn>7ju72Fi^LV4yrL`Z@xMU1_ zDjZjhto1P=W=Qu05k;&{b%}DcwKRiIR*OI3y**bmwQZ@l{snYgOLK7b9 z$>VOK1%EXCp7o*g;`ktk)cXsM=vY`~I)^yKCV$(YGwbtcr`>EM0N=)S}|EN(f@Bf4qF_{c$EYD%+=))q$Rr&vU?z|_y3ss3aBdA zt!ogFE>XH0>5y&^l`iQnr9(>TE&%~40Z~%AyBnkwq)S4O5>RQ-f9-Scx&Oy-3?0LB zhI_xy`>dF2t~qV(?Q>z@A8Kuc2FbuWqTmCJ^IG;Uopgr&t+fZ8Mt|z6-_(|K0#vv8 zuvDnGL0A#_&lO{PU(4Pzr~eWkx>#R#zkplWwxq_YzSHNWH(qYP(3=;igLPD<|56%9 z<4zBCX3u84!5hQJxc_{orJwsmXyN^GOys; zB@AM5u=aFk@_F=)ih#DHt?b!>Gp+GXHnR6@xBUgbuislacQ)|PwOm?#i|~f|g%N>S zyE&-rtv(rZT4x~MTlV}-tT?9u3rZBB!~3VUxoueHHY9f5{)w5Zes%jX$Z?{W>06xv zQ?~KBKu?OG@dwZ*WMO|ZKh@)#0e-dyo7_s3+thaty1L;-+0|Ve} z0(p~9T>K?Cg2D+S%(Spep;!E!sCtEwwp-R4^tzlA>Z3c)7-J{7o3R4>^nY}7qn}TI z%SHhn#fJ&#-Er!Ei+-(ZE~>v2!-Wb7dC=JX?z3XtD8F%U`Nq^+ zdTMk%mv^jFtffY)T3forvR4Ix!TI$zRjP$`b=pG0U^8bI_D!2WIELB zs|}X(E`0w^v6XXG==A?Qg{kKN+yd})0naUkV>XG|m9y}V4INi_sB42c`^;T-zWCpe0%aSUm_ z`pNy9oTXbiub3`Fzq~AL9dvxPF*4ufNSCV_`Aws+=ILk3lgwy|LUaA2fi^TWc29O~ zpO`P++TGMBE!O)e?(^>$9NzPne$ue+GNdRqT0Ei8B%ble0!9@`dLu!)hcYhPRq9gp zdDdJM=w6XgP{5=h1m4HV6&VBC(66B0D}XP4i1ZK02u|sDD_s8DOmQ8<+9h;iwvC;WwnfmmhJb=|W(OyN)ke1?=l;{1^G1XoqDtm` zA&G;Q!}i+5JBAvSIcCWDzrC_2-}P(d%^P+eXzOOqn-(v3%*L*$Z*@&9*XtD#VL6`h zny^9#4vZwAiY+<`!AhZ_kM+yiyL6}!H^EV6a%su9inOX$68w4a5tj8IA<%pT&(7F; zGvWV(T^17%{b2@F==BRu3<(cifV1babdzNue_|*Bs#*YIK&l)&sc7Z8yuZWu$o%E@ z_MHQ-$seTIjU^PzSDUzgt^NhVy%^!eJb%17H`$IExFlp^)6MC#%?tMFILeJ|LkQF< zdu$AjGzRjQ353GP?|PYd5S?)?eLC7>g#A=F1_0VX{0EMK5a*^4u{8O)X4sf)))i@jW%f@;mMyZcbalQ0z*qPwzMM+r7M$Lul*+eZ! 
[GIT binary patch data omitted: base85-encoded PNG for the new starplot image referenced in the leaderboards page below]

diff --git a/docs/leaderboards.md b/docs/leaderboards.md
index bfa6f9bf..f55d2b55 100644
--- a/docs/leaderboards.md
+++ b/docs/leaderboards.md
@@ -11,18 +11,24 @@ We evaluated the following FMs on the 6 supported WSI-classification tasks. We r

-| Vision FM | pretraining | [BACH](datasets/bach.md) | [CRC](datasets/crc.md) | [MHIST](datasets/mhist.md) | [PCam](datasets/patch_camelyon.md) |[Camelyon16](datasets/camelyon16.md)| [PANDA](datasets/panda.md) |
-|-----------------------------|-------------|--------- |-----------|-----------|----------|----------|----------|
-| [DINO ViT-S16](https://arxiv.org/abs/2104.14294) | N/A | 0.410 | 0.617 | 0.501 | 0.728 | 0.532 | 0.350 |
-| [DINO ViT-S16](https://arxiv.org/abs/2104.14294) | ImageNet | 0.695 | 0.935 | 0.831 | 0.849 | 0.759 | 0.678 |
-| [Lunit - ViT-S16](https://github.com/lunit-io/benchmark-ssl-pathology/releases/) | TCGA | 0.801 | 0.934 | 0.768 | 0.895 | 0.890 | 0.753 |
-| [Owkin (Phikon) - iBOT ViT-B16](https://huggingface.co/owkin/phikon) | TCGA | 0.725 | 0.935 | 0.777 | 0.915 | 0.916 | 0.771 |
-| [UNI - DINOv2 ViT-L16](https://huggingface.co/MahmoodLab/UNI) | Mass-100k | 0.814 | 0.950 | **0.837** | **0.938**| **0.942**| **0.775**|
-| [kaiko.ai - DINO ViT-S16](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.797 | 0.943 | 0.828 | 0.893 | 0.915 | 0.770 |
-| [kaiko.ai - DINO ViT-S8](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.834 | 0.946 | 0.832 | 0.887 | 0.903 | 0.744 |
-| [kaiko.ai - DINO ViT-B16](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.810 | **0.960** | 0.826 | 0.898 | 0.889 | 0.753 |
-| [kaiko.ai - DINO ViT-B8](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.865 | 0.956 | 0.809 | 0.921 | 0.922 | 0.759 |
-| [kaiko.ai - DINOv2 ViT-L14](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | **0.870**| 0.930 | 0.809 | 0.898 | 0.931 | 0.774 |
+| Vision FM | pretraining | [BACH](datasets/bach.md) | [CRC](datasets/crc.md) | [MHIST](datasets/mhist.md) | [PCam](datasets/patch_camelyon.md) | [Camelyon16](datasets/camelyon16.md) | [PANDA](datasets/panda.md) |
+|-----------------------------|-------------|-----------|-----------|-----------|----------|----------|----------|
+| [DINO ViT-S16](https://arxiv.org/abs/2104.14294) | N/A | 0.411 | 0.613 | 0.500 | 0.752 | 0.551 | 0.347 |
+| [DINO ViT-S16](https://arxiv.org/abs/2104.14294) | ImageNet | 0.675 | 0.936 | 0.827 | 0.861 | 0.751 | 0.676 |
+| [Lunit - ViT-S16](https://github.com/lunit-io/benchmark-ssl-pathology/releases/) | TCGA | 0.770 | 0.936 | 0.751 | 0.905 | 0.869 | 0.737 |
+| [Owkin (Phikon) - iBOT ViT-B16](https://huggingface.co/owkin/phikon) | TCGA | 0.715 | 0.942 | 0.766 | 0.925 | 0.879 | **0.784** |
+| [UNI - DINOv2 ViT-L16](https://huggingface.co/MahmoodLab/UNI) | Mass-100k | 0.797 | 0.950 | 0.835 | **0.939** | 0.933 | 0.774 |
+| [kaiko.ai - DINO ViT-S16](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.800 | 0.949 | 0.831 | 0.902 | 0.897 | 0.770 |
+| [kaiko.ai - DINO ViT-S8](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.825 | 0.948 | 0.826 | 0.887 | 0.879 | 0.741 |
+| [kaiko.ai - DINO ViT-B16](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.846 | **0.959** | **0.839** | 0.906 | 0.891 | 0.753 |
+| [kaiko.ai - DINO ViT-B8](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | **0.867** | 0.952 | 0.814 | 0.921 | 0.939 | 0.761 |
+| [kaiko.ai - DINOv2 ViT-L14](https://github.com/kaiko-ai/towards_large_pathology_fms) | TCGA | 0.862 | 0.935 | 0.822 | 0.907 | **0.941** | 0.769 |
+
+![Screenshot](images/starplot.png)
+