diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 01998020..7b6cf4ed 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -7,30 +7,11 @@ on: branches: jobs: - security: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 - - name: Perform gitleaks checks - run: | - # Download and check - curl -LJO https://github.com/gitleaks/gitleaks/releases/download/v8.18.2/gitleaks_8.18.2_linux_x64.tar.gz - curl -LJO https://github.com/gitleaks/gitleaks/releases/download/v8.18.2/gitleaks_8.18.2_checksums.txt - shasum -a 256 --ignore-missing --quiet -c gitleaks_8.18.2_checksums.txt - if [ $? != 0 ]; then exit 1; fi - # Extract gitleaks - tar -zxvf gitleaks_8.18.2_linux_x64.tar.gz gitleaks - # Run gitleaks - ./gitleaks detect \ - --config .gitleaks.toml \ - --gitleaks-ignore-path .gitleaksignore \ - --no-git quality: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - name: Setting up PDM uses: pdm-project/setup-pdm@568ddd69406b30de1774ec0044b73ae06e716aa4 # v4 with: @@ -60,7 +41,7 @@ jobs: sudo add-apt-repository ppa:openslide/openslide sudo apt install -y openslide-tools - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: lfs: true - name: Setting up PDM @@ -74,3 +55,42 @@ jobs: python-versions: ${{ matrix.python-version }} - name: Executing unit tests run: nox -s test + docs: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + - name: Setting up PDM + uses: pdm-project/setup-pdm@568ddd69406b30de1774ec0044b73ae06e716aa4 # v4 + with: + python-version: "3.10" + architecture: x64 + - name: Setting up nox + uses: wntrblm/nox@5656fcedc31a1ea37d016e4d94d00185330cc528 # 2024.04.15 + with: + python-versions: "3.10" + - name: Configure Git Credentials + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + - name: Building docs + run: nox -s docs -- deploy --update-aliases dev + security: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + - name: Perform gitleaks checks + run: | + # Download and check + curl -LJO https://github.com/gitleaks/gitleaks/releases/download/v8.18.2/gitleaks_8.18.2_linux_x64.tar.gz + curl -LJO https://github.com/gitleaks/gitleaks/releases/download/v8.18.2/gitleaks_8.18.2_checksums.txt + shasum -a 256 --ignore-missing --quiet -c gitleaks_8.18.2_checksums.txt + if [ $? 
!= 0 ]; then exit 1; fi + # Extract gitleaks + tar -zxvf gitleaks_8.18.2_linux_x64.tar.gz gitleaks + # Run gitleaks + ./gitleaks detect \ + --config .gitleaks.toml \ + --gitleaks-ignore-path .gitleaksignore \ + --no-git diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 7805b489..e540ccd2 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -13,7 +13,7 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: fetch-depth: 0 - name: Setting up PDM diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index b3214f57..07bf4275 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -2,6 +2,7 @@ name: Release on: + workflow_dispatch: push: tags: - "*" @@ -13,7 +14,7 @@ jobs: id-token: write contents: write steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - name: Setting up PDM uses: pdm-project/setup-pdm@568ddd69406b30de1774ec0044b73ae06e716aa4 # v4 with: diff --git a/README.md b/README.md index d30f8ac0..4cbbdf5f 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,18 @@
- +
+ + +

_Oncology FM Evaluation Framework by kaiko.ai_ [![PyPI](https://img.shields.io/pypi/v/kaiko-eva.svg?logo=python)](https://pypi.python.org/pypi/kaiko-eva) -[![docs](https://img.shields.io/badge/docs-latest-green)](https://kaiko-ai.github.io/eva/latest) -[![license](https://img.shields.io/badge/License-Apache%202.0-blue.svg?labelColor=gray)](https://github.com/kaiko-ai/eva#license) +[![docs](https://img.shields.io/badge/šŸ“š_docs-latest-green)](https://kaiko-ai.github.io/eva/latest) +[![license](https://img.shields.io/badge/āš–ļø_License-Apache%202.0-blue.svg?labelColor=gray)](https://github.com/kaiko-ai/eva#license)
+[![paper](http://img.shields.io/badge/OpenReview-MIDL_2024-B31B1B.svg)](https://openreview.net/forum?id=FNBQOPj18N¬eId=FNBQOPj18N)

Installation ā€¢ @@ -59,12 +63,12 @@ eva --version ## How To Use -_eva_ can be used directly from the terminal as a CLI tool as follows: +_`eva`_ can be used directly from the terminal as a CLI tool as follows: ```sh eva {fit,predict,predict_fit} --config url/or/path/to/the/config.yaml ``` -When used as a CLI tool, `_eva_` supports configuration files (`.yaml`) as an argument to define its functionality. +When used as a CLI tool, _`eva`_ supports configuration files (`.yaml`) as an argument to define its functionality. Native supported configs can be found at the [configs](https://github.com/kaiko-ai/eva/tree/main/configs) directory of the repo. Apart from cloning the repo, you can download the latest config folder as `.zip` from your browser from [here](https://download-directory.github.io/?url=https://github.com/kaiko-ai/eva/tree/main/configs). Alternatively, @@ -98,7 +102,7 @@ and [tutorials](https://kaiko-ai.github.io/eva/dev/user-guide/advanced/replicate ## Benchmarks -In this section you will find model benchmarks which were generated with `_eva_`. +In this section you will find model benchmarks which were generated with _`eva`_. ### Table I: WSI patch-level benchmark @@ -129,15 +133,15 @@ over 5 runs, with an average standard deviation of Ā±0.003._
_References_: -1. _"Emerging properties in self-supervised vision transformersā€_ -2. _"Benchmarking self-supervised learning on diverse pathology datasetsā€_ -3. _"Scaling self-supervised learning for histopathology with masked image modelingā€_ -4. _"A General-Purpose Self-Supervised Model for Computational Pathologyā€_ -5. _"Towards Training Large-Scale Pathology Foundation Models: from TCGA to Hospital Scaleā€_ +1. _"Emerging properties in self-supervised vision transformersā€_, [arXiv](https://arxiv.org/abs/2104.14294) +2. _"Benchmarking self-supervised learning on diverse pathology datasetsā€_, [arXiv](https://arxiv.org/abs/2212.04690) +3. _"Scaling self-supervised learning for histopathology with masked image modelingā€_, [medRxiv](https://www.medrxiv.org/content/10.1101/2023.07.21.23292757v1) +4. _"A General-Purpose Self-Supervised Model for Computational Pathologyā€_, [arXiv](https://arxiv.org/abs/2308.15474) +5. _"Towards Training Large-Scale Pathology Foundation Models: from TCGA to Hospital Scaleā€_, [arXiv](https://arxiv.org/pdf/2404.15217) ## Contributing -_eva_ is an open source project and welcomes contributions of all kinds. Please checkout the [developer](./docs/DEVELOPER_GUIDE.md) +_`eva`_ is an open source project and welcomes contributions of all kinds. Please checkout the [developer](./docs/DEVELOPER_GUIDE.md) and [contributing guide](./docs/CONTRIBUTING.md) for help on how to do so. All contributors must follow the [code of conduct](./docs/CODE_OF_CONDUCT.md). @@ -162,7 +166,23 @@ Our codebase is built using multiple opensource contributions

---- + +## Citation + +If you find this repository useful, please consider giving a star ā­ and adding the following citation: + +```bibtex +@inproceedings{kaiko.ai2024eva, + title={eva: Evaluation framework for pathology foundation models}, + author={kaiko.ai and Ioannis Gatopoulos and Nicolas K{\"a}nzig and Roman Moser and Sebastian Ot{\'a}lora}, + booktitle={Medical Imaging with Deep Learning}, + year={2024}, + url={https://openreview.net/forum?id=FNBQOPj18N} +} +``` + +
+
diff --git a/configs/vision/dino_vit/offline/bach.yaml b/configs/vision/dino_vit/offline/bach.yaml index 3d1dd721..926371db 100644 --- a/configs/vision/dino_vit/offline/bach.yaml +++ b/configs/vision/dino_vit/offline/bach.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/bach} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 400 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/bach dataloader_idx_map: @@ -89,12 +90,12 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/bach split: train - download: false + download: ${oc.env:DOWNLOAD, false} # Set `download: true` to download the dataset from https://zenodo.org/records/3632035 # The BACH dataset is distributed under the following license # Attribution-NonCommercial-NoDerivs 4.0 International license # (see: https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/dino_vit/offline/camelyon16.yaml b/configs/vision/dino_vit/offline/camelyon16.yaml index 29a55c1d..c165b37e 100644 --- a/configs/vision/dino_vit/offline/camelyon16.yaml +++ b/configs/vision/dino_vit/offline/camelyon16.yaml @@ -22,7 +22,7 @@ trainer: patience: ${oc.env:PATIENCE, 10} monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/${oc.env:DINO_BACKBONE, dino_vits16}/camelyon16} dataloader_idx_map: @@ -98,7 +98,7 @@ data: predict: - class_path: eva.vision.datasets.Camelyon16 init_args: &PREDICT_DATASET_ARGS - root: ${oc.env:DATA_ROOT, ./data}/camelyon16 + root: ${oc.env:DATA_ROOT, ./data/camelyon16} sampler: class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler init_args: diff --git a/configs/vision/dino_vit/offline/crc.yaml b/configs/vision/dino_vit/offline/crc.yaml index 1790d610..56985ee2 100644 --- a/configs/vision/dino_vit/offline/crc.yaml +++ b/configs/vision/dino_vit/offline/crc.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/crc} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 24 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/crc dataloader_idx_map: @@ -89,11 +90,11 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/crc split: train - download: false + download: ${oc.env:DOWNLOAD, false} # Set `download: true` to 
download the dataset from https://zenodo.org/records/1214456 # The CRC dataset is distributed under the following license: "CC BY 4.0 LEGAL CODE" # (see: https://creativecommons.org/licenses/by/4.0/legalcode) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} diff --git a/configs/vision/dino_vit/offline/mhist.yaml b/configs/vision/dino_vit/offline/mhist.yaml index 2fac6964..77cd7cde 100644 --- a/configs/vision/dino_vit/offline/mhist.yaml +++ b/configs/vision/dino_vit/offline/mhist.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/mhist} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 51 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/mhist dataloader_idx_map: @@ -78,7 +79,9 @@ data: manifest_file: manifest.csv split: train target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor + class_path: torchvision.transforms.v2.ToDtype + init_args: + dtype: torch.float32 val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -89,7 +92,7 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/mhist split: train - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/dino_vit/offline/panda.yaml b/configs/vision/dino_vit/offline/panda.yaml index 600bfb12..57f34696 100644 --- a/configs/vision/dino_vit/offline/panda.yaml +++ b/configs/vision/dino_vit/offline/panda.yaml @@ -22,7 +22,7 @@ trainer: patience: ${oc.env:PATIENCE, 8} monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/${oc.env:DINO_BACKBONE, dino_vits16}/panda} dataloader_idx_map: @@ -98,7 +98,7 @@ data: predict: - class_path: eva.vision.datasets.PANDA init_args: &PREDICT_DATASET_ARGS - root: ${oc.env:DATA_ROOT, ./data}/panda/prostate-cancer-grade-assessment + root: ${oc.env:DATA_ROOT, ./data/panda/prostate-cancer-grade-assessment} sampler: class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler init_args: diff --git a/configs/vision/dino_vit/offline/patch_camelyon.yaml b/configs/vision/dino_vit/offline/patch_camelyon.yaml index 1a6a7b98..bb9fa5d3 100644 --- a/configs/vision/dino_vit/offline/patch_camelyon.yaml +++ b/configs/vision/dino_vit/offline/patch_camelyon.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/patch_camelyon} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 9 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + 
- class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, dino_vits16}/patch_camelyon dataloader_idx_map: @@ -79,7 +80,9 @@ data: manifest_file: manifest.csv split: train target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor + class_path: torchvision.transforms.v2.ToDtype + init_args: + dtype: torch.float32 val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -95,12 +98,12 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/patch_camelyon split: train - download: false + download: ${oc.env:DOWNLOAD, false} # Set `download: true` to download the dataset from https://zenodo.org/records/1494286 # The PatchCamelyon dataset is distributed under the following license: # "Creative Commons Zero v1.0 Universal" # (see: https://choosealicense.com/licenses/cc0-1.0/) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/dino_vit/online/bach.yaml b/configs/vision/dino_vit/online/bach.yaml index 6171eda2..a72689e1 100644 --- a/configs/vision/dino_vit/online/bach.yaml +++ b/configs/vision/dino_vit/online/bach.yaml @@ -5,6 +5,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/online/bach} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -76,7 +77,7 @@ data: # The BACH dataset is distributed under the following license # Attribution-NonCommercial-NoDerivs 4.0 International license # (see: https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/dino_vit/online/crc.yaml b/configs/vision/dino_vit/online/crc.yaml index f90c0cfc..102f3b44 100644 --- a/configs/vision/dino_vit/online/crc.yaml +++ b/configs/vision/dino_vit/online/crc.yaml @@ -5,6 +5,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/online/crc} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -75,7 +76,7 @@ data: # Set `download: true` to download the dataset from https://zenodo.org/records/1214456 # The CRC dataset is distributed under the following license: "CC BY 4.0 LEGAL CODE" # (see: https://creativecommons.org/licenses/by/4.0/legalcode) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} diff --git a/configs/vision/dino_vit/online/mhist.yaml b/configs/vision/dino_vit/online/mhist.yaml index cf4c6770..62b659c1 100644 --- a/configs/vision/dino_vit/online/mhist.yaml +++ b/configs/vision/dino_vit/online/mhist.yaml @@ -5,6 +5,7 @@ trainer: default_root_dir: &LIGHTNING_ROOT ${oc.env:LIGHTNING_ROOT, logs/dino_vits16/online/mhist} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -69,14 +70,12 @@ 
data: init_args: &DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/mhist split: train - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} - target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor val: class_path: eva.vision.datasets.MHIST init_args: diff --git a/configs/vision/dino_vit/online/patch_camelyon.yaml b/configs/vision/dino_vit/online/patch_camelyon.yaml index 0f3d2e2c..f594a3ee 100644 --- a/configs/vision/dino_vit/online/patch_camelyon.yaml +++ b/configs/vision/dino_vit/online/patch_camelyon.yaml @@ -5,6 +5,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/online/patch_camelyon} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -74,14 +75,12 @@ data: # The PatchCamelyon dataset is distributed under the following license: # "Creative Commons Zero v1.0 Universal" # (see: https://choosealicense.com/licenses/cc0-1.0/) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} - target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor val: class_path: eva.vision.datasets.PatchCamelyon init_args: diff --git a/configs/vision/owkin/phikon/offline/bach.yaml b/configs/vision/owkin/phikon/offline/bach.yaml index 35fe73e6..12ad9c50 100644 --- a/configs/vision/owkin/phikon/offline/bach.yaml +++ b/configs/vision/owkin/phikon/offline/bach.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/bach} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 400 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &EMBEDDINGS_DIR ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, owkin/phikon}/bach dataloader_idx_map: @@ -75,8 +76,6 @@ data: root: *EMBEDDINGS_DIR manifest_file: manifest.csv split: train - column_mapping: - path: embedding val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -87,12 +86,12 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/bach split: train - download: false + download: ${oc.env:DOWNLOAD, false} # Set `download: true` to download the dataset from https://zenodo.org/records/3632035 # The BACH dataset is distributed under the following license # Attribution-NonCommercial-NoDerivs 4.0 International license # (see: https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/owkin/phikon/offline/camelyon16.yaml b/configs/vision/owkin/phikon/offline/camelyon16.yaml index f7aca73f..d44bbc58 100644 --- 
a/configs/vision/owkin/phikon/offline/camelyon16.yaml +++ b/configs/vision/owkin/phikon/offline/camelyon16.yaml @@ -22,7 +22,7 @@ trainer: patience: ${oc.env:PATIENCE, 10} monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/owkin/phikon/camelyon16} dataloader_idx_map: @@ -94,7 +94,7 @@ data: predict: - class_path: eva.vision.datasets.Camelyon16 init_args: &PREDICT_DATASET_ARGS - root: ${oc.env:DATA_ROOT, ./data}/camelyon16 + root: ${oc.env:DATA_ROOT, ./data/camelyon16} sampler: class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler init_args: diff --git a/configs/vision/owkin/phikon/offline/crc.yaml b/configs/vision/owkin/phikon/offline/crc.yaml index a1abcca6..c823aea6 100644 --- a/configs/vision/owkin/phikon/offline/crc.yaml +++ b/configs/vision/owkin/phikon/offline/crc.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/crc} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 24 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &EMBEDDINGS_DIR ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, owkin/phikon}/crc dataloader_idx_map: @@ -75,8 +76,6 @@ data: root: *EMBEDDINGS_DIR manifest_file: manifest.csv split: train - column_mapping: - path: embedding val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -87,11 +86,11 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/crc split: train - download: false + download: ${oc.env:DOWNLOAD, false} # Set `download: true` to download the dataset from https://zenodo.org/records/1214456 # The CRC dataset is distributed under the following license: "CC BY 4.0 LEGAL CODE" # (see: https://creativecommons.org/licenses/by/4.0/legalcode) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} diff --git a/configs/vision/owkin/phikon/offline/mhist.yaml b/configs/vision/owkin/phikon/offline/mhist.yaml index a4dbf234..f4dce943 100644 --- a/configs/vision/owkin/phikon/offline/mhist.yaml +++ b/configs/vision/owkin/phikon/offline/mhist.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/mhist} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 51 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &EMBEDDINGS_DIR ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, owkin/phikon}/mhist dataloader_idx_map: @@ -73,10 +74,10 @@ data: root: *EMBEDDINGS_DIR manifest_file: manifest.csv split: train - column_mapping: - path: embedding target_transforms: - class_path: 
eva.core.data.transforms.ArrayToFloatTensor + class_path: torchvision.transforms.v2.ToDtype + init_args: + dtype: torch.float32 val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -87,7 +88,7 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/mhist split: train - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/owkin/phikon/offline/panda.yaml b/configs/vision/owkin/phikon/offline/panda.yaml index 32194554..462c2b53 100644 --- a/configs/vision/owkin/phikon/offline/panda.yaml +++ b/configs/vision/owkin/phikon/offline/panda.yaml @@ -22,7 +22,7 @@ trainer: patience: ${oc.env:PATIENCE, 8} monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings/owkin/phikon/panda} dataloader_idx_map: @@ -93,7 +93,7 @@ data: predict: - class_path: eva.vision.datasets.PANDA init_args: &PREDICT_DATASET_ARGS - root: ${oc.env:DATA_ROOT, ./data}/panda/prostate-cancer-grade-assessment + root: ${oc.env:DATA_ROOT, ./data/panda/prostate-cancer-grade-assessment} sampler: class_path: eva.vision.data.wsi.patching.samplers.ForegroundGridSampler init_args: diff --git a/configs/vision/owkin/phikon/offline/patch_camelyon.yaml b/configs/vision/owkin/phikon/offline/patch_camelyon.yaml index be2bc4a7..8d27ba00 100644 --- a/configs/vision/owkin/phikon/offline/patch_camelyon.yaml +++ b/configs/vision/owkin/phikon/offline/patch_camelyon.yaml @@ -6,6 +6,7 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/patch_camelyon} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: + - class_path: eva.callbacks.ConfigurationLogger - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch @@ -22,7 +23,7 @@ trainer: patience: 9 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &EMBEDDINGS_DIR ${oc.env:EMBEDDINGS_ROOT, ./data/embeddings}/${oc.env:DINO_BACKBONE, owkin/phikon}/patch_camelyon dataloader_idx_map: @@ -74,10 +75,10 @@ data: root: *EMBEDDINGS_DIR manifest_file: manifest.csv split: train - column_mapping: - path: embedding target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor + class_path: torchvision.transforms.v2.ToDtype + init_args: + dtype: torch.float32 val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -93,12 +94,12 @@ data: init_args: &PREDICT_DATASET_ARGS root: ${oc.env:DATA_ROOT, ./data}/patch_camelyon split: train - download: false + download: ${oc.env:DOWNLOAD, false} # Set `download: true` to download the dataset from https://zenodo.org/records/1494286 # The PatchCamelyon dataset is distributed under the following license: # "Creative Commons Zero v1.0 Universal" # (see: https://choosealicense.com/licenses/cc0-1.0/) - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: size: ${oc.env:RESIZE_DIM, 224} diff --git a/configs/vision/tests/offline/panda.yaml b/configs/vision/tests/offline/panda.yaml index 5cbd0456..28844dd1 100644 --- a/configs/vision/tests/offline/panda.yaml +++ b/configs/vision/tests/offline/panda.yaml @@ -7,7 +7,7 @@ 
trainer: limit_train_batches: 2 limit_val_batches: 2 callbacks: - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT}/panda dataloader_idx_map: diff --git a/configs/vision/tests/offline/patch_camelyon.yaml b/configs/vision/tests/offline/patch_camelyon.yaml index b9155881..16286058 100644 --- a/configs/vision/tests/offline/patch_camelyon.yaml +++ b/configs/vision/tests/offline/patch_camelyon.yaml @@ -7,7 +7,8 @@ trainer: limit_train_batches: 2 limit_val_batches: 2 callbacks: - - class_path: eva.callbacks.EmbeddingsWriter + - class_path: eva.callbacks.ConfigurationLogger + - class_path: eva.callbacks.ClassificationEmbeddingsWriter init_args: output_dir: &DATASET_EMBEDDINGS_ROOT ${oc.env:EMBEDDINGS_ROOT}/patch_camelyon dataloader_idx_map: @@ -71,7 +72,9 @@ data: manifest_file: manifest.csv split: train target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor + class_path: torchvision.transforms.v2.ToDtype + init_args: + dtype: torch.float32 val: class_path: eva.datasets.EmbeddingsClassificationDataset init_args: @@ -83,7 +86,7 @@ data: root: ${oc.env:TESTS_ROOT, tests/eva}/assets/vision/datasets/patch_camelyon split: train download: false - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} diff --git a/configs/vision/tests/online/patch_camelyon.yaml b/configs/vision/tests/online/patch_camelyon.yaml index 52c3b466..073fb82a 100644 --- a/configs/vision/tests/online/patch_camelyon.yaml +++ b/configs/vision/tests/online/patch_camelyon.yaml @@ -42,13 +42,11 @@ data: root: ${oc.env:TESTS_ROOT, tests/eva}/assets/vision/datasets/patch_camelyon split: train download: &DOWNLOAD_DATA false - image_transforms: + transforms: class_path: eva.vision.data.transforms.common.ResizeAndCrop init_args: mean: ${oc.env:NORMALIZE_MEAN, [0.485, 0.456, 0.406]} std: ${oc.env:NORMALIZE_STD, [0.229, 0.224, 0.225]} - target_transforms: - class_path: eva.core.data.transforms.ArrayToFloatTensor val: class_path: eva.vision.datasets.PatchCamelyon init_args: diff --git a/docs/DEVELOPER_GUIDE.md b/docs/DEVELOPER_GUIDE.md index 92f562ad..a7f97bc1 100644 --- a/docs/DEVELOPER_GUIDE.md +++ b/docs/DEVELOPER_GUIDE.md @@ -17,10 +17,7 @@ Add a new dependency to the `core` submodule:
`pdm add <package>` Add a new dependency to the `vision` submodule:
-`pdm add -G vision <package>` - -After adding a new dependency, you also need to update the `pdm.lock` file:
-`pdm update` +`pdm add -G vision -G all <package>` For more information about managing dependencies please look [here](https://pdm-project.org/latest/usage/dependency/#manage-dependencies). diff --git a/docs/images/eva-logo.png b/docs/images/eva-logo.png index 7c0f7eae..c31d3ffe 100644 Binary files a/docs/images/eva-logo.png and b/docs/images/eva-logo.png differ diff --git a/docs/images/eva-stripes.png b/docs/images/eva-stripes.png index 274c1c7c..ff7528b2 100644 Binary files a/docs/images/eva-stripes.png and b/docs/images/eva-stripes.png differ diff --git a/docs/index.md b/docs/index.md index 20e637cd..cee477de 100644 --- a/docs/index.md +++ b/docs/index.md @@ -5,8 +5,11 @@ hide:
- +
+ + +

diff --git a/docs/reference/core/callbacks.md b/docs/reference/core/callbacks.md index 0e50d155..910f22ab 100644 --- a/docs/reference/core/callbacks.md +++ b/docs/reference/core/callbacks.md @@ -1,4 +1,4 @@ # Callbacks ## Writers -::: eva.core.callbacks.writers.EmbeddingsWriter \ No newline at end of file +::: eva.core.callbacks.writers.ClassificationEmbeddingsWriter \ No newline at end of file diff --git a/docs/reference/vision/data/datasets.md b/docs/reference/vision/data/datasets.md index 426df296..32a42f10 100644 --- a/docs/reference/vision/data/datasets.md +++ b/docs/reference/vision/data/datasets.md @@ -6,7 +6,6 @@ ## Classification datasets ::: eva.vision.data.datasets.BACH ::: eva.vision.data.datasets.PatchCamelyon -::: eva.vision.data.datasets.TotalSegmentatorClassification ## Segmentation datasets ::: eva.vision.data.datasets.ImageSegmentation diff --git a/docs/user-guide/advanced/model_wrappers.md b/docs/user-guide/advanced/model_wrappers.md index e3ae2dd9..957dad0a 100644 --- a/docs/user-guide/advanced/model_wrappers.md +++ b/docs/user-guide/advanced/model_wrappers.md @@ -1,7 +1,7 @@ # Model Wrappers -This document shows how to use *eva*'s [Model Wrapper API](../../../reference/core/models/networks/#wrappers) (`eva.models.networks.wrappers`) to load different model formats from a series of sources such as PyTorch Hub, HuggingFace Model Hub and ONNX. +This document shows how to use *eva*'s [Model Wrapper API](../../reference/core/models/networks.md#wrappers) (`eva.models.networks.wrappers`) to load different model formats from a series of sources such as PyTorch Hub, HuggingFace Model Hub and ONNX. ## Loading PyTorch models The *eva* framework is built on top of PyTorch Lightning and thus naturally supports loading PyTorch models. diff --git a/docs/user-guide/advanced/replicate_evaluations.md b/docs/user-guide/advanced/replicate_evaluations.md index 61c26f07..d3770586 100644 --- a/docs/user-guide/advanced/replicate_evaluations.md +++ b/docs/user-guide/advanced/replicate_evaluations.md @@ -4,7 +4,7 @@ To produce the evaluation results presented [here](../../index.md#evaluation-res Make sure to replace `` in the commands below with `bach`, `crc`, `mhist` or `patch_camelyon`. -*Note that to run the commands below you will need to first download the data. [BACH](../../datasets/bach.md), [CRC](../../datasets/crc.md) and [PatchCamelyon](../../datasets/patch_camelyon.md) provide automatic download by setting the argument `download: true` in their respective config-files. In the case of [MHIST](../../datasets/mhist.md) you will need to download the data manually by following the instructions provided [here](../../datasets/mhist.md#download-and-preprocessing).* +Note that to run the commands below you will need to first download the data. [BACH](../../datasets/bach.md), [CRC](../../datasets/crc.md) and [PatchCamelyon](../../datasets/patch_camelyon.md) provide automatic download by setting the argument `download: true` (either modify the config-files or set the environment variable `DOWNLOAD=true`). 
In the case of MHIST you will need to download the data manually by following the instructions provided [here](../../datasets/mhist.md#download-and-preprocessing). ## DINO ViT-S16 (random weights) diff --git a/docs/user-guide/getting-started/how_to_use.md b/docs/user-guide/getting-started/how_to_use.md index 496d8785..21380fd7 100644 --- a/docs/user-guide/getting-started/how_to_use.md +++ b/docs/user-guide/getting-started/how_to_use.md @@ -34,7 +34,7 @@ The setup for an *eva* run is provided in a `.yaml` config file which is defined A config file specifies the setup for the *trainer* (including callback for the model backbone), the *model* (setup of the trainable decoder) and *data* module. -The config files for the datasets and models that *eva* supports out of the box, you can find on [GitHub](https://github.com/kaiko-ai/eva/tree/main/configs). We recommend that you inspect some of them to get a better understanding of their structure and content. +The config files for the datasets and models that *eva* supports out of the box, you can find on [GitHub](https://github.com/kaiko-ai/eva/tree/0.0.2). We recommend that you inspect some of them to get a better understanding of their structure and content. ### Environment variables diff --git a/docs/user-guide/tutorials/offline_vs_online.md b/docs/user-guide/tutorials/offline_vs_online.md index 8f4f25d3..059b0f83 100644 --- a/docs/user-guide/tutorials/offline_vs_online.md +++ b/docs/user-guide/tutorials/offline_vs_online.md @@ -3,11 +3,11 @@ In this tutorial we run *eva* with the three subcommands `predict`, `fit` and `predict_fit`, and take a look at the difference between *offline* and *online* workflows. ### Before you start -If you haven't downloaded the config files yet, please download them from [GitHub](https://github.com/kaiko-ai/eva/tree/main/configs). +If you haven't downloaded the config files yet, please download them from [GitHub](https://github.com/kaiko-ai/eva/tree/0.0.2). For this tutorial we use the [BACH](../../datasets/bach.md) classification task which is available on [Zenodo](https://zenodo.org/records/3632035) and is distributed under [*Attribution-NonCommercial-ShareAlike 4.0 International*](https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode) license. -To let *eva* automatically handle the dataset download, you can open `configs/vision/dino_vit/offline/bach.yaml` and set `download: true`. Before doing so, please make sure that your use case is compliant with the dataset license. +To let *eva* automatically handle the dataset download, set `download: true` in `configs/vision/dino_vit/offline/bach.yaml` (you may also enable automatic download by setting the environment variable `DOWNLOAD=true`). Before doing so, please make sure that your use case is compliant with the dataset license.
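As a minimal sketch of the environment-variable route described above (assuming the config keeps the `download: ${oc.env:DOWNLOAD, false}` default introduced in this patch), the download could be enabled for a single run without editing the file:

```sh
# Enable the dataset download for this run only; the DOWNLOAD environment
# variable is read by the config's ${oc.env:DOWNLOAD, false} resolver.
DOWNLOAD=true eva predict_fit --config configs/vision/dino_vit/offline/bach.yaml
```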
## *Offline* evaluations diff --git a/noxfile.py b/noxfile.py index 2a60bada..916a7779 100644 --- a/noxfile.py +++ b/noxfile.py @@ -24,9 +24,6 @@ import nox -PACKAGE = "eva" -"""The name of the library.""" - PYTHON_VERSIONS = ["3.10"] """The python versions to test on.""" diff --git a/pdm.lock b/pdm.lock index 5b0fb979..8555ed7c 100644 --- a/pdm.lock +++ b/pdm.lock @@ -5,7 +5,7 @@ groups = ["default", "dev", "docs", "all", "typecheck", "lint", "vision", "test"] strategy = ["cross_platform", "inherit_metadata"] lock_version = "4.4.1" -content_hash = "sha256:f1e852d1f3aa89e7061fc859e3446b3abfc3bbd019323fa885dc81a6b5cd2659" +content_hash = "sha256:d3ac381de08b8a3051ba9c70002f748ee6c600fe9b20da760cb71fc7686d8cda" [[package]] name = "absl-py" @@ -168,7 +168,7 @@ files = [ [[package]] name = "black" -version = "24.3.0" +version = "24.4.2" requires_python = ">=3.8" summary = "The uncompromising code formatter." groups = ["dev", "lint"] @@ -182,20 +182,20 @@ dependencies = [ "typing-extensions>=4.0.1; python_version < \"3.11\"", ] files = [ - {file = "black-24.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395"}, - {file = "black-24.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995"}, - {file = "black-24.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7"}, - {file = "black-24.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0"}, - {file = "black-24.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9"}, - {file = "black-24.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597"}, - {file = "black-24.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d"}, - {file = "black-24.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5"}, - {file = "black-24.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f"}, - {file = "black-24.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11"}, - {file = "black-24.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4"}, - {file = "black-24.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5"}, - {file = "black-24.3.0-py3-none-any.whl", hash = "sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93"}, - {file = "black-24.3.0.tar.gz", hash = "sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f"}, + {file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"}, + {file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"}, + {file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"}, + {file = 
"black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"}, + {file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"}, + {file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"}, + {file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"}, + {file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"}, + {file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"}, + {file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"}, + {file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"}, + {file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"}, + {file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"}, + {file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"}, ] [[package]] @@ -597,7 +597,7 @@ files = [ [[package]] name = "h5py" -version = "3.10.0" +version = "3.11.0" requires_python = ">=3.8" summary = "Read and write HDF5 files from Python" groups = ["all", "vision"] @@ -605,26 +605,24 @@ dependencies = [ "numpy>=1.17.3", ] files = [ - {file = "h5py-3.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b963fb772964fc1d1563c57e4e2e874022ce11f75ddc6df1a626f42bd49ab99f"}, - {file = "h5py-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:012ab448590e3c4f5a8dd0f3533255bc57f80629bf7c5054cf4c87b30085063c"}, - {file = "h5py-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:781a24263c1270a62cd67be59f293e62b76acfcc207afa6384961762bb88ea03"}, - {file = "h5py-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f42e6c30698b520f0295d70157c4e202a9e402406f50dc08f5a7bc416b24e52d"}, - {file = "h5py-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:93dd840bd675787fc0b016f7a05fc6efe37312a08849d9dd4053fd0377b1357f"}, - {file = "h5py-3.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2381e98af081b6df7f6db300cd88f88e740649d77736e4b53db522d8874bf2dc"}, - {file = "h5py-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:667fe23ab33d5a8a6b77970b229e14ae3bb84e4ea3382cc08567a02e1499eedd"}, - {file = "h5py-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90286b79abd085e4e65e07c1bd7ee65a0f15818ea107f44b175d2dfe1a4674b7"}, - {file = "h5py-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c013d2e79c00f28ffd0cc24e68665ea03ae9069e167087b2adb5727d2736a52"}, - {file = "h5py-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:92273ce69ae4983dadb898fd4d3bea5eb90820df953b401282ee69ad648df684"}, - {file = "h5py-3.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c97d03f87f215e7759a354460fb4b0d0f27001450b18b23e556e7856a0b21c3"}, - {file = "h5py-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:86df4c2de68257b8539a18646ceccdcf2c1ce6b1768ada16c8dcfb489eafae20"}, - {file = "h5py-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba9ab36be991119a3ff32d0c7cbe5faf9b8d2375b5278b2aea64effbeba66039"}, - {file = "h5py-3.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:2c8e4fda19eb769e9a678592e67eaec3a2f069f7570c82d2da909c077aa94339"}, - {file = "h5py-3.10.0.tar.gz", hash = "sha256:d93adc48ceeb33347eb24a634fb787efc7ae4644e6ea4ba733d099605045c049"}, + {file = "h5py-3.11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1625fd24ad6cfc9c1ccd44a66dac2396e7ee74940776792772819fc69f3a3731"}, + {file = "h5py-3.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c072655ad1d5fe9ef462445d3e77a8166cbfa5e599045f8aa3c19b75315f10e5"}, + {file = "h5py-3.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77b19a40788e3e362b54af4dcf9e6fde59ca016db2c61360aa30b47c7b7cef00"}, + {file = "h5py-3.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:ef4e2f338fc763f50a8113890f455e1a70acd42a4d083370ceb80c463d803972"}, + {file = "h5py-3.11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bbd732a08187a9e2a6ecf9e8af713f1d68256ee0f7c8b652a32795670fb481ba"}, + {file = "h5py-3.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75bd7b3d93fbeee40860fd70cdc88df4464e06b70a5ad9ce1446f5f32eb84007"}, + {file = "h5py-3.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c416f8eb0daae39dabe71415cb531f95dce2d81e1f61a74537a50c63b28ab3"}, + {file = "h5py-3.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:083e0329ae534a264940d6513f47f5ada617da536d8dccbafc3026aefc33c90e"}, + {file = "h5py-3.11.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a76cae64080210389a571c7d13c94a1a6cf8cb75153044fd1f822a962c97aeab"}, + {file = "h5py-3.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3736fe21da2b7d8a13fe8fe415f1272d2a1ccdeff4849c1421d2fb30fd533bc"}, + {file = "h5py-3.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa6ae84a14103e8dc19266ef4c3e5d7c00b68f21d07f2966f0ca7bdb6c2761fb"}, + {file = "h5py-3.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:21dbdc5343f53b2e25404673c4f00a3335aef25521bd5fa8c707ec3833934892"}, + {file = "h5py-3.11.0.tar.gz", hash = "sha256:7b7e8f78072a2edec87c9836f25f34203fd492a4475709a18b417a33cfb21fa9"}, ] [[package]] name = "huggingface-hub" -version = "0.21.4" +version = "0.23.2" requires_python = ">=3.8.0" summary = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" groups = ["all", "default", "vision"] @@ -638,8 +636,8 @@ dependencies = [ "typing-extensions>=3.7.4.3", ] files = [ - {file = "huggingface_hub-0.21.4-py3-none-any.whl", hash = "sha256:df37c2c37fc6c82163cdd8a67ede261687d80d1e262526d6c0ce73b6b3630a7b"}, - {file = "huggingface_hub-0.21.4.tar.gz", hash = "sha256:e1f4968c93726565a80edf6dc309763c7b546d0cfe79aa221206034d50155531"}, + {file = "huggingface_hub-0.23.2-py3-none-any.whl", hash = "sha256:48727a16e704d409c4bb5913613308499664f22a99743435dc3a13b23c485827"}, + {file = "huggingface_hub-0.23.2.tar.gz", hash = "sha256:f6829b62d5fdecb452a76fdbec620cba4c1573655a8d710c1df71735fd9edbd2"}, ] [[package]] @@ -703,6 +701,20 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "intel-openmp" +version = "2021.4.0" +summary = "IntelĀ® OpenMP* Runtime Library" +groups = ["all", "default", "vision"] +marker = 
"platform_system == \"Windows\"" +files = [ + {file = "intel_openmp-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:41c01e266a7fdb631a7609191709322da2bbf24b252ba763f125dd651bcc7675"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:3b921236a38384e2016f0f3d65af6732cf2c12918087128a9163225451e776f2"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:e2240ab8d01472fed04f3544a878cda5da16c26232b7ea1b59132dbfb48b186e"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:6e863d8fd3d7e8ef389d52cf97a50fe2afe1a19247e8c0d168ce021546f96fc9"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:eef4c8bcc8acefd7f5cd3b9384dbf73d59e2c99fc56545712ded913f43c4a94f"}, +] + [[package]] name = "isort" version = "5.13.2" @@ -730,7 +742,7 @@ files = [ [[package]] name = "jsonargparse" -version = "4.27.6" +version = "4.28.0" requires_python = ">=3.7" summary = "Implement minimal boilerplate CLIs derived from type hints and parse from command line, config files and environment variables." groups = ["default"] @@ -738,47 +750,47 @@ dependencies = [ "PyYAML>=3.13", ] files = [ - {file = "jsonargparse-4.27.6-py3-none-any.whl", hash = "sha256:f429b4a1b1fe92ef2e3e531615f53e81720a424f3f3181eca7a28c994515fc15"}, - {file = "jsonargparse-4.27.6.tar.gz", hash = "sha256:ebd2e0a4faef85a075bb6ef79c6b2f03f57a5f8e3db26c911b55518a1bca68ad"}, + {file = "jsonargparse-4.28.0-py3-none-any.whl", hash = "sha256:9dcda241349547e8035c630d51de73b8b4ba67bdc2b014d7f76734d404e82518"}, + {file = "jsonargparse-4.28.0.tar.gz", hash = "sha256:ac835a290ef18cc2a5309e6bfa8ada9c5d63f46ff18701583fc8f3e95314679c"}, ] [[package]] name = "jsonargparse" -version = "4.27.6" +version = "4.28.0" extras = ["omegaconf"] requires_python = ">=3.7" summary = "Implement minimal boilerplate CLIs derived from type hints and parse from command line, config files and environment variables." groups = ["default"] dependencies = [ - "jsonargparse==4.27.6", + "jsonargparse==4.28", "omegaconf>=2.1.1", ] files = [ - {file = "jsonargparse-4.27.6-py3-none-any.whl", hash = "sha256:f429b4a1b1fe92ef2e3e531615f53e81720a424f3f3181eca7a28c994515fc15"}, - {file = "jsonargparse-4.27.6.tar.gz", hash = "sha256:ebd2e0a4faef85a075bb6ef79c6b2f03f57a5f8e3db26c911b55518a1bca68ad"}, + {file = "jsonargparse-4.28.0-py3-none-any.whl", hash = "sha256:9dcda241349547e8035c630d51de73b8b4ba67bdc2b014d7f76734d404e82518"}, + {file = "jsonargparse-4.28.0.tar.gz", hash = "sha256:ac835a290ef18cc2a5309e6bfa8ada9c5d63f46ff18701583fc8f3e95314679c"}, ] [[package]] name = "lightning" -version = "2.2.4" +version = "2.3.0.dev20240609" requires_python = ">=3.8" summary = "The Deep Learning framework to train, deploy, and ship AI products Lightning fast." 
groups = ["default"] dependencies = [ "PyYAML<8.0,>=5.4", - "fsspec[http]<2025.0,>=2022.5.0", + "fsspec[http]<2026.0,>=2022.5.0", "lightning-utilities<2.0,>=0.8.0", "numpy<3.0,>=1.17.2", "packaging<25.0,>=20.0", "pytorch-lightning", - "torch<4.0,>=1.13.0", + "torch<4.0,>=2.0.0", "torchmetrics<3.0,>=0.7.0", "tqdm<6.0,>=4.57.0", "typing-extensions<6.0,>=4.4.0", ] files = [ - {file = "lightning-2.2.4-py3-none-any.whl", hash = "sha256:b44cb8692253f2719b2f84237e94ff84451fe219922c7f04447b52524471379e"}, - {file = "lightning-2.2.4.tar.gz", hash = "sha256:4cc3fb3edf04fcd63c0ecf75087d2fa06163759fc8c1fc500b16404ac1854f77"}, + {file = "lightning-2.3.0.dev20240609-py3-none-any.whl", hash = "sha256:15fb839cba66463239870bfba13b33b081b4b99930e79b0914cac20d4e9d6d42"}, + {file = "lightning-2.3.0.dev20240609.tar.gz", hash = "sha256:e3a3bd45058eede98202b52a13f339bf730ae3368b935eccad7e19a604ac5572"}, ] [[package]] @@ -825,7 +837,7 @@ files = [ [[package]] name = "markdown-exec" -version = "1.8.0" +version = "1.8.3" requires_python = ">=3.8" summary = "Utilities to execute code blocks in Markdown files." groups = ["dev", "docs"] @@ -833,8 +845,8 @@ dependencies = [ "pymdown-extensions>=9", ] files = [ - {file = "markdown_exec-1.8.0-py3-none-any.whl", hash = "sha256:e80cb766eff8d0bcd1cdd133dba58223b42edbd1b7b9672481c2189572401bff"}, - {file = "markdown_exec-1.8.0.tar.gz", hash = "sha256:0a932312f0ca89b82150e1638e84febb90eadd410dfd2417f05759c06deed727"}, + {file = "markdown_exec-1.8.3-py3-none-any.whl", hash = "sha256:77ebbaa4a20abb167fad0fa8a0037567121b9cf262349dbed84900ce96058af5"}, + {file = "markdown_exec-1.8.3.tar.gz", hash = "sha256:5e16a70f9f2c97738f128a88db2951fe3d8bb2bdc2b4809fae7fca0123ef3ae4"}, ] [[package]] @@ -915,7 +927,7 @@ files = [ [[package]] name = "mike" -version = "2.0.0" +version = "2.1.1" summary = "Manage multiple versions of your MkDocs-powered documentation" groups = ["dev", "docs"] dependencies = [ @@ -924,18 +936,19 @@ dependencies = [ "jinja2>=2.7", "mkdocs>=1.0", "pyparsing>=3.0", + "pyyaml-env-tag", "pyyaml>=5.1", "verspec", ] files = [ - {file = "mike-2.0.0-py3-none-any.whl", hash = "sha256:87f496a65900f93ba92d72940242b65c86f3f2f82871bc60ebdcffc91fad1d9e"}, - {file = "mike-2.0.0.tar.gz", hash = "sha256:566f1cab1a58cc50b106fb79ea2f1f56e7bfc8b25a051e95e6eaee9fba0922de"}, + {file = "mike-2.1.1-py3-none-any.whl", hash = "sha256:0b1d01a397a423284593eeb1b5f3194e37169488f929b860c9bfe95c0d5efb79"}, + {file = "mike-2.1.1.tar.gz", hash = "sha256:f39ed39f3737da83ad0adc33e9f885092ed27f8c9e7ff0523add0480352a2c22"}, ] [[package]] name = "mkdocs" -version = "1.5.3" -requires_python = ">=3.7" +version = "1.6.0" +requires_python = ">=3.8" summary = "Project documentation with Markdown." 
groups = ["dev", "docs"] dependencies = [ @@ -943,19 +956,19 @@ dependencies = [ "colorama>=0.4; platform_system == \"Windows\"", "ghp-import>=1.0", "jinja2>=2.11.1", - "markdown>=3.2.1", + "markdown>=3.3.6", "markupsafe>=2.0.1", "mergedeep>=1.3.4", + "mkdocs-get-deps>=0.2.0", "packaging>=20.5", "pathspec>=0.11.1", - "platformdirs>=2.2.0", "pyyaml-env-tag>=0.1", "pyyaml>=5.1", "watchdog>=2.0", ] files = [ - {file = "mkdocs-1.5.3-py3-none-any.whl", hash = "sha256:3b3a78e736b31158d64dbb2f8ba29bd46a379d0c6e324c2246c3bc3d2189cfc1"}, - {file = "mkdocs-1.5.3.tar.gz", hash = "sha256:eb7c99214dcb945313ba30426c2451b735992c73c2e10838f76d09e39ff4d0e2"}, + {file = "mkdocs-1.6.0-py3-none-any.whl", hash = "sha256:1eb5cb7676b7d89323e62b56235010216319217d4af5ddc543a91beb8d125ea7"}, + {file = "mkdocs-1.6.0.tar.gz", hash = "sha256:a73f735824ef83a4f3bcb7a231dcab23f5a838f88b7efc54a0eef5fbdbc3c512"}, ] [[package]] @@ -974,9 +987,25 @@ files = [ {file = "mkdocs_autorefs-1.0.1.tar.gz", hash = "sha256:f684edf847eced40b570b57846b15f0bf57fb93ac2c510450775dcf16accb971"}, ] +[[package]] +name = "mkdocs-get-deps" +version = "0.2.0" +requires_python = ">=3.8" +summary = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" +groups = ["dev", "docs"] +dependencies = [ + "mergedeep>=1.3.4", + "platformdirs>=2.2.0", + "pyyaml>=5.1", +] +files = [ + {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, + {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, +] + [[package]] name = "mkdocs-material" -version = "9.5.14" +version = "9.5.26" requires_python = ">=3.8" summary = "Documentation that simply works" groups = ["dev", "docs"] @@ -986,7 +1015,7 @@ dependencies = [ "jinja2~=3.0", "markdown~=3.2", "mkdocs-material-extensions~=1.3", - "mkdocs~=1.5.3", + "mkdocs~=1.6", "paginate~=0.5", "pygments~=2.16", "pymdown-extensions~=10.2", @@ -994,8 +1023,8 @@ dependencies = [ "requests~=2.26", ] files = [ - {file = "mkdocs_material-9.5.14-py3-none-any.whl", hash = "sha256:a45244ac221fda46ecf8337f00ec0e5cb5348ab9ffb203ca2a0c313b0d4dbc27"}, - {file = "mkdocs_material-9.5.14.tar.gz", hash = "sha256:2a1f8e67cda2587ab93ecea9ba42d0ca61d1d7b5fad8cf690eeaeb39dcd4b9af"}, + {file = "mkdocs_material-9.5.26-py3-none-any.whl", hash = "sha256:5d01fb0aa1c7946a1e3ae8689aa2b11a030621ecb54894e35aabb74c21016312"}, + {file = "mkdocs_material-9.5.26.tar.gz", hash = "sha256:56aeb91d94cffa43b6296fa4fbf0eb7c840136e563eecfd12c2d9e92e50ba326"}, ] [[package]] @@ -1020,6 +1049,7 @@ dependencies = [ ] files = [ {file = "mkdocs-redirects-1.2.1.tar.gz", hash = "sha256:9420066d70e2a6bb357adf86e67023dcdca1857f97f07c7fe450f8f1fb42f861"}, + {file = "mkdocs_redirects-1.2.1-py3-none-any.whl", hash = "sha256:497089f9e0219e7389304cffefccdfa1cac5ff9509f2cb706f4c9b221726dffb"}, ] [[package]] @@ -1035,7 +1065,7 @@ files = [ [[package]] name = "mkdocstrings" -version = "0.24.1" +version = "0.25.1" requires_python = ">=3.8" summary = "Automatic documentation from sources, for MkDocs." 
groups = ["dev", "docs"] @@ -1050,8 +1080,8 @@ dependencies = [ "pymdown-extensions>=6.3", ] files = [ - {file = "mkdocstrings-0.24.1-py3-none-any.whl", hash = "sha256:b4206f9a2ca8a648e222d5a0ca1d36ba7dee53c88732818de183b536f9042b5d"}, - {file = "mkdocstrings-0.24.1.tar.gz", hash = "sha256:cc83f9a1c8724fc1be3c2fa071dd73d91ce902ef6a79710249ec8d0ee1064401"}, + {file = "mkdocstrings-0.25.1-py3-none-any.whl", hash = "sha256:da01fcc2670ad61888e8fe5b60afe9fee5781017d67431996832d63e887c2e51"}, + {file = "mkdocstrings-0.25.1.tar.gz", hash = "sha256:c3a2515f31577f311a9ee58d089e4c51fc6046dbd9e9b4c3de4c3194667fe9bf"}, ] [[package]] @@ -1072,18 +1102,36 @@ files = [ [[package]] name = "mkdocstrings" -version = "0.24.1" +version = "0.25.1" extras = ["python"] requires_python = ">=3.8" summary = "Automatic documentation from sources, for MkDocs." groups = ["dev", "docs"] dependencies = [ "mkdocstrings-python>=0.5.2", - "mkdocstrings==0.24.1", + "mkdocstrings==0.25.1", +] +files = [ + {file = "mkdocstrings-0.25.1-py3-none-any.whl", hash = "sha256:da01fcc2670ad61888e8fe5b60afe9fee5781017d67431996832d63e887c2e51"}, + {file = "mkdocstrings-0.25.1.tar.gz", hash = "sha256:c3a2515f31577f311a9ee58d089e4c51fc6046dbd9e9b4c3de4c3194667fe9bf"}, +] + +[[package]] +name = "mkl" +version = "2021.4.0" +summary = "IntelĀ® oneAPI Math Kernel Library" +groups = ["all", "default", "vision"] +marker = "platform_system == \"Windows\"" +dependencies = [ + "intel-openmp==2021.*", + "tbb==2021.*", ] files = [ - {file = "mkdocstrings-0.24.1-py3-none-any.whl", hash = "sha256:b4206f9a2ca8a648e222d5a0ca1d36ba7dee53c88732818de183b536f9042b5d"}, - {file = "mkdocstrings-0.24.1.tar.gz", hash = "sha256:cc83f9a1c8724fc1be3c2fa071dd73d91ce902ef6a79710249ec8d0ee1064401"}, + {file = "mkl-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:67460f5cd7e30e405b54d70d1ed3ca78118370b65f7327d495e9c8847705e2fb"}, + {file = "mkl-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:636d07d90e68ccc9630c654d47ce9fdeb036bb46e2b193b3a9ac8cfea683cce5"}, + {file = "mkl-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:398dbf2b0d12acaf54117a5210e8f191827f373d362d796091d161f610c1ebfb"}, + {file = "mkl-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:439c640b269a5668134e3dcbcea4350459c4a8bc46469669b2d67e07e3d330e8"}, + {file = "mkl-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:ceef3cafce4c009dd25f65d7ad0d833a0fbadc3d8903991ec92351fe5de1e718"}, ] [[package]] @@ -1205,7 +1253,7 @@ files = [ [[package]] name = "nox" -version = "2024.3.2" +version = "2024.4.15" requires_python = ">=3.7" summary = "Flexible test automation." 
groups = ["dev", "typecheck"] @@ -1213,11 +1261,12 @@ dependencies = [ "argcomplete<4.0,>=1.9.4", "colorlog<7.0.0,>=2.6.1", "packaging>=20.9", + "tomli>=1; python_version < \"3.11\"", "virtualenv>=20.14.1", ] files = [ - {file = "nox-2024.3.2-py3-none-any.whl", hash = "sha256:e53514173ac0b98dd47585096a55572fe504fecede58ced708979184d05440be"}, - {file = "nox-2024.3.2.tar.gz", hash = "sha256:f521ae08a15adbf5e11f16cb34e8d0e6ea521e0b92868f684e91677deb974553"}, + {file = "nox-2024.4.15-py3-none-any.whl", hash = "sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565"}, + {file = "nox-2024.4.15.tar.gz", hash = "sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f"}, ] [[package]] @@ -1377,13 +1426,14 @@ files = [ [[package]] name = "nvidia-nccl-cu12" -version = "2.19.3" +version = "2.20.5" requires_python = ">=3" summary = "NVIDIA Collective Communication Library (NCCL) Runtime" groups = ["all", "default", "vision"] marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ - {file = "nvidia_nccl_cu12-2.19.3-py3-none-manylinux1_x86_64.whl", hash = "sha256:a9734707a2c96443331c1e48c717024aa6678a0e2a4cb66b2c364d18cee6b48d"}, + {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01"}, + {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56"}, ] [[package]] @@ -1428,7 +1478,7 @@ files = [ [[package]] name = "onnx" -version = "1.16.0" +version = "1.16.1" requires_python = ">=3.8" summary = "Open Neural Network Exchange" groups = ["default"] @@ -1437,61 +1487,58 @@ dependencies = [ "protobuf>=3.20.2", ] files = [ - {file = "onnx-1.16.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9eadbdce25b19d6216f426d6d99b8bc877a65ed92cbef9707751c6669190ba4f"}, - {file = "onnx-1.16.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:034ae21a2aaa2e9c14119a840d2926d213c27aad29e5e3edaa30145a745048e1"}, - {file = "onnx-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec22a43d74eb1f2303373e2fbe7fbcaa45fb225f4eb146edfed1356ada7a9aea"}, - {file = "onnx-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:298f28a2b5ac09145fa958513d3d1e6b349ccf86a877dbdcccad57713fe360b3"}, - {file = "onnx-1.16.0-cp310-cp310-win32.whl", hash = "sha256:66300197b52beca08bc6262d43c103289c5d45fde43fb51922ed1eb83658cf0c"}, - {file = "onnx-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:ae0029f5e47bf70a1a62e7f88c80bca4ef39b844a89910039184221775df5e43"}, - {file = "onnx-1.16.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:f51179d4af3372b4f3800c558d204b592c61e4b4a18b8f61e0eea7f46211221a"}, - {file = "onnx-1.16.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:5202559070afec5144332db216c20f2fff8323cf7f6512b0ca11b215eacc5bf3"}, - {file = "onnx-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77579e7c15b4df39d29465b216639a5f9b74026bdd9e4b6306cd19a32dcfe67c"}, - {file = "onnx-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e60ca76ac24b65c25860d0f2d2cdd96d6320d062a01dd8ce87c5743603789b8"}, - {file = "onnx-1.16.0-cp311-cp311-win32.whl", hash = "sha256:81b4ee01bc554e8a2b11ac6439882508a5377a1c6b452acd69a1eebb83571117"}, - {file = "onnx-1.16.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:7449241e70b847b9c3eb8dae622df8c1b456d11032a9d7e26e0ee8a698d5bf86"}, - {file = "onnx-1.16.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:03a627488b1a9975d95d6a55582af3e14c7f3bb87444725b999935ddd271d352"}, - {file = "onnx-1.16.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:c392faeabd9283ee344ccb4b067d1fea9dfc614fa1f0de7c47589efd79e15e78"}, - {file = "onnx-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0efeb46985de08f0efe758cb54ad3457e821a05c2eaf5ba2ccb8cd1602c08084"}, - {file = "onnx-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddf14a3d32234f23e44abb73a755cb96a423fac7f004e8f046f36b10214151ee"}, - {file = "onnx-1.16.0-cp312-cp312-win32.whl", hash = "sha256:62a2e27ae8ba5fc9b4a2620301446a517b5ffaaf8566611de7a7c2160f5bcf4c"}, - {file = "onnx-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:3e0860fea94efde777e81a6f68f65761ed5e5f3adea2e050d7fbe373a9ae05b3"}, - {file = "onnx-1.16.0.tar.gz", hash = "sha256:237c6987c6c59d9f44b6136f5819af79574f8d96a760a1fa843bede11f3822f7"}, + {file = "onnx-1.16.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:bb2d392e5b7060082c2fb38eb5c44f67eb34ff5f0681bd6f45beff9abc6f7094"}, + {file = "onnx-1.16.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15abf94a7868eed6db15a8b5024ba570c891cae77ca4d0e7258dabdad76980df"}, + {file = "onnx-1.16.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6251910e554f811fdd070164b0bc76d76b067b95576cb9dad4d52ae64fe014b5"}, + {file = "onnx-1.16.1-cp310-cp310-win32.whl", hash = "sha256:c11e3b15eee46cd20767e505cc3ba97457ef5ac93c3e459cdfb77943ff8fe9a7"}, + {file = "onnx-1.16.1-cp310-cp310-win_amd64.whl", hash = "sha256:b3d10405706807ec2ef493b2a78519fa0264cf190363e89478585aac1179b596"}, + {file = "onnx-1.16.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:006ba5059c85ce43e89a1486cc0276d0f1a8ec9c6efd1a9334fd3fa0f6e33b64"}, + {file = "onnx-1.16.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1521ea7cd3497ecaf57d3b5e72d637ca5ebca632122a0806a9df99bedbeecdf8"}, + {file = "onnx-1.16.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45cf20421aeac03872bea5fd6ebf92abe15c4d1461a2572eb839add5059e2a09"}, + {file = "onnx-1.16.1-cp311-cp311-win32.whl", hash = "sha256:f98e275b4f46a617a9c527e60c02531eae03cf67a04c26db8a1c20acee539533"}, + {file = "onnx-1.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:95aa20aa65a9035d7543e81713e8b0f611e213fc02171959ef4ee09311d1bf28"}, + {file = "onnx-1.16.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:32e11d39bee04f927fab09f74c46cf76584094462311bab1aca9ccdae6ed3366"}, + {file = "onnx-1.16.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8884bf53b552873c0c9b072cb8625e7d4e8f3cc0529191632d24e3de58a3b93a"}, + {file = "onnx-1.16.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:595b2830093f81361961295f7b0ebb6000423bcd04123d516d081c306002e387"}, + {file = "onnx-1.16.1-cp312-cp312-win32.whl", hash = "sha256:2fde4dd5bc278b3fc8148f460bce8807b2874c66f48529df9444cdbc9ecf456b"}, + {file = "onnx-1.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:e69ad8c110d8c37d759cad019d498fdf3fd24e0bfaeb960e52fed0469a5d2974"}, + {file = "onnx-1.16.1.tar.gz", hash = "sha256:8299193f0f2a3849bfc069641aa8e4f93696602da8d165632af8ee48ec7556b6"}, ] [[package]] name = "onnxruntime" -version = "1.17.1" +version = "1.18.0" summary = "ONNX Runtime is a runtime 
accelerator for Machine Learning models" groups = ["default"] dependencies = [ "coloredlogs", "flatbuffers", - "numpy>=1.21.6", + "numpy>=1.26.0", "packaging", "protobuf", "sympy", ] files = [ - {file = "onnxruntime-1.17.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:d43ac17ac4fa3c9096ad3c0e5255bb41fd134560212dc124e7f52c3159af5d21"}, - {file = "onnxruntime-1.17.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55b5e92a4c76a23981c998078b9bf6145e4fb0b016321a8274b1607bd3c6bd35"}, - {file = "onnxruntime-1.17.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ebbcd2bc3a066cf54e6f18c75708eb4d309ef42be54606d22e5bdd78afc5b0d7"}, - {file = "onnxruntime-1.17.1-cp310-cp310-win32.whl", hash = "sha256:5e3716b5eec9092e29a8d17aab55e737480487deabfca7eac3cd3ed952b6ada9"}, - {file = "onnxruntime-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:fbb98cced6782ae1bb799cc74ddcbbeeae8819f3ad1d942a74d88e72b6511337"}, - {file = "onnxruntime-1.17.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:36fd6f87a1ecad87e9c652e42407a50fb305374f9a31d71293eb231caae18784"}, - {file = "onnxruntime-1.17.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99a8bddeb538edabc524d468edb60ad4722cff8a49d66f4e280c39eace70500b"}, - {file = "onnxruntime-1.17.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd7fddb4311deb5a7d3390cd8e9b3912d4d963efbe4dfe075edbaf18d01c024e"}, - {file = "onnxruntime-1.17.1-cp311-cp311-win32.whl", hash = "sha256:606a7cbfb6680202b0e4f1890881041ffc3ac6e41760a25763bd9fe146f0b335"}, - {file = "onnxruntime-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:53e4e06c0a541696ebdf96085fd9390304b7b04b748a19e02cf3b35c869a1e76"}, - {file = "onnxruntime-1.17.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:40f08e378e0f85929712a2b2c9b9a9cc400a90c8a8ca741d1d92c00abec60843"}, - {file = "onnxruntime-1.17.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ac79da6d3e1bb4590f1dad4bb3c2979d7228555f92bb39820889af8b8e6bd472"}, - {file = "onnxruntime-1.17.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ae9ba47dc099004e3781f2d0814ad710a13c868c739ab086fc697524061695ea"}, - {file = "onnxruntime-1.17.1-cp312-cp312-win32.whl", hash = "sha256:2dff1a24354220ac30e4a4ce2fb1df38cb1ea59f7dac2c116238d63fe7f4c5ff"}, - {file = "onnxruntime-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:6226a5201ab8cafb15e12e72ff2a4fc8f50654e8fa5737c6f0bd57c5ff66827e"}, + {file = "onnxruntime-1.18.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:5a3b7993a5ecf4a90f35542a4757e29b2d653da3efe06cdd3164b91167bbe10d"}, + {file = "onnxruntime-1.18.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:15b944623b2cdfe7f7945690bfb71c10a4531b51997c8320b84e7b0bb59af902"}, + {file = "onnxruntime-1.18.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2e61ce5005118064b1a0ed73ebe936bc773a102f067db34108ea6c64dd62a179"}, + {file = "onnxruntime-1.18.0-cp310-cp310-win32.whl", hash = "sha256:a4fc8a2a526eb442317d280610936a9f73deece06c7d5a91e51570860802b93f"}, + {file = "onnxruntime-1.18.0-cp310-cp310-win_amd64.whl", hash = "sha256:71ed219b768cab004e5cd83e702590734f968679bf93aa488c1a7ffbe6e220c3"}, + {file = "onnxruntime-1.18.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:3d24bd623872a72a7fe2f51c103e20fcca2acfa35d48f2accd6be1ec8633d960"}, + {file = 
"onnxruntime-1.18.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f15e41ca9b307a12550bfd2ec93f88905d9fba12bab7e578f05138ad0ae10d7b"}, + {file = "onnxruntime-1.18.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f45ca2887f62a7b847d526965686b2923efa72538c89b7703c7b3fe970afd59"}, + {file = "onnxruntime-1.18.0-cp311-cp311-win32.whl", hash = "sha256:9e24d9ecc8781323d9e2eeda019b4b24babc4d624e7d53f61b1fe1a929b0511a"}, + {file = "onnxruntime-1.18.0-cp311-cp311-win_amd64.whl", hash = "sha256:f8608398976ed18aef450d83777ff6f77d0b64eced1ed07a985e1a7db8ea3771"}, + {file = "onnxruntime-1.18.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:f1d79941f15fc40b1ee67738b2ca26b23e0181bf0070b5fb2984f0988734698f"}, + {file = "onnxruntime-1.18.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99e8caf3a8565c853a22d323a3eebc2a81e3de7591981f085a4f74f7a60aab2d"}, + {file = "onnxruntime-1.18.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:498d2b8380635f5e6ebc50ec1b45f181588927280f32390fb910301d234f97b8"}, + {file = "onnxruntime-1.18.0-cp312-cp312-win32.whl", hash = "sha256:ba7cc0ce2798a386c082aaa6289ff7e9bedc3dee622eef10e74830cff200a72e"}, + {file = "onnxruntime-1.18.0-cp312-cp312-win_amd64.whl", hash = "sha256:1fa175bd43f610465d5787ae06050c81f7ce09da2bf3e914eb282cb8eab363ef"}, ] [[package]] name = "opencv-python-headless" -version = "4.9.0.80" +version = "4.10.0.82" requires_python = ">=3.6" summary = "Wrapper package for OpenCV python bindings." groups = ["all", "vision"] @@ -1506,13 +1553,13 @@ dependencies = [ "numpy>=1.26.0; python_version >= \"3.12\"", ] files = [ - {file = "opencv-python-headless-4.9.0.80.tar.gz", hash = "sha256:71a4cd8cf7c37122901d8e81295db7fb188730e33a0e40039a4e59c1030b0958"}, - {file = "opencv_python_headless-4.9.0.80-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:2ea8a2edc4db87841991b2fbab55fc07b97ecb602e0f47d5d485bd75cee17c1a"}, - {file = "opencv_python_headless-4.9.0.80-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:e0ee54e27be493e8f7850847edae3128e18b540dac1d7b2e4001b8944e11e1c6"}, - {file = "opencv_python_headless-4.9.0.80-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57ce2865e8fec431c6f97a81e9faaf23fa5be61011d0a75ccf47a3c0d65fa73d"}, - {file = "opencv_python_headless-4.9.0.80-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:976656362d68d9f40a5c66f83901430538002465f7db59142784f3893918f3df"}, - {file = "opencv_python_headless-4.9.0.80-cp37-abi3-win32.whl", hash = "sha256:11e3849d83e6651d4e7699aadda9ec7ed7c38957cbbcb99db074f2a2d2de9670"}, - {file = "opencv_python_headless-4.9.0.80-cp37-abi3-win_amd64.whl", hash = "sha256:a8056c2cb37cd65dfcdf4153ca16f7362afcf3a50d600d6bb69c660fc61ee29c"}, + {file = "opencv-python-headless-4.10.0.82.tar.gz", hash = "sha256:de9e742c1b9540816fbd115b0b03841d41ed0c65566b0d7a5371f98b131b7e6d"}, + {file = "opencv_python_headless-4.10.0.82-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:a09ed50ba21cc5bf5d436cb0e784ad09c692d6b1d1454252772f6c8f2c7b4088"}, + {file = "opencv_python_headless-4.10.0.82-cp37-abi3-macosx_12_0_x86_64.whl", hash = "sha256:977a5fd21e1fe0d3d2134887db4441f8725abeae95150126302f31fcd9f548fa"}, + {file = "opencv_python_headless-4.10.0.82-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db4ec6755838b0be12510bfc9ffb014779c612418f11f4f7e6f505c36124a3aa"}, + {file = 
"opencv_python_headless-4.10.0.82-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10a37fa5276967ecf6eb297295b16b28b7a2eb3b568ca0ee469fb1a5954de298"}, + {file = "opencv_python_headless-4.10.0.82-cp37-abi3-win32.whl", hash = "sha256:94736e9b322d13db4768fd35588ad5e8995e78e207263076bfbee18aac835ad5"}, + {file = "opencv_python_headless-4.10.0.82-cp37-abi3-win_amd64.whl", hash = "sha256:c1822fa23d1641c0249ed5eb906f4c385f7959ff1bd601a776d56b0c18914af4"}, ] [[package]] @@ -1556,41 +1603,41 @@ files = [ [[package]] name = "pandas" -version = "2.2.1" +version = "2.2.2" requires_python = ">=3.9" summary = "Powerful data structures for data analysis, time series, and statistics" groups = ["default"] dependencies = [ - "numpy<2,>=1.22.4; python_version < \"3.11\"", - "numpy<2,>=1.23.2; python_version == \"3.11\"", - "numpy<2,>=1.26.0; python_version >= \"3.12\"", + "numpy>=1.22.4; python_version < \"3.11\"", + "numpy>=1.23.2; python_version == \"3.11\"", + "numpy>=1.26.0; python_version >= \"3.12\"", "python-dateutil>=2.8.2", "pytz>=2020.1", "tzdata>=2022.7", ] files = [ - {file = "pandas-2.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8df8612be9cd1c7797c93e1c5df861b2ddda0b48b08f2c3eaa0702cf88fb5f88"}, - {file = "pandas-2.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0f573ab277252ed9aaf38240f3b54cfc90fff8e5cab70411ee1d03f5d51f3944"}, - {file = "pandas-2.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f02a3a6c83df4026e55b63c1f06476c9aa3ed6af3d89b4f04ea656ccdaaaa359"}, - {file = "pandas-2.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c38ce92cb22a4bea4e3929429aa1067a454dcc9c335799af93ba9be21b6beb51"}, - {file = "pandas-2.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c2ce852e1cf2509a69e98358e8458775f89599566ac3775e70419b98615f4b06"}, - {file = "pandas-2.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53680dc9b2519cbf609c62db3ed7c0b499077c7fefda564e330286e619ff0dd9"}, - {file = "pandas-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:94e714a1cca63e4f5939cdce5f29ba8d415d85166be3441165edd427dc9f6bc0"}, - {file = "pandas-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f821213d48f4ab353d20ebc24e4faf94ba40d76680642fb7ce2ea31a3ad94f9b"}, - {file = "pandas-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c70e00c2d894cb230e5c15e4b1e1e6b2b478e09cf27cc593a11ef955b9ecc81a"}, - {file = "pandas-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e97fbb5387c69209f134893abc788a6486dbf2f9e511070ca05eed4b930b1b02"}, - {file = "pandas-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101d0eb9c5361aa0146f500773395a03839a5e6ecde4d4b6ced88b7e5a1a6403"}, - {file = "pandas-2.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7d2ed41c319c9fb4fd454fe25372028dfa417aacb9790f68171b2e3f06eae8cd"}, - {file = "pandas-2.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:af5d3c00557d657c8773ef9ee702c61dd13b9d7426794c9dfeb1dc4a0bf0ebc7"}, - {file = "pandas-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:06cf591dbaefb6da9de8472535b185cba556d0ce2e6ed28e21d919704fef1a9e"}, - {file = "pandas-2.2.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:88ecb5c01bb9ca927ebc4098136038519aa5d66b44671861ffab754cae75102c"}, - {file = "pandas-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:04f6ec3baec203c13e3f8b139fb0f9f86cd8c0b94603ae3ae8ce9a422e9f5bee"}, - {file = 
"pandas-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a935a90a76c44fe170d01e90a3594beef9e9a6220021acfb26053d01426f7dc2"}, - {file = "pandas-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c391f594aae2fd9f679d419e9a4d5ba4bce5bb13f6a989195656e7dc4b95c8f0"}, - {file = "pandas-2.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9d1265545f579edf3f8f0cb6f89f234f5e44ba725a34d86535b1a1d38decbccc"}, - {file = "pandas-2.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:11940e9e3056576ac3244baef2fedade891977bcc1cb7e5cc8f8cc7d603edc89"}, - {file = "pandas-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:4acf681325ee1c7f950d058b05a820441075b0dd9a2adf5c4835b9bc056bf4fb"}, - {file = "pandas-2.2.1.tar.gz", hash = "sha256:0ab90f87093c13f3e8fa45b48ba9f39181046e8f3317d3aadb2fffbb1b978572"}, + {file = "pandas-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce"}, + {file = "pandas-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238"}, + {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08"}, + {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0"}, + {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51"}, + {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99"}, + {file = "pandas-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772"}, + {file = "pandas-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288"}, + {file = "pandas-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151"}, + {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b"}, + {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee"}, + {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db"}, + {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1"}, + {file = "pandas-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24"}, + {file = "pandas-2.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef"}, + {file = "pandas-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce"}, + {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad"}, + {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad"}, + {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76"}, + {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32"}, + {file = "pandas-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23"}, + {file = "pandas-2.2.2.tar.gz", hash = "sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54"}, ] [[package]] @@ -1684,40 +1731,40 @@ files = [ [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" requires_python = ">=3.8" summary = "plugin and hook calling mechanisms for python" groups = ["dev", "test", "typecheck"] files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [[package]] name = "protobuf" -version = "5.26.0" +version = "4.25.3" requires_python = ">=3.8" summary = "" groups = ["default"] files = [ - {file = "protobuf-5.26.0-cp310-abi3-win32.whl", hash = "sha256:f9ecc8eb6f18037e0cbf43256db0325d4723f429bca7ef5cd358b7c29d65f628"}, - {file = "protobuf-5.26.0-cp310-abi3-win_amd64.whl", hash = "sha256:dfd29f6eb34107dccf289a93d44fb6b131e68888d090b784b691775ac84e8213"}, - {file = "protobuf-5.26.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:7e47c57303466c867374a17b2b5e99c5a7c8b72a94118e2f28efb599f19b4069"}, - {file = "protobuf-5.26.0-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e184175276edc222e2d5e314a72521e10049938a9a4961fe4bea9b25d073c03f"}, - {file = "protobuf-5.26.0-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:6ee9d1aa02f951c5ce10bf8c6cfb7604133773038e33f913183c8b5201350600"}, - {file = "protobuf-5.26.0-py3-none-any.whl", hash = "sha256:a49b6c5359bf34fb7bf965bf21abfab4476e4527d822ab5289ee3bf73f291159"}, - {file = "protobuf-5.26.0.tar.gz", hash = "sha256:82f5870d74c99addfe4152777bdf8168244b9cf0ac65f8eccf045ddfa9d80d9b"}, + {file = "protobuf-4.25.3-cp310-abi3-win32.whl", hash = "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa"}, + {file = "protobuf-4.25.3-cp310-abi3-win_amd64.whl", hash = "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8"}, + {file = "protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d"}, + {file = "protobuf-4.25.3-py3-none-any.whl", hash = "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9"}, + {file = "protobuf-4.25.3.tar.gz", hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"}, ] [[package]] name = "pygments" -version = "2.17.2" -requires_python = ">=3.7" +version = "2.18.0" +requires_python = ">=3.8" summary = 
"Pygments is a syntax highlighting package written in Python." groups = ["default", "dev", "docs", "lint", "test"] files = [ - {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, - {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, + {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, + {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, ] [[package]] @@ -1759,7 +1806,7 @@ files = [ [[package]] name = "pyright" -version = "1.1.355" +version = "1.1.366" requires_python = ">=3.7" summary = "Command line wrapper for pyright" groups = ["dev", "typecheck"] @@ -1767,13 +1814,13 @@ dependencies = [ "nodeenv>=1.6.0", ] files = [ - {file = "pyright-1.1.355-py3-none-any.whl", hash = "sha256:bf30b6728fd68ae7d09c98292b67152858dd89738569836896df786e52b5fe48"}, - {file = "pyright-1.1.355.tar.gz", hash = "sha256:dca4104cd53d6484e6b1b50b7a239ad2d16d2ffd20030bcf3111b56f44c263bf"}, + {file = "pyright-1.1.366-py3-none-any.whl", hash = "sha256:c09e73ccc894976bcd6d6a5784aa84d724dbd9ceb7b873b39d475ca61c2de071"}, + {file = "pyright-1.1.366.tar.gz", hash = "sha256:10e4d60be411f6d960cd39b0b58bf2ff76f2c83b9aeb102ffa9d9fda2e1303cb"}, ] [[package]] name = "pytest" -version = "8.1.1" +version = "8.2.2" requires_python = ">=3.8" summary = "pytest: simple powerful testing with Python" groups = ["dev", "test", "typecheck"] @@ -1782,18 +1829,18 @@ dependencies = [ "exceptiongroup>=1.0.0rc8; python_version < \"3.11\"", "iniconfig", "packaging", - "pluggy<2.0,>=1.4", + "pluggy<2.0,>=1.5", "tomli>=1; python_version < \"3.11\"", ] files = [ - {file = "pytest-8.1.1-py3-none-any.whl", hash = "sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7"}, - {file = "pytest-8.1.1.tar.gz", hash = "sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044"}, + {file = "pytest-8.2.2-py3-none-any.whl", hash = "sha256:c434598117762e2bd304e526244f67bf66bbd7b5d6cf22138be51ff661980343"}, + {file = "pytest-8.2.2.tar.gz", hash = "sha256:de4bb8104e201939ccdc688b27a89a7be2079b22e2bd2b07f806b6ba71117977"}, ] [[package]] name = "pytest-cov" -version = "4.1.0" -requires_python = ">=3.7" +version = "5.0.0" +requires_python = ">=3.8" summary = "Pytest plugin for measuring coverage." groups = ["dev", "test"] dependencies = [ @@ -1801,8 +1848,8 @@ dependencies = [ "pytest>=4.6", ] files = [ - {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, - {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, + {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, + {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, ] [[package]] @@ -1988,28 +2035,28 @@ files = [ [[package]] name = "ruff" -version = "0.3.3" +version = "0.4.8" requires_python = ">=3.7" summary = "An extremely fast Python linter and code formatter, written in Rust." 
groups = ["dev", "lint"] files = [ - {file = "ruff-0.3.3-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:973a0e388b7bc2e9148c7f9be8b8c6ae7471b9be37e1cc732f8f44a6f6d7720d"}, - {file = "ruff-0.3.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:cfa60d23269d6e2031129b053fdb4e5a7b0637fc6c9c0586737b962b2f834493"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1eca7ff7a47043cf6ce5c7f45f603b09121a7cc047447744b029d1b719278eb5"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7d3f6762217c1da954de24b4a1a70515630d29f71e268ec5000afe81377642d"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b24c19e8598916d9c6f5a5437671f55ee93c212a2c4c569605dc3842b6820386"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5a6cbf216b69c7090f0fe4669501a27326c34e119068c1494f35aaf4cc683778"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:352e95ead6964974b234e16ba8a66dad102ec7bf8ac064a23f95371d8b198aab"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d6ab88c81c4040a817aa432484e838aaddf8bfd7ca70e4e615482757acb64f8"}, - {file = "ruff-0.3.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79bca3a03a759cc773fca69e0bdeac8abd1c13c31b798d5bb3c9da4a03144a9f"}, - {file = "ruff-0.3.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2700a804d5336bcffe063fd789ca2c7b02b552d2e323a336700abb8ae9e6a3f8"}, - {file = "ruff-0.3.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fd66469f1a18fdb9d32e22b79f486223052ddf057dc56dea0caaf1a47bdfaf4e"}, - {file = "ruff-0.3.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:45817af234605525cdf6317005923bf532514e1ea3d9270acf61ca2440691376"}, - {file = "ruff-0.3.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:0da458989ce0159555ef224d5b7c24d3d2e4bf4c300b85467b08c3261c6bc6a8"}, - {file = "ruff-0.3.3-py3-none-win32.whl", hash = "sha256:f2831ec6a580a97f1ea82ea1eda0401c3cdf512cf2045fa3c85e8ef109e87de0"}, - {file = "ruff-0.3.3-py3-none-win_amd64.whl", hash = "sha256:be90bcae57c24d9f9d023b12d627e958eb55f595428bafcb7fec0791ad25ddfc"}, - {file = "ruff-0.3.3-py3-none-win_arm64.whl", hash = "sha256:0171aab5fecdc54383993389710a3d1227f2da124d76a2784a7098e818f92d61"}, - {file = "ruff-0.3.3.tar.gz", hash = "sha256:38671be06f57a2f8aba957d9f701ea889aa5736be806f18c0cd03d6ff0cbca8d"}, + {file = "ruff-0.4.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7663a6d78f6adb0eab270fa9cf1ff2d28618ca3a652b60f2a234d92b9ec89066"}, + {file = "ruff-0.4.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eeceb78da8afb6de0ddada93112869852d04f1cd0f6b80fe464fd4e35c330913"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aad360893e92486662ef3be0a339c5ca3c1b109e0134fcd37d534d4be9fb8de3"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:284c2e3f3396fb05f5f803c9fffb53ebbe09a3ebe7dda2929ed8d73ded736deb"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7354f921e3fbe04d2a62d46707e569f9315e1a613307f7311a935743c51a764"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:72584676164e15a68a15778fd1b17c28a519e7a0622161eb2debdcdabdc71883"}, + {file = 
"ruff-0.4.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9678d5c9b43315f323af2233a04d747409d1e3aa6789620083a82d1066a35199"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704977a658131651a22b5ebeb28b717ef42ac6ee3b11e91dc87b633b5d83142b"}, + {file = "ruff-0.4.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d05f8d6f0c3cce5026cecd83b7a143dcad503045857bc49662f736437380ad45"}, + {file = "ruff-0.4.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6ea874950daca5697309d976c9afba830d3bf0ed66887481d6bca1673fc5b66a"}, + {file = "ruff-0.4.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fc95aac2943ddf360376be9aa3107c8cf9640083940a8c5bd824be692d2216dc"}, + {file = "ruff-0.4.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:384154a1c3f4bf537bac69f33720957ee49ac8d484bfc91720cc94172026ceed"}, + {file = "ruff-0.4.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e9d5ce97cacc99878aa0d084c626a15cd21e6b3d53fd6f9112b7fc485918e1fa"}, + {file = "ruff-0.4.8-py3-none-win32.whl", hash = "sha256:6d795d7639212c2dfd01991259460101c22aabf420d9b943f153ab9d9706e6a9"}, + {file = "ruff-0.4.8-py3-none-win_amd64.whl", hash = "sha256:e14a3a095d07560a9d6769a72f781d73259655919d9b396c650fc98a8157555d"}, + {file = "ruff-0.4.8-py3-none-win_arm64.whl", hash = "sha256:14019a06dbe29b608f6b7cbcec300e3170a8d86efaddb7b23405cb7f7dcaf780"}, + {file = "ruff-0.4.8.tar.gz", hash = "sha256:16d717b1d57b2e2fd68bd0bf80fb43931b79d05a7131aa477d66fc40fbd86268"}, ] [[package]] @@ -2097,13 +2144,13 @@ files = [ [[package]] name = "setuptools" -version = "69.2.0" +version = "70.0.0" requires_python = ">=3.8" summary = "Easily download, build, install, upgrade, and uninstall Python packages" groups = ["default", "dev", "docs", "typecheck"] files = [ - {file = "setuptools-69.2.0-py3-none-any.whl", hash = "sha256:c21c49fb1042386df081cb5d86759792ab89efca84cf114889191cd09aacc80c"}, - {file = "setuptools-69.2.0.tar.gz", hash = "sha256:0ff4183f8f42cd8fa3acea16c45205521a4ef28f73c6391d8a25e92893134f2e"}, + {file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"}, + {file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"}, ] [[package]] @@ -2145,9 +2192,22 @@ files = [ {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, ] +[[package]] +name = "tbb" +version = "2021.12.0" +summary = "IntelĀ® oneAPI Threading Building Blocks (oneTBB)" +groups = ["all", "default", "vision"] +marker = "platform_system == \"Windows\"" +files = [ + {file = "tbb-2021.12.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:f2cc9a7f8ababaa506cbff796ce97c3bf91062ba521e15054394f773375d81d8"}, + {file = "tbb-2021.12.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:a925e9a7c77d3a46ae31c34b0bb7f801c4118e857d137b68f68a8e458fcf2bd7"}, + {file = "tbb-2021.12.0-py3-none-win32.whl", hash = "sha256:b1725b30c174048edc8be70bd43bb95473f396ce895d91151a474d0fa9f450a8"}, + {file = "tbb-2021.12.0-py3-none-win_amd64.whl", hash = "sha256:fc2772d850229f2f3df85f1109c4844c495a2db7433d38200959ee9265b34789"}, +] + [[package]] name = "tensorboard" -version = "2.16.2" +version = "2.17.0" requires_python = ">=3.9" summary = "TensorBoard lets you watch Tensors Flow" groups = ["default"] @@ -2156,14 +2216,14 @@ dependencies = [ "grpcio>=1.48.2", "markdown>=2.6.8", "numpy>=1.12.0", - "protobuf!=4.24.0,>=3.19.6", 
+ "protobuf!=4.24.0,<5.0.0,>=3.19.6", "setuptools>=41.0.0", "six>1.9", "tensorboard-data-server<0.8.0,>=0.7.0", "werkzeug>=1.0.1", ] files = [ - {file = "tensorboard-2.16.2-py3-none-any.whl", hash = "sha256:9f2b4e7dad86667615c0e5cd072f1ea8403fc032a299f0072d6f74855775cc45"}, + {file = "tensorboard-2.17.0-py3-none-any.whl", hash = "sha256:859a499a9b1fb68a058858964486627100b71fcb21646861c61d31846a6478fb"}, ] [[package]] @@ -2180,7 +2240,7 @@ files = [ [[package]] name = "timm" -version = "0.9.16" +version = "1.0.3" requires_python = ">=3.8" summary = "PyTorch Image Models" groups = ["all", "vision"] @@ -2192,13 +2252,13 @@ dependencies = [ "torchvision", ] files = [ - {file = "timm-0.9.16-py3-none-any.whl", hash = "sha256:bf5704014476ab011589d3c14172ee4c901fd18f9110a928019cac5be2945914"}, - {file = "timm-0.9.16.tar.gz", hash = "sha256:891e54f375d55adf31a71ab0c117761f0e472f9f3971858ecdd1e7376b7071e6"}, + {file = "timm-1.0.3-py3-none-any.whl", hash = "sha256:d1ec86f7765aa79fbc7491508fa6e285d38a38f10bf4fe44ba2e9c70f91f0f5b"}, + {file = "timm-1.0.3.tar.gz", hash = "sha256:83920a7efe2cfd503b2a1257dc8808d6ff7dcd18a4b79f451c283e7d71497329"}, ] [[package]] name = "tokenizers" -version = "0.15.2" +version = "0.19.1" requires_python = ">=3.7" summary = "" groups = ["default"] @@ -2206,80 +2266,70 @@ dependencies = [ "huggingface-hub<1.0,>=0.16.4", ] files = [ - {file = "tokenizers-0.15.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:52f6130c9cbf70544287575a985bf44ae1bda2da7e8c24e97716080593638012"}, - {file = "tokenizers-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:054c1cc9c6d68f7ffa4e810b3d5131e0ba511b6e4be34157aa08ee54c2f8d9ee"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9b9b070fdad06e347563b88c278995735292ded1132f8657084989a4c84a6d5"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea621a7eef4b70e1f7a4e84dd989ae3f0eeb50fc8690254eacc08acb623e82f1"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cf7fd9a5141634fa3aa8d6b7be362e6ae1b4cda60da81388fa533e0b552c98fd"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44f2a832cd0825295f7179eaf173381dc45230f9227ec4b44378322d900447c9"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8b9ec69247a23747669ec4b0ca10f8e3dfb3545d550258129bd62291aabe8605"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b6a4c78da863ff26dbd5ad9a8ecc33d8a8d97b535172601cf00aee9d7ce9ce"}, - {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5ab2a4d21dcf76af60e05af8063138849eb1d6553a0d059f6534357bce8ba364"}, - {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a47acfac7e511f6bbfcf2d3fb8c26979c780a91e06fb5b9a43831b2c0153d024"}, - {file = "tokenizers-0.15.2-cp310-none-win32.whl", hash = "sha256:064ff87bb6acdbd693666de9a4b692add41308a2c0ec0770d6385737117215f2"}, - {file = "tokenizers-0.15.2-cp310-none-win_amd64.whl", hash = "sha256:3b919afe4df7eb6ac7cafd2bd14fb507d3f408db7a68c43117f579c984a73843"}, - {file = "tokenizers-0.15.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:89cd1cb93e4b12ff39bb2d626ad77e35209de9309a71e4d3d4672667b4b256e7"}, - {file = "tokenizers-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:cfed5c64e5be23d7ee0f0e98081a25c2a46b0b77ce99a4f0605b1ec43dd481fa"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a907d76dcfda37023ba203ab4ceeb21bc5683436ebefbd895a0841fd52f6f6f2"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20ea60479de6fc7b8ae756b4b097572372d7e4032e2521c1bbf3d90c90a99ff0"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48e2b9335be2bc0171df9281385c2ed06a15f5cf121c44094338306ab7b33f2c"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:112a1dd436d2cc06e6ffdc0b06d55ac019a35a63afd26475205cb4b1bf0bfbff"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4620cca5c2817177ee8706f860364cc3a8845bc1e291aaf661fb899e5d1c45b0"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccd73a82751c523b3fc31ff8194702e4af4db21dc20e55b30ecc2079c5d43cb7"}, - {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:107089f135b4ae7817affe6264f8c7a5c5b4fd9a90f9439ed495f54fcea56fb4"}, - {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0ff110ecc57b7aa4a594396525a3451ad70988e517237fe91c540997c4e50e29"}, - {file = "tokenizers-0.15.2-cp311-none-win32.whl", hash = "sha256:6d76f00f5c32da36c61f41c58346a4fa7f0a61be02f4301fd30ad59834977cc3"}, - {file = "tokenizers-0.15.2-cp311-none-win_amd64.whl", hash = "sha256:cc90102ed17271cf0a1262babe5939e0134b3890345d11a19c3145184b706055"}, - {file = "tokenizers-0.15.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f86593c18d2e6248e72fb91c77d413a815153b8ea4e31f7cd443bdf28e467670"}, - {file = "tokenizers-0.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0774bccc6608eca23eb9d620196687c8b2360624619623cf4ba9dc9bd53e8b51"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d0222c5b7c9b26c0b4822a82f6a7011de0a9d3060e1da176f66274b70f846b98"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3835738be1de66624fff2f4f6f6684775da4e9c00bde053be7564cbf3545cc66"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0143e7d9dcd811855c1ce1ab9bf5d96d29bf5e528fd6c7824d0465741e8c10fd"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db35825f6d54215f6b6009a7ff3eedee0848c99a6271c870d2826fbbedf31a38"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f5e64b0389a2be47091d8cc53c87859783b837ea1a06edd9d8e04004df55a5c"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e0480c452217edd35eca56fafe2029fb4d368b7c0475f8dfa3c5c9c400a7456"}, - {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a33ab881c8fe70474980577e033d0bc9a27b7ab8272896e500708b212995d834"}, - {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a308a607ca9de2c64c1b9ba79ec9a403969715a1b8ba5f998a676826f1a7039d"}, - {file = "tokenizers-0.15.2-cp312-none-win32.whl", hash = "sha256:b8fcfa81bcb9447df582c5bc96a031e6df4da2a774b8080d4f02c0c16b42be0b"}, - {file = "tokenizers-0.15.2-cp312-none-win_amd64.whl", hash = 
"sha256:38d7ab43c6825abfc0b661d95f39c7f8af2449364f01d331f3b51c94dcff7221"}, - {file = "tokenizers-0.15.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:38bfb0204ff3246ca4d5e726e8cc8403bfc931090151e6eede54d0e0cf162ef0"}, - {file = "tokenizers-0.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c861d35e8286a53e06e9e28d030b5a05bcbf5ac9d7229e561e53c352a85b1fc"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:936bf3842db5b2048eaa53dade907b1160f318e7c90c74bfab86f1e47720bdd6"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:620beacc3373277700d0e27718aa8b25f7b383eb8001fba94ee00aeea1459d89"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2735ecbbf37e52db4ea970e539fd2d450d213517b77745114f92867f3fc246eb"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:473c83c5e2359bb81b0b6fde870b41b2764fcdd36d997485e07e72cc3a62264a"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968fa1fb3c27398b28a4eca1cbd1e19355c4d3a6007f7398d48826bbe3a0f728"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:865c60ae6eaebdde7da66191ee9b7db52e542ed8ee9d2c653b6d190a9351b980"}, - {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7c0d8b52664ab2d4a8d6686eb5effc68b78608a9008f086a122a7b2996befbab"}, - {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f33dfbdec3784093a9aebb3680d1f91336c56d86cc70ddf88708251da1fe9064"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6a9b648a58281c4672212fab04e60648fde574877d0139cd4b4f93fe28ca8944"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7c7d18b733be6bbca8a55084027f7be428c947ddf871c500ee603e375013ffba"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:13ca3611de8d9ddfbc4dc39ef54ab1d2d4aaa114ac8727dfdc6a6ec4be017378"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:237d1bf3361cf2e6463e6c140628e6406766e8b27274f5fcc62c747ae3c6f094"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67a0fe1e49e60c664915e9fb6b0cb19bac082ab1f309188230e4b2920230edb3"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4e022fe65e99230b8fd89ebdfea138c24421f91c1a4f4781a8f5016fd5cdfb4d"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d857be2df69763362ac699f8b251a8cd3fac9d21893de129bc788f8baaef2693"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:708bb3e4283177236309e698da5fcd0879ce8fd37457d7c266d16b550bcbbd18"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c35e09e9899b72a76e762f9854e8750213f67567787d45f37ce06daf57ca78"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1257f4394be0d3b00de8c9e840ca5601d0a4a8438361ce9c2b05c7d25f6057b"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:02272fe48280e0293a04245ca5d919b2c94a48b408b55e858feae9618138aeda"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dc3ad9ebc76eabe8b1d7c04d38be884b8f9d60c0cdc09b0aa4e3bcf746de0388"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:32e16bdeffa7c4f46bf2152172ca511808b952701d13e7c18833c0b73cb5c23f"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fb16ba563d59003028b678d2361a27f7e4ae0ab29c7a80690efa20d829c81fdb"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2277c36d2d6cdb7876c274547921a42425b6810d38354327dd65a8009acf870c"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cf75d32e8d250781940d07f7eece253f2fe9ecdb1dc7ba6e3833fa17b82fcbc"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1b3b31884dc8e9b21508bb76da80ebf7308fdb947a17affce815665d5c4d028"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10122d8d8e30afb43bb1fe21a3619f62c3e2574bff2699cf8af8b0b6c5dc4a3"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d88b96ff0fe8e91f6ef01ba50b0d71db5017fa4e3b1d99681cec89a85faf7bf7"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:37aaec5a52e959892870a7c47cef80c53797c0db9149d458460f4f31e2fb250e"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e2ea752f2b0fe96eb6e2f3adbbf4d72aaa1272079b0dfa1145507bd6a5d537e6"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b19a808d8799fda23504a5cd31d2f58e6f52f140380082b352f877017d6342b"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c86e5e068ac8b19204419ed8ca90f9d25db20578f5881e337d203b314f4104"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de19c4dc503c612847edf833c82e9f73cd79926a384af9d801dcf93f110cea4e"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea09acd2fe3324174063d61ad620dec3bcf042b495515f27f638270a7d466e8b"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cf27fd43472e07b57cf420eee1e814549203d56de00b5af8659cb99885472f1f"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7ca22bd897537a0080521445d91a58886c8c04084a6a19e6c78c586e0cfa92a5"}, - {file = "tokenizers-0.15.2.tar.gz", hash = "sha256:e6e9c6e019dd5484be5beafc775ae6c925f4c69a3487040ed09b45e13df2cb91"}, + {file = "tokenizers-0.19.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:952078130b3d101e05ecfc7fc3640282d74ed26bcf691400f872563fca15ac97"}, + {file = "tokenizers-0.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82c8b8063de6c0468f08e82c4e198763e7b97aabfe573fd4cf7b33930ca4df77"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f03727225feaf340ceeb7e00604825addef622d551cbd46b7b775ac834c1e1c4"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:453e4422efdfc9c6b6bf2eae00d5e323f263fff62b29a8c9cd526c5003f3f642"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:02e81bf089ebf0e7f4df34fa0207519f07e66d8491d963618252f2e0729e0b46"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b07c538ba956843833fee1190cf769c60dc62e1cf934ed50d77d5502194d63b1"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28cab1582e0eec38b1f38c1c1fb2e56bce5dc180acb1724574fc5f47da2a4fe"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b01afb7193d47439f091cd8f070a1ced347ad0f9144952a30a41836902fe09e"}, + {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7fb297edec6c6841ab2e4e8f357209519188e4a59b557ea4fafcf4691d1b4c98"}, + {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2e8a3dd055e515df7054378dc9d6fa8c8c34e1f32777fb9a01fea81496b3f9d3"}, + {file = "tokenizers-0.19.1-cp310-none-win32.whl", hash = "sha256:7ff898780a155ea053f5d934925f3902be2ed1f4d916461e1a93019cc7250837"}, + {file = "tokenizers-0.19.1-cp310-none-win_amd64.whl", hash = "sha256:bea6f9947e9419c2fda21ae6c32871e3d398cba549b93f4a65a2d369662d9403"}, + {file = "tokenizers-0.19.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5c88d1481f1882c2e53e6bb06491e474e420d9ac7bdff172610c4f9ad3898059"}, + {file = "tokenizers-0.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ddf672ed719b4ed82b51499100f5417d7d9f6fb05a65e232249268f35de5ed14"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dadc509cc8a9fe460bd274c0e16ac4184d0958117cf026e0ea8b32b438171594"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfedf31824ca4915b511b03441784ff640378191918264268e6923da48104acc"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac11016d0a04aa6487b1513a3a36e7bee7eec0e5d30057c9c0408067345c48d2"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76951121890fea8330d3a0df9a954b3f2a37e3ec20e5b0530e9a0044ca2e11fe"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b342d2ce8fc8d00f376af068e3274e2e8649562e3bc6ae4a67784ded6b99428d"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d16ff18907f4909dca9b076b9c2d899114dd6abceeb074eca0c93e2353f943aa"}, + {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:706a37cc5332f85f26efbe2bdc9ef8a9b372b77e4645331a405073e4b3a8c1c6"}, + {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16baac68651701364b0289979ecec728546133e8e8fe38f66fe48ad07996b88b"}, + {file = "tokenizers-0.19.1-cp311-none-win32.whl", hash = "sha256:9ed240c56b4403e22b9584ee37d87b8bfa14865134e3e1c3fb4b2c42fafd3256"}, + {file = "tokenizers-0.19.1-cp311-none-win_amd64.whl", hash = "sha256:ad57d59341710b94a7d9dbea13f5c1e7d76fd8d9bcd944a7a6ab0b0da6e0cc66"}, + {file = "tokenizers-0.19.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:621d670e1b1c281a1c9698ed89451395d318802ff88d1fc1accff0867a06f153"}, + {file = "tokenizers-0.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d924204a3dbe50b75630bd16f821ebda6a5f729928df30f582fb5aade90c818a"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f3fefdc0446b1a1e6d81cd4c07088ac015665d2e812f6dbba4a06267d1a2c95"}, + 
{file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9620b78e0b2d52ef07b0d428323fb34e8ea1219c5eac98c2596311f20f1f9266"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04ce49e82d100594715ac1b2ce87d1a36e61891a91de774755f743babcd0dd52"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5c2ff13d157afe413bf7e25789879dd463e5a4abfb529a2d8f8473d8042e28f"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3174c76efd9d08f836bfccaca7cfec3f4d1c0a4cf3acbc7236ad577cc423c840"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c9d5b6c0e7a1e979bec10ff960fae925e947aab95619a6fdb4c1d8ff3708ce3"}, + {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a179856d1caee06577220ebcfa332af046d576fb73454b8f4d4b0ba8324423ea"}, + {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:952b80dac1a6492170f8c2429bd11fcaa14377e097d12a1dbe0ef2fb2241e16c"}, + {file = "tokenizers-0.19.1-cp312-none-win32.whl", hash = "sha256:01d62812454c188306755c94755465505836fd616f75067abcae529c35edeb57"}, + {file = "tokenizers-0.19.1-cp312-none-win_amd64.whl", hash = "sha256:b70bfbe3a82d3e3fb2a5e9b22a39f8d1740c96c68b6ace0086b39074f08ab89a"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3b11853f17b54c2fe47742c56d8a33bf49ce31caf531e87ac0d7d13d327c9334"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d26194ef6c13302f446d39972aaa36a1dda6450bc8949f5eb4c27f51191375bd"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e8d1ed93beda54bbd6131a2cb363a576eac746d5c26ba5b7556bc6f964425594"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca407133536f19bdec44b3da117ef0d12e43f6d4b56ac4c765f37eca501c7bda"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce05fde79d2bc2e46ac08aacbc142bead21614d937aac950be88dc79f9db9022"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:35583cd46d16f07c054efd18b5d46af4a2f070a2dd0a47914e66f3ff5efb2b1e"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:43350270bfc16b06ad3f6f07eab21f089adb835544417afda0f83256a8bf8b75"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b4399b59d1af5645bcee2072a463318114c39b8547437a7c2d6a186a1b5a0e2d"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6852c5b2a853b8b0ddc5993cd4f33bfffdca4fcc5d52f89dd4b8eada99379285"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bcd266ae85c3d39df2f7e7d0e07f6c41a55e9a3123bb11f854412952deacd828"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecb2651956eea2aa0a2d099434134b1b68f1c31f9a5084d6d53f08ed43d45ff2"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b279ab506ec4445166ac476fb4d3cc383accde1ea152998509a94d82547c8e2a"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:89183e55fb86e61d848ff83753f64cded119f5d6e1f553d14ffee3700d0a4a49"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2edbc75744235eea94d595a8b70fe279dd42f3296f76d5a86dde1d46e35f574"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:0e64bfde9a723274e9a71630c3e9494ed7b4c0f76a1faacf7fe294cd26f7ae7c"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0b5ca92bfa717759c052e345770792d02d1f43b06f9e790ca0a1db62838816f3"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f8a20266e695ec9d7a946a019c1d5ca4eddb6613d4f466888eee04f16eedb85"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63c38f45d8f2a2ec0f3a20073cccb335b9f99f73b3c69483cd52ebc75369d8a1"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dd26e3afe8a7b61422df3176e06664503d3f5973b94f45d5c45987e1cb711876"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:eddd5783a4a6309ce23432353cdb36220e25cbb779bfa9122320666508b44b88"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:56ae39d4036b753994476a1b935584071093b55c7a72e3b8288e68c313ca26e7"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f9939ca7e58c2758c01b40324a59c034ce0cebad18e0d4563a9b1beab3018243"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c330c0eb815d212893c67a032e9dc1b38a803eccb32f3e8172c19cc69fbb439"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec11802450a2487cdf0e634b750a04cbdc1c4d066b97d94ce7dd2cb51ebb325b"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2b718f316b596f36e1dae097a7d5b91fc5b85e90bf08b01ff139bd8953b25af"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ed69af290c2b65169f0ba9034d1dc39a5db9459b32f1dd8b5f3f32a3fcf06eab"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f8a9c828277133af13f3859d1b6bf1c3cb6e9e1637df0e45312e6b7c2e622b1f"}, + {file = "tokenizers-0.19.1.tar.gz", hash = "sha256:ee59e6680ed0fdbe6b724cf38bd70400a0c1dd623b07ac729087270caeac88e3"}, ] [[package]] @@ -2307,7 +2357,7 @@ files = [ [[package]] name = "torch" -version = "2.2.1" +version = "2.3.0" requires_python = ">=3.8.0" summary = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" groups = ["all", "default", "vision"] @@ -2315,6 +2365,7 @@ dependencies = [ "filelock", "fsspec", "jinja2", + "mkl<=2021.4.0,>=2021.1.1; platform_system == \"Windows\"", "networkx", "nvidia-cublas-cu12==12.1.3.1; platform_system == \"Linux\" and platform_machine == \"x86_64\"", "nvidia-cuda-cupti-cu12==12.1.105; platform_system == \"Linux\" and platform_machine == \"x86_64\"", @@ -2325,28 +2376,25 @@ dependencies = [ "nvidia-curand-cu12==10.3.2.106; platform_system == \"Linux\" and platform_machine == \"x86_64\"", "nvidia-cusolver-cu12==11.4.5.107; platform_system == \"Linux\" and platform_machine == \"x86_64\"", "nvidia-cusparse-cu12==12.1.0.106; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "nvidia-nccl-cu12==2.19.3; platform_system == \"Linux\" and platform_machine == \"x86_64\"", + 
"nvidia-nccl-cu12==2.20.5; platform_system == \"Linux\" and platform_machine == \"x86_64\"", "nvidia-nvtx-cu12==12.1.105; platform_system == \"Linux\" and platform_machine == \"x86_64\"", "sympy", - "triton==2.2.0; platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\"", + "triton==2.3.0; platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\"", "typing-extensions>=4.8.0", ] files = [ - {file = "torch-2.2.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:8d3bad336dd2c93c6bcb3268e8e9876185bda50ebde325ef211fb565c7d15273"}, - {file = "torch-2.2.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:5297f13370fdaca05959134b26a06a7f232ae254bf2e11a50eddec62525c9006"}, - {file = "torch-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:5f5dee8433798888ca1415055f5e3faf28a3bad660e4c29e1014acd3275ab11a"}, - {file = "torch-2.2.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:b6d78338acabf1fb2e88bf4559d837d30230cf9c3e4337261f4d83200df1fcbe"}, - {file = "torch-2.2.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:6ab3ea2e29d1aac962e905142bbe50943758f55292f1b4fdfb6f4792aae3323e"}, - {file = "torch-2.2.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:d86664ec85902967d902e78272e97d1aff1d331f7619d398d3ffab1c9b8e9157"}, - {file = "torch-2.2.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d6227060f268894f92c61af0a44c0d8212e19cb98d05c20141c73312d923bc0a"}, - {file = "torch-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:77e990af75fb1675490deb374d36e726f84732cd5677d16f19124934b2409ce9"}, - {file = "torch-2.2.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:46085e328d9b738c261f470231e987930f4cc9472d9ffb7087c7a1343826ac51"}, - {file = "torch-2.2.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:2d9e7e5ecbb002257cf98fae13003abbd620196c35f85c9e34c2adfb961321ec"}, - {file = "torch-2.2.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:ada53aebede1c89570e56861b08d12ba4518a1f8b82d467c32665ec4d1f4b3c8"}, - {file = "torch-2.2.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:be21d4c41ecebed9e99430dac87de1439a8c7882faf23bba7fea3fea7b906ac1"}, - {file = "torch-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:79848f46196750367dcdf1d2132b722180b9d889571e14d579ae82d2f50596c5"}, - {file = "torch-2.2.1-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:7ee804847be6be0032fbd2d1e6742fea2814c92bebccb177f0d3b8e92b2d2b18"}, - {file = "torch-2.2.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:84b2fb322ab091039fdfe74e17442ff046b258eb5e513a28093152c5b07325a7"}, + {file = "torch-2.3.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:d8ea5a465dbfd8501f33c937d1f693176c9aef9d1c1b0ca1d44ed7b0a18c52ac"}, + {file = "torch-2.3.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:09c81c5859a5b819956c6925a405ef1cdda393c9d8a01ce3851453f699d3358c"}, + {file = "torch-2.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:1bf023aa20902586f614f7682fedfa463e773e26c58820b74158a72470259459"}, + {file = "torch-2.3.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:758ef938de87a2653bba74b91f703458c15569f1562bf4b6c63c62d9c5a0c1f5"}, + {file = "torch-2.3.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:493d54ee2f9df100b5ce1d18c96dbb8d14908721f76351e908c9d2622773a788"}, + {file = "torch-2.3.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:bce43af735c3da16cc14c7de2be7ad038e2fbf75654c2e274e575c6c05772ace"}, + {file = "torch-2.3.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:729804e97b7cf19ae9ab4181f91f5e612af07956f35c8b2c8e9d9f3596a8e877"}, + {file = "torch-2.3.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:d24e328226d8e2af7cf80fcb1d2f1d108e0de32777fab4aaa2b37b9765d8be73"}, + {file = "torch-2.3.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:b0de2bdc0486ea7b14fc47ff805172df44e421a7318b7c4d92ef589a75d27410"}, + {file = "torch-2.3.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:a306c87a3eead1ed47457822c01dfbd459fe2920f2d38cbdf90de18f23f72542"}, + {file = "torch-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:f9b98bf1a3c8af2d4c41f0bf1433920900896c446d1ddc128290ff146d1eb4bd"}, + {file = "torch-2.3.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:dca986214267b34065a79000cee54232e62b41dff1ec2cab9abc3fc8b3dee0ad"}, ] [[package]] @@ -2368,31 +2416,28 @@ files = [ [[package]] name = "torchvision" -version = "0.17.1" +version = "0.18.0" requires_python = ">=3.8" summary = "image and video datasets and models for torch deep learning" groups = ["all", "vision"] dependencies = [ "numpy", "pillow!=8.3.*,>=5.3.0", - "torch==2.2.1", + "torch==2.3.0", ] files = [ - {file = "torchvision-0.17.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:06418880212b66e45e855dd39f536e7fd48b4e6b034a11dd9fe9e2384afb51ec"}, - {file = "torchvision-0.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:33d65d0c7fdcb3f7bc1dd8ed30ea3cd7e0587b4ad1b104b5677c8191a8bad9f1"}, - {file = "torchvision-0.17.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:aaefef2be6a02f206085ce4bb6c0078b03ebf48cb6ff82bd762ff6248475e08e"}, - {file = "torchvision-0.17.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:ebe5fdb466aff8a8e8e755de84a843418b6f8d500624752c05eaa638d7700f3d"}, - {file = "torchvision-0.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:9d4d45a996f4313e9c5db4da71d31508d44f7ccfbf29d3442bdcc2ad13e0b6f3"}, - {file = "torchvision-0.17.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:ea2ccdbf5974e0bf27fd6644a33b19cb0700297cf397bb0469e762c11c6c4105"}, - {file = "torchvision-0.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9106e32c9f1e70afa8172cf1b064cf9c2998d8dff0769ec69d537b20209ee43d"}, - {file = "torchvision-0.17.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:5966936c669a08870f6547cd0a90d08b157aeda03293f79e2adbb934687175ed"}, - {file = "torchvision-0.17.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:e74f5a26ef8190eab0c38b3f63914fea94e58e3b2f0e5466611c9f63bd91a80b"}, - {file = "torchvision-0.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:a2109c1a1dcf71e8940d43e91f78c4dd5bf0fcefb3a0a42244102752009f5862"}, - {file = "torchvision-0.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5d241d2a5fb4e608677fccf6f80b34a124446d324ee40c7814ce54bce888275b"}, - {file = "torchvision-0.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e0fe98d9d92c23d2262ff82f973242951b9357fb640f8888ac50848bd00f5b45"}, - {file = "torchvision-0.17.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:32dc5de86d2ade399e11087095674ca08a1649fb322cfe69336d28add467edcb"}, - {file = "torchvision-0.17.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:54902877410ffb5458ee52b6d0de4b25cf01496bee736d6825301a5f0398536e"}, - {file = "torchvision-0.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:cc22c1ed0f1aba3f98fd72b6f60021f57aec1d2f6af518522e8a0a83848de3a8"}, + {file = "torchvision-0.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dd61628a3d189c6852a12dc5ed4cd2eece66d2d67f35a866cb16f1dcb06c8c62"}, + {file = 
"torchvision-0.18.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:493c45f9937dad37aa1b64b14da17c7a589c72b91adc4837d431009cfe29bd53"}, + {file = "torchvision-0.18.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:5337f6acfa1fe959d5cb340d01a00614d6b31ce7a4824ccb95435a85c5273b95"}, + {file = "torchvision-0.18.0-cp310-cp310-win_amd64.whl", hash = "sha256:bd8e6f3b5beb49965f15c461302488edfa3d8c2d01d3bb79b150d6fb62711e3a"}, + {file = "torchvision-0.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6896a52168befe1105fb3c9335287390ed227e71d1e4ec4d68b62e8a3099fc09"}, + {file = "torchvision-0.18.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:3d7955398d4ceaad77c487c2c44f6f7813112402c9bab8cd906d346005891048"}, + {file = "torchvision-0.18.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:e5a24d620cea14a4bb89f24aa2b506230c0a16a3ada57fc53ad80cfd256a2128"}, + {file = "torchvision-0.18.0-cp311-cp311-win_amd64.whl", hash = "sha256:6ad70ddfa879bda5ed886b2518fe562640e0059787cbd65cb2bffa7674541410"}, + {file = "torchvision-0.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:eb9d83c0e1dbb54ecb0fb04c87f786333e3a6fb8b9c400aca7c31081f9aa5707"}, + {file = "torchvision-0.18.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:b657d052d146f24cb3b2a78219bfc82ae70a9706671c50f632528907d10cccec"}, + {file = "torchvision-0.18.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:a964afbc7ddf50a46b941477f6c35729b416deedd139756befd488245e2e226d"}, + {file = "torchvision-0.18.0-cp312-cp312-win_amd64.whl", hash = "sha256:7c770f0f748e0b17f57c0297508d7254f686cdf03fc2e2949f422b20574f4c0f"}, ] [[package]] @@ -2411,30 +2456,30 @@ files = [ [[package]] name = "transformers" -version = "4.39.0" +version = "4.41.2" requires_python = ">=3.8.0" summary = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" groups = ["default"] dependencies = [ "filelock", - "huggingface-hub<1.0,>=0.19.3", + "huggingface-hub<1.0,>=0.23.0", "numpy>=1.17", "packaging>=20.0", "pyyaml>=5.1", "regex!=2019.12.17", "requests", "safetensors>=0.4.1", - "tokenizers<0.19,>=0.14", + "tokenizers<0.20,>=0.19", "tqdm>=4.27", ] files = [ - {file = "transformers-4.39.0-py3-none-any.whl", hash = "sha256:7801785b1f016d667467e8c372c1c3653c18fe32ba97952059e3bea79ba22b08"}, - {file = "transformers-4.39.0.tar.gz", hash = "sha256:517a13cd633b10bea01c92ab0b3059762872c7c29da3d223db9d28e926fe330d"}, + {file = "transformers-4.41.2-py3-none-any.whl", hash = "sha256:05555d20e43f808de1ef211ab64803cdb513170cef70d29a888b589caebefc67"}, + {file = "transformers-4.41.2.tar.gz", hash = "sha256:80a4db216533d573e9cc7388646c31ed9480918feb7c55eb211249cb23567f87"}, ] [[package]] name = "triton" -version = "2.2.0" +version = "2.3.0" summary = "A language and compiler for custom Deep Learning operations" groups = ["all", "default", "vision"] marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\"" @@ -2442,9 +2487,9 @@ dependencies = [ "filelock", ] files = [ - {file = "triton-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2294514340cfe4e8f4f9e5c66c702744c4a117d25e618bd08469d0bfed1e2e5"}, - {file = "triton-2.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da58a152bddb62cafa9a857dd2bc1f886dbf9f9c90a2b5da82157cd2b34392b0"}, - {file = "triton-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af58716e721460a61886668b205963dc4d1e4ac20508cc3f623aef0d70283d5"}, + {file = 
"triton-2.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ce4b8ff70c48e47274c66f269cce8861cf1dc347ceeb7a67414ca151b1822d8"}, + {file = "triton-2.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c3d9607f85103afdb279938fc1dd2a66e4f5999a58eb48a346bd42738f986dd"}, + {file = "triton-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:218d742e67480d9581bafb73ed598416cc8a56f6316152e5562ee65e33de01c0"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index d60cf684..97c2dfb3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ build-backend = "pdm.backend" [project] name = "kaiko-eva" -version = "0.0.1" +version = "0.0.2" description = "Evaluation Framework for oncology foundation models." keywords = [ "machine-learning", @@ -34,8 +34,9 @@ maintainers = [ ] requires-python = ">=3.10" dependencies = [ + "torch==2.3.0", "lightning>=2.2.2", - "jsonargparse[omegaconf]>=4.27.4", + "jsonargparse[omegaconf]==4.28", "tensorboard>=2.16.2", "loguru>=0.7.2", "pandas>=2.2.0", diff --git a/src/eva/core/callbacks/__init__.py b/src/eva/core/callbacks/__init__.py index dc14697c..3b36a6db 100644 --- a/src/eva/core/callbacks/__init__.py +++ b/src/eva/core/callbacks/__init__.py @@ -1,5 +1,6 @@ """Callbacks API.""" -from eva.core.callbacks.writers import EmbeddingsWriter +from eva.core.callbacks.config import ConfigurationLogger +from eva.core.callbacks.writers import ClassificationEmbeddingsWriter -__all__ = ["EmbeddingsWriter"] +__all__ = ["ConfigurationLogger", "ClassificationEmbeddingsWriter"] diff --git a/src/eva/core/callbacks/config.py b/src/eva/core/callbacks/config.py new file mode 100644 index 00000000..79cae704 --- /dev/null +++ b/src/eva/core/callbacks/config.py @@ -0,0 +1,143 @@ +"""Configuration logger callback.""" + +import ast +import os +import sys +from types import BuiltinFunctionType +from typing import Any, Dict, List + +import lightning.pytorch as pl +import yaml +from lightning_fabric.utilities import cloud_io +from loguru import logger as cli_logger +from omegaconf import OmegaConf +from typing_extensions import TypeGuard, override + +from eva.core import loggers + + +class ConfigurationLogger(pl.Callback): + """Logs the submitted configuration to the experimental logger.""" + + _save_as: str = "config.yaml" + + def __init__(self, verbose: bool = True) -> None: + """Initializes the callback. + + Args: + verbose: Whether to print the configurations to print the + configuration to the terminal. + """ + super().__init__() + + self._verbose = verbose + + @override + def setup( + self, + trainer: pl.Trainer, + pl_module: pl.LightningModule, + stage: str | None = None, + ) -> None: + log_dir = trainer.log_dir + if not _logdir_exists(log_dir): + return + + configuration = _load_submitted_config() + + if self._verbose: + config_as_text = yaml.dump(configuration, sort_keys=False) + print(f"Configuration:\033[94m\n---\n{config_as_text}\033[0m") + + save_as = os.path.join(log_dir, self._save_as) + fs = cloud_io.get_filesystem(log_dir) + with fs.open(save_as, "w") as output_file: + yaml.dump(configuration, output_file, sort_keys=False) + + loggers.log_parameters(trainer.loggers, tag="configuration", parameters=configuration) + + +def _logdir_exists(logdir: str | None, verbose: bool = True) -> TypeGuard[str]: + """Checks if the trainer has a log directory. + + Args: + logdir: Trainer's logdir. + name: The name to log with. + verbose: Whether to log if it does not exist. 
+ + Returns: + A bool indicating if the log directory exists or not. + """ + exists = isinstance(logdir, str) + if not exists and verbose: + print("\n") + cli_logger.warning("Log directory is `None`. Configuration file will not be logged.\n") + return exists + + +def _load_submitted_config() -> Dict[str, Any]: + """Retrieves and loads the submitted configuration. + + Returns: + The submitted configuration as a dictionary. + """ + config_paths = _fetch_submitted_config_path() + return _load_yaml_files(config_paths) + + +def _fetch_submitted_config_path() -> List[str]: + """Fetches the config paths from the command line arguments. + + Returns: + The paths to the submitted configuration files. + """ + return list(filter(lambda f: f.endswith(".yaml"), sys.argv)) + + +def _load_yaml_files(paths: List[str]) -> Dict[str, Any]: + """Loads and merges YAML files from multiple paths. + + Args: + paths: The paths to the yaml files. + + Returns: + The merged configurations as a dictionary. + """ + merged_config = {} + for config_path in paths: + fs = cloud_io.get_filesystem(config_path) + with fs.open(config_path, "r") as file: + omegaconf_file = OmegaConf.load(file) # type: ignore + config_dict = OmegaConf.to_object(omegaconf_file) # type: ignore + parsed_config = _type_resolver(config_dict) # type: ignore + merged_config.update(parsed_config) + return merged_config + + +def _type_resolver(mapping: Dict[str, Any]) -> Dict[str, Any]: + """Parses the string values of a dictionary in-place. + + Args: + mapping: A dictionary object. + + Returns: + The mapping with the formatted values. + """ + for key, value in mapping.items(): + if isinstance(value, dict): + formatted_value = _type_resolver(value) + elif isinstance(value, list) and isinstance(value[0], dict): + formatted_value = [_type_resolver(subvalue) for subvalue in value] + else: + try: + parsed_value = ast.literal_eval(value) # type: ignore + formatted_value = ( + value if isinstance(parsed_value, BuiltinFunctionType) else parsed_value + ) + + except Exception: + formatted_value = value + + mapping[key] = formatted_value + + return mapping
diff --git a/src/eva/core/callbacks/writers/__init__.py b/src/eva/core/callbacks/writers/__init__.py index a731f06f..8d907e66 100644 --- a/src/eva/core/callbacks/writers/__init__.py +++ b/src/eva/core/callbacks/writers/__init__.py @@ -1,5 +1,5 @@ """Callbacks API.""" -from eva.core.callbacks.writers.embeddings import EmbeddingsWriter +from eva.core.callbacks.writers.embeddings import ClassificationEmbeddingsWriter -__all__ = ["EmbeddingsWriter"] +__all__ = ["ClassificationEmbeddingsWriter"]
diff --git a/src/eva/core/callbacks/writers/embeddings.py b/src/eva/core/callbacks/writers/embeddings.py deleted file mode 100644 index 4c8d4520..00000000 --- a/src/eva/core/callbacks/writers/embeddings.py +++ /dev/null @@ -1,269 +0,0 @@ -"""Embeddings writer.""" - -import csv -import io -import os -from typing import Any, Dict, List, Sequence, Tuple - -import lightning.pytorch as pl -import torch -from lightning.pytorch import callbacks -from loguru import logger -from torch import multiprocessing, nn -from typing_extensions import override - -from eva.core.callbacks.writers.typings import ITEM_DICT_ENTRY, QUEUE_ITEM -from eva.core.models.modules.typings import INPUT_BATCH -from eva.core.utils import multiprocessing as eva_multiprocessing - - -class EmbeddingsWriter(callbacks.BasePredictionWriter): - """Callback for writing generated embeddings to disk.""" - - def __init__( - self, - output_dir: str, - backbone: nn.Module | None = None, -
dataloader_idx_map: Dict[int, str] | None = None, - metadata_keys: List[str] | None = None, - overwrite: bool = True, - save_every_n: int = 100, - ) -> None: - """Initializes a new EmbeddingsWriter instance. - - This callback writes the embedding files in a separate process to avoid blocking the - main process where the model forward pass is executed. - - Args: - output_dir: The directory where the embeddings will be saved. - backbone: A model to be used as feature extractor. If `None`, - it will be expected that the input batch returns the features directly. - dataloader_idx_map: A dictionary mapping dataloader indices to their respective - names (e.g. train, val, test). - metadata_keys: An optional list of keys to extract from the batch metadata and store - as additional columns in the manifest file. - overwrite: Whether to overwrite the output directory. Defaults to True. - save_every_n: Interval for number of iterations to save the embeddings to disk. - During this interval, the embeddings are accumulated in memory. - """ - super().__init__(write_interval="batch") - - self._output_dir = output_dir - self._backbone = backbone - self._dataloader_idx_map = dataloader_idx_map or {} - self._overwrite = overwrite - self._save_every_n = save_every_n - self._metadata_keys = metadata_keys or [] - - self._write_queue: multiprocessing.Queue - self._write_process: eva_multiprocessing.Process - - @override - def on_predict_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: - os.makedirs(self._output_dir, exist_ok=self._overwrite) - self._initialize_write_process() - self._write_process.start() - - if self._backbone is not None: - self._backbone = self._backbone.to(pl_module.device) - self._backbone.eval() - - @override - def write_on_batch_end( - self, - trainer: pl.Trainer, - pl_module: pl.LightningModule, - prediction: Any, - batch_indices: Sequence[int], - batch: INPUT_BATCH, - batch_idx: int, - dataloader_idx: int, - ) -> None: - dataset = trainer.predict_dataloaders[dataloader_idx].dataset # type: ignore - _, targets, metadata = INPUT_BATCH(*batch) - split = self._dataloader_idx_map.get(dataloader_idx) - - embeddings = self._get_embeddings(prediction) - for local_idx, global_idx in enumerate(batch_indices[: len(embeddings)]): - input_name = dataset.filename(global_idx) - save_name = os.path.splitext(input_name)[0] + ".pt" - embeddings_buffer, target_buffer = io.BytesIO(), io.BytesIO() - torch.save(embeddings[local_idx].clone(), embeddings_buffer) - torch.save(targets[local_idx], target_buffer) # type: ignore - item_metadata = self._get_item_metadata(metadata, local_idx) - item = QUEUE_ITEM( - embeddings_buffer, target_buffer, input_name, save_name, split, item_metadata - ) - self._write_queue.put(item) - - self._write_process.check_exceptions() - - @override - def on_predict_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: - self._write_queue.put(None) - self._write_process.join() - logger.info(f"Predictions and manifest saved to {self._output_dir}") - - def _initialize_write_process(self) -> None: - self._write_queue = multiprocessing.Queue() - self._write_process = eva_multiprocessing.Process( - target=_process_write_queue, - args=( - self._write_queue, - self._output_dir, - self._metadata_keys, - self._save_every_n, - self._overwrite, - ), - ) - - def _get_embeddings(self, prediction: torch.Tensor) -> torch.Tensor: - """Returns the embeddings from predictions.""" - if self._backbone is None: - return prediction - - with torch.no_grad(): - return 
self._backbone(prediction) - - def _get_item_metadata( - self, metadata: Dict[str, Any] | None, local_idx: int - ) -> Dict[str, Any] | None: - """Returns the metadata for the item at the given local index.""" - if not metadata: - if self._metadata_keys: - raise ValueError("Metadata keys are provided but the batch metadata is empty.") - else: - return None - - item_metadata = {} - for key in self._metadata_keys: - if key not in metadata: - raise KeyError(f"Metadata key '{key}' not found in the batch metadata.") - item_metadata[key] = metadata[key][local_idx] - - return item_metadata - - -def _process_write_queue( - write_queue: multiprocessing.Queue, - output_dir: str, - metadata_keys: List[str], - save_every_n: int, - overwrite: bool = False, -) -> None: - """This function receives and processes items added by the main process to the queue.""" - manifest_file, manifest_writer = _init_manifest(output_dir, metadata_keys, overwrite) - - name_to_items: Dict[str, ITEM_DICT_ENTRY] = {} - - counter = 0 - while True: - item = write_queue.get() - if item is None: - break - - item = QUEUE_ITEM(*item) - - if item.save_name in name_to_items: - name_to_items[item.save_name].items.append(item) - else: - name_to_items[item.save_name] = ITEM_DICT_ENTRY(items=[item], save_count=0) - - if counter > 0 and counter % save_every_n == 0: - name_to_items = _save_items(name_to_items, metadata_keys, output_dir, manifest_writer) - - counter += 1 - - if len(name_to_items) > 0: - _save_items(name_to_items, metadata_keys, output_dir, manifest_writer) - - manifest_file.close() - - -def _save_items( - name_to_items: Dict[str, ITEM_DICT_ENTRY], - metadata_keys: List[str], - output_dir: str, - manifest_writer: Any, -) -> Dict[str, ITEM_DICT_ENTRY]: - """Saves predictions to disk and updates the manifest file. - - If multiple items share the same filename, the predictions are concatenated and saved - to the same file. Furthermore, the manifest file will only contain one entry for each - filename, which is why this function checks if it's the first time saving to a file. - - Args: - name_to_items: A dictionary mapping save names to the corresponding queue items - holding the prediction tensors and the information for the manifest file. - metadata_keys: A list of keys to extract from the batch metadata. These will be - stored as additional columns in the manifest file. - output_dir: The directory where the embedding tensors & manifest will be saved. - manifest_writer: The CSV writer for the writing to the manifest file. - """ - for save_name, entry in name_to_items.items(): - if len(entry.items) > 0: - save_path = os.path.join(output_dir, save_name) - is_first_save = entry.save_count == 0 - if is_first_save: - _, target, input_name, _, split, metadata = QUEUE_ITEM(*entry.items[0]) - metadata = [metadata[key] for key in metadata_keys] # type: ignore - _update_manifest(target, input_name, save_name, split, metadata, manifest_writer) - prediction_buffers = [item.prediction_buffer for item in entry.items] - _save_predictions(prediction_buffers, save_path, is_first_save) - name_to_items[save_name].save_count += 1 - name_to_items[save_name].items = [] - - return name_to_items - - -def _save_predictions( - prediction_buffers: List[io.BytesIO], save_path: str, is_first_save: bool -) -> None: - """Saves the embedding tensors as list to .pt files. - - If it's not the first save to this save_path, the new predictions are concatenated - with the existing ones and saved to the same file. 
- - Example Usecase: Save all patch embeddings corresponding to the same WSI to a single file. - """ - predictions = [ - torch.load(io.BytesIO(buffer.getbuffer()), map_location="cpu") - for buffer in prediction_buffers - ] - - if not is_first_save: - previous_predictions = torch.load(save_path, map_location="cpu") - if not isinstance(previous_predictions, list): - raise ValueError("Previous predictions should be a list of tensors.") - predictions = predictions + previous_predictions - - os.makedirs(os.path.dirname(save_path), exist_ok=True) - torch.save(predictions, save_path) - - -def _init_manifest( - output_dir: str, metadata_keys: List[str] | None, overwrite: bool = False -) -> Tuple[io.TextIOWrapper, Any]: - manifest_path = os.path.join(output_dir, "manifest.csv") - if os.path.exists(manifest_path) and not overwrite: - raise FileExistsError( - f"Manifest file already exists at {manifest_path}. This likely means that the " - "embeddings have been computed before. Consider using `eva fit` instead " - "of `eva predict_fit` or `eva predict`." - ) - manifest_file = open(manifest_path, "w", newline="") - manifest_writer = csv.writer(manifest_file) - manifest_writer.writerow(["origin", "embeddings", "target", "split"] + (metadata_keys or [])) - return manifest_file, manifest_writer - - -def _update_manifest( - target_buffer: io.BytesIO, - input_name: str, - save_name: str, - split: str | None, - metadata: List[str], - manifest_writer, -) -> None: - target = torch.load(io.BytesIO(target_buffer.getbuffer()), map_location="cpu") - manifest_writer.writerow([input_name, save_name, target.item(), split] + metadata) diff --git a/src/eva/core/callbacks/writers/embeddings/__init__.py b/src/eva/core/callbacks/writers/embeddings/__init__.py new file mode 100644 index 00000000..63cf7099 --- /dev/null +++ b/src/eva/core/callbacks/writers/embeddings/__init__.py @@ -0,0 +1,5 @@ +"""Embedding callback writers.""" + +from eva.core.callbacks.writers.embeddings.classification import ClassificationEmbeddingsWriter + +__all__ = ["ClassificationEmbeddingsWriter"] diff --git a/src/eva/core/callbacks/writers/embeddings/_manifest.py b/src/eva/core/callbacks/writers/embeddings/_manifest.py new file mode 100644 index 00000000..3b1a49f6 --- /dev/null +++ b/src/eva/core/callbacks/writers/embeddings/_manifest.py @@ -0,0 +1,68 @@ +"""Manifest file manager.""" + +import csv +import io +import os +from typing import Any, Dict, List + +import _csv + + +class ManifestManager: + """Class for writing the embedding manifest files.""" + + def __init__( + self, + output_dir: str, + metadata_keys: List[str] | None = None, + overwrite: bool = False, + ) -> None: + """Initializes the writing manager. + + Args: + output_dir: The directory where the embeddings will be saved. + metadata_keys: An optional list of keys to extract from the batch + metadata and store as additional columns in the manifest file. + overwrite: Whether to overwrite the output directory. + """ + self._output_dir = output_dir + self._metadata_keys = metadata_keys or [] + self._overwrite = overwrite + + self._manifest_file: io.TextIOWrapper + self._manifest_writer: _csv.Writer # type: ignore + + self._setup() + + def _setup(self) -> None: + """Initializes the manifest file and sets the file object and writer.""" + manifest_path = os.path.join(self._output_dir, "manifest.csv") + if os.path.exists(manifest_path) and not self._overwrite: + raise FileExistsError( + f"Manifest file already exists at {manifest_path}. 
This likely means that the " + "embeddings have been computed before. Consider using `eva fit` instead " + "of `eva predict_fit` or `eva predict`." + ) + + self._manifest_file = open(manifest_path, "w", newline="") + self._manifest_writer = csv.writer(self._manifest_file) + self._manifest_writer.writerow( + ["origin", "embeddings", "target", "split"] + self._metadata_keys + ) + + def update( + self, + input_name: str, + save_name: str, + target: str, + split: str | None, + metadata: Dict[str, Any] | None = None, + ) -> None: + """Adds a new entry to the manifest file.""" + metadata_entries = list(metadata.values()) if metadata else [] + self._manifest_writer.writerow([input_name, save_name, target, split] + metadata_entries) + + def close(self) -> None: + """Closes the manifest file.""" + if self._manifest_file: + self._manifest_file.close() diff --git a/src/eva/core/callbacks/writers/embeddings/base.py b/src/eva/core/callbacks/writers/embeddings/base.py new file mode 100644 index 00000000..92c12bcc --- /dev/null +++ b/src/eva/core/callbacks/writers/embeddings/base.py @@ -0,0 +1,172 @@ +"""Embeddings writer base class.""" + +import abc +import io +import os +from typing import Any, Dict, List, Sequence + +import lightning.pytorch as pl +import torch +from lightning.pytorch import callbacks +from loguru import logger +from torch import multiprocessing, nn +from typing_extensions import override + +from eva.core.callbacks.writers.embeddings.typings import QUEUE_ITEM +from eva.core.models.modules.typings import INPUT_BATCH +from eva.core.utils import multiprocessing as eva_multiprocessing + + +class EmbeddingsWriter(callbacks.BasePredictionWriter, abc.ABC): + """Callback for writing generated embeddings to disk.""" + + def __init__( + self, + output_dir: str, + backbone: nn.Module | None = None, + dataloader_idx_map: Dict[int, str] | None = None, + metadata_keys: List[str] | None = None, + overwrite: bool = True, + save_every_n: int = 100, + ) -> None: + """Initializes a new EmbeddingsWriter instance. + + This callback writes the embedding files in a separate process to avoid blocking the + main process where the model forward pass is executed. + + Args: + output_dir: The directory where the embeddings will be saved. + backbone: A model to be used as feature extractor. If `None`, + it will be expected that the input batch returns the features directly. + dataloader_idx_map: A dictionary mapping dataloader indices to their respective + names (e.g. train, val, test). + metadata_keys: An optional list of keys to extract from the batch metadata and store + as additional columns in the manifest file. + overwrite: Whether to overwrite the output directory. + save_every_n: Interval for number of iterations to save the embeddings to disk. + During this interval, the embeddings are accumulated in memory. + """ + super().__init__(write_interval="batch") + + self._output_dir = output_dir + self._backbone = backbone + self._dataloader_idx_map = dataloader_idx_map or {} + self._overwrite = overwrite + self._save_every_n = save_every_n + self._metadata_keys = metadata_keys or [] + + self._write_queue: multiprocessing.Queue + self._write_process: eva_multiprocessing.Process + + @staticmethod + @abc.abstractmethod + def _process_write_queue( + write_queue: multiprocessing.Queue, + output_dir: str, + metadata_keys: List[str], + save_every_n: int, + overwrite: bool = False, + ) -> None: + """This function receives and processes items added by the main process to the queue. 
+ + Queue items contain the embedding tensors, targets and metadata which need to be + saved to disk (.pt files and manifest). + """ + + @override + def on_predict_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: + os.makedirs(self._output_dir, exist_ok=self._overwrite) + self._initialize_write_process() + self._write_process.start() + + if self._backbone is not None: + self._backbone = self._backbone.to(pl_module.device) + self._backbone.eval() + + @override + def write_on_batch_end( + self, + trainer: pl.Trainer, + pl_module: pl.LightningModule, + prediction: Any, + batch_indices: Sequence[int], + batch: INPUT_BATCH, + batch_idx: int, + dataloader_idx: int, + ) -> None: + dataset = trainer.predict_dataloaders[dataloader_idx].dataset # type: ignore + _, targets, metadata = INPUT_BATCH(*batch) + split = self._dataloader_idx_map.get(dataloader_idx) + if not isinstance(targets, torch.Tensor): + raise ValueError(f"Targets ({type(targets)}) should be `torch.Tensor`.") + + embeddings = self._get_embeddings(prediction) + + for local_idx, global_idx in enumerate(batch_indices[: len(embeddings)]): + data_name = dataset.filename(global_idx) + save_name = os.path.splitext(data_name)[0] + ".pt" + embeddings_buffer, target_buffer = _as_io_buffers( + embeddings[local_idx], targets[local_idx] + ) + item_metadata = self._get_item_metadata(metadata, local_idx) + item = QUEUE_ITEM( + prediction_buffer=embeddings_buffer, + target_buffer=target_buffer, + data_name=data_name, + save_name=save_name, + split=split, + metadata=item_metadata, + ) + self._write_queue.put(item) + + self._write_process.check_exceptions() + + @override + def on_predict_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: + self._write_queue.put(None) + self._write_process.join() + logger.info(f"Predictions and manifest saved to {self._output_dir}") + + def _initialize_write_process(self) -> None: + self._write_queue = multiprocessing.Queue() + self._write_process = eva_multiprocessing.Process( + target=self._process_write_queue, + args=( + self._write_queue, + self._output_dir, + self._metadata_keys, + self._save_every_n, + self._overwrite, + ), + ) + + @torch.no_grad() + def _get_embeddings(self, tensor: torch.Tensor) -> torch.Tensor: + """Returns the embeddings from predictions.""" + return self._backbone(tensor) if self._backbone else tensor + + def _get_item_metadata( + self, metadata: Dict[str, Any] | None, local_idx: int + ) -> Dict[str, Any] | None: + """Returns the metadata for the item at the given local index.""" + if not metadata: + if self._metadata_keys: + raise ValueError("Metadata keys are provided but the batch metadata is empty.") + else: + return None + + item_metadata = {} + for key in self._metadata_keys: + if key not in metadata: + raise KeyError(f"Metadata key '{key}' not found in the batch metadata.") + item_metadata[key] = metadata[key][local_idx] + + return item_metadata + + +def _as_io_buffers(*items: torch.Tensor) -> Sequence[io.BytesIO]: + """Loads torch tensors as io buffers.""" + buffers = [io.BytesIO() for _ in range(len(items))] + for tensor, buffer in zip(items, buffers, strict=False): + torch.save(tensor.clone(), buffer) + return buffers diff --git a/src/eva/core/callbacks/writers/embeddings/classification.py b/src/eva/core/callbacks/writers/embeddings/classification.py new file mode 100644 index 00000000..1a3b3cb7 --- /dev/null +++ b/src/eva/core/callbacks/writers/embeddings/classification.py @@ -0,0 +1,112 @@ +"""Embeddings writer for classification.""" + 
+import io +import os +from typing import Dict, List + +import torch +from torch import multiprocessing +from typing_extensions import override + +from eva.core.callbacks.writers.embeddings import base +from eva.core.callbacks.writers.embeddings._manifest import ManifestManager +from eva.core.callbacks.writers.embeddings.typings import ITEM_DICT_ENTRY, QUEUE_ITEM + + +class ClassificationEmbeddingsWriter(base.EmbeddingsWriter): + """Callback for writing generated embeddings to disk for classification tasks.""" + + @staticmethod + @override + def _process_write_queue( + write_queue: multiprocessing.Queue, + output_dir: str, + metadata_keys: List[str], + save_every_n: int, + overwrite: bool = False, + ) -> None: + """Processes the write queue and saves the predictions to disk. + + Note that in Multi Instance Learning (MIL) scenarios, we can have multiple + embeddings per input data point. In that case, this function will save all + embeddings that correspond to the same data point as a list of tensors to + the same .pt file. + """ + manifest_manager = ManifestManager(output_dir, metadata_keys, overwrite) + name_to_items: Dict[str, ITEM_DICT_ENTRY] = {} + + counter = 0 + while True: + item = write_queue.get() + if item is None: + break + item = QUEUE_ITEM(*item) + + if item.save_name in name_to_items: + name_to_items[item.save_name].items.append(item) + else: + name_to_items[item.save_name] = ITEM_DICT_ENTRY(items=[item], save_count=0) + + if counter > 0 and counter % save_every_n == 0: + name_to_items = _save_items(name_to_items, output_dir, manifest_manager) + counter += 1 + + if len(name_to_items) > 0: + _save_items(name_to_items, output_dir, manifest_manager) + + manifest_manager.close() + + +def _save_items( + name_to_items: Dict[str, ITEM_DICT_ENTRY], + output_dir: str, + manifest_manager: ManifestManager, +) -> Dict[str, ITEM_DICT_ENTRY]: + """Saves predictions to disk and updates the manifest file. + + Args: + name_to_items: A dictionary mapping save data point names to the corresponding queue items + holding the prediction tensors and the information for the manifest file. + output_dir: The directory where the embedding tensors & manifest will be saved. + manifest_manager: The manifest manager instance to update the manifest file. + """ + for save_name, entry in name_to_items.items(): + if len(entry.items) > 0: + save_path = os.path.join(output_dir, save_name) + is_first_save = entry.save_count == 0 + if is_first_save: + _, target, input_name, _, split, metadata = QUEUE_ITEM(*entry.items[0]) + target = torch.load(io.BytesIO(target.getbuffer()), map_location="cpu").item() + manifest_manager.update(input_name, save_name, target, split, metadata) + + prediction_buffers = [item.prediction_buffer for item in entry.items] + _save_predictions(prediction_buffers, save_path, is_first_save) + name_to_items[save_name].save_count += 1 + name_to_items[save_name].items = [] + + return name_to_items + + +def _save_predictions( + prediction_buffers: List[io.BytesIO], save_path: str, is_first_save: bool +) -> None: + """Saves the embedding tensors as list to .pt files. + + If it's not the first save to this save_path, the new predictions are appended to + the existing ones and saved to the same file. + + Example use-case: Save all patch embeddings corresponding to the same WSI to a single file. 
+ """ + predictions = [ + torch.load(io.BytesIO(buffer.getbuffer()), map_location="cpu") + for buffer in prediction_buffers + ] + + if not is_first_save: + previous_predictions = torch.load(save_path, map_location="cpu") + if not isinstance(previous_predictions, list): + raise ValueError("Previous predictions should be a list of tensors.") + predictions = predictions + previous_predictions + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + torch.save(predictions, save_path)
diff --git a/src/eva/core/callbacks/writers/typings.py b/src/eva/core/callbacks/writers/embeddings/typings.py similarity index 89% rename from src/eva/core/callbacks/writers/typings.py rename to src/eva/core/callbacks/writers/embeddings/typings.py index 9e061de8..377a57e4 100644 --- a/src/eva/core/callbacks/writers/typings.py +++ b/src/eva/core/callbacks/writers/embeddings/typings.py @@ -14,8 +14,8 @@ class QUEUE_ITEM(NamedTuple): target_buffer: io.BytesIO """IO buffer containing the target tensor.""" - input_name: str - """Name of the original input file that was used to generate the embedding.""" + data_name: str + """Name of the input data that was used to generate the embedding.""" save_name: str """Name to store the generated embedding."""
diff --git a/src/eva/core/data/datasets/__init__.py b/src/eva/core/data/datasets/__init__.py index b9826b5b..ba4da0cf 100644 --- a/src/eva/core/data/datasets/__init__.py +++ b/src/eva/core/data/datasets/__init__.py @@ -1,11 +1,11 @@ """Datasets API.""" from eva.core.data.datasets.base import Dataset -from eva.core.data.datasets.dataset import TorchDataset -from eva.core.data.datasets.embeddings import ( +from eva.core.data.datasets.classification import ( EmbeddingsClassificationDataset, MultiEmbeddingsClassificationDataset, ) +from eva.core.data.datasets.dataset import TorchDataset __all__ = [ "Dataset",
diff --git a/src/eva/core/data/datasets/classification/__init__.py b/src/eva/core/data/datasets/classification/__init__.py new file mode 100644 index 00000000..442bbe19 --- /dev/null +++ b/src/eva/core/data/datasets/classification/__init__.py @@ -0,0 +1,8 @@ +"""Embedding classification datasets API.""" + +from eva.core.data.datasets.classification.embeddings import EmbeddingsClassificationDataset +from eva.core.data.datasets.classification.multi_embeddings import ( + MultiEmbeddingsClassificationDataset, +) + +__all__ = ["EmbeddingsClassificationDataset", "MultiEmbeddingsClassificationDataset"]
diff --git a/src/eva/core/data/datasets/embeddings/classification/embeddings.py b/src/eva/core/data/datasets/classification/embeddings.py similarity index 87% rename from src/eva/core/data/datasets/embeddings/classification/embeddings.py rename to src/eva/core/data/datasets/classification/embeddings.py index 8904394d..7b4cce07 100644 --- a/src/eva/core/data/datasets/embeddings/classification/embeddings.py +++ b/src/eva/core/data/datasets/classification/embeddings.py @@ -3,14 +3,13 @@ import os from typing import Callable, Dict, Literal -import numpy as np import torch from typing_extensions import override -from eva.core.data.datasets.embeddings import base +from eva.core.data.datasets import embeddings as embeddings_base -class EmbeddingsClassificationDataset(base.EmbeddingsDataset): +class EmbeddingsClassificationDataset(embeddings_base.EmbeddingsDataset[torch.Tensor]): """Embeddings dataset class for classification tasks.""" def __init__( @@ -18,7 +17,7 @@ def __init__( root: str, manifest_file: str, split: Literal["train", "val", "test"] | None = None, - column_mapping: 
Dict[str, str] = base.default_column_mapping, + column_mapping: Dict[str, str] = embeddings_base.default_column_mapping, embeddings_transforms: Callable | None = None, target_transforms: Callable | None = None, ) -> None: @@ -63,9 +62,9 @@ def _load_embeddings(self, index: int) -> torch.Tensor: return tensor.squeeze(0) @override - def _load_target(self, index: int) -> np.ndarray: + def _load_target(self, index: int) -> torch.Tensor: target = self._data.at[index, self._column_mapping["target"]] - return np.asarray(target, dtype=np.int64) + return torch.tensor(target, dtype=torch.int64) @override def __len__(self) -> int: diff --git a/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py b/src/eva/core/data/datasets/classification/multi_embeddings.py similarity index 94% rename from src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py rename to src/eva/core/data/datasets/classification/multi_embeddings.py index 4cb031da..130c17b7 100644 --- a/src/eva/core/data/datasets/embeddings/classification/multi_embeddings.py +++ b/src/eva/core/data/datasets/classification/multi_embeddings.py @@ -7,10 +7,10 @@ import torch from typing_extensions import override -from eva.core.data.datasets.embeddings import base +from eva.core.data.datasets import embeddings as embeddings_base -class MultiEmbeddingsClassificationDataset(base.EmbeddingsDataset): +class MultiEmbeddingsClassificationDataset(embeddings_base.EmbeddingsDataset[torch.Tensor]): """Dataset class for where a sample corresponds to multiple embeddings. Example use case: Slide level dataset where each slide has multiple patch embeddings. @@ -21,7 +21,7 @@ def __init__( root: str, manifest_file: str, split: Literal["train", "val", "test"], - column_mapping: Dict[str, str] = base.default_column_mapping, + column_mapping: Dict[str, str] = embeddings_base.default_column_mapping, embeddings_transforms: Callable | None = None, target_transforms: Callable | None = None, ): diff --git a/src/eva/core/data/datasets/embeddings/base.py b/src/eva/core/data/datasets/embeddings.py similarity index 91% rename from src/eva/core/data/datasets/embeddings/base.py rename to src/eva/core/data/datasets/embeddings.py index 37b78138..81b22ad1 100644 --- a/src/eva/core/data/datasets/embeddings/base.py +++ b/src/eva/core/data/datasets/embeddings.py @@ -2,9 +2,8 @@ import abc import os -from typing import Callable, Dict, Literal, Tuple +from typing import Callable, Dict, Generic, Literal, Tuple, TypeVar -import numpy as np import pandas as pd import torch from typing_extensions import override @@ -12,6 +11,10 @@ from eva.core.data.datasets import base from eva.core.utils import io +TargetType = TypeVar("TargetType") +"""The target data type.""" + + default_column_mapping: Dict[str, str] = { "path": "embeddings", "target": "target", @@ -21,7 +24,7 @@ """The default column mapping of the variables to the manifest columns.""" -class EmbeddingsDataset(base.Dataset): +class EmbeddingsDataset(base.Dataset, Generic[TargetType]): """Abstract base class for embedding datasets.""" def __init__( @@ -62,32 +65,6 @@ def __init__( self._data: pd.DataFrame - @abc.abstractmethod - def _load_embeddings(self, index: int) -> torch.Tensor: - """Returns the `index`'th embedding sample. - - Args: - index: The index of the data sample to load. - - Returns: - The embedding sample as a tensor. - """ - - @abc.abstractmethod - def _load_target(self, index: int) -> np.ndarray: - """Returns the `index`'th target sample. 
- - Args: - index: The index of the data sample to load. - - Returns: - The sample target as an array. - """ - - @abc.abstractmethod - def __len__(self) -> int: - """Returns the total length of the data.""" - def filename(self, index: int) -> str: """Returns the filename of the `index`'th data sample. @@ -105,7 +82,11 @@ def filename(self, index: int) -> str: def setup(self): self._data = self._load_manifest() - def __getitem__(self, index) -> Tuple[torch.Tensor, np.ndarray]: + @abc.abstractmethod + def __len__(self) -> int: + """Returns the total length of the data.""" + + def __getitem__(self, index) -> Tuple[torch.Tensor, TargetType]: """Returns the `index`'th data sample. Args: @@ -118,6 +99,28 @@ def __getitem__(self, index) -> Tuple[torch.Tensor, np.ndarray]: target = self._load_target(index) return self._apply_transforms(embeddings, target) + @abc.abstractmethod + def _load_embeddings(self, index: int) -> torch.Tensor: + """Returns the `index`'th embedding sample. + + Args: + index: The index of the data sample to load. + + Returns: + The embedding sample as a tensor. + """ + + @abc.abstractmethod + def _load_target(self, index: int) -> TargetType: + """Returns the `index`'th target sample. + + Args: + index: The index of the data sample to load. + + Returns: + The sample target as an array. + """ + def _load_manifest(self) -> pd.DataFrame: """Loads manifest file and filters the data based on the split column. @@ -132,8 +135,8 @@ def _load_manifest(self) -> pd.DataFrame: return data def _apply_transforms( - self, embeddings: torch.Tensor, target: np.ndarray - ) -> Tuple[torch.Tensor, np.ndarray]: + self, embeddings: torch.Tensor, target: TargetType + ) -> Tuple[torch.Tensor, TargetType]: """Applies the transforms to the provided data and returns them. 
Args: diff --git a/src/eva/core/data/datasets/embeddings/__init__.py b/src/eva/core/data/datasets/embeddings/__init__.py deleted file mode 100644 index efd0eae4..00000000 --- a/src/eva/core/data/datasets/embeddings/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Datasets API.""" - -from eva.core.data.datasets.embeddings.base import EmbeddingsDataset -from eva.core.data.datasets.embeddings.classification import ( - EmbeddingsClassificationDataset, - MultiEmbeddingsClassificationDataset, -) - -__all__ = [ - "EmbeddingsDataset", - "EmbeddingsClassificationDataset", - "MultiEmbeddingsClassificationDataset", -] diff --git a/src/eva/core/data/datasets/embeddings/classification/__init__.py b/src/eva/core/data/datasets/embeddings/classification/__init__.py deleted file mode 100644 index 3f5d8ee7..00000000 --- a/src/eva/core/data/datasets/embeddings/classification/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Embedding cllassification datasets API.""" - -from eva.core.data.datasets.embeddings.classification.embeddings import ( - EmbeddingsClassificationDataset, -) -from eva.core.data.datasets.embeddings.classification.multi_embeddings import ( - MultiEmbeddingsClassificationDataset, -) - -__all__ = ["EmbeddingsClassificationDataset", "MultiEmbeddingsClassificationDataset"] diff --git a/src/eva/core/loggers/__init__.py b/src/eva/core/loggers/__init__.py index f5ccfaa2..61ecbc70 100644 --- a/src/eva/core/loggers/__init__.py +++ b/src/eva/core/loggers/__init__.py @@ -1,5 +1,7 @@ -"""Loggers API.""" +"""Experimental loggers API.""" from eva.core.loggers.dummy import DummyLogger +from eva.core.loggers.experimental_loggers import ExperimentalLoggers +from eva.core.loggers.log import log_parameters -__all__ = ["DummyLogger"] +__all__ = ["DummyLogger", "ExperimentalLoggers", "log_parameters"] diff --git a/src/eva/core/loggers/experimental_loggers.py b/src/eva/core/loggers/experimental_loggers.py new file mode 100644 index 00000000..53b24138 --- /dev/null +++ b/src/eva/core/loggers/experimental_loggers.py @@ -0,0 +1,8 @@ +"""Experiment loggers.""" + +from typing import Union + +from lightning.pytorch.loggers import CSVLogger, TensorBoardLogger + +"""Supported loggers.""" +ExperimentalLoggers = Union[CSVLogger, TensorBoardLogger] diff --git a/src/eva/core/loggers/log/__init__.py b/src/eva/core/loggers/log/__init__.py new file mode 100644 index 00000000..a6b65238 --- /dev/null +++ b/src/eva/core/loggers/log/__init__.py @@ -0,0 +1,5 @@ +"""Experiment loggers actions.""" + +from eva.core.loggers.log.parameters import log_parameters + +__all__ = ["log_parameters"] diff --git a/src/eva/core/loggers/log/parameters.py b/src/eva/core/loggers/log/parameters.py new file mode 100644 index 00000000..53bd2c97 --- /dev/null +++ b/src/eva/core/loggers/log/parameters.py @@ -0,0 +1,64 @@ +"""Text log functionality.""" + +import functools +from typing import Any, Dict + +import yaml + +from eva.core.loggers import experimental_loggers as loggers_lib +from eva.core.loggers.log import utils + + +@functools.singledispatch +def log_parameters( + logger, + tag: str, + parameters: Dict[str, Any], +) -> None: + """Adds parameters to the logger. + + Args: + logger: The desired logger. + tag: The log tag. + parameters: The parameters to log. 
+ """ + utils.raise_not_supported(logger, "parameters") + + +@log_parameters.register +def _( + loggers: list, + tag: str, + parameters: Dict[str, Any], +) -> None: + """Adds parameters to a list of supported loggers.""" + for logger in loggers: + log_parameters(logger, tag=tag, parameters=parameters) + + +@log_parameters.register +def _( + logger: loggers_lib.TensorBoardLogger, + tag: str, + parameters: Dict[str, Any], +) -> None: + """Adds parameters to a TensorBoard logger.""" + as_markdown_text = _yaml_to_markdown(parameters) + logger.experiment.add_text( + tag=tag, + text_string=as_markdown_text, + global_step=0, + ) + + +def _yaml_to_markdown(data: Dict[str, Any]) -> str: + """Casts yaml data to markdown. + + Args: + data: The yaml data. + + Returns: + A markdown-formatted string. + """ + text = yaml.dump(data, sort_keys=False) + return f"```yaml\n{text}```"
diff --git a/src/eva/core/loggers/log/utils.py b/src/eva/core/loggers/log/utils.py new file mode 100644 index 00000000..47ff3895 --- /dev/null +++ b/src/eva/core/loggers/log/utils.py @@ -0,0 +1,13 @@ +"""Logging related utilities.""" + +from loguru import logger as cli_logger + +from eva.core.loggers import ExperimentalLoggers + + +def raise_not_supported(logger: ExperimentalLoggers, data_type: str) -> None: + """Logs that the given logger does not support the provided data type.""" + print("\n") + cli_logger.debug( + f"Logger '{logger.__class__.__name__}' is not supported for " f"'{data_type}' data." + )
diff --git a/src/eva/core/models/modules/module.py b/src/eva/core/models/modules/module.py index cb5e222a..d1e2ab64 100644 --- a/src/eva/core/models/modules/module.py +++ b/src/eva/core/models/modules/module.py @@ -4,6 +4,7 @@ import lightning.pytorch as pl import torch +from lightning.pytorch.strategies.single_device import SingleDeviceStrategy from lightning.pytorch.utilities import memory from lightning.pytorch.utilities.types import STEP_OUTPUT from typing_extensions import override @@ -46,6 +47,21 @@ def default_postprocess(self) -> batch_postprocess.BatchPostProcess: """The default post-processes.""" return batch_postprocess.BatchPostProcess() + @property + def metrics_device(self) -> torch.device: + """Returns the device by which the metrics should be calculated. + + We allocate the metrics to CPU when operating on a single device, as + it is much faster, but to the GPU when employing multiple ones, as the + DDP strategy requires the metrics to be allocated to the module's GPU. + """ + move_to_cpu = isinstance(self.trainer.strategy, SingleDeviceStrategy) + return torch.device("cpu") if move_to_cpu else self.device + + @override + def on_fit_start(self) -> None: + self.metrics.to(device=self.metrics_device) + @override def on_train_batch_end( self, @@ -59,6 +75,10 @@ def on_train_batch_end( batch_outputs=outputs, ) + @override + def on_validation_start(self) -> None: + self.metrics.to(device=self.metrics_device) + @override def on_validation_batch_end( self, @@ -78,6 +98,10 @@ def on_validation_batch_end( def on_validation_epoch_end(self) -> None: self._compute_and_log_metrics(self.metrics.validation_metrics) + @override + def on_test_start(self) -> None: + self.metrics.to(device=self.metrics_device) + @override def on_test_batch_end( self, @@ -110,7 +134,7 @@ def _common_batch_end(self, outputs: STEP_OUTPUT) -> STEP_OUTPUT: The updated outputs.
""" self._postprocess(outputs) - return memory.recursive_detach(outputs, to_cpu=self.device.type == "cpu") + return memory.recursive_detach(outputs, to_cpu=self.metrics_device.type == "cpu") def _forward_and_log_metrics( self, diff --git a/src/eva/core/models/modules/typings.py b/src/eva/core/models/modules/typings.py index fa476bd1..e9c56675 100644 --- a/src/eva/core/models/modules/typings.py +++ b/src/eva/core/models/modules/typings.py @@ -16,7 +16,7 @@ class INPUT_BATCH(NamedTuple): data: torch.Tensor """The data batch.""" - targets: torch.Tensor | Dict[str, Any] | None = None + targets: torch.Tensor | None = None """The target batch.""" metadata: Dict[str, Any] | None = None diff --git a/src/eva/vision/data/datasets/__init__.py b/src/eva/vision/data/datasets/__init__.py index 864d5a4a..7d05c16e 100644 --- a/src/eva/vision/data/datasets/__init__.py +++ b/src/eva/vision/data/datasets/__init__.py @@ -7,7 +7,6 @@ PANDA, Camelyon16, PatchCamelyon, - TotalSegmentatorClassification, WsiClassificationDataset, ) from eva.vision.data.datasets.segmentation import ImageSegmentation, TotalSegmentator2D @@ -22,7 +21,6 @@ "PatchCamelyon", "PANDA", "Camelyon16", - "TotalSegmentatorClassification", "TotalSegmentator2D", "VisionDataset", "WsiDataset", diff --git a/src/eva/vision/data/datasets/_utils.py b/src/eva/vision/data/datasets/_utils.py index 2d2fe30b..1a17d7e9 100644 --- a/src/eva/vision/data/datasets/_utils.py +++ b/src/eva/vision/data/datasets/_utils.py @@ -1,6 +1,6 @@ """Dataset related function and helper functions.""" -from typing import List, Tuple +from typing import List, Sequence, Tuple def indices_to_ranges(indices: List[int]) -> List[Tuple[int, int]]: @@ -33,11 +33,11 @@ def indices_to_ranges(indices: List[int]) -> List[Tuple[int, int]]: return ranges -def ranges_to_indices(ranges: List[Tuple[int, int]]) -> List[int]: +def ranges_to_indices(ranges: Sequence[Tuple[int, int]]) -> List[int]: """Unpacks a list of ranges to individual indices. Args: - ranges: The list of ranges to produce the indices from. + ranges: A sequence of ranges to produce the indices from. Return: A list of the indices. 
diff --git a/src/eva/vision/data/datasets/classification/__init__.py b/src/eva/vision/data/datasets/classification/__init__.py index 0b86ee5c..c9daabbe 100644 --- a/src/eva/vision/data/datasets/classification/__init__.py +++ b/src/eva/vision/data/datasets/classification/__init__.py @@ -6,7 +6,6 @@ from eva.vision.data.datasets.classification.mhist import MHIST from eva.vision.data.datasets.classification.panda import PANDA from eva.vision.data.datasets.classification.patch_camelyon import PatchCamelyon -from eva.vision.data.datasets.classification.total_segmentator import TotalSegmentatorClassification from eva.vision.data.datasets.classification.wsi import WsiClassificationDataset __all__ = [ @@ -14,7 +13,6 @@ "CRC", "MHIST", "PatchCamelyon", - "TotalSegmentatorClassification", "WsiClassificationDataset", "PANDA", "Camelyon16", diff --git a/src/eva/vision/data/datasets/classification/bach.py b/src/eva/vision/data/datasets/classification/bach.py index 935ab609..b8009701 100644 --- a/src/eva/vision/data/datasets/classification/bach.py +++ b/src/eva/vision/data/datasets/classification/bach.py @@ -3,7 +3,8 @@ import os from typing import Callable, Dict, List, Literal, Tuple -import numpy as np +import torch +from torchvision import tv_tensors from torchvision.datasets import folder, utils from typing_extensions import override @@ -52,8 +53,7 @@ def __init__( root: str, split: Literal["train", "val"] | None = None, download: bool = False, - image_transforms: Callable | None = None, - target_transforms: Callable | None = None, + transforms: Callable | None = None, ) -> None: """Initialize the dataset. @@ -68,15 +68,10 @@ def __init__( Note that the download will be executed only by additionally calling the :meth:`prepare_data` method and if the data does not yet exist on disk. - image_transforms: A function/transform that takes in an image - and returns a transformed version. - target_transforms: A function/transform that takes in the target - and transforms it. + transforms: A function/transform which returns a transformed + version of the raw data samples. 
""" - super().__init__( - image_transforms=image_transforms, - target_transforms=target_transforms, - ) + super().__init__(transforms=transforms) self._root = root self._split = split @@ -130,14 +125,14 @@ def validate(self) -> None: ) @override - def load_image(self, index: int) -> np.ndarray: + def load_image(self, index: int) -> tv_tensors.Image: image_path, _ = self._samples[self._indices[index]] - return io.read_image(image_path) + return io.read_image_as_tensor(image_path) @override - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: _, target = self._samples[self._indices[index]] - return np.asarray(target, dtype=np.int64) + return torch.tensor(target, dtype=torch.long) @override def __len__(self) -> int: diff --git a/src/eva/vision/data/datasets/classification/base.py b/src/eva/vision/data/datasets/classification/base.py index 56f95082..1127f6db 100644 --- a/src/eva/vision/data/datasets/classification/base.py +++ b/src/eva/vision/data/datasets/classification/base.py @@ -3,32 +3,29 @@ import abc from typing import Any, Callable, Dict, List, Tuple -import numpy as np +import torch +from torchvision import tv_tensors from typing_extensions import override from eva.vision.data.datasets import vision -class ImageClassification(vision.VisionDataset[Tuple[np.ndarray, np.ndarray]], abc.ABC): +class ImageClassification(vision.VisionDataset[Tuple[tv_tensors.Image, torch.Tensor]], abc.ABC): """Image classification abstract dataset.""" def __init__( self, - image_transforms: Callable | None = None, - target_transforms: Callable | None = None, + transforms: Callable | None = None, ) -> None: """Initializes the image classification dataset. Args: - image_transforms: A function/transform that takes in an image - and returns a transformed version. - target_transforms: A function/transform that takes in the target - and transforms it. + transforms: A function/transform which returns a transformed + version of the raw data samples. """ super().__init__() - self._image_transforms = image_transforms - self._target_transforms = target_transforms + self._transforms = transforms @property def classes(self) -> List[str] | None: @@ -49,7 +46,7 @@ def load_metadata(self, index: int) -> Dict[str, Any] | None: """ @abc.abstractmethod - def load_image(self, index: int) -> np.ndarray: + def load_image(self, index: int) -> tv_tensors.Image: """Returns the `index`'th image sample. Args: @@ -60,7 +57,7 @@ def load_image(self, index: int) -> np.ndarray: """ @abc.abstractmethod - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: """Returns the `index`'th target sample. Args: @@ -76,15 +73,15 @@ def __len__(self) -> int: raise NotImplementedError @override - def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]: + def __getitem__(self, index: int) -> Tuple[tv_tensors.Image, torch.Tensor, Dict[str, Any]]: image = self.load_image(index) target = self.load_target(index) image, target = self._apply_transforms(image, target) return image, target, self.load_metadata(index) or {} def _apply_transforms( - self, image: np.ndarray, target: np.ndarray - ) -> Tuple[np.ndarray, np.ndarray]: + self, image: tv_tensors.Image, target: torch.Tensor + ) -> Tuple[tv_tensors.Image, torch.Tensor]: """Applies the transforms to the provided data and returns them. Args: @@ -94,10 +91,6 @@ def _apply_transforms( Returns: A tuple with the image and the target transformed. 
""" - if self._image_transforms is not None: - image = self._image_transforms(image) - - if self._target_transforms is not None: - target = self._target_transforms(target) - + if self._transforms is not None: + image, target = self._transforms(image, target) return image, target diff --git a/src/eva/vision/data/datasets/classification/camelyon16.py b/src/eva/vision/data/datasets/classification/camelyon16.py index e0072906..10846440 100644 --- a/src/eva/vision/data/datasets/classification/camelyon16.py +++ b/src/eva/vision/data/datasets/classification/camelyon16.py @@ -5,9 +5,10 @@ import os from typing import Any, Callable, Dict, List, Literal, Tuple -import numpy as np import pandas as pd import torch +from torchvision import tv_tensors +from torchvision.transforms.v2 import functional from typing_extensions import override from eva.vision.data.datasets import _validators, wsi @@ -193,18 +194,19 @@ def filename(self, index: int) -> str: return os.path.basename(self._file_paths[self._get_dataset_idx(index)]) @override - def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]: + def __getitem__(self, index: int) -> Tuple[tv_tensors.Image, torch.Tensor, Dict[str, Any]]: return base.ImageClassification.__getitem__(self, index) @override - def load_image(self, index: int) -> torch.Tensor: - return wsi.MultiWsiDataset.__getitem__(self, index) + def load_image(self, index: int) -> tv_tensors.Image: + image_array = wsi.MultiWsiDataset.__getitem__(self, index) + return functional.to_image(image_array) @override - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: file_path = self._file_paths[self._get_dataset_idx(index)] class_name = self.annotations[self._get_id_from_path(file_path)] - return np.asarray(self.class_to_idx[class_name], dtype=np.int64) + return torch.tensor(self.class_to_idx[class_name], dtype=torch.int64) @override def load_metadata(self, index: int) -> Dict[str, Any]: diff --git a/src/eva/vision/data/datasets/classification/crc.py b/src/eva/vision/data/datasets/classification/crc.py index 5c661d45..618aa208 100644 --- a/src/eva/vision/data/datasets/classification/crc.py +++ b/src/eva/vision/data/datasets/classification/crc.py @@ -3,7 +3,8 @@ import os from typing import Callable, Dict, List, Literal, Tuple -import numpy as np +import torch +from torchvision import tv_tensors from torchvision.datasets import folder, utils from typing_extensions import override @@ -37,8 +38,7 @@ def __init__( root: str, split: Literal["train", "val"], download: bool = False, - image_transforms: Callable | None = None, - target_transforms: Callable | None = None, + transforms: Callable | None = None, ) -> None: """Initializes the dataset. @@ -56,15 +56,10 @@ def __init__( Note that the download will be executed only by additionally calling the :meth:`prepare_data` method and if the data does not yet exist on disk. - image_transforms: A function/transform that takes in an image - and returns a transformed version. - target_transforms: A function/transform that takes in the target - and transforms it. + transforms: A function/transform which returns a transformed + version of the raw data samples. 
""" - super().__init__( - image_transforms=image_transforms, - target_transforms=target_transforms, - ) + super().__init__(transforms=transforms) self._root = root self._split = split @@ -122,14 +117,14 @@ def validate(self) -> None: ) @override - def load_image(self, index: int) -> np.ndarray: + def load_image(self, index: int) -> tv_tensors.Image: image_path, _ = self._samples[index] - return io.read_image(image_path) + return io.read_image_as_tensor(image_path) @override - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: _, target = self._samples[index] - return np.asarray(target, dtype=np.int64) + return torch.tensor(target, dtype=torch.long) @override def __len__(self) -> int: diff --git a/src/eva/vision/data/datasets/classification/mhist.py b/src/eva/vision/data/datasets/classification/mhist.py index 75297183..7453e75c 100644 --- a/src/eva/vision/data/datasets/classification/mhist.py +++ b/src/eva/vision/data/datasets/classification/mhist.py @@ -3,7 +3,8 @@ import os from typing import Callable, Dict, List, Literal, Tuple -import numpy as np +import torch +from torchvision import tv_tensors from typing_extensions import override from eva.vision.data.datasets import _validators @@ -18,23 +19,17 @@ def __init__( self, root: str, split: Literal["train", "test"], - image_transforms: Callable | None = None, - target_transforms: Callable | None = None, + transforms: Callable | None = None, ) -> None: """Initialize the dataset. Args: root: Path to the root directory of the dataset. split: Dataset split to use. - image_transforms: A function/transform that takes in an image - and returns a transformed version. - target_transforms: A function/transform that takes in the target - and transforms it. + transforms: A function/transform which returns a transformed + version of the raw data samples. 
""" - super().__init__( - image_transforms=image_transforms, - target_transforms=target_transforms, - ) + super().__init__(transforms=transforms) self._root = root self._split = split @@ -74,16 +69,16 @@ def validate(self) -> None: ) @override - def load_image(self, index: int) -> np.ndarray: + def load_image(self, index: int) -> tv_tensors.Image: image_filename, _ = self._samples[index] image_path = os.path.join(self._dataset_path, image_filename) - return io.read_image(image_path) + return io.read_image_as_tensor(image_path) @override - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: _, label = self._samples[index] target = self.class_to_idx[label] - return np.asarray(target, dtype=np.int64) + return torch.tensor(target, dtype=torch.float32) @override def __len__(self) -> int: diff --git a/src/eva/vision/data/datasets/classification/panda.py b/src/eva/vision/data/datasets/classification/panda.py index 000099e0..b8d2f49c 100644 --- a/src/eva/vision/data/datasets/classification/panda.py +++ b/src/eva/vision/data/datasets/classification/panda.py @@ -5,10 +5,11 @@ import os from typing import Any, Callable, Dict, List, Literal, Tuple -import numpy as np import pandas as pd import torch +from torchvision import tv_tensors from torchvision.datasets import utils +from torchvision.transforms.v2 import functional from typing_extensions import override from eva.core.data import splitting @@ -120,17 +121,18 @@ def filename(self, index: int) -> str: return os.path.basename(self._file_paths[self._get_dataset_idx(index)]) @override - def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]: + def __getitem__(self, index: int) -> Tuple[tv_tensors.Image, torch.Tensor, Dict[str, Any]]: return base.ImageClassification.__getitem__(self, index) @override - def load_image(self, index: int) -> torch.Tensor: - return wsi.MultiWsiDataset.__getitem__(self, index) + def load_image(self, index: int) -> tv_tensors.Image: + image_array = wsi.MultiWsiDataset.__getitem__(self, index) + return functional.to_image(image_array) @override - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: file_path = self._file_paths[self._get_dataset_idx(index)] - return np.asarray(self._get_target_from_path(file_path)) + return torch.tensor(self._get_target_from_path(file_path), dtype=torch.int64) @override def load_metadata(self, index: int) -> Dict[str, Any]: diff --git a/src/eva/vision/data/datasets/classification/patch_camelyon.py b/src/eva/vision/data/datasets/classification/patch_camelyon.py index e9eaa5f5..5891bc41 100644 --- a/src/eva/vision/data/datasets/classification/patch_camelyon.py +++ b/src/eva/vision/data/datasets/classification/patch_camelyon.py @@ -4,8 +4,10 @@ from typing import Callable, Dict, List, Literal import h5py -import numpy as np +import torch +from torchvision import tv_tensors from torchvision.datasets import utils +from torchvision.transforms.v2 import functional from typing_extensions import override from eva.vision.data.datasets import _validators, structs @@ -70,8 +72,7 @@ def __init__( root: str, split: Literal["train", "val", "test"], download: bool = False, - image_transforms: Callable | None = None, - target_transforms: Callable | None = None, + transforms: Callable | None = None, ) -> None: """Initializes the dataset. @@ -82,15 +83,10 @@ def __init__( download: Whether to download the data for the specified split. 
Note that the download will be executed only by additionally calling the :meth:`prepare_data` method. - image_transforms: A function/transform that takes in an image - and returns a transformed version. - target_transforms: A function/transform that takes in the target - and transforms it. + transforms: A function/transform which returns a transformed + version of the raw data samples. """ - super().__init__( - image_transforms=image_transforms, - target_transforms=target_transforms, - ) + super().__init__(transforms=transforms) self._root = root self._split = split @@ -131,13 +127,13 @@ def validate(self) -> None: ) @override - def load_image(self, index: int) -> np.ndarray: + def load_image(self, index: int) -> tv_tensors.Image: return self._load_from_h5("x", index) @override - def load_target(self, index: int) -> np.ndarray: + def load_target(self, index: int) -> torch.Tensor: target = self._load_from_h5("y", index).squeeze() - return np.asarray(target, dtype=np.int64) + return torch.tensor(target, dtype=torch.float32) @override def __len__(self) -> int: @@ -162,7 +158,7 @@ def _load_from_h5( self, data_key: Literal["x", "y"], index: int | None = None, - ) -> np.ndarray: + ) -> tv_tensors.Image: """Load data or targets from an HDF5 file. Args: @@ -176,7 +172,8 @@ def _load_from_h5( h5_file = self._h5_file(data_key) with h5py.File(h5_file, "r") as file: data = file[data_key] - return data[:] if index is None else data[index] # type: ignore + image_array = data[:] if index is None else data[index] # type: ignore + return functional.to_image(image_array) # type: ignore def _fetch_dataset_length(self) -> int: """Fetches the dataset split length from its HDF5 file.""" diff --git a/src/eva/vision/data/datasets/classification/total_segmentator.py b/src/eva/vision/data/datasets/classification/total_segmentator.py deleted file mode 100644 index c7c0c88d..00000000 --- a/src/eva/vision/data/datasets/classification/total_segmentator.py +++ /dev/null @@ -1,213 +0,0 @@ -"""TotalSegmentator 2D segmentation dataset class.""" - -import functools -import os -from glob import glob -from typing import Callable, Dict, List, Literal, Tuple - -import numpy as np -from torchvision.datasets import utils -from typing_extensions import override - -from eva.vision.data.datasets import _utils, _validators, structs -from eva.vision.data.datasets.classification import base -from eva.vision.utils import io - - -class TotalSegmentatorClassification(base.ImageClassification): - """TotalSegmentator multi-label classification dataset.""" - - _train_index_ranges: List[Tuple[int, int]] = [(0, 83)] - """Train range indices.""" - - _val_index_ranges: List[Tuple[int, int]] = [(83, 103)] - """Validation range indices.""" - - _n_slices_per_image: int = 20 - """The amount of slices to sample per 3D CT scan image.""" - - _resources_full: List[structs.DownloadResource] = [ - structs.DownloadResource( - filename="Totalsegmentator_dataset_v201.zip", - url="https://zenodo.org/records/10047292/files/Totalsegmentator_dataset_v201.zip", - md5="fe250e5718e0a3b5df4c4ea9d58a62fe", - ), - ] - """Resources for the full dataset version.""" - - _resources_small: List[structs.DownloadResource] = [ - structs.DownloadResource( - filename="Totalsegmentator_dataset_small_v201.zip", - url="https://zenodo.org/records/10047263/files/Totalsegmentator_dataset_small_v201.zip", - md5="6b5524af4b15e6ba06ef2d700c0c73e0", - ), - ] - """Resources for the small dataset version.""" - - def __init__( - self, - root: str, - split: Literal["train", "val"] | None, - 
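Note: PatchCamelyon's `_load_from_h5` now converts the raw HDF5 array into a `tv_tensors.Image` via `torchvision.transforms.v2.functional.to_image`, so samples come out channels-first instead of as HWC numpy arrays. A hedged sketch of that read-and-convert step against a small, locally created HDF5 file (the file name and dataset key are placeholders):

```python
"""Sketch: load one patch from a PatchCamelyon-style HDF5 file and convert it
to a CHW tv_tensors.Image."""

import h5py
import numpy as np
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional


def load_patch_as_image(h5_path: str, index: int) -> tv_tensors.Image:
    with h5py.File(h5_path, "r") as file:
        image_array = file["x"][index]  # HxWxC uint8 array, e.g. (96, 96, 3)
    return functional.to_image(image_array)  # becomes a CHW tv_tensors.Image


if __name__ == "__main__":
    # Build a tiny in-memory example instead of downloading the real dataset.
    with h5py.File("example_x.h5", "w") as file:
        file.create_dataset("x", data=np.zeros((4, 96, 96, 3), dtype=np.uint8))
    patch = load_patch_as_image("example_x.h5", index=0)
    print(type(patch).__name__, tuple(patch.shape))  # Image (3, 96, 96)
```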
version: Literal["small", "full"] = "small", - download: bool = False, - image_transforms: Callable | None = None, - target_transforms: Callable | None = None, - ) -> None: - """Initialize dataset. - - Args: - root: Path to the root directory of the dataset. The dataset will - be downloaded and extracted here, if it does not already exist. - split: Dataset split to use. If None, the entire dataset is used. - version: The version of the dataset to initialize. - download: Whether to download the data for the specified split. - Note that the download will be executed only by additionally - calling the :meth:`prepare_data` method and if the data does not - exist yet on disk. - image_transforms: A function/transform that takes in an image - and returns a transformed version. - target_transforms: A function/transform that takes in the target - and transforms it. - """ - super().__init__( - image_transforms=image_transforms, - target_transforms=target_transforms, - ) - - self._root = root - self._split = split - self._version = version - self._download = download - - self._samples_dirs: List[str] = [] - self._indices: List[int] = [] - - @functools.cached_property - @override - def classes(self) -> List[str]: - def get_filename(path: str) -> str: - """Returns the filename from the full path.""" - return os.path.basename(path).split(".")[0] - - first_sample_labels = os.path.join( - self._root, self._samples_dirs[0], "segmentations", "*.nii.gz" - ) - return sorted(map(get_filename, glob(first_sample_labels))) - - @property - @override - def class_to_idx(self) -> Dict[str, int]: - return {label: index for index, label in enumerate(self.classes)} - - @override - def filename(self, index: int) -> str: - sample_dir = self._samples_dirs[self._indices[index]] - return os.path.join(sample_dir, "ct.nii.gz") - - @override - def prepare_data(self) -> None: - if self._download: - self._download_dataset() - _validators.check_dataset_exists(self._root, True) - - @override - def configure(self) -> None: - self._samples_dirs = self._fetch_samples_dirs() - self._indices = self._create_indices() - - @override - def validate(self) -> None: - _validators.check_dataset_integrity( - self, - length=1660 if self._split == "train" else 400, - n_classes=117, - first_and_last_labels=("adrenal_gland_left", "vertebrae_T9"), - ) - - @override - def __len__(self) -> int: - return len(self._indices) * self._n_slices_per_image - - @override - def load_image(self, index: int) -> np.ndarray: - image_path = self._get_image_path(index) - slice_index = self._get_sample_slice_index(index) - image_array = io.read_nifti_slice(image_path, slice_index) - return image_array.repeat(3, axis=2) - - @override - def load_target(self, index: int) -> np.ndarray: - masks = self._load_masks(index) - targets = [1 in masks[..., mask_index] for mask_index in range(masks.shape[-1])] - return np.asarray(targets, dtype=np.int64) - - def _load_masks(self, index: int) -> np.ndarray: - """Returns the `index`'th target mask sample.""" - masks_dir = self._get_masks_dir(index) - slice_index = self._get_sample_slice_index(index) - mask_paths = (os.path.join(masks_dir, label + ".nii.gz") for label in self.classes) - masks = [io.read_nifti_slice(path, slice_index) for path in mask_paths] - return np.concatenate(masks, axis=-1) - - def _get_masks_dir(self, index: int) -> str: - """Returns the directory of the corresponding masks.""" - sample_dir = self._get_sample_dir(index) - return os.path.join(self._root, sample_dir, "segmentations") - - def _get_image_path(self, 
index: int) -> str: - """Returns the corresponding image path.""" - sample_dir = self._get_sample_dir(index) - return os.path.join(self._root, sample_dir, "ct.nii.gz") - - def _get_sample_dir(self, index: int) -> str: - """Returns the corresponding sample directory.""" - sample_index = self._indices[index // self._n_slices_per_image] - return self._samples_dirs[sample_index] - - def _get_sample_slice_index(self, index: int) -> int: - """Returns the corresponding slice index.""" - image_path = self._get_image_path(index) - total_slices = io.fetch_total_nifti_slices(image_path) - slice_indices = np.linspace(0, total_slices - 1, num=self._n_slices_per_image, dtype=int) - return slice_indices[index % self._n_slices_per_image] - - def _fetch_samples_dirs(self) -> List[str]: - """Returns the name of all the samples of all the splits of the dataset.""" - sample_filenames = [ - filename - for filename in os.listdir(self._root) - if os.path.isdir(os.path.join(self._root, filename)) - ] - return sorted(sample_filenames) - - def _create_indices(self) -> List[int]: - """Builds the dataset indices for the specified split.""" - split_index_ranges = { - "train": self._train_index_ranges, - "val": self._val_index_ranges, - None: [(0, 103)], - } - index_ranges = split_index_ranges.get(self._split) - if index_ranges is None: - raise ValueError("Invalid data split. Use 'train', 'val' or `None`.") - - return _utils.ranges_to_indices(index_ranges) - - def _download_dataset(self) -> None: - """Downloads the dataset.""" - dataset_resources = { - "small": self._resources_small, - "full": self._resources_full, - None: (0, 103), - } - resources = dataset_resources.get(self._version) - if resources is None: - raise ValueError("Invalid data version. Use 'small' or 'full'.") - - for resource in resources: - utils.download_and_extract_archive( - resource.url, - download_root=self._root, - filename=resource.filename, - remove_finished=True, - ) diff --git a/src/eva/vision/data/datasets/classification/wsi.py b/src/eva/vision/data/datasets/classification/wsi.py index d34cde8b..3889be1e 100644 --- a/src/eva/vision/data/datasets/classification/wsi.py +++ b/src/eva/vision/data/datasets/classification/wsi.py @@ -5,6 +5,8 @@ import numpy as np import pandas as pd +import torch +from torchvision import tv_tensors from typing_extensions import override from eva.vision.data.datasets import wsi @@ -72,7 +74,7 @@ def filename(self, index: int) -> str: return os.path.basename(path) if os.path.isabs(path) else path @override - def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]: + def __getitem__(self, index: int) -> Tuple[tv_tensors.Image, torch.Tensor, Dict[str, Any]]: return base.ImageClassification.__getitem__(self, index) @override diff --git a/src/eva/vision/data/datasets/segmentation/total_segmentator.py b/src/eva/vision/data/datasets/segmentation/total_segmentator.py index 4892e6b6..92bb8992 100644 --- a/src/eva/vision/data/datasets/segmentation/total_segmentator.py +++ b/src/eva/vision/data/datasets/segmentation/total_segmentator.py @@ -18,14 +18,14 @@ class TotalSegmentator2D(base.ImageSegmentation): """TotalSegmentator 2D segmentation dataset.""" - _train_index_ranges: List[Tuple[int, int]] = [(0, 83)] - """Train range indices.""" + _expected_dataset_lengths: Dict[str, int] = { + "train_small": 29892, + "val_small": 6480, + } + """Dataset version and split to the expected size.""" - _val_index_ranges: List[Tuple[int, int]] = [(83, 103)] - """Validation range indices.""" - - 
_n_slices_per_image: int = 20 - """The amount of slices to sample per 3D CT scan image.""" + _sample_every_n_slices: int | None = None + """The amount of slices to sub-sample per 3D CT scan image.""" _resources_full: List[structs.DownloadResource] = [ structs.DownloadResource( @@ -49,7 +49,7 @@ def __init__( self, root: str, split: Literal["train", "val"] | None, - version: Literal["small", "full"] = "small", + version: Literal["small", "full"] | None = "small", download: bool = False, as_uint8: bool = True, transforms: Callable | None = None, @@ -60,7 +60,8 @@ def __init__( root: Path to the root directory of the dataset. The dataset will be downloaded and extracted here, if it does not already exist. split: Dataset split to use. If `None`, the entire dataset is used. - version: The version of the dataset to initialize. + version: The version of the dataset to initialize. If `None`, it will + use the files located at root as is and wont perform any checks. download: Whether to download the data for the specified split. Note that the download will be executed only by additionally calling the :meth:`prepare_data` method and if the data does not @@ -78,7 +79,7 @@ def __init__( self._as_uint8 = as_uint8 self._samples_dirs: List[str] = [] - self._indices: List[int] = [] + self._indices: List[Tuple[int, int]] = [] @functools.cached_property @override @@ -99,7 +100,8 @@ def class_to_idx(self) -> Dict[str, int]: @override def filename(self, index: int) -> str: - sample_dir = self._samples_dirs[self._indices[index]] + sample_idx, _ = self._indices[index] + sample_dir = self._samples_dirs[sample_idx] return os.path.join(sample_dir, "ct.nii.gz") @override @@ -114,21 +116,24 @@ def configure(self) -> None: @override def validate(self) -> None: + if self._version is None: + return + _validators.check_dataset_integrity( self, - length=1660 if self._split == "train" else 400, + length=self._expected_dataset_lengths.get(f"{self._split}_{self._version}", 0), n_classes=117, first_and_last_labels=("adrenal_gland_left", "vertebrae_T9"), ) @override def __len__(self) -> int: - return len(self._indices) * self._n_slices_per_image + return len(self._indices) @override def load_image(self, index: int) -> tv_tensors.Image: - image_path = self._get_image_path(index) - slice_index = self._get_sample_slice_index(index) + sample_index, slice_index = self._indices[index] + image_path = self._get_image_path(sample_index) image_array = io.read_nifti_slice(image_path, slice_index) if self._as_uint8: image_array = convert.to_8bit(image_array) @@ -137,8 +142,8 @@ def load_image(self, index: int) -> tv_tensors.Image: @override def load_mask(self, index: int) -> tv_tensors.Mask: - masks_dir = self._get_masks_dir(index) - slice_index = self._get_sample_slice_index(index) + sample_index, slice_index = self._indices[index] + masks_dir = self._get_masks_dir(sample_index) mask_paths = (os.path.join(masks_dir, label + ".nii.gz") for label in self.classes) one_hot_encoded = np.concatenate( [io.read_nifti_slice(path, slice_index) for path in mask_paths], @@ -149,27 +154,20 @@ def load_mask(self, index: int) -> tv_tensors.Mask: segmentation_label = np.argmax(one_hot_encoded_with_bg, axis=2) return tv_tensors.Mask(segmentation_label) - def _get_masks_dir(self, index: int) -> str: - """Returns the directory of the corresponding masks.""" - sample_dir = self._get_sample_dir(index) - return os.path.join(self._root, sample_dir, "segmentations") - - def _get_image_path(self, index: int) -> str: + def _get_image_path(self, sample_index: 
int) -> str: """Returns the corresponding image path.""" - sample_dir = self._get_sample_dir(index) + sample_dir = self._samples_dirs[sample_index] return os.path.join(self._root, sample_dir, "ct.nii.gz") - def _get_sample_dir(self, index: int) -> str: - """Returns the corresponding sample directory.""" - sample_index = self._indices[index // self._n_slices_per_image] - return self._samples_dirs[sample_index] + def _get_masks_dir(self, sample_index: int) -> str: + """Returns the directory of the corresponding masks.""" + sample_dir = self._samples_dirs[sample_index] + return os.path.join(self._root, sample_dir, "segmentations") - def _get_sample_slice_index(self, index: int) -> int: - """Returns the corresponding slice index.""" - image_path = self._get_image_path(index) - total_slices = io.fetch_total_nifti_slices(image_path) - slice_indices = np.linspace(0, total_slices - 1, num=self._n_slices_per_image, dtype=int) - return slice_indices[index % self._n_slices_per_image] + def _get_number_of_slices_per_sample(self, sample_index: int) -> int: + """Returns the total amount of slices of a sample.""" + image_path = self._get_image_path(sample_index) + return io.fetch_total_nifti_slices(image_path) def _fetch_samples_dirs(self) -> List[str]: """Returns the name of all the samples of all the splits of the dataset.""" @@ -180,29 +178,46 @@ def _fetch_samples_dirs(self) -> List[str]: ] return sorted(sample_filenames) - def _create_indices(self) -> List[int]: - """Builds the dataset indices for the specified split.""" - split_index_ranges = { - "train": self._train_index_ranges, - "val": self._val_index_ranges, - None: [(0, 103)], - } - index_ranges = split_index_ranges.get(self._split) - if index_ranges is None: - raise ValueError("Invalid data split. Use 'train', 'val' or `None`.") + def _get_split_indices(self) -> List[int]: + """Returns the samples indices that corresponding the dataset split and version.""" + key = f"{self._split}_{self._version}" + match key: + case "train_small": + index_ranges = [(0, 83)] + case "val_small": + index_ranges = [(83, 102)] + case _: + index_ranges = [(0, len(self._samples_dirs))] return _utils.ranges_to_indices(index_ranges) + def _create_indices(self) -> List[Tuple[int, int]]: + """Builds the dataset indices for the specified split. + + Returns: + A list of tuples, where the first value indicates the + sample index which the second its corresponding slice + index. + """ + indices = [ + (sample_idx, slide_idx) + for sample_idx in self._get_split_indices() + for slide_idx in range(self._get_number_of_slices_per_sample(sample_idx)) + if slide_idx % (self._sample_every_n_slices or 1) == 0 + ] + return indices + def _download_dataset(self) -> None: """Downloads the dataset.""" dataset_resources = { "small": self._resources_small, "full": self._resources_full, - None: (0, 103), } - resources = dataset_resources.get(self._version) + resources = dataset_resources.get(self._version or "") if resources is None: - raise ValueError("Invalid data version. Use 'small' or 'full'.") + raise ValueError( + f"Can't download data version '{self._version}'. Use 'small' or 'full'." 
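Note: instead of multiplying a fixed `_n_slices_per_image` into the dataset length, the reworked `_create_indices` enumerates every `(sample_index, slice_index)` pair up front, optionally thinned by `_sample_every_n_slices`, so `__len__` and `__getitem__` simply index that list. A small sketch of the enumeration, with a hypothetical slice-count lookup standing in for reading the NIfTI headers:

```python
"""Sketch of the (sample_index, slice_index) enumeration used by the reworked
TotalSegmentator2D; `slices_per_sample` stands in for io.fetch_total_nifti_slices."""

from typing import List, Tuple

# Hypothetical per-sample slice counts (in the dataset these come from the
# ct.nii.gz headers of each sample directory).
slices_per_sample = {0: 5, 1: 3}
sample_every_n_slices = 2  # None would keep every slice


def create_indices(sample_indices: List[int]) -> List[Tuple[int, int]]:
    return [
        (sample_idx, slice_idx)
        for sample_idx in sample_indices
        for slice_idx in range(slices_per_sample[sample_idx])
        if slice_idx % (sample_every_n_slices or 1) == 0
    ]


indices = create_indices([0, 1])
assert indices == [(0, 0), (0, 2), (0, 4), (1, 0), (1, 2)]
assert len(indices) == 5  # this is what __len__ now returns
```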
+ ) for resource in resources: if os.path.isdir(self._root): diff --git a/src/eva/vision/data/datasets/wsi.py b/src/eva/vision/data/datasets/wsi.py index 07bd5542..3557bfc5 100644 --- a/src/eva/vision/data/datasets/wsi.py +++ b/src/eva/vision/data/datasets/wsi.py @@ -4,9 +4,10 @@ import os from typing import Callable, List -import numpy as np from loguru import logger from torch.utils.data import dataset as torch_datasets +from torchvision import tv_tensors +from torchvision.transforms.v2 import functional from typing_extensions import override from eva.core.data.datasets import base @@ -71,14 +72,15 @@ def _coords(self) -> wsi.PatchCoordinates: ) @override - def __getitem__(self, index: int) -> np.ndarray: + def __getitem__(self, index: int) -> tv_tensors.Image: x, y = self._coords.x_y[index] width, height, level_idx = self._coords.width, self._coords.height, self._coords.level_idx patch = self._wsi.read_region((x, y), level_idx, (width, height)) + patch = functional.to_image(patch) patch = self._apply_transforms(patch) return patch - def _apply_transforms(self, image: np.ndarray) -> np.ndarray: + def _apply_transforms(self, image: tv_tensors.Image) -> tv_tensors.Image: if self._image_transforms is not None: image = self._image_transforms(image) return image diff --git a/src/eva/vision/data/transforms/common/resize_and_crop.py b/src/eva/vision/data/transforms/common/resize_and_crop.py index f1956a66..46b5aa67 100644 --- a/src/eva/vision/data/transforms/common/resize_and_crop.py +++ b/src/eva/vision/data/transforms/common/resize_and_crop.py @@ -3,10 +3,10 @@ from typing import Callable, Sequence import torch -import torchvision.transforms.v2 as torch_transforms +from torchvision.transforms import v2 -class ResizeAndCrop(torch_transforms.Compose): +class ResizeAndCrop(v2.Compose): """Resizes, crops and normalizes an input image while preserving its aspect ratio.""" def __init__( @@ -32,11 +32,10 @@ def __init__( def _build_transforms(self) -> Sequence[Callable]: """Builds and returns the list of transforms.""" transforms = [ - torch_transforms.ToImage(), - torch_transforms.Resize(size=self._size), - torch_transforms.CenterCrop(size=self._size), - torch_transforms.ToDtype(torch.float32, scale=True), - torch_transforms.Normalize( + v2.Resize(size=self._size), + v2.CenterCrop(size=self._size), + v2.ToDtype(torch.float32, scale=True), + v2.Normalize( mean=self._mean, std=self._std, ), diff --git a/src/eva/vision/utils/io/__init__.py b/src/eva/vision/utils/io/__init__.py index 85d669b1..8fe1177b 100644 --- a/src/eva/vision/utils/io/__init__.py +++ b/src/eva/vision/utils/io/__init__.py @@ -1,11 +1,12 @@ """Vision I/O utilities.""" -from eva.vision.utils.io.image import read_image +from eva.vision.utils.io.image import read_image, read_image_as_tensor from eva.vision.utils.io.nifti import fetch_total_nifti_slices, read_nifti_slice from eva.vision.utils.io.text import read_csv __all__ = [ "read_image", + "read_image_as_tensor", "fetch_total_nifti_slices", "read_nifti_slice", "read_csv", diff --git a/src/eva/vision/utils/io/image.py b/src/eva/vision/utils/io/image.py index b20137d1..13f62187 100644 --- a/src/eva/vision/utils/io/image.py +++ b/src/eva/vision/utils/io/image.py @@ -3,6 +3,8 @@ import cv2 import numpy as np import numpy.typing as npt +from torchvision import tv_tensors +from torchvision.transforms.v2 import functional from eva.vision.utils.io import _utils @@ -14,7 +16,7 @@ def read_image(path: str) -> npt.NDArray[np.uint8]: path: The path of the image file. 
Returns: - The RGB image as a numpy array. + The RGB image as a numpy array (HxWxC). Raises: FileExistsError: If the path does not exist or it is unreachable. @@ -23,6 +25,23 @@ def read_image(path: str) -> npt.NDArray[np.uint8]: return read_image_as_array(path, cv2.IMREAD_COLOR) +def read_image_as_tensor(path: str) -> tv_tensors.Image: + """Reads and loads the image from a file path as a RGB torch tensor. + + Args: + path: The path of the image file. + + Returns: + The RGB image as a torch tensor (CxHxW). + + Raises: + FileExistsError: If the path does not exist or it is unreachable. + IOError: If the image could not be loaded. + """ + image_array = read_image(path) + return functional.to_image(image_array) + + def read_image_as_array(path: str, flags: int = cv2.IMREAD_UNCHANGED) -> npt.NDArray[np.uint8]: """Reads and loads an image file as a numpy array. @@ -51,4 +70,4 @@ def read_image_as_array(path: str, flags: int = cv2.IMREAD_UNCHANGED) -> npt.NDA if image.ndim == 2 and flags == cv2.IMREAD_COLOR: image = image[:, :, np.newaxis] - return np.asarray(image).astype(np.uint8) + return np.asarray(image, dtype=np.uint8) diff --git a/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/semantic_labels/masks.nii.gz b/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/semantic_labels/masks.nii.gz new file mode 100644 index 00000000..05b28f40 Binary files /dev/null and b/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/semantic_labels/masks.nii.gz differ diff --git a/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/segmentations/semantic_labels/masks.nii.gz b/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/segmentations/semantic_labels/masks.nii.gz new file mode 100644 index 00000000..05b28f40 Binary files /dev/null and b/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/segmentations/semantic_labels/masks.nii.gz differ diff --git a/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/segmentations/semantic_labels/masks.nii.gz b/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/segmentations/semantic_labels/masks.nii.gz new file mode 100644 index 00000000..05b28f40 Binary files /dev/null and b/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/segmentations/semantic_labels/masks.nii.gz differ diff --git a/tests/eva/core/callbacks/writers/embeddings/__init__.py b/tests/eva/core/callbacks/writers/embeddings/__init__.py new file mode 100644 index 00000000..e2937b63 --- /dev/null +++ b/tests/eva/core/callbacks/writers/embeddings/__init__.py @@ -0,0 +1 @@ +"""Embeddings writer callback unit tests.""" diff --git a/tests/eva/core/callbacks/writers/test_embeddings.py b/tests/eva/core/callbacks/writers/embeddings/test_classification.py similarity index 99% rename from tests/eva/core/callbacks/writers/test_embeddings.py rename to tests/eva/core/callbacks/writers/embeddings/test_classification.py index ec04515c..0b7ab822 100644 --- a/tests/eva/core/callbacks/writers/test_embeddings.py +++ b/tests/eva/core/callbacks/writers/embeddings/test_classification.py @@ -42,7 +42,7 @@ def test_embeddings_writer(datamodule: datamodules.DataModule, model: modules.He metadata_keys = datamodule.datasets.predict[0]._metadata_keys # type: ignore 
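Note: the new `read_image_as_tensor` helper keeps `read_image` (HWC numpy) for existing callers and layers the channels-first conversion on top through `functional.to_image`. The snippet below sketches only that conversion step on a synthetic array; it is not the eva helper itself, and the array merely stands in for a decoded image.

```python
"""Sketch of the HWC-array-to-CHW-tensor conversion behind read_image_as_tensor."""

import numpy as np
import numpy.typing as npt
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional


def array_to_image_tensor(image_array: npt.NDArray[np.uint8]) -> tv_tensors.Image:
    """Converts an HxWxC uint8 array into a CxHxW tv_tensors.Image."""
    return functional.to_image(image_array)


if __name__ == "__main__":
    # Stand-in for the array returned by read_image(path); shape is HxWxC.
    rgb_array = np.full((224, 224, 3), 127, dtype=np.uint8)
    image = array_to_image_tensor(rgb_array)
    print(type(image).__name__, tuple(image.shape))  # Image (3, 224, 224)
```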
expected_filenames = datamodule.datasets.predict[0]._filenames # type: ignore grouping_enabled = expected_filenames is not None - callback = writers.EmbeddingsWriter( + callback = writers.ClassificationEmbeddingsWriter( output_dir=output_dir, dataloader_idx_map={0: "train", 1: "val", 2: "test"}, backbone=nn.Flatten(), diff --git a/tests/eva/core/data/datasets/embeddings/classification/__init__.py b/tests/eva/core/data/datasets/classification/__init__.py similarity index 100% rename from tests/eva/core/data/datasets/embeddings/classification/__init__.py rename to tests/eva/core/data/datasets/classification/__init__.py diff --git a/tests/eva/core/data/datasets/embeddings/classification/test_embeddings.py b/tests/eva/core/data/datasets/classification/test_embeddings.py similarity index 91% rename from tests/eva/core/data/datasets/embeddings/classification/test_embeddings.py rename to tests/eva/core/data/datasets/classification/test_embeddings.py index 70459d6c..5803f4be 100644 --- a/tests/eva/core/data/datasets/embeddings/classification/test_embeddings.py +++ b/tests/eva/core/data/datasets/classification/test_embeddings.py @@ -3,11 +3,10 @@ import os from typing import Literal -import numpy as np import pytest import torch -from eva.core.data.datasets.embeddings import classification +from eva.core.data.datasets import classification @pytest.mark.parametrize("split", ["train", "val"]) @@ -21,7 +20,7 @@ def test_embedding_dataset(embeddings_dataset: classification.EmbeddingsClassifi embeddings, target = sample assert isinstance(embeddings, torch.Tensor) assert embeddings.shape == (8,) - assert isinstance(target, np.ndarray) + assert isinstance(target, torch.Tensor) assert target in [0, 1] diff --git a/tests/eva/core/data/datasets/embeddings/classification/test_multi_embeddings.py b/tests/eva/core/data/datasets/classification/test_multi_embeddings.py similarity index 98% rename from tests/eva/core/data/datasets/embeddings/classification/test_multi_embeddings.py rename to tests/eva/core/data/datasets/classification/test_multi_embeddings.py index 8439461b..7eff0fc9 100644 --- a/tests/eva/core/data/datasets/embeddings/classification/test_multi_embeddings.py +++ b/tests/eva/core/data/datasets/classification/test_multi_embeddings.py @@ -9,7 +9,7 @@ import torch.nn from eva.core.data import transforms -from eva.core.data.datasets.embeddings import classification +from eva.core.data.datasets import classification @pytest.mark.parametrize( diff --git a/tests/eva/core/data/datasets/embeddings/__init__.py b/tests/eva/core/data/datasets/embeddings/__init__.py deleted file mode 100644 index 6c935243..00000000 --- a/tests/eva/core/data/datasets/embeddings/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Tests for embeddings datasets.""" diff --git a/tests/eva/vision/data/datasets/classification/test_bach.py b/tests/eva/vision/data/datasets/classification/test_bach.py index 2d063a77..8c3ad47f 100644 --- a/tests/eva/vision/data/datasets/classification/test_bach.py +++ b/tests/eva/vision/data/datasets/classification/test_bach.py @@ -3,8 +3,9 @@ import os from typing import Literal -import numpy as np import pytest +import torch +from torchvision import tv_tensors from eva.vision.data import datasets @@ -29,15 +30,15 @@ def test_length(bach_dataset: datasets.BACH, expected_length: int) -> None: ) def test_sample(bach_dataset: datasets.BACH, index: int) -> None: """Tests the format of a dataset sample.""" - sample = bach_dataset[index] # assert data sample is a tuple + sample = bach_dataset[index] assert 
isinstance(sample, tuple) assert len(sample) == 3 # assert the format of the `image` and `target` image, target, _ = sample - assert isinstance(image, np.ndarray) - assert image.shape == (16, 16, 3) - assert isinstance(target, np.ndarray) + assert isinstance(image, tv_tensors.Image) + assert image.shape == (3, 16, 16) + assert isinstance(target, torch.Tensor) assert target in [0, 1, 2, 3] diff --git a/tests/eva/vision/data/datasets/classification/test_camelyon16.py b/tests/eva/vision/data/datasets/classification/test_camelyon16.py index 26125254..e198dc87 100644 --- a/tests/eva/vision/data/datasets/classification/test_camelyon16.py +++ b/tests/eva/vision/data/datasets/classification/test_camelyon16.py @@ -3,10 +3,10 @@ import os from typing import Any, Literal -import numpy as np import pytest import torch import torchvision.transforms.v2 as torch_transforms +from torchvision import tv_tensors from eva.vision.data import datasets from eva.vision.data import transforms as eva_transforms @@ -62,12 +62,10 @@ def _check_batch_shape(batch: Any): assert len(batch) == 3 image, target, metadata = batch - assert isinstance(image, torch.Tensor) + assert isinstance(image, tv_tensors.Image) assert image.shape == (3, TARGET_SIZE, TARGET_SIZE) - assert isinstance(target, np.ndarray) - assert target.size == 1 - + assert isinstance(target, torch.Tensor) assert isinstance(metadata, dict) assert "wsi_id" in metadata diff --git a/tests/eva/vision/data/datasets/classification/test_crc.py b/tests/eva/vision/data/datasets/classification/test_crc.py index 1fb276bd..c3f5ba09 100644 --- a/tests/eva/vision/data/datasets/classification/test_crc.py +++ b/tests/eva/vision/data/datasets/classification/test_crc.py @@ -3,8 +3,9 @@ import os from typing import Literal -import numpy as np import pytest +import torch +from torchvision import tv_tensors from eva.vision.data import datasets @@ -26,9 +27,9 @@ def test_sample(crc_dataset: datasets.CRC, index: int) -> None: assert len(sample) == 3 # assert the format of the `image` and `target` image, target, _ = sample - assert isinstance(image, np.ndarray) - assert image.shape == (16, 16, 3) - assert isinstance(target, np.ndarray) + assert isinstance(image, tv_tensors.Image) + assert image.shape == (3, 16, 16) + assert isinstance(target, torch.Tensor) assert target in [0, 1, 2, 3, 4, 5, 6, 7, 8] diff --git a/tests/eva/vision/data/datasets/classification/test_mhist.py b/tests/eva/vision/data/datasets/classification/test_mhist.py index f9e70105..5249e52e 100644 --- a/tests/eva/vision/data/datasets/classification/test_mhist.py +++ b/tests/eva/vision/data/datasets/classification/test_mhist.py @@ -3,8 +3,9 @@ import os from typing import Literal -import numpy as np import pytest +import torch +from torchvision import tv_tensors from eva.vision.data import datasets @@ -35,9 +36,9 @@ def test_sample(mhist_dataset: datasets.MHIST, index: int) -> None: assert len(sample) == 3 # assert the format of the `image` and `target` image, target, _ = sample - assert isinstance(image, np.ndarray) - assert image.shape == (224, 224, 3) - assert isinstance(target, np.ndarray) + assert isinstance(image, tv_tensors.Image) + assert image.shape == (3, 224, 224) + assert isinstance(target, torch.Tensor) assert target in [0, 1] diff --git a/tests/eva/vision/data/datasets/classification/test_panda.py b/tests/eva/vision/data/datasets/classification/test_panda.py index 8f523aeb..6b901344 100644 --- a/tests/eva/vision/data/datasets/classification/test_panda.py +++ 
b/tests/eva/vision/data/datasets/classification/test_panda.py @@ -8,6 +8,7 @@ import pytest import torch import torchvision.transforms.v2 as torch_transforms +from torchvision import tv_tensors from eva.vision.data import datasets from eva.vision.data import transforms as eva_transforms @@ -85,12 +86,10 @@ def _check_batch_shape(batch: Any): assert len(batch) == 3 image, target, metadata = batch - assert isinstance(image, torch.Tensor) + assert isinstance(image, tv_tensors.Image) assert image.shape == (3, TARGET_SIZE, TARGET_SIZE) - assert isinstance(target, np.ndarray) - assert target.size == 1 - + assert isinstance(target, torch.Tensor) assert isinstance(metadata, dict) assert "wsi_id" in metadata diff --git a/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py b/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py index 9f9270f3..30ecb73a 100644 --- a/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py +++ b/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py @@ -3,8 +3,9 @@ import os from typing import Literal -import numpy as np import pytest +import torch +from torchvision import tv_tensors from eva.vision.data import datasets @@ -30,9 +31,9 @@ def test_sample(patch_camelyon_dataset: datasets.PatchCamelyon) -> None: assert len(sample) == 3 # assert the format of the `image` and `target` image, target, _ = sample - assert isinstance(image, np.ndarray) - assert image.shape == (96, 96, 3) - assert isinstance(target, np.ndarray) + assert isinstance(image, tv_tensors.Image) + assert image.shape == (3, 96, 96) + assert isinstance(target, torch.Tensor) assert target in [0, 1] diff --git a/tests/eva/vision/data/datasets/classification/test_total_segmentator.py b/tests/eva/vision/data/datasets/classification/test_total_segmentator.py deleted file mode 100644 index 1c694f7b..00000000 --- a/tests/eva/vision/data/datasets/classification/test_total_segmentator.py +++ /dev/null @@ -1,63 +0,0 @@ -"""TotalSegmentator dataset tests.""" - -import os -from typing import Literal - -import numpy as np -import pytest - -from eva.vision.data import datasets - - -@pytest.mark.parametrize( - "split, expected_length", - [("train", 1660), ("val", 400), (None, 2060)], -) -def test_length( - total_segmentator_dataset: datasets.TotalSegmentatorClassification, expected_length: int -) -> None: - """Tests the length of the dataset.""" - assert len(total_segmentator_dataset) == expected_length - - -@pytest.mark.parametrize( - "split", - [ - None, - "train", - ], -) -def test_sample(total_segmentator_dataset: datasets.TotalSegmentatorClassification) -> None: - """Tests the format of a dataset sample.""" - sample = total_segmentator_dataset[0] - # assert data sample is a tuple - assert isinstance(sample, tuple) - assert len(sample) == 3 - # assert the format of the `image` and `target` - image, target, _ = sample - assert isinstance(image, np.ndarray) - assert image.shape == (16, 16, 3) - assert isinstance(target, np.ndarray) - assert all(target == [0, 0, 0]) - - -@pytest.fixture(scope="function") -def total_segmentator_dataset( - split: Literal["train", "val"], - assets_path: str, -) -> datasets.TotalSegmentatorClassification: - """TotalSegmentator dataset fixture.""" - dataset = datasets.TotalSegmentatorClassification( - root=os.path.join( - assets_path, - "vision", - "datasets", - "total_segmentator", - "Totalsegmentator_dataset_v201", - ), - split=split, - download=False, - ) - dataset.prepare_data() - dataset.configure() - return dataset diff --git 
a/tests/eva/vision/data/datasets/segmentation/test_total_segmentator.py b/tests/eva/vision/data/datasets/segmentation/test_total_segmentator.py index 3e7f09e6..9607a2a8 100644 --- a/tests/eva/vision/data/datasets/segmentation/test_total_segmentator.py +++ b/tests/eva/vision/data/datasets/segmentation/test_total_segmentator.py @@ -11,7 +11,7 @@ @pytest.mark.parametrize( "split, expected_length", - [("train", 1660), ("val", 400), (None, 2060)], + [("train", 9), ("val", 9), (None, 9)], ) def test_length( total_segmentator_dataset: datasets.TotalSegmentator2D, expected_length: int @@ -25,6 +25,7 @@ def test_length( [ (None, 0), ("train", 0), + ("val", 0), ], ) def test_sample(total_segmentator_dataset: datasets.TotalSegmentator2D, index: int) -> None: @@ -43,7 +44,7 @@ def test_sample(total_segmentator_dataset: datasets.TotalSegmentator2D, index: i @pytest.fixture(scope="function") def total_segmentator_dataset( - split: Literal["train", "val"], assets_path: str + split: Literal["train", "val"] | None, assets_path: str ) -> datasets.TotalSegmentator2D: """TotalSegmentator2D dataset fixture.""" dataset = datasets.TotalSegmentator2D( @@ -55,6 +56,7 @@ def total_segmentator_dataset( "Totalsegmentator_dataset_v201", ), split=split, + version=None, ) dataset.prepare_data() dataset.configure() diff --git a/tests/eva/vision/data/datasets/test_wsi.py b/tests/eva/vision/data/datasets/test_wsi.py index de816475..87959a60 100644 --- a/tests/eva/vision/data/datasets/test_wsi.py +++ b/tests/eva/vision/data/datasets/test_wsi.py @@ -66,7 +66,7 @@ def test_patch_shape(width: int, height: int, target_mpp: float, root: str, back dataset._wsi.mpp * dataset._wsi.level_downsamples[dataset._coords.level_idx] ) scaled_width, scaled_height = int(mpp_ratio * width), int(mpp_ratio * height) - assert dataset[0].shape == (scaled_width, scaled_height, 3) + assert dataset[0].shape == (3, scaled_width, scaled_height) def test_multi_dataset(root: str): diff --git a/tests/eva/vision/data/transforms/common/test_resize_and_crop.py b/tests/eva/vision/data/transforms/common/test_resize_and_crop.py index 4f3bd2ba..4399b057 100644 --- a/tests/eva/vision/data/transforms/common/test_resize_and_crop.py +++ b/tests/eva/vision/data/transforms/common/test_resize_and_crop.py @@ -2,11 +2,10 @@ from typing import Tuple -import numpy as np -import numpy.typing as npt import pytest import torch from torch import testing +from torchvision import tv_tensors from eva.vision.data.transforms import common @@ -14,23 +13,23 @@ @pytest.mark.parametrize( "image_size, target_size, expected_size, expected_mean", [ - ((512, 224, 3), [112, 224], (3, 112, 224), -0.00392), - ((224, 512, 3), [112, 224], (3, 112, 224), -0.00392), - ((512, 224, 3), [112, 97], (3, 112, 97), -0.00392), - ((512, 512, 3), 224, (3, 224, 224), -0.00392), - ((512, 224, 3), 224, (3, 224, 224), -0.00392), - ((224, 224, 3), 224, (3, 224, 224), -0.00392), - ((97, 97, 3), 224, (3, 224, 224), -0.00392), + ((3, 512, 224), [112, 224], (3, 112, 224), -0.00392), + ((3, 224, 512), [112, 224], (3, 112, 224), -0.00392), + ((3, 512, 224), [112, 97], (3, 112, 97), -0.00392), + ((3, 512, 512), 224, (3, 224, 224), -0.00392), + ((3, 512, 224), 224, (3, 224, 224), -0.00392), + ((3, 224, 224), 224, (3, 224, 224), -0.00392), + ((3, 97, 97), 224, (3, 224, 224), -0.00392), ], ) def test_resize_and_crop( - image_array: npt.NDArray, + image_tensor: tv_tensors.Image, resize_and_crop: common.ResizeAndCrop, expected_size: Tuple[int, int, int], expected_mean: float, ) -> None: """Tests the ResizeAndCrop 
transform.""" - output = resize_and_crop(image_array) + output = resize_and_crop(image_tensor) assert output.shape == expected_size testing.assert_close(output.mean(), torch.tensor(expected_mean)) @@ -42,6 +41,7 @@ def resize_and_crop(target_size: Tuple[int, int, int]) -> common.ResizeAndCrop: @pytest.fixture(scope="function") -def image_array(image_size: Tuple[int, int, int]) -> npt.NDArray: - """Image array fixture.""" - return 127 * np.ones(image_size, np.uint8) +def image_tensor(image_size: Tuple[int, int, int]) -> tv_tensors.Image: + """Image tensor fixture.""" + image_tensor = 127 * torch.ones(image_size, dtype=torch.uint8) + return tv_tensors.Image(image_tensor)