From 067a6ed3104d017f25d60fdde4aec68261afbae2 Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Mon, 29 Nov 2021 23:42:26 +0000 Subject: [PATCH 01/23] add api usage log for io --- torchvision/io/image.py | 11 +++++++++++ torchvision/io/video.py | 4 ++++ 2 files changed, 15 insertions(+) diff --git a/torchvision/io/image.py b/torchvision/io/image.py index f835565016c..dd1801d6bd6 100644 --- a/torchvision/io/image.py +++ b/torchvision/io/image.py @@ -3,6 +3,7 @@ import torch from .._internally_replaced_utils import _get_extension_path +from ..utils import _log_api_usage_once try: @@ -41,6 +42,7 @@ def read_file(path: str) -> torch.Tensor: Returns: data (Tensor) """ + _log_api_usage_once("torchvision.io.read_file") data = torch.ops.image.read_file(path) return data @@ -54,6 +56,7 @@ def write_file(filename: str, data: torch.Tensor) -> None: filename (str): the path to the file to be written data (Tensor): the contents to be written to the output file """ + _log_api_usage_once("torchvision.io.write_file") torch.ops.image.write_file(filename, data) @@ -74,6 +77,7 @@ def decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGE Returns: output (Tensor[image_channels, image_height, image_width]) """ + _log_api_usage_once("torchvision.io.decode_png") output = torch.ops.image.decode_png(input, mode.value, False) return output @@ -93,6 +97,7 @@ def encode_png(input: torch.Tensor, compression_level: int = 6) -> torch.Tensor: Tensor[1]: A one dimensional int8 tensor that contains the raw bytes of the PNG file. """ + _log_api_usage_once("torchvision.io.encode_png") output = torch.ops.image.encode_png(input, compression_level) return output @@ -109,6 +114,7 @@ def write_png(input: torch.Tensor, filename: str, compression_level: int = 6): compression_level (int): Compression factor for the resulting file, it must be a number between 0 and 9. Default: 6 """ + _log_api_usage_once("torchvision.io.write_png") output = encode_png(input, compression_level) write_file(filename, output) @@ -137,6 +143,7 @@ def decode_jpeg( Returns: output (Tensor[image_channels, image_height, image_width]) """ + _log_api_usage_once("torchvision.io.decode_jpeg") device = torch.device(device) if device.type == "cuda": output = torch.ops.image.decode_jpeg_cuda(input, mode.value, device) @@ -160,6 +167,7 @@ def encode_jpeg(input: torch.Tensor, quality: int = 75) -> torch.Tensor: output (Tensor[1]): A one dimensional int8 tensor that contains the raw bytes of the JPEG file. """ + _log_api_usage_once("torchvision.io.encode_jpeg") if quality < 1 or quality > 100: raise ValueError("Image quality should be a positive number between 1 and 100") @@ -178,6 +186,7 @@ def write_jpeg(input: torch.Tensor, filename: str, quality: int = 75): quality (int): Quality of the resulting JPEG file, it must be a number between 1 and 100. 
Default: 75 """ + _log_api_usage_once("torchvision.io.write_jpeg") output = encode_jpeg(input, quality) write_file(filename, output) @@ -201,6 +210,7 @@ def decode_image(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHAN Returns: output (Tensor[image_channels, image_height, image_width]) """ + _log_api_usage_once("torchvision.io.decode_image") output = torch.ops.image.decode_image(input, mode.value) return output @@ -221,6 +231,7 @@ def read_image(path: str, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torc Returns: output (Tensor[image_channels, image_height, image_width]) """ + _log_api_usage_once("torchvision.io.read_image") data = read_file(path) return decode_image(data, mode) diff --git a/torchvision/io/video.py b/torchvision/io/video.py index 0ddd60a4586..cdb426d6d09 100644 --- a/torchvision/io/video.py +++ b/torchvision/io/video.py @@ -9,6 +9,7 @@ import numpy as np import torch +from ..utils import _log_api_usage_once from . import _video_opt @@ -77,6 +78,7 @@ def write_video( audio_codec (str): the name of the audio codec, i.e. "mp3", "aac", etc. audio_options (Dict): dictionary containing options to be passed into the PyAV audio stream """ + _log_api_usage_once("torchvision.io.write_video") _check_av_available() video_array = torch.as_tensor(video_array, dtype=torch.uint8).numpy() @@ -256,6 +258,7 @@ def read_video( aframes (Tensor[K, L]): the audio frames, where `K` is the number of channels and `L` is the number of points info (Dict): metadata for the video and audio. Can contain the fields video_fps (float) and audio_fps (int) """ + _log_api_usage_once("torchvision.io.read_video") from torchvision import get_video_backend @@ -374,6 +377,7 @@ def read_video_timestamps(filename: str, pts_unit: str = "pts") -> Tuple[List[in video_fps (float, optional): the frame rate for the video """ + _log_api_usage_once("torchvision.io.read_video_timestamps") from torchvision import get_video_backend if get_video_backend() != "pyav": From 65cdaeaba83366381b8e7b9fd9850b7c0ca5e636 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Tue, 30 Nov 2021 09:29:00 +0100 Subject: [PATCH 02/23] cleanup CI config (#4983) * [DIRTY] cleanup CI config * remove pip install command * fix syntax * fix jobs * fix syntax * add support for editable install * sync * fix sudo apt install * fix editable install * sync * try pip without user install * [DIRTY] address review comments * [DIRTY] try use dynamic name * switch logic * address remaining comments * cleanup * more cleanup * fix enabling prototype tests * Update .circleci/config.yml.in Co-authored-by: Nicolas Hug * linebreak Co-authored-by: Nicolas Hug --- .circleci/config.yml | 243 ++++++++++++++++++++++++--------------- .circleci/config.yml.in | 247 +++++++++++++++++++++++++--------------- .pre-commit-config.yaml | 5 + 3 files changed, 310 insertions(+), 185 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 81329f6e85a..90ff1ffe079 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -99,6 +99,81 @@ commands: - brew_install: formulae: libtool + apt_install: + parameters: + args: + type: string + descr: + type: string + default: "" + update: + type: boolean + default: true + steps: + - run: + name: > + <<^ parameters.descr >> apt install << parameters.args >> <> + <<# parameters.descr >> << parameters.descr >> <> + command: | + <<# parameters.update >> sudo apt update -qy <> + sudo apt install << parameters.args >> + + pip_install: + parameters: + args: + type: string + descr: + type: string + default: "" 
+ user: + type: boolean + default: true + steps: + - run: + name: > + <<^ parameters.descr >> pip install << parameters.args >> <> + <<# parameters.descr >> << parameters.descr >> <> + command: > + pip install + <<# parameters.user >> --user <> + --progress-bar=off + << parameters.args >> + + install_torchvision: + parameters: + editable: + type: boolean + default: true + steps: + - pip_install: + args: --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + descr: Install PyTorch from nightly releases + - pip_install: + args: --no-build-isolation <<# parameters.editable >> --editable <> . + descr: Install torchvision <<# parameters.editable >> in editable mode <> + + install_prototype_dependencies: + steps: + - pip_install: + args: iopath git+https://github.com/pytorch/data + descr: Install prototype dependencies + + # Most of the test suite is handled by the `unittest` jobs, with completely different workflow and setup. + # This command can be used if only a selection of tests need to be run, for ad-hoc files. + run_tests_selective: + parameters: + file_or_dir: + type: string + steps: + - run: + name: Install test utilities + command: pip install --progress-bar=off pytest pytest-mock + - run: + name: Run tests + command: pytest --junitxml=test-results/junit.xml -v --durations 20 <> + - store_test_results: + path: test-results + binary_common: &binary_common parameters: # Edit these defaults to do a release @@ -171,107 +246,99 @@ jobs: - image: circleci/python:3.7 steps: - checkout + - pip_install: + args: jinja2 pyyaml - run: + name: Check CircleCI config consistency command: | - pip install --user --progress-bar off jinja2 pyyaml python .circleci/regenerate.py git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! Run .circleci/regenerate.py to update config"; exit 1) - python_lint: + lint_python_and_config: docker: - image: circleci/python:3.7 steps: - checkout + - pip_install: + args: pre-commit + descr: Install lint utilities - run: - command: | - pip install --user --progress-bar off pre-commit - pre-commit install-hooks - - run: pre-commit run --all-files + name: Install pre-commit hooks + command: pre-commit install-hooks + - run: + name: Lint Python code and config files + command: pre-commit run --all-files - run: name: Required lint modifications when: on_fail command: git --no-pager diff - python_type_check: + lint_c: docker: - image: circleci/python:3.7 steps: + - apt_install: + args: libtinfo5 + descr: Install additional system libraries - checkout - run: + name: Install lint utilities command: | - sudo apt-get update -y - sudo apt install -y libturbojpeg-dev - pip install --user --progress-bar off mypy - pip install --user --progress-bar off types-requests - pip install --user --progress-bar off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html - pip install --user --progress-bar off git+https://github.com/pytorch/data.git - pip install --user --progress-bar off --no-build-isolation --editable . 
- mypy --config-file mypy.ini - - docstring_parameters_sync: - docker: - - image: circleci/python:3.7 - steps: - - checkout + curl https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64 -o clang-format + chmod +x clang-format + sudo mv clang-format /opt/clang-format - run: - command: | - pip install --user pydocstyle - pydocstyle + name: Lint C code + command: ./.circleci/unittest/linux/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable /opt/clang-format + - run: + name: Required lint modifications + when: on_fail + command: git --no-pager diff - clang_format: + type_check_python: docker: - image: circleci/python:3.7 steps: + - apt_install: + args: libturbojpeg-dev + descr: Install additional system libraries - checkout + - install_torchvision: + editable: true + - install_prototype_dependencies + - pip_install: + args: mypy + descr: Install Python type check utilities - run: - command: | - sudo apt-get update -y - sudo apt install -y libtinfo5 - curl https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64 -o clang-format - chmod +x clang-format - sudo mv clang-format /opt/clang-format - ./.circleci/unittest/linux/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable /opt/clang-format + name: Check Python types statically + command: mypy --config-file mypy.ini - torchhub_test: + unittest_torchhub: docker: - image: circleci/python:3.7 steps: - checkout - - run: - command: | - pip install --user --progress-bar off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html - # need to install torchvision dependencies due to transitive imports - pip install --user --progress-bar off --no-build-isolation . - pip install pytest - python test/test_hub.py + - install_torchvision + - run_tests_selective: + file_or_dir: test/test_hub.py - torch_onnx_test: + unittest_onnx: docker: - image: circleci/python:3.7 steps: - checkout - - run: - command: | - pip install --user --progress-bar off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html - # need to install torchvision dependencies due to transitive imports - pip install --user --progress-bar off --no-build-isolation . - pip install --user onnx - pip install --user onnxruntime - pip install --user pytest - python test/test_onnx.py - - prototype_test: + - install_torchvision + - pip_install: + args: onnx onnxruntime + descr: Install ONNX + - run_tests_selective: + file_or_dir: test/test_onnx.py + + unittest_prototype: docker: - image: circleci/python:3.7 resource_class: xlarge steps: - - run: - name: Install torch - command: | - pip install --user --progress-bar=off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html - - run: - name: Install prototype dependencies - command: pip install --user --progress-bar=off git+https://github.com/pytorch/data.git - checkout - run: name: Download model weights @@ -281,19 +348,16 @@ jobs: mkdir -p ~/.cache/torch/hub/checkpoints python scripts/collect_model_urls.py torchvision/prototype/models \ | parallel -j0 'wget --no-verbose -O ~/.cache/torch/hub/checkpoints/`basename {}` {}\?source=ci' + - install_torchvision + - install_prototype_dependencies + - pip_install: + args: scipy + descr: Install optional dependencies - run: - name: Install torchvision - command: pip install --user --progress-bar off --no-build-isolation . 
- - run: - name: Install test requirements - command: pip install --user --progress-bar=off pytest pytest-mock scipy iopath - - run: - name: Run tests - environment: - PYTORCH_TEST_WITH_PROTOTYPE: 1 - command: pytest --junitxml=test-results/junit.xml -v --durations 20 test/test_prototype_*.py - - store_test_results: - path: test-results + name: Enable prototype tests + command: echo 'export PYTORCH_TEST_WITH_PROTOTYPE=1' >> $BASH_ENV + - run_tests_selective: + file_or_dir: test/test_prototype_*.py binary_linux_wheel: <<: *binary_common @@ -529,9 +593,10 @@ jobs: at: ~/workspace - designate_upload_channel - checkout + - pip_install: + args: awscli - run: command: | - pip install --user awscli export PATH="$HOME/.local/bin:$PATH" # Prevent credential from leaking set +x @@ -572,7 +637,8 @@ jobs: command: | set -x source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - pip install $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html + - pip_install: + args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html - run: name: smoke test command: | @@ -641,7 +707,8 @@ jobs: eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} conda activate python${PYTHON_VERSION} - pip install $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html + - pip_install: + args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html - run: name: smoke test command: | @@ -967,7 +1034,7 @@ jobs: eval "$(./conda/bin/conda shell.bash hook)" conda activate ./env pushd docs - pip install -r requirements.txt + pip install --progress-bar=off -r requirements.txt make html popd - persist_to_workspace: @@ -1008,9 +1075,15 @@ jobs: workflows: - build: + lint: jobs: - circleci_consistency + - lint_python_and_config + - lint_c + - type_check_python + + build: + jobs: - binary_linux_wheel: conda_docker_image: pytorch/conda-builder:cpu cu_version: cpu @@ -1515,13 +1588,6 @@ workflows: python_version: '3.7' requires: - build_docs - - python_lint - - python_type_check - - docstring_parameters_sync - - clang_format - - torchhub_test - - torch_onnx_test - - prototype_test - binary_ios_build: build_environment: binary-libtorchvision_ops-ios-12.0.0-x86_64 ios_arch: x86_64 @@ -1538,6 +1604,9 @@ workflows: unittest: jobs: + - unittest_torchhub + - unittest_onnx + - unittest_prototype - unittest_linux_cpu: cu_version: cpu name: unittest_linux_cpu_py3.6 @@ -1675,14 +1744,6 @@ workflows: nightly: jobs: - - circleci_consistency - - python_lint - - python_type_check - - docstring_parameters_sync - - clang_format - - torchhub_test - - torch_onnx_test - - prototype_test - binary_ios_build: build_environment: nightly-binary-libtorchvision_ops-ios-12.0.0-x86_64 filters: diff --git a/.circleci/config.yml.in b/.circleci/config.yml.in index 4f3adbff184..cadd8efccb2 100644 --- a/.circleci/config.yml.in +++ b/.circleci/config.yml.in @@ -99,6 +99,81 @@ commands: - brew_install: formulae: libtool + apt_install: + parameters: + args: + type: string + descr: + type: string + default: "" + update: + type: boolean + default: true + steps: + - run: + name: > + <<^ parameters.descr >> apt install << parameters.args >> <> + <<# parameters.descr >> << parameters.descr >> <> + command: | + <<# parameters.update >> sudo apt update -qy <> + sudo apt install << 
parameters.args >> + + pip_install: + parameters: + args: + type: string + descr: + type: string + default: "" + user: + type: boolean + default: true + steps: + - run: + name: > + <<^ parameters.descr >> pip install << parameters.args >> <> + <<# parameters.descr >> << parameters.descr >> <> + command: > + pip install + <<# parameters.user >> --user <> + --progress-bar=off + << parameters.args >> + + install_torchvision: + parameters: + editable: + type: boolean + default: true + steps: + - pip_install: + args: --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + descr: Install PyTorch from nightly releases + - pip_install: + args: --no-build-isolation <<# parameters.editable >> --editable <> . + descr: Install torchvision <<# parameters.editable >> in editable mode <> + + install_prototype_dependencies: + steps: + - pip_install: + args: iopath git+https://github.com/pytorch/data + descr: Install prototype dependencies + + # Most of the test suite is handled by the `unittest` jobs, with completely different workflow and setup. + # This command can be used if only a selection of tests need to be run, for ad-hoc files. + run_tests_selective: + parameters: + file_or_dir: + type: string + steps: + - run: + name: Install test utilities + command: pip install --progress-bar=off pytest pytest-mock + - run: + name: Run tests + command: pytest --junitxml=test-results/junit.xml -v --durations 20 <> + - store_test_results: + path: test-results + binary_common: &binary_common parameters: # Edit these defaults to do a release @@ -171,107 +246,99 @@ jobs: - image: circleci/python:3.7 steps: - checkout + - pip_install: + args: jinja2 pyyaml - run: + name: Check CircleCI config consistency command: | - pip install --user --progress-bar off jinja2 pyyaml python .circleci/regenerate.py git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! Run .circleci/regenerate.py to update config"; exit 1) - python_lint: + lint_python_and_config: docker: - image: circleci/python:3.7 steps: - checkout + - pip_install: + args: pre-commit + descr: Install lint utilities - run: - command: | - pip install --user --progress-bar off pre-commit - pre-commit install-hooks - - run: pre-commit run --all-files + name: Install pre-commit hooks + command: pre-commit install-hooks + - run: + name: Lint Python code and config files + command: pre-commit run --all-files - run: name: Required lint modifications when: on_fail command: git --no-pager diff - python_type_check: + lint_c: docker: - image: circleci/python:3.7 steps: + - apt_install: + args: libtinfo5 + descr: Install additional system libraries - checkout - run: + name: Install lint utilities command: | - sudo apt-get update -y - sudo apt install -y libturbojpeg-dev - pip install --user --progress-bar off mypy - pip install --user --progress-bar off types-requests - pip install --user --progress-bar off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html - pip install --user --progress-bar off git+https://github.com/pytorch/data.git - pip install --user --progress-bar off --no-build-isolation --editable . 
- mypy --config-file mypy.ini - - docstring_parameters_sync: - docker: - - image: circleci/python:3.7 - steps: - - checkout + curl https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64 -o clang-format + chmod +x clang-format + sudo mv clang-format /opt/clang-format - run: - command: | - pip install --user pydocstyle - pydocstyle + name: Lint C code + command: ./.circleci/unittest/linux/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable /opt/clang-format + - run: + name: Required lint modifications + when: on_fail + command: git --no-pager diff - clang_format: + type_check_python: docker: - image: circleci/python:3.7 steps: + - apt_install: + args: libturbojpeg-dev + descr: Install additional system libraries - checkout - - run: - command: | - sudo apt-get update -y - sudo apt install -y libtinfo5 - curl https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64 -o clang-format - chmod +x clang-format - sudo mv clang-format /opt/clang-format - ./.circleci/unittest/linux/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable /opt/clang-format + - install_torchvision: + editable: true + - install_prototype_dependencies + - pip_install: + args: mypy + descr: Install Python type check utilities + - run: + name: Check Python types statically + command: mypy --config-file mypy.ini - torchhub_test: + unittest_torchhub: docker: - image: circleci/python:3.7 steps: - checkout - - run: - command: | - pip install --user --progress-bar off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html - # need to install torchvision dependencies due to transitive imports - pip install --user --progress-bar off --no-build-isolation . - pip install pytest - python test/test_hub.py + - install_torchvision + - run_tests_selective: + file_or_dir: test/test_hub.py - torch_onnx_test: + unittest_onnx: docker: - image: circleci/python:3.7 steps: - checkout - - run: - command: | - pip install --user --progress-bar off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html - # need to install torchvision dependencies due to transitive imports - pip install --user --progress-bar off --no-build-isolation . - pip install --user onnx - pip install --user onnxruntime - pip install --user pytest - python test/test_onnx.py + - install_torchvision + - pip_install: + args: onnx onnxruntime + descr: Install ONNX + - run_tests_selective: + file_or_dir: test/test_onnx.py - prototype_test: + unittest_prototype: docker: - image: circleci/python:3.7 resource_class: xlarge steps: - - run: - name: Install torch - command: | - pip install --user --progress-bar=off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html - - run: - name: Install prototype dependencies - command: pip install --user --progress-bar=off git+https://github.com/pytorch/data.git - checkout - run: name: Download model weights @@ -281,19 +348,16 @@ jobs: mkdir -p ~/.cache/torch/hub/checkpoints python scripts/collect_model_urls.py torchvision/prototype/models \ | parallel -j0 'wget --no-verbose -O ~/.cache/torch/hub/checkpoints/`basename {}` {}\?source=ci' - - run: - name: Install torchvision - command: pip install --user --progress-bar off --no-build-isolation . 
- - run: - name: Install test requirements - command: pip install --user --progress-bar=off pytest pytest-mock scipy iopath - - run: - name: Run tests - environment: - PYTORCH_TEST_WITH_PROTOTYPE: 1 - command: pytest --junitxml=test-results/junit.xml -v --durations 20 test/test_prototype_*.py - - store_test_results: - path: test-results + - install_torchvision + - install_prototype_dependencies + - pip_install: + args: scipy + descr: Install optional dependencies + - run: + name: Enable prototype tests + command: echo 'export PYTORCH_TEST_WITH_PROTOTYPE=1' >> $BASH_ENV + - run_tests_selective: + file_or_dir: test/test_prototype_*.py binary_linux_wheel: <<: *binary_common @@ -529,9 +593,10 @@ jobs: at: ~/workspace - designate_upload_channel - checkout + - pip_install: + args: awscli - run: command: | - pip install --user awscli export PATH="$HOME/.local/bin:$PATH" # Prevent credential from leaking set +x @@ -572,7 +637,8 @@ jobs: command: | set -x source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - pip install $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html + - pip_install: + args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html - run: name: smoke test command: | @@ -641,7 +707,8 @@ jobs: eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} conda activate python${PYTHON_VERSION} - pip install $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html + - pip_install: + args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html - run: name: smoke test command: | @@ -967,7 +1034,7 @@ jobs: eval "$(./conda/bin/conda shell.bash hook)" conda activate ./env pushd docs - pip install -r requirements.txt + pip install --progress-bar=off -r requirements.txt make html popd - persist_to_workspace: @@ -1008,23 +1075,24 @@ jobs: workflows: - build: -{%- if True %} + lint: jobs: - circleci_consistency + - lint_python_and_config + - lint_c + - type_check_python + + build: + jobs: {{ build_workflows(windows_latest_only=True) }} - - python_lint - - python_type_check - - docstring_parameters_sync - - clang_format - - torchhub_test - - torch_onnx_test - - prototype_test {{ ios_workflows() }} {{ android_workflows() }} unittest: jobs: + - unittest_torchhub + - unittest_onnx + - unittest_prototype {{ unittest_workflows() }} cmake: @@ -1032,16 +1100,7 @@ workflows: {{ cmake_workflows() }} nightly: -{%- endif %} jobs: - - circleci_consistency - - python_lint - - python_type_check - - docstring_parameters_sync - - clang_format - - torchhub_test - - torch_onnx_test - - prototype_test {{ ios_workflows(nightly=True) }} {{ android_workflows(nightly=True) }} {{ build_workflows(prefix="nightly_", filter_branch="nightly", upload=True) }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 920916030be..7f66a0de672 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -28,3 +28,8 @@ repos: hooks: - id: flake8 args: [--config=setup.cfg] + + - repo: https://github.com/PyCQA/pydocstyle + rev: 6.1.1 + hooks: + - id: pydocstyle From 3d8723d5f7782881fc19d802819130857ed68ca4 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Tue, 30 Nov 2021 12:56:37 +0000 Subject: [PATCH 03/23] Refactor the `get_weights` API (#5006) * Change the `default` weights mechanism to sue Enum aliases. 
* Change `get_weights` to work with full Enum names and make it public. * Applying improvements from code review. --- references/classification/train.py | 3 +- references/detection/train.py | 3 +- references/segmentation/train.py | 3 +- references/video_classification/train.py | 3 +- test/test_models.py | 6 +- test/test_prototype_models.py | 36 +++++++----- torchvision/prototype/models/__init__.py | 1 + torchvision/prototype/models/_api.py | 57 +++++++++---------- torchvision/prototype/models/alexnet.py | 2 +- torchvision/prototype/models/densenet.py | 8 +-- .../prototype/models/detection/faster_rcnn.py | 6 +- .../models/detection/keypoint_rcnn.py | 3 +- .../prototype/models/detection/mask_rcnn.py | 2 +- .../prototype/models/detection/retinanet.py | 2 +- torchvision/prototype/models/detection/ssd.py | 2 +- .../prototype/models/detection/ssdlite.py | 2 +- torchvision/prototype/models/efficientnet.py | 16 +++--- torchvision/prototype/models/googlenet.py | 2 +- torchvision/prototype/models/inception.py | 2 +- torchvision/prototype/models/mnasnet.py | 4 +- torchvision/prototype/models/mobilenetv2.py | 2 +- torchvision/prototype/models/mobilenetv3.py | 5 +- .../models/quantization/googlenet.py | 2 +- .../models/quantization/inception.py | 2 +- .../models/quantization/mobilenetv2.py | 2 +- .../models/quantization/mobilenetv3.py | 2 +- .../prototype/models/quantization/resnet.py | 8 +-- .../models/quantization/shufflenetv2.py | 4 +- torchvision/prototype/models/regnet.py | 28 ++++----- torchvision/prototype/models/resnet.py | 25 +++----- .../models/segmentation/deeplabv3.py | 6 +- .../prototype/models/segmentation/fcn.py | 4 +- .../prototype/models/segmentation/lraspp.py | 2 +- torchvision/prototype/models/shufflenetv2.py | 4 +- torchvision/prototype/models/squeezenet.py | 4 +- torchvision/prototype/models/vgg.py | 17 +++--- torchvision/prototype/models/video/resnet.py | 6 +- 37 files changed, 140 insertions(+), 146 deletions(-) diff --git a/references/classification/train.py b/references/classification/train.py index b16ed3d2a42..b2c6844df9b 100644 --- a/references/classification/train.py +++ b/references/classification/train.py @@ -158,8 +158,7 @@ def load_data(traindir, valdir, args): crop_size=val_crop_size, resize_size=val_resize_size, interpolation=interpolation ) else: - fn = PM.quantization.__dict__[args.model] if hasattr(args, "backend") else PM.__dict__[args.model] - weights = PM._api.get_weight(fn, args.weights) + weights = PM.get_weight(args.weights) preprocessing = weights.transforms() dataset_test = torchvision.datasets.ImageFolder( diff --git a/references/detection/train.py b/references/detection/train.py index ae13a32bd22..0788895af20 100644 --- a/references/detection/train.py +++ b/references/detection/train.py @@ -53,8 +53,7 @@ def get_transform(train, args): elif not args.weights: return presets.DetectionPresetEval() else: - fn = PM.detection.__dict__[args.model] - weights = PM._api.get_weight(fn, args.weights) + weights = PM.get_weight(args.weights) return weights.transforms() diff --git a/references/segmentation/train.py b/references/segmentation/train.py index 2dbb962fe2f..72a9bdb01f5 100644 --- a/references/segmentation/train.py +++ b/references/segmentation/train.py @@ -38,8 +38,7 @@ def get_transform(train, args): elif not args.weights: return presets.SegmentationPresetEval(base_size=520) else: - fn = PM.segmentation.__dict__[args.model] - weights = PM._api.get_weight(fn, args.weights) + weights = PM.get_weight(args.weights) return weights.transforms() diff --git 
a/references/video_classification/train.py b/references/video_classification/train.py index d66879e5b46..1f363f57dad 100644 --- a/references/video_classification/train.py +++ b/references/video_classification/train.py @@ -160,8 +160,7 @@ def main(args): if not args.weights: transform_test = presets.VideoClassificationPresetEval((128, 171), (112, 112)) else: - fn = PM.video.__dict__[args.model] - weights = PM._api.get_weight(fn, args.weights) + weights = PM.get_weight(args.weights) transform_test = weights.transforms() if args.cache_dataset and os.path.exists(cache_path): diff --git a/test/test_models.py b/test/test_models.py index 5fbe0dca38f..b5500ef08b4 100644 --- a/test/test_models.py +++ b/test/test_models.py @@ -22,7 +22,11 @@ def get_models_from_module(module): # TODO add a registration mechanism to torchvision.models - return [v for k, v in module.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"] + return [ + v + for k, v in module.__dict__.items() + if callable(v) and k[0].lower() == k[0] and k[0] != "_" and k != "get_weight" + ] @pytest.fixture diff --git a/test/test_prototype_models.py b/test/test_prototype_models.py index 92a88342534..1dc883528ef 100644 --- a/test/test_prototype_models.py +++ b/test/test_prototype_models.py @@ -24,6 +24,19 @@ def _get_parent_module(model_fn): return module +def _get_model_weights(model_fn): + module = _get_parent_module(model_fn) + weights_name = "_QuantizedWeights" if module.__name__.split(".")[-1] == "quantization" else "_Weights" + try: + return next( + v + for k, v in module.__dict__.items() + if k.endswith(weights_name) and k.replace(weights_name, "").lower() == model_fn.__name__ + ) + except StopIteration: + return None + + def _build_model(fn, **kwargs): try: model = fn(**kwargs) @@ -36,24 +49,22 @@ def _build_model(fn, **kwargs): @pytest.mark.parametrize( - "model_fn, name, weight", + "name, weight", [ - (models.resnet50, "ImageNet1K_V1", models.ResNet50_Weights.ImageNet1K_V1), - (models.resnet50, "default", models.ResNet50_Weights.ImageNet1K_V2), + ("ResNet50_Weights.ImageNet1K_V1", models.ResNet50_Weights.ImageNet1K_V1), + ("ResNet50_Weights.default", models.ResNet50_Weights.ImageNet1K_V2), ( - models.quantization.resnet50, - "default", + "ResNet50_QuantizedWeights.default", models.quantization.ResNet50_QuantizedWeights.ImageNet1K_FBGEMM_V2, ), ( - models.quantization.resnet50, - "ImageNet1K_FBGEMM_V1", + "ResNet50_QuantizedWeights.ImageNet1K_FBGEMM_V1", models.quantization.ResNet50_QuantizedWeights.ImageNet1K_FBGEMM_V1, ), ], ) -def test_get_weight(model_fn, name, weight): - assert models._api.get_weight(model_fn, name) == weight +def test_get_weight(name, weight): + assert models.get_weight(name) == weight @pytest.mark.parametrize( @@ -65,10 +76,9 @@ def test_get_weight(model_fn, name, weight): + TM.get_models_from_module(models.video), ) def test_naming_conventions(model_fn): - model_name = model_fn.__name__ - module = _get_parent_module(model_fn) - weights_name = "_QuantizedWeights" if module.__name__.split(".")[-1] == "quantization" else "_Weights" - assert model_name in set(x.replace(weights_name, "").lower() for x in module.__dict__ if x.endswith(weights_name)) + weights_enum = _get_model_weights(model_fn) + assert weights_enum is not None + assert len(weights_enum) == 0 or hasattr(weights_enum, "default") @pytest.mark.parametrize("model_fn", TM.get_models_from_module(models)) diff --git a/torchvision/prototype/models/__init__.py b/torchvision/prototype/models/__init__.py index f675dc37f25..12a4738e53c 
100644 --- a/torchvision/prototype/models/__init__.py +++ b/torchvision/prototype/models/__init__.py @@ -15,3 +15,4 @@ from . import quantization from . import segmentation from . import video +from ._api import get_weight diff --git a/torchvision/prototype/models/_api.py b/torchvision/prototype/models/_api.py index 2935039e087..1f66fd2be45 100644 --- a/torchvision/prototype/models/_api.py +++ b/torchvision/prototype/models/_api.py @@ -1,7 +1,9 @@ +import importlib +import inspect +import sys from collections import OrderedDict from dataclasses import dataclass, fields from enum import Enum -from inspect import signature from typing import Any, Callable, Dict from ..._internally_replaced_utils import load_state_dict_from_url @@ -30,7 +32,6 @@ class Weights: url: str transforms: Callable meta: Dict[str, Any] - default: bool class WeightsEnum(Enum): @@ -50,7 +51,7 @@ def __init__(self, value: Weights): def verify(cls, obj: Any) -> Any: if obj is not None: if type(obj) is str: - obj = cls.from_str(obj) + obj = cls.from_str(obj.replace(cls.__name__ + ".", "")) elif not isinstance(obj, cls): raise TypeError( f"Invalid Weight class provided; expected {cls.__name__} but received {obj.__class__.__name__}." @@ -59,8 +60,8 @@ def verify(cls, obj: Any) -> Any: @classmethod def from_str(cls, value: str) -> "WeightsEnum": - for v in cls: - if v._name_ == value or (value == "default" and v.default): + for k, v in cls.__members__.items(): + if k == value: return v raise ValueError(f"Invalid value {value} for enum {cls.__name__}.") @@ -78,41 +79,35 @@ def __getattr__(self, name): return super().__getattr__(name) -def get_weight(fn: Callable, weight_name: str) -> WeightsEnum: +def get_weight(name: str) -> WeightsEnum: """ - Gets the weight enum of a specific model builder method and weight name combination. + Gets the weight enum value by its full name. Example: "ResNet50_Weights.ImageNet1K_V1" Args: - fn (Callable): The builder method used to create the model. - weight_name (str): The name of the weight enum entry of the specific model. + name (str): The name of the weight enum entry. Returns: WeightsEnum: The requested weight enum. """ - sig = signature(fn) - if "weights" not in sig.parameters: - raise ValueError("The method is missing the 'weights' parameter.") + try: + enum_name, value_name = name.split(".") + except ValueError: + raise ValueError(f"Invalid weight name provided: '{name}'.") + + base_module_name = ".".join(sys.modules[__name__].__name__.split(".")[:-1]) + base_module = importlib.import_module(base_module_name) + model_modules = [base_module] + [ + x[1] for x in inspect.getmembers(base_module, inspect.ismodule) if x[1].__file__.endswith("__init__.py") + ] - ann = signature(fn).parameters["weights"].annotation weights_enum = None - if isinstance(ann, type) and issubclass(ann, WeightsEnum): - weights_enum = ann - else: - # handle cases like Union[Optional, T] - # TODO: Replace ann.__args__ with typing.get_args(ann) after python >= 3.8 - for t in ann.__args__: # type: ignore[union-attr] - if isinstance(t, type) and issubclass(t, WeightsEnum): - # ensure the name exists. 
handles builders with multiple types of weights like in quantization - try: - t.from_str(weight_name) - except ValueError: - continue - weights_enum = t - break + for m in model_modules: + potential_class = m.__dict__.get(enum_name, None) + if potential_class is not None and issubclass(potential_class, WeightsEnum): + weights_enum = potential_class + break if weights_enum is None: - raise ValueError( - "The weight class for the specific method couldn't be retrieved. Make sure the typing info is correct." - ) + raise ValueError(f"The weight enum '{enum_name}' for the specific method couldn't be retrieved.") - return weights_enum.from_str(weight_name) + return weights_enum.from_str(value_name) diff --git a/torchvision/prototype/models/alexnet.py b/torchvision/prototype/models/alexnet.py index b45ca1e7085..28b0fa60504 100644 --- a/torchvision/prototype/models/alexnet.py +++ b/torchvision/prototype/models/alexnet.py @@ -25,8 +25,8 @@ class AlexNet_Weights(WeightsEnum): "acc@1": 56.522, "acc@5": 79.066, }, - default=True, ) + default = ImageNet1K_V1 def alexnet(weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet: diff --git a/torchvision/prototype/models/densenet.py b/torchvision/prototype/models/densenet.py index e779a2cd239..b8abbdde947 100644 --- a/torchvision/prototype/models/densenet.py +++ b/torchvision/prototype/models/densenet.py @@ -80,8 +80,8 @@ class DenseNet121_Weights(WeightsEnum): "acc@1": 74.434, "acc@5": 91.972, }, - default=True, ) + default = ImageNet1K_V1 class DenseNet161_Weights(WeightsEnum): @@ -93,8 +93,8 @@ class DenseNet161_Weights(WeightsEnum): "acc@1": 77.138, "acc@5": 93.560, }, - default=True, ) + default = ImageNet1K_V1 class DenseNet169_Weights(WeightsEnum): @@ -106,8 +106,8 @@ class DenseNet169_Weights(WeightsEnum): "acc@1": 75.600, "acc@5": 92.806, }, - default=True, ) + default = ImageNet1K_V1 class DenseNet201_Weights(WeightsEnum): @@ -119,8 +119,8 @@ class DenseNet201_Weights(WeightsEnum): "acc@1": 76.896, "acc@5": 93.370, }, - default=True, ) + default = ImageNet1K_V1 def densenet121(weights: Optional[DenseNet121_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet: diff --git a/torchvision/prototype/models/detection/faster_rcnn.py b/torchvision/prototype/models/detection/faster_rcnn.py index c83aaf222fb..1f5c6461698 100644 --- a/torchvision/prototype/models/detection/faster_rcnn.py +++ b/torchvision/prototype/models/detection/faster_rcnn.py @@ -45,8 +45,8 @@ class FasterRCNN_ResNet50_FPN_Weights(WeightsEnum): "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#faster-r-cnn-resnet-50-fpn", "map": 37.0, }, - default=True, ) + default = Coco_V1 class FasterRCNN_MobileNet_V3_Large_FPN_Weights(WeightsEnum): @@ -58,8 +58,8 @@ class FasterRCNN_MobileNet_V3_Large_FPN_Weights(WeightsEnum): "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#faster-r-cnn-mobilenetv3-large-fpn", "map": 32.8, }, - default=True, ) + default = Coco_V1 class FasterRCNN_MobileNet_V3_Large_320_FPN_Weights(WeightsEnum): @@ -71,8 +71,8 @@ class FasterRCNN_MobileNet_V3_Large_320_FPN_Weights(WeightsEnum): "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#faster-r-cnn-mobilenetv3-large-320-fpn", "map": 22.8, }, - default=True, ) + default = Coco_V1 def fasterrcnn_resnet50_fpn( diff --git a/torchvision/prototype/models/detection/keypoint_rcnn.py b/torchvision/prototype/models/detection/keypoint_rcnn.py index 85250ac2a33..a811999681d 100644 --- 
a/torchvision/prototype/models/detection/keypoint_rcnn.py +++ b/torchvision/prototype/models/detection/keypoint_rcnn.py @@ -35,7 +35,6 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum): "box_map": 50.6, "kp_map": 61.1, }, - default=False, ) Coco_V1 = Weights( url="https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-fc266e95.pth", @@ -46,8 +45,8 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum): "box_map": 54.6, "kp_map": 65.0, }, - default=True, ) + default = Coco_V1 def keypointrcnn_resnet50_fpn( diff --git a/torchvision/prototype/models/detection/mask_rcnn.py b/torchvision/prototype/models/detection/mask_rcnn.py index ea7ab4f5fc7..4eb285fac0d 100644 --- a/torchvision/prototype/models/detection/mask_rcnn.py +++ b/torchvision/prototype/models/detection/mask_rcnn.py @@ -34,8 +34,8 @@ class MaskRCNN_ResNet50_FPN_Weights(WeightsEnum): "box_map": 37.9, "mask_map": 34.6, }, - default=True, ) + default = Coco_V1 def maskrcnn_resnet50_fpn( diff --git a/torchvision/prototype/models/detection/retinanet.py b/torchvision/prototype/models/detection/retinanet.py index d442c79d5b6..799bc21c379 100644 --- a/torchvision/prototype/models/detection/retinanet.py +++ b/torchvision/prototype/models/detection/retinanet.py @@ -34,8 +34,8 @@ class RetinaNet_ResNet50_FPN_Weights(WeightsEnum): "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#retinanet", "map": 36.4, }, - default=True, ) + default = Coco_V1 def retinanet_resnet50_fpn( diff --git a/torchvision/prototype/models/detection/ssd.py b/torchvision/prototype/models/detection/ssd.py index 37f5c2a6944..f57b47c00d6 100644 --- a/torchvision/prototype/models/detection/ssd.py +++ b/torchvision/prototype/models/detection/ssd.py @@ -33,8 +33,8 @@ class SSD300_VGG16_Weights(WeightsEnum): "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#ssd300-vgg16", "map": 25.1, }, - default=True, ) + default = Coco_V1 def ssd300_vgg16( diff --git a/torchvision/prototype/models/detection/ssdlite.py b/torchvision/prototype/models/detection/ssdlite.py index 309362f2f11..4a61c50101a 100644 --- a/torchvision/prototype/models/detection/ssdlite.py +++ b/torchvision/prototype/models/detection/ssdlite.py @@ -38,8 +38,8 @@ class SSDLite320_MobileNet_V3_Large_Weights(WeightsEnum): "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#ssdlite320-mobilenetv3-large", "map": 21.3, }, - default=True, ) + default = Coco_V1 def ssdlite320_mobilenet_v3_large( diff --git a/torchvision/prototype/models/efficientnet.py b/torchvision/prototype/models/efficientnet.py index 74ca6ccc71d..f4a69aac70c 100644 --- a/torchvision/prototype/models/efficientnet.py +++ b/torchvision/prototype/models/efficientnet.py @@ -79,8 +79,8 @@ class EfficientNet_B0_Weights(WeightsEnum): "acc@1": 77.692, "acc@5": 93.532, }, - default=True, ) + default = ImageNet1K_V1 class EfficientNet_B1_Weights(WeightsEnum): @@ -93,8 +93,8 @@ class EfficientNet_B1_Weights(WeightsEnum): "acc@1": 78.642, "acc@5": 94.186, }, - default=True, ) + default = ImageNet1K_V1 class EfficientNet_B2_Weights(WeightsEnum): @@ -107,8 +107,8 @@ class EfficientNet_B2_Weights(WeightsEnum): "acc@1": 80.608, "acc@5": 95.310, }, - default=True, ) + default = ImageNet1K_V1 class EfficientNet_B3_Weights(WeightsEnum): @@ -121,8 +121,8 @@ class EfficientNet_B3_Weights(WeightsEnum): "acc@1": 82.008, "acc@5": 96.054, }, - default=True, ) + default = ImageNet1K_V1 class EfficientNet_B4_Weights(WeightsEnum): @@ -135,8 +135,8 @@ class EfficientNet_B4_Weights(WeightsEnum): 
"acc@1": 83.384, "acc@5": 96.594, }, - default=True, ) + default = ImageNet1K_V1 class EfficientNet_B5_Weights(WeightsEnum): @@ -149,8 +149,8 @@ class EfficientNet_B5_Weights(WeightsEnum): "acc@1": 83.444, "acc@5": 96.628, }, - default=True, ) + default = ImageNet1K_V1 class EfficientNet_B6_Weights(WeightsEnum): @@ -163,8 +163,8 @@ class EfficientNet_B6_Weights(WeightsEnum): "acc@1": 84.008, "acc@5": 96.916, }, - default=True, ) + default = ImageNet1K_V1 class EfficientNet_B7_Weights(WeightsEnum): @@ -177,8 +177,8 @@ class EfficientNet_B7_Weights(WeightsEnum): "acc@1": 84.122, "acc@5": 96.908, }, - default=True, ) + default = ImageNet1K_V1 def efficientnet_b0( diff --git a/torchvision/prototype/models/googlenet.py b/torchvision/prototype/models/googlenet.py index 352c49d1a2e..f62c5a96e15 100644 --- a/torchvision/prototype/models/googlenet.py +++ b/torchvision/prototype/models/googlenet.py @@ -26,8 +26,8 @@ class GoogLeNet_Weights(WeightsEnum): "acc@1": 69.778, "acc@5": 89.530, }, - default=True, ) + default = ImageNet1K_V1 def googlenet(weights: Optional[GoogLeNet_Weights] = None, progress: bool = True, **kwargs: Any) -> GoogLeNet: diff --git a/torchvision/prototype/models/inception.py b/torchvision/prototype/models/inception.py index 9837b1fc4a6..4814fa76c5c 100644 --- a/torchvision/prototype/models/inception.py +++ b/torchvision/prototype/models/inception.py @@ -25,8 +25,8 @@ class Inception_V3_Weights(WeightsEnum): "acc@1": 77.294, "acc@5": 93.450, }, - default=True, ) + default = ImageNet1K_V1 def inception_v3(weights: Optional[Inception_V3_Weights] = None, progress: bool = True, **kwargs: Any) -> Inception3: diff --git a/torchvision/prototype/models/mnasnet.py b/torchvision/prototype/models/mnasnet.py index 73aaea0beca..554057a9ba1 100644 --- a/torchvision/prototype/models/mnasnet.py +++ b/torchvision/prototype/models/mnasnet.py @@ -40,8 +40,8 @@ class MNASNet0_5_Weights(WeightsEnum): "acc@1": 67.734, "acc@5": 87.490, }, - default=True, ) + default = ImageNet1K_V1 class MNASNet0_75_Weights(WeightsEnum): @@ -58,8 +58,8 @@ class MNASNet1_0_Weights(WeightsEnum): "acc@1": 73.456, "acc@5": 91.510, }, - default=True, ) + default = ImageNet1K_V1 class MNASNet1_3_Weights(WeightsEnum): diff --git a/torchvision/prototype/models/mobilenetv2.py b/torchvision/prototype/models/mobilenetv2.py index 0c0f80d081a..64c7221da6d 100644 --- a/torchvision/prototype/models/mobilenetv2.py +++ b/torchvision/prototype/models/mobilenetv2.py @@ -25,8 +25,8 @@ class MobileNet_V2_Weights(WeightsEnum): "acc@1": 71.878, "acc@5": 90.286, }, - default=True, ) + default = ImageNet1K_V1 def mobilenet_v2(weights: Optional[MobileNet_V2_Weights] = None, progress: bool = True, **kwargs: Any) -> MobileNetV2: diff --git a/torchvision/prototype/models/mobilenetv3.py b/torchvision/prototype/models/mobilenetv3.py index e014fb5acb2..a92c7667aab 100644 --- a/torchvision/prototype/models/mobilenetv3.py +++ b/torchvision/prototype/models/mobilenetv3.py @@ -54,7 +54,6 @@ class MobileNet_V3_Large_Weights(WeightsEnum): "acc@1": 74.042, "acc@5": 91.340, }, - default=False, ) ImageNet1K_V2 = Weights( url="https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth", @@ -65,8 +64,8 @@ class MobileNet_V3_Large_Weights(WeightsEnum): "acc@1": 75.274, "acc@5": 92.566, }, - default=True, ) + default = ImageNet1K_V2 class MobileNet_V3_Small_Weights(WeightsEnum): @@ -79,8 +78,8 @@ class MobileNet_V3_Small_Weights(WeightsEnum): "acc@1": 67.668, "acc@5": 87.402, }, - default=True, ) + default = ImageNet1K_V1 def mobilenet_v3_large( diff 
--git a/torchvision/prototype/models/quantization/googlenet.py b/torchvision/prototype/models/quantization/googlenet.py index 3d26fd7d607..dc3c875b79a 100644 --- a/torchvision/prototype/models/quantization/googlenet.py +++ b/torchvision/prototype/models/quantization/googlenet.py @@ -38,8 +38,8 @@ class GoogLeNet_QuantizedWeights(WeightsEnum): "acc@1": 69.826, "acc@5": 89.404, }, - default=True, ) + default = ImageNet1K_FBGEMM_V1 def googlenet( diff --git a/torchvision/prototype/models/quantization/inception.py b/torchvision/prototype/models/quantization/inception.py index ff779076df6..d1d5d4ca8fe 100644 --- a/torchvision/prototype/models/quantization/inception.py +++ b/torchvision/prototype/models/quantization/inception.py @@ -37,8 +37,8 @@ class Inception_V3_QuantizedWeights(WeightsEnum): "acc@1": 77.176, "acc@5": 93.354, }, - default=True, ) + default = ImageNet1K_FBGEMM_V1 def inception_v3( diff --git a/torchvision/prototype/models/quantization/mobilenetv2.py b/torchvision/prototype/models/quantization/mobilenetv2.py index c5afd731fad..81540f2f840 100644 --- a/torchvision/prototype/models/quantization/mobilenetv2.py +++ b/torchvision/prototype/models/quantization/mobilenetv2.py @@ -38,8 +38,8 @@ class MobileNet_V2_QuantizedWeights(WeightsEnum): "acc@1": 71.658, "acc@5": 90.150, }, - default=True, ) + default = ImageNet1K_QNNPACK_V1 def mobilenet_v2( diff --git a/torchvision/prototype/models/quantization/mobilenetv3.py b/torchvision/prototype/models/quantization/mobilenetv3.py index a29e3f44697..9d29484c18f 100644 --- a/torchvision/prototype/models/quantization/mobilenetv3.py +++ b/torchvision/prototype/models/quantization/mobilenetv3.py @@ -71,8 +71,8 @@ class MobileNet_V3_Large_QuantizedWeights(WeightsEnum): "acc@1": 73.004, "acc@5": 90.858, }, - default=True, ) + default = ImageNet1K_QNNPACK_V1 def mobilenet_v3_large( diff --git a/torchvision/prototype/models/quantization/resnet.py b/torchvision/prototype/models/quantization/resnet.py index 0de4eb5557b..c6bd530f393 100644 --- a/torchvision/prototype/models/quantization/resnet.py +++ b/torchvision/prototype/models/quantization/resnet.py @@ -73,8 +73,8 @@ class ResNet18_QuantizedWeights(WeightsEnum): "acc@1": 69.494, "acc@5": 88.882, }, - default=True, ) + default = ImageNet1K_FBGEMM_V1 class ResNet50_QuantizedWeights(WeightsEnum): @@ -87,7 +87,6 @@ class ResNet50_QuantizedWeights(WeightsEnum): "acc@1": 75.920, "acc@5": 92.814, }, - default=False, ) ImageNet1K_FBGEMM_V2 = Weights( url="https://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth", @@ -98,8 +97,8 @@ class ResNet50_QuantizedWeights(WeightsEnum): "acc@1": 80.282, "acc@5": 94.976, }, - default=True, ) + default = ImageNet1K_FBGEMM_V2 class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): @@ -112,7 +111,6 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): "acc@1": 78.986, "acc@5": 94.480, }, - default=False, ) ImageNet1K_FBGEMM_V2 = Weights( url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pth", @@ -123,8 +121,8 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): "acc@1": 82.574, "acc@5": 96.132, }, - default=True, ) + default = ImageNet1K_FBGEMM_V2 def resnet18( diff --git a/torchvision/prototype/models/quantization/shufflenetv2.py b/torchvision/prototype/models/quantization/shufflenetv2.py index 6677983a1d9..111763f2614 100644 --- a/torchvision/prototype/models/quantization/shufflenetv2.py +++ b/torchvision/prototype/models/quantization/shufflenetv2.py @@ -69,8 +69,8 @@ class 
ShuffleNet_V2_X0_5_QuantizedWeights(WeightsEnum): "acc@1": 57.972, "acc@5": 79.780, }, - default=True, ) + default = ImageNet1K_FBGEMM_V1 class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum): @@ -83,8 +83,8 @@ class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum): "acc@1": 68.360, "acc@5": 87.582, }, - default=True, ) + default = ImageNet1K_FBGEMM_V1 def shufflenet_v2_x0_5( diff --git a/torchvision/prototype/models/regnet.py b/torchvision/prototype/models/regnet.py index 1e12ae7bbd2..d810a0d1300 100644 --- a/torchvision/prototype/models/regnet.py +++ b/torchvision/prototype/models/regnet.py @@ -74,8 +74,8 @@ class RegNet_Y_400MF_Weights(WeightsEnum): "acc@1": 74.046, "acc@5": 91.716, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_Y_800MF_Weights(WeightsEnum): @@ -88,8 +88,8 @@ class RegNet_Y_800MF_Weights(WeightsEnum): "acc@1": 76.420, "acc@5": 93.136, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_Y_1_6GF_Weights(WeightsEnum): @@ -102,8 +102,8 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum): "acc@1": 77.950, "acc@5": 93.966, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_Y_3_2GF_Weights(WeightsEnum): @@ -116,8 +116,8 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum): "acc@1": 78.948, "acc@5": 94.576, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_Y_8GF_Weights(WeightsEnum): @@ -130,8 +130,8 @@ class RegNet_Y_8GF_Weights(WeightsEnum): "acc@1": 80.032, "acc@5": 95.048, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_Y_16GF_Weights(WeightsEnum): @@ -144,8 +144,8 @@ class RegNet_Y_16GF_Weights(WeightsEnum): "acc@1": 80.424, "acc@5": 95.240, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_Y_32GF_Weights(WeightsEnum): @@ -158,8 +158,8 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@1": 80.878, "acc@5": 95.340, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_X_400MF_Weights(WeightsEnum): @@ -172,8 +172,8 @@ class RegNet_X_400MF_Weights(WeightsEnum): "acc@1": 72.834, "acc@5": 90.950, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_X_800MF_Weights(WeightsEnum): @@ -186,8 +186,8 @@ class RegNet_X_800MF_Weights(WeightsEnum): "acc@1": 75.212, "acc@5": 92.348, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_X_1_6GF_Weights(WeightsEnum): @@ -200,8 +200,8 @@ class RegNet_X_1_6GF_Weights(WeightsEnum): "acc@1": 77.040, "acc@5": 93.440, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_X_3_2GF_Weights(WeightsEnum): @@ -214,8 +214,8 @@ class RegNet_X_3_2GF_Weights(WeightsEnum): "acc@1": 78.364, "acc@5": 93.992, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_X_8GF_Weights(WeightsEnum): @@ -228,8 +228,8 @@ class RegNet_X_8GF_Weights(WeightsEnum): "acc@1": 79.344, "acc@5": 94.686, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_X_16GF_Weights(WeightsEnum): @@ -242,8 +242,8 @@ class RegNet_X_16GF_Weights(WeightsEnum): "acc@1": 80.058, "acc@5": 94.944, }, - default=True, ) + default = ImageNet1K_V1 class RegNet_X_32GF_Weights(WeightsEnum): @@ -256,8 +256,8 @@ class RegNet_X_32GF_Weights(WeightsEnum): "acc@1": 80.622, "acc@5": 95.248, }, - default=True, ) + default = ImageNet1K_V1 def regnet_y_400mf(weights: Optional[RegNet_Y_400MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet: diff --git a/torchvision/prototype/models/resnet.py b/torchvision/prototype/models/resnet.py index e213864acbe..3c68f0a430c 100644 --- a/torchvision/prototype/models/resnet.py +++ b/torchvision/prototype/models/resnet.py @@ -64,8 +64,8 @@ class ResNet18_Weights(WeightsEnum): 
"acc@1": 69.758, "acc@5": 89.078, }, - default=True, ) + default = ImageNet1K_V1 class ResNet34_Weights(WeightsEnum): @@ -78,8 +78,8 @@ class ResNet34_Weights(WeightsEnum): "acc@1": 73.314, "acc@5": 91.420, }, - default=True, ) + default = ImageNet1K_V1 class ResNet50_Weights(WeightsEnum): @@ -92,7 +92,6 @@ class ResNet50_Weights(WeightsEnum): "acc@1": 76.130, "acc@5": 92.862, }, - default=False, ) ImageNet1K_V2 = Weights( url="https://download.pytorch.org/models/resnet50-f46c3f97.pth", @@ -103,8 +102,8 @@ class ResNet50_Weights(WeightsEnum): "acc@1": 80.674, "acc@5": 95.166, }, - default=True, ) + default = ImageNet1K_V2 class ResNet101_Weights(WeightsEnum): @@ -117,7 +116,6 @@ class ResNet101_Weights(WeightsEnum): "acc@1": 77.374, "acc@5": 93.546, }, - default=False, ) ImageNet1K_V2 = Weights( url="https://download.pytorch.org/models/resnet101-cd907fc2.pth", @@ -128,8 +126,8 @@ class ResNet101_Weights(WeightsEnum): "acc@1": 81.886, "acc@5": 95.780, }, - default=True, ) + default = ImageNet1K_V2 class ResNet152_Weights(WeightsEnum): @@ -142,7 +140,6 @@ class ResNet152_Weights(WeightsEnum): "acc@1": 78.312, "acc@5": 94.046, }, - default=False, ) ImageNet1K_V2 = Weights( url="https://download.pytorch.org/models/resnet152-f82ba261.pth", @@ -153,8 +150,8 @@ class ResNet152_Weights(WeightsEnum): "acc@1": 82.284, "acc@5": 96.002, }, - default=True, ) + default = ImageNet1K_V2 class ResNeXt50_32X4D_Weights(WeightsEnum): @@ -167,7 +164,6 @@ class ResNeXt50_32X4D_Weights(WeightsEnum): "acc@1": 77.618, "acc@5": 93.698, }, - default=False, ) ImageNet1K_V2 = Weights( url="https://download.pytorch.org/models/resnext50_32x4d-1a0047aa.pth", @@ -178,8 +174,8 @@ class ResNeXt50_32X4D_Weights(WeightsEnum): "acc@1": 81.198, "acc@5": 95.340, }, - default=True, ) + default = ImageNet1K_V2 class ResNeXt101_32X8D_Weights(WeightsEnum): @@ -192,7 +188,6 @@ class ResNeXt101_32X8D_Weights(WeightsEnum): "acc@1": 79.312, "acc@5": 94.526, }, - default=False, ) ImageNet1K_V2 = Weights( url="https://download.pytorch.org/models/resnext101_32x8d-110c445d.pth", @@ -203,8 +198,8 @@ class ResNeXt101_32X8D_Weights(WeightsEnum): "acc@1": 82.834, "acc@5": 96.228, }, - default=True, ) + default = ImageNet1K_V2 class Wide_ResNet50_2_Weights(WeightsEnum): @@ -217,7 +212,6 @@ class Wide_ResNet50_2_Weights(WeightsEnum): "acc@1": 78.468, "acc@5": 94.086, }, - default=False, ) ImageNet1K_V2 = Weights( url="https://download.pytorch.org/models/wide_resnet50_2-9ba9bcbe.pth", @@ -228,8 +222,8 @@ class Wide_ResNet50_2_Weights(WeightsEnum): "acc@1": 81.602, "acc@5": 95.758, }, - default=True, ) + default = ImageNet1K_V2 class Wide_ResNet101_2_Weights(WeightsEnum): @@ -242,7 +236,6 @@ class Wide_ResNet101_2_Weights(WeightsEnum): "acc@1": 78.848, "acc@5": 94.284, }, - default=False, ) ImageNet1K_V2 = Weights( url="https://download.pytorch.org/models/wide_resnet101_2-d733dc28.pth", @@ -253,8 +246,8 @@ class Wide_ResNet101_2_Weights(WeightsEnum): "acc@1": 82.510, "acc@5": 96.020, }, - default=True, ) + default = ImageNet1K_V2 def resnet18(weights: Optional[ResNet18_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet: diff --git a/torchvision/prototype/models/segmentation/deeplabv3.py b/torchvision/prototype/models/segmentation/deeplabv3.py index 638b771c333..30c90013c9b 100644 --- a/torchvision/prototype/models/segmentation/deeplabv3.py +++ b/torchvision/prototype/models/segmentation/deeplabv3.py @@ -40,8 +40,8 @@ class DeepLabV3_ResNet50_Weights(WeightsEnum): "mIoU": 66.4, "acc": 92.4, }, - default=True, ) + default = 
CocoWithVocLabels_V1 class DeepLabV3_ResNet101_Weights(WeightsEnum): @@ -54,8 +54,8 @@ class DeepLabV3_ResNet101_Weights(WeightsEnum): "mIoU": 67.4, "acc": 92.4, }, - default=True, ) + default = CocoWithVocLabels_V1 class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum): @@ -68,8 +68,8 @@ class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum): "mIoU": 60.3, "acc": 91.2, }, - default=True, ) + default = CocoWithVocLabels_V1 def deeplabv3_resnet50( diff --git a/torchvision/prototype/models/segmentation/fcn.py b/torchvision/prototype/models/segmentation/fcn.py index 841e2ea95c5..42d15a0c3cf 100644 --- a/torchvision/prototype/models/segmentation/fcn.py +++ b/torchvision/prototype/models/segmentation/fcn.py @@ -30,8 +30,8 @@ class FCN_ResNet50_Weights(WeightsEnum): "mIoU": 60.5, "acc": 91.4, }, - default=True, ) + default = CocoWithVocLabels_V1 class FCN_ResNet101_Weights(WeightsEnum): @@ -44,8 +44,8 @@ class FCN_ResNet101_Weights(WeightsEnum): "mIoU": 63.7, "acc": 91.9, }, - default=True, ) + default = CocoWithVocLabels_V1 def fcn_resnet50( diff --git a/torchvision/prototype/models/segmentation/lraspp.py b/torchvision/prototype/models/segmentation/lraspp.py index 9743e02fa16..f80e1079c87 100644 --- a/torchvision/prototype/models/segmentation/lraspp.py +++ b/torchvision/prototype/models/segmentation/lraspp.py @@ -25,8 +25,8 @@ class LRASPP_MobileNet_V3_Large_Weights(WeightsEnum): "mIoU": 57.9, "acc": 91.2, }, - default=True, ) + default = CocoWithVocLabels_V1 def lraspp_mobilenet_v3_large( diff --git a/torchvision/prototype/models/shufflenetv2.py b/torchvision/prototype/models/shufflenetv2.py index 9fa98c44223..a8857c2996e 100644 --- a/torchvision/prototype/models/shufflenetv2.py +++ b/torchvision/prototype/models/shufflenetv2.py @@ -57,8 +57,8 @@ class ShuffleNet_V2_X0_5_Weights(WeightsEnum): "acc@1": 69.362, "acc@5": 88.316, }, - default=True, ) + default = ImageNet1K_V1 class ShuffleNet_V2_X1_0_Weights(WeightsEnum): @@ -70,8 +70,8 @@ class ShuffleNet_V2_X1_0_Weights(WeightsEnum): "acc@1": 60.552, "acc@5": 81.746, }, - default=True, ) + default = ImageNet1K_V1 class ShuffleNet_V2_X1_5_Weights(WeightsEnum): diff --git a/torchvision/prototype/models/squeezenet.py b/torchvision/prototype/models/squeezenet.py index fdfaa01e8be..77c9a1629d4 100644 --- a/torchvision/prototype/models/squeezenet.py +++ b/torchvision/prototype/models/squeezenet.py @@ -30,8 +30,8 @@ class SqueezeNet1_0_Weights(WeightsEnum): "acc@1": 58.092, "acc@5": 80.420, }, - default=True, ) + default = ImageNet1K_V1 class SqueezeNet1_1_Weights(WeightsEnum): @@ -43,8 +43,8 @@ class SqueezeNet1_1_Weights(WeightsEnum): "acc@1": 58.178, "acc@5": 80.624, }, - default=True, ) + default = ImageNet1K_V1 def squeezenet1_0(weights: Optional[SqueezeNet1_0_Weights] = None, progress: bool = True, **kwargs: Any) -> SqueezeNet: diff --git a/torchvision/prototype/models/vgg.py b/torchvision/prototype/models/vgg.py index a357426693d..708608826e0 100644 --- a/torchvision/prototype/models/vgg.py +++ b/torchvision/prototype/models/vgg.py @@ -57,8 +57,8 @@ class VGG11_Weights(WeightsEnum): "acc@1": 69.020, "acc@5": 88.628, }, - default=True, ) + default = ImageNet1K_V1 class VGG11_BN_Weights(WeightsEnum): @@ -70,8 +70,8 @@ class VGG11_BN_Weights(WeightsEnum): "acc@1": 70.370, "acc@5": 89.810, }, - default=True, ) + default = ImageNet1K_V1 class VGG13_Weights(WeightsEnum): @@ -83,8 +83,8 @@ class VGG13_Weights(WeightsEnum): "acc@1": 69.928, "acc@5": 89.246, }, - default=True, ) + default = ImageNet1K_V1 class VGG13_BN_Weights(WeightsEnum): @@ -96,8 
+96,8 @@ class VGG13_BN_Weights(WeightsEnum): "acc@1": 71.586, "acc@5": 90.374, }, - default=True, ) + default = ImageNet1K_V1 class VGG16_Weights(WeightsEnum): @@ -109,7 +109,6 @@ class VGG16_Weights(WeightsEnum): "acc@1": 71.592, "acc@5": 90.382, }, - default=True, ) # We port the features of a VGG16 backbone trained by amdegroot because unlike the one on TorchVision, it uses the # same input standardization method as the paper. Only the `features` weights have proper values, those on the @@ -127,8 +126,8 @@ class VGG16_Weights(WeightsEnum): "acc@1": float("nan"), "acc@5": float("nan"), }, - default=False, ) + default = ImageNet1K_V1 class VGG16_BN_Weights(WeightsEnum): @@ -140,8 +139,8 @@ class VGG16_BN_Weights(WeightsEnum): "acc@1": 73.360, "acc@5": 91.516, }, - default=True, ) + default = ImageNet1K_V1 class VGG19_Weights(WeightsEnum): @@ -153,8 +152,8 @@ class VGG19_Weights(WeightsEnum): "acc@1": 72.376, "acc@5": 90.876, }, - default=True, ) + default = ImageNet1K_V1 class VGG19_BN_Weights(WeightsEnum): @@ -166,8 +165,8 @@ class VGG19_BN_Weights(WeightsEnum): "acc@1": 74.218, "acc@5": 91.842, }, - default=True, ) + default = ImageNet1K_V1 def vgg11(weights: Optional[VGG11_Weights] = None, progress: bool = True, **kwargs: Any) -> VGG: diff --git a/torchvision/prototype/models/video/resnet.py b/torchvision/prototype/models/video/resnet.py index c75f618a8b1..48c4293f0e1 100644 --- a/torchvision/prototype/models/video/resnet.py +++ b/torchvision/prototype/models/video/resnet.py @@ -68,8 +68,8 @@ class R3D_18_Weights(WeightsEnum): "acc@1": 52.75, "acc@5": 75.45, }, - default=True, ) + default = Kinetics400_V1 class MC3_18_Weights(WeightsEnum): @@ -81,8 +81,8 @@ class MC3_18_Weights(WeightsEnum): "acc@1": 53.90, "acc@5": 76.29, }, - default=True, ) + default = Kinetics400_V1 class R2Plus1D_18_Weights(WeightsEnum): @@ -94,8 +94,8 @@ class R2Plus1D_18_Weights(WeightsEnum): "acc@1": 57.50, "acc@5": 78.81, }, - default=True, ) + default = Kinetics400_V1 def r3d_18(weights: Optional[R3D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet: From 39cf02a67ce303765bc021befbac92433577807b Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Tue, 30 Nov 2021 17:59:43 +0100 Subject: [PATCH 04/23] improve COCO prototype (#4650) * improve COCO prototype * test 2017 annotations * add option to include captions * fix categories and add tests * cleanup * add correct image size to bounding boxes * fix annotation collation * appease mypy * add benchmark * always use image as reference * another refactor * add support for segmentations * add support for segmentations * fix CI dependencies --- .circleci/config.yml | 2 +- .circleci/config.yml.in | 2 +- test/builtin_dataset_mocks.py | 114 ++++++++++ test/datasets_utils.py | 7 + test/test_prototype_builtin_datasets.py | 28 ++- .../datasets/_builtin/coco.categories | 91 ++++++++ .../prototype/datasets/_builtin/coco.py | 162 ++++++++++++--- torchvision/prototype/datasets/benchmark.py | 195 +++++++++++------- .../prototype/features/_bounding_box.py | 2 +- 9 files changed, 498 insertions(+), 105 deletions(-) create mode 100644 torchvision/prototype/datasets/_builtin/coco.categories diff --git a/.circleci/config.yml b/.circleci/config.yml index 90ff1ffe079..bc724dcc3df 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -351,7 +351,7 @@ jobs: - install_torchvision - install_prototype_dependencies - pip_install: - args: scipy + args: scipy pycocotools descr: Install optional dependencies - run: name: Enable prototype tests diff --git 
a/.circleci/config.yml.in b/.circleci/config.yml.in index cadd8efccb2..c029fa766ad 100644 --- a/.circleci/config.yml.in +++ b/.circleci/config.yml.in @@ -351,7 +351,7 @@ jobs: - install_torchvision - install_prototype_dependencies - pip_install: - args: scipy + args: scipy pycocotools descr: Install optional dependencies - run: name: Enable prototype tests diff --git a/test/builtin_dataset_mocks.py b/test/builtin_dataset_mocks.py index 8d27240c75d..f399125b0af 100644 --- a/test/builtin_dataset_mocks.py +++ b/test/builtin_dataset_mocks.py @@ -1,5 +1,6 @@ import functools import gzip +import json import lzma import pathlib import pickle @@ -8,6 +9,7 @@ from typing import Any, Dict, Tuple import numpy as np +import PIL.Image import pytest import torch from datasets_utils import create_image_folder, make_tar, make_zip @@ -18,7 +20,9 @@ from torchvision.prototype.datasets._api import find from torchvision.prototype.utils._internal import add_suggestion + make_tensor = functools.partial(_make_tensor, device="cpu") +make_scalar = functools.partial(make_tensor, ()) __all__ = ["load"] @@ -490,3 +494,113 @@ def imagenet(info, root, config): make_tar(root, f"{devkit_root}.tar.gz", devkit_root, compression="gz") return num_samples + + +class CocoMockData: + @classmethod + def _make_images_archive(cls, root, name, *, num_samples): + image_paths = create_image_folder( + root, name, file_name_fn=lambda idx: f"{idx:012d}.jpg", num_examples=num_samples + ) + + images_meta = [] + for path in image_paths: + with PIL.Image.open(path) as image: + width, height = image.size + images_meta.append(dict(file_name=path.name, id=int(path.stem), width=width, height=height)) + + make_zip(root, f"{name}.zip") + + return images_meta + + @classmethod + def _make_annotations_json( + cls, + root, + name, + *, + images_meta, + fn, + ): + num_anns_per_image = torch.randint(1, 5, (len(images_meta),)) + num_anns_total = int(num_anns_per_image.sum()) + ann_ids_iter = iter(torch.arange(num_anns_total)[torch.randperm(num_anns_total)]) + + anns_meta = [] + for image_meta, num_anns in zip(images_meta, num_anns_per_image): + for _ in range(num_anns): + ann_id = int(next(ann_ids_iter)) + anns_meta.append(dict(fn(ann_id, image_meta), id=ann_id, image_id=image_meta["id"])) + anns_meta.sort(key=lambda ann: ann["id"]) + + with open(root / name, "w") as file: + json.dump(dict(images=images_meta, annotations=anns_meta), file) + + return num_anns_per_image + + @staticmethod + def _make_instances_data(ann_id, image_meta): + def make_rle_segmentation(): + height, width = image_meta["height"], image_meta["width"] + numel = height * width + counts = [] + while sum(counts) <= numel: + counts.append(int(torch.randint(5, 8, ()))) + if sum(counts) > numel: + counts[-1] -= sum(counts) - numel + return dict(counts=counts, size=[height, width]) + + return dict( + segmentation=make_rle_segmentation(), + bbox=make_tensor((4,), dtype=torch.float32, low=0).tolist(), + iscrowd=True, + area=float(make_scalar(dtype=torch.float32)), + category_id=int(make_scalar(dtype=torch.int64)), + ) + + @staticmethod + def _make_captions_data(ann_id, image_meta): + return dict(caption=f"Caption {ann_id} describing image {image_meta['id']}.") + + @classmethod + def _make_annotations(cls, root, name, *, images_meta): + num_anns_per_image = torch.zeros((len(images_meta),), dtype=torch.int64) + for annotations, fn in ( + ("instances", cls._make_instances_data), + ("captions", cls._make_captions_data), + ): + num_anns_per_image += cls._make_annotations_json( + root, 
f"{annotations}_{name}.json", images_meta=images_meta, fn=fn + ) + + return int(num_anns_per_image.sum()) + + @classmethod + def generate( + cls, + root, + *, + year, + num_samples, + ): + annotations_dir = root / "annotations" + annotations_dir.mkdir() + + for split in ("train", "val"): + config_name = f"{split}{year}" + + images_meta = cls._make_images_archive(root, config_name, num_samples=num_samples) + cls._make_annotations( + annotations_dir, + config_name, + images_meta=images_meta, + ) + + make_zip(root, f"annotations_trainval{year}.zip", annotations_dir) + + return num_samples + + +@dataset_mocks.register_mock_data_fn +def coco(info, root, config): + return CocoMockData.generate(root, year=config.year, num_samples=5) diff --git a/test/datasets_utils.py b/test/datasets_utils.py index 4e06fdfffbe..4012b29e7c5 100644 --- a/test/datasets_utils.py +++ b/test/datasets_utils.py @@ -866,6 +866,13 @@ def _split_files_or_dirs(root, *files_or_dirs): def _make_archive(root, name, *files_or_dirs, opener, adder, remove=True): archive = pathlib.Path(root) / name + if not files_or_dirs: + dir = archive.parent / archive.name.replace("".join(archive.suffixes), "") + if dir.exists() and dir.is_dir(): + files_or_dirs = (dir,) + else: + raise ValueError("No file or dir provided.") + files, dirs = _split_files_or_dirs(root, *files_or_dirs) with opener(archive) as fh: diff --git a/test/test_prototype_builtin_datasets.py b/test/test_prototype_builtin_datasets.py index 4c2a05e2f0a..9f12324fe34 100644 --- a/test/test_prototype_builtin_datasets.py +++ b/test/test_prototype_builtin_datasets.py @@ -13,6 +13,17 @@ def to_bytes(file): return file.read() +def config_id(name, config): + parts = [name] + for name, value in config.items(): + if isinstance(value, bool): + part = ("" if value else "no_") + name + else: + part = str(value) + parts.append(part) + return "-".join(parts) + + def dataset_parametrization(*names, decoder=to_bytes): if not names: # TODO: Replace this with torchvision.prototype.datasets.list() as soon as all builtin datasets are supported @@ -27,16 +38,17 @@ def dataset_parametrization(*names, decoder=to_bytes): "caltech256", "caltech101", "imagenet", + "coco", ) - params = [] - for name in names: - for config in datasets.info(name)._configs: - id = f"{name}-{'-'.join([str(value) for value in config.values()])}" - dataset, mock_info = builtin_dataset_mocks.load(name, decoder=decoder, **config) - params.append(pytest.param(dataset, mock_info, id=id)) - - return pytest.mark.parametrize(("dataset", "mock_info"), params) + return pytest.mark.parametrize( + ("dataset", "mock_info"), + [ + pytest.param(*builtin_dataset_mocks.load(name, decoder=decoder, **config), id=config_id(name, config)) + for name in names + for config in datasets.info(name)._configs + ], + ) class TestCommon: diff --git a/torchvision/prototype/datasets/_builtin/coco.categories b/torchvision/prototype/datasets/_builtin/coco.categories new file mode 100644 index 00000000000..abe02d907e9 --- /dev/null +++ b/torchvision/prototype/datasets/_builtin/coco.categories @@ -0,0 +1,91 @@ +__background__,N/A +person,person +bicycle,vehicle +car,vehicle +motorcycle,vehicle +airplane,vehicle +bus,vehicle +train,vehicle +truck,vehicle +boat,vehicle +traffic light,outdoor +fire hydrant,outdoor +N/A,N/A +stop sign,outdoor +parking meter,outdoor +bench,outdoor +bird,animal +cat,animal +dog,animal +horse,animal +sheep,animal +cow,animal +elephant,animal +bear,animal +zebra,animal +giraffe,animal +N/A,N/A +backpack,accessory +umbrella,accessory 
+N/A,N/A +N/A,N/A +handbag,accessory +tie,accessory +suitcase,accessory +frisbee,sports +skis,sports +snowboard,sports +sports ball,sports +kite,sports +baseball bat,sports +baseball glove,sports +skateboard,sports +surfboard,sports +tennis racket,sports +bottle,kitchen +N/A,N/A +wine glass,kitchen +cup,kitchen +fork,kitchen +knife,kitchen +spoon,kitchen +bowl,kitchen +banana,food +apple,food +sandwich,food +orange,food +broccoli,food +carrot,food +hot dog,food +pizza,food +donut,food +cake,food +chair,furniture +couch,furniture +potted plant,furniture +bed,furniture +N/A,N/A +dining table,furniture +N/A,N/A +N/A,N/A +toilet,furniture +N/A,N/A +tv,electronic +laptop,electronic +mouse,electronic +remote,electronic +keyboard,electronic +cell phone,electronic +microwave,appliance +oven,appliance +toaster,appliance +sink,appliance +refrigerator,appliance +N/A,N/A +book,indoor +clock,indoor +vase,indoor +scissors,indoor +teddy bear,indoor +hair drier,indoor +toothbrush,indoor diff --git a/torchvision/prototype/datasets/_builtin/coco.py b/torchvision/prototype/datasets/_builtin/coco.py index 641d584dc43..194cfa8df7d 100644 --- a/torchvision/prototype/datasets/_builtin/coco.py +++ b/torchvision/prototype/datasets/_builtin/coco.py @@ -1,6 +1,8 @@ import io import pathlib -from typing import Any, Callable, Dict, List, Optional, Tuple +import re +from collections import OrderedDict +from typing import Any, Callable, Dict, List, Optional, Tuple, cast import torch from torchdata.datapipes.iter import ( @@ -26,24 +28,44 @@ from torchvision.prototype.datasets.utils._internal import ( MappingIterator, INFINITE_BUFFER_SIZE, + BUILTIN_DIR, getitem, path_accessor, - path_comparator, ) +from torchvision.prototype.features import BoundingBox, Label +from torchvision.prototype.features._feature import DEFAULT +from torchvision.prototype.utils._internal import FrozenMapping -HERE = pathlib.Path(__file__).parent + +class CocoLabel(Label): + super_category: Optional[str] + + @classmethod + def _parse_meta_data( + cls, + category: Optional[str] = DEFAULT, # type: ignore[assignment] + super_category: Optional[str] = DEFAULT, # type: ignore[assignment] + ) -> Dict[str, Tuple[Any, Any]]: + return dict(category=(category, None), super_category=(super_category, None)) class Coco(Dataset): def _make_info(self) -> DatasetInfo: + name = "coco" + categories, super_categories = zip(*DatasetInfo.read_categories_file(BUILTIN_DIR / f"{name}.categories")) + return DatasetInfo( - "coco", + name, type=DatasetType.IMAGE, + dependencies=("pycocotools",), + categories=categories, homepage="https://cocodataset.org/", valid_options=dict( split=("train", "val"), year=("2017", "2014"), + annotations=(*self._ANN_DECODERS.keys(), None), ), + extra=dict(category_to_super_category=FrozenMapping(zip(categories, super_categories))), ) _IMAGE_URL_BASE = "http://images.cocodataset.org/zips" @@ -73,6 +95,62 @@ def resources(self, config: DatasetConfig) -> List[OnlineResource]: ) return [images, meta] + def _segmentation_to_mask(self, segmentation: Any, *, is_crowd: bool, image_size: Tuple[int, int]) -> torch.Tensor: + from pycocotools import mask + + if is_crowd: + segmentation = mask.frPyObjects(segmentation, *image_size) + else: + segmentation = mask.merge(mask.frPyObjects(segmentation, *image_size)) + + return torch.from_numpy(mask.decode(segmentation)).to(torch.bool) + + def _decode_instances_anns(self, anns: List[Dict[str, Any]], image_meta: Dict[str, Any]) -> Dict[str, Any]: + image_size = (image_meta["height"], image_meta["width"]) + 
labels = [ann["category_id"] for ann in anns] + categories = [self.info.categories[label] for label in labels] + return dict( + # TODO: create a segmentation feature + segmentations=torch.stack( + [ + self._segmentation_to_mask(ann["segmentation"], is_crowd=ann["iscrowd"], image_size=image_size) + for ann in anns + ] + ), + areas=torch.tensor([ann["area"] for ann in anns]), + crowds=torch.tensor([ann["iscrowd"] for ann in anns], dtype=torch.bool), + bounding_boxes=BoundingBox( + [ann["bbox"] for ann in anns], + format="xywh", + image_size=image_size, + ), + labels=[ + CocoLabel( + label, + category=category, + super_category=self.info.extra.category_to_super_category[category], + ) + for label, category in zip(labels, categories) + ], + ann_ids=[ann["id"] for ann in anns], + ) + + def _decode_captions_ann(self, anns: List[Dict[str, Any]], image_meta: Dict[str, Any]) -> Dict[str, Any]: + return dict( + captions=[ann["caption"] for ann in anns], + ann_ids=[ann["id"] for ann in anns], + ) + + _ANN_DECODERS = OrderedDict([("instances", _decode_instances_anns), ("captions", _decode_captions_ann)]) + + _META_FILE_PATTERN = re.compile( + fr"(?P({'|'.join(_ANN_DECODERS.keys())}))_(?P[a-zA-Z]+)(?P\d+)[.]json" + ) + + def _filter_meta_files(self, data: Tuple[str, Any], *, split: str, year: str, annotations: str) -> bool: + match = self._META_FILE_PATTERN.match(pathlib.Path(data[0]).name) + return bool(match and match["split"] == split and match["year"] == year and match["annotations"] == annotations) + def _classify_meta(self, data: Tuple[str, Any]) -> Optional[int]: key, _ = data if key == "images": @@ -82,28 +160,27 @@ def _classify_meta(self, data: Tuple[str, Any]) -> Optional[int]: else: return None - def _decode_ann(self, ann: Dict[str, Any]) -> Dict[str, Any]: - area = torch.tensor(ann["area"]) - iscrowd = bool(ann["iscrowd"]) - bbox = torch.tensor(ann["bbox"]) - id = ann["id"] - return dict(area=area, iscrowd=iscrowd, bbox=bbox, id=id) + def _collate_and_decode_image( + self, data: Tuple[str, io.IOBase], *, decoder: Optional[Callable[[io.IOBase], torch.Tensor]] + ) -> Dict[str, Any]: + path, buffer = data + return dict(path=path, image=decoder(buffer) if decoder else buffer) def _collate_and_decode_sample( self, data: Tuple[Tuple[List[Dict[str, Any]], Dict[str, Any]], Tuple[str, io.IOBase]], *, + annotations: Optional[str], decoder: Optional[Callable[[io.IOBase], torch.Tensor]], ) -> Dict[str, Any]: ann_data, image_data = data anns, image_meta = ann_data - path, buffer = image_data - - anns = [self._decode_ann(ann) for ann in anns] - image = decoder(buffer) if decoder else buffer + sample = self._collate_and_decode_image(image_data, decoder=decoder) + if annotations: + sample.update(self._ANN_DECODERS[annotations](self, anns, image_meta)) - return dict(anns=anns, id=image_meta["id"], path=path, image=image) + return sample def _make_datapipe( self, @@ -114,8 +191,18 @@ def _make_datapipe( ) -> IterDataPipe[Dict[str, Any]]: images_dp, meta_dp = resource_dps + images_dp = ZipArchiveReader(images_dp) + + if config.annotations is None: + dp = Shuffler(images_dp) + return Mapper(dp, self._collate_and_decode_image, fn_kwargs=dict(decoder=decoder)) + meta_dp = ZipArchiveReader(meta_dp) - meta_dp = Filter(meta_dp, path_comparator("name", f"instances_{config.split}{config.year}.json")) + meta_dp = Filter( + meta_dp, + self._filter_meta_files, + fn_kwargs=dict(split=config.split, year=config.year, annotations=config.annotations), + ) meta_dp = JsonParser(meta_dp) meta_dp = Mapper(meta_dp, getitem(1)) 
meta_dp = MappingIterator(meta_dp) @@ -129,24 +216,20 @@ def _make_datapipe( images_meta_dp = Mapper(images_meta_dp, getitem(1)) images_meta_dp = UnBatcher(images_meta_dp) + images_meta_dp = Shuffler(images_meta_dp) anns_meta_dp = Mapper(anns_meta_dp, getitem(1)) anns_meta_dp = UnBatcher(anns_meta_dp) + anns_meta_dp = Grouper(anns_meta_dp, group_key_fn=getitem("image_id"), buffer_size=INFINITE_BUFFER_SIZE) - anns_dp = Grouper(anns_meta_dp, group_key_fn=getitem("image_id"), buffer_size=INFINITE_BUFFER_SIZE) - # drop images without annotations - anns_dp = Filter(anns_dp, bool) - anns_dp = Shuffler(anns_dp, buffer_size=INFINITE_BUFFER_SIZE) anns_dp = IterKeyZipper( - anns_dp, + anns_meta_dp, images_meta_dp, key_fn=getitem(0, "image_id"), ref_key_fn=getitem("id"), buffer_size=INFINITE_BUFFER_SIZE, ) - images_dp = ZipArchiveReader(images_dp) - dp = IterKeyZipper( anns_dp, images_dp, @@ -154,4 +237,35 @@ def _make_datapipe( ref_key_fn=path_accessor("name"), buffer_size=INFINITE_BUFFER_SIZE, ) - return Mapper(dp, self._collate_and_decode_sample, fn_kwargs=dict(decoder=decoder)) + return Mapper( + dp, self._collate_and_decode_sample, fn_kwargs=dict(annotations=config.annotations, decoder=decoder) + ) + + def _generate_categories(self, root: pathlib.Path) -> Tuple[Tuple[str, str]]: + config = self.default_config + resources = self.resources(config) + + dp = resources[1].to_datapipe(pathlib.Path(root) / self.name) + dp = ZipArchiveReader(dp) + dp = Filter( + dp, self._filter_meta_files, fn_kwargs=dict(split=config.split, year=config.year, annotations="instances") + ) + dp = JsonParser(dp) + + _, meta = next(iter(dp)) + # List[Tuple[super_category, id, category]] + label_data = [cast(Tuple[str, int, str], tuple(info.values())) for info in meta["categories"]] + + # COCO actually defines 91 categories, but only 80 of them have instances. Still, the category_id refers to the + # full set. To keep the labels dense, we fill the gaps with N/A. Note that there are only 10 gaps, so the total + # number of categories is 90 rather than 91. + _, ids, _ = zip(*label_data) + missing_ids = set(range(1, max(ids) + 1)) - set(ids) + label_data.extend([("N/A", id, "N/A") for id in missing_ids]) + + # We also add a background category to be used during segmentation. 
+ label_data.append(("N/A", 0, "__background__")) + + super_categories, _, categories = zip(*sorted(label_data, key=lambda info: info[1])) + + return cast(Tuple[Tuple[str, str]], tuple(zip(categories, super_categories))) diff --git a/torchvision/prototype/datasets/benchmark.py b/torchvision/prototype/datasets/benchmark.py index a555c021368..104ef95c9ae 100644 --- a/torchvision/prototype/datasets/benchmark.py +++ b/torchvision/prototype/datasets/benchmark.py @@ -3,7 +3,6 @@ import argparse import collections.abc import contextlib -import copy import inspect import itertools import os @@ -20,6 +19,7 @@ from torch.utils.data import DataLoader from torch.utils.data.dataloader_experimental import DataLoader2 from torchvision import datasets as legacy_datasets +from torchvision.datasets.utils import extract_archive from torchvision.prototype import datasets as new_datasets from torchvision.transforms import PILToTensor @@ -27,6 +27,7 @@ def main( name, *, + variant=None, legacy=True, new=True, start=True, @@ -36,46 +37,57 @@ def main( temp_root=None, num_workers=0, ): - for benchmark in DATASET_BENCHMARKS: - if benchmark.name == name: - break - else: - raise ValueError(f"No DatasetBenchmark available for dataset '{name}'") - - if legacy and start: - print( - "legacy", - "cold_start", - Measurement.time(benchmark.legacy_cold_start(temp_root, num_workers=num_workers), number=num_starts), - ) - print( - "legacy", - "warm_start", - Measurement.time(benchmark.legacy_warm_start(temp_root, num_workers=num_workers), number=num_starts), - ) + benchmarks = [ + benchmark + for benchmark in DATASET_BENCHMARKS + if benchmark.name == name and (variant is None or benchmark.variant == variant) + ] + if not benchmarks: + msg = f"No DatasetBenchmark available for dataset '{name}'" + if variant is not None: + msg += f" and variant '{variant}'" + raise ValueError(msg) + + for benchmark in benchmarks: + print("#" * 80) + print(f"{benchmark.name}" + (f" ({benchmark.variant})" if benchmark.variant is not None else "")) + + if legacy and start: + print( + "legacy", + "cold_start", + Measurement.time(benchmark.legacy_cold_start(temp_root, num_workers=num_workers), number=num_starts), + ) + print( + "legacy", + "warm_start", + Measurement.time(benchmark.legacy_warm_start(temp_root, num_workers=num_workers), number=num_starts), + ) - if legacy and iteration: - print( - "legacy", - "iteration", - Measurement.iterations_per_time( - benchmark.legacy_iteration(temp_root, num_workers=num_workers, num_samples=num_samples) - ), - ) + if legacy and iteration: + print( + "legacy", + "iteration", + Measurement.iterations_per_time( + benchmark.legacy_iteration(temp_root, num_workers=num_workers, num_samples=num_samples) + ), + ) - if new and start: - print( - "new", - "cold_start", - Measurement.time(benchmark.new_cold_start(num_workers=num_workers), number=num_starts), - ) + if new and start: + print( + "new", + "cold_start", + Measurement.time(benchmark.new_cold_start(num_workers=num_workers), number=num_starts), + ) - if new and iteration: - print( - "new", - "iteration", - Measurement.iterations_per_time(benchmark.new_iteration(num_workers=num_workers, num_samples=num_samples)), - ) + if new and iteration: + print( + "new", + "iteration", + Measurement.iterations_per_time( + benchmark.new_iteration(num_workers=num_workers, num_samples=num_samples) + ), + ) class DatasetBenchmark: @@ -83,6 +95,7 @@ def __init__( self, name: str, *, + variant=None, legacy_cls=None, new_config=None, legacy_config_map=None, @@ -90,6 +103,7 @@ def 
__init__( prepare_legacy_root=None, ): self.name = name + self.variant = variant self.new_raw_dataset = new_datasets._api.find(name) self.legacy_cls = legacy_cls or self._find_legacy_cls() @@ -97,14 +111,11 @@ def __init__( if new_config is None: new_config = self.new_raw_dataset.default_config elif isinstance(new_config, dict): - new_config = new_datasets.utils.DatasetConfig(new_config) + new_config = self.new_raw_dataset.info.make_config(**new_config) self.new_config = new_config - self.legacy_config = (legacy_config_map or dict)(copy.copy(new_config)) - - self.legacy_special_options = (legacy_special_options_map or self._legacy_special_options_map)( - copy.copy(new_config) - ) + self.legacy_config_map = legacy_config_map + self.legacy_special_options_map = legacy_special_options_map or self._legacy_special_options_map self.prepare_legacy_root = prepare_legacy_root def new_dataset(self, *, num_workers=0): @@ -142,12 +153,15 @@ def context_manager(): return context_manager() def legacy_dataset(self, root, *, num_workers=0, download=None): - special_options = self.legacy_special_options.copy() + legacy_config = self.legacy_config_map(self, root) if self.legacy_config_map else dict() + + special_options = self.legacy_special_options_map(self) if "download" in special_options and download is not None: special_options["download"] = download + with self.suppress_output(): return DataLoader( - self.legacy_cls(str(root), **self.legacy_config, **special_options), + self.legacy_cls(legacy_config.pop("root", str(root)), **legacy_config, **special_options), shuffle=True, num_workers=num_workers, ) @@ -260,16 +274,17 @@ def _find_legacy_cls(self): "download", } - def _legacy_special_options_map(self, config): + @staticmethod + def _legacy_special_options_map(benchmark): available_parameters = set() - for cls in self.legacy_cls.__mro__: + for cls in benchmark.legacy_cls.__mro__: if cls is legacy_datasets.VisionDataset: break available_parameters.update(inspect.signature(cls.__init__).parameters) - available_special_kwargs = self._SPECIAL_KWARGS.intersection(available_parameters) + available_special_kwargs = benchmark._SPECIAL_KWARGS.intersection(available_parameters) special_options = dict() @@ -345,15 +360,15 @@ def _compute_mean_and_std(cls, t): return mean, std -def no_split(config): - legacy_config = dict(config) +def no_split(benchmark, root): + legacy_config = dict(benchmark.new_config) del legacy_config["split"] return legacy_config def bool_split(name="train"): - def legacy_config_map(config): - legacy_config = dict(config) + def legacy_config_map(benchmark, root): + legacy_config = dict(benchmark.new_config) legacy_config[name] = legacy_config.pop("split") == "train" return legacy_config @@ -400,8 +415,8 @@ def __call__(self, *inputs): return tuple(transform(input) for transform, input in zip(self.transforms, inputs)) -def caltech101_legacy_config_map(config): - legacy_config = no_split(config) +def caltech101_legacy_config_map(benchmark, root): + legacy_config = no_split(benchmark, root) # The new dataset always returns the category and annotation legacy_config["target_type"] = ("category", "annotation") return legacy_config @@ -410,8 +425,8 @@ def caltech101_legacy_config_map(config): mnist_base_folder = base_folder(lambda benchmark: pathlib.Path(benchmark.legacy_cls.__name__) / "raw") -def mnist_legacy_config_map(config): - return dict(train=config.split == "train") +def mnist_legacy_config_map(benchmark, root): + return dict(train=benchmark.new_config.split == "train") def 
emnist_prepare_legacy_root(benchmark, root): @@ -420,20 +435,36 @@ def emnist_prepare_legacy_root(benchmark, root): return folder -def emnist_legacy_config_map(config): - legacy_config = mnist_legacy_config_map(config) - legacy_config["split"] = config.image_set.replace("_", "").lower() +def emnist_legacy_config_map(benchmark, root): + legacy_config = mnist_legacy_config_map(benchmark, root) + legacy_config["split"] = benchmark.new_config.image_set.replace("_", "").lower() return legacy_config -def qmnist_legacy_config_map(config): - legacy_config = mnist_legacy_config_map(config) - legacy_config["what"] = config.split +def qmnist_legacy_config_map(benchmark, root): + legacy_config = mnist_legacy_config_map(benchmark, root) + legacy_config["what"] = benchmark.new_config.split # The new dataset always returns the full label legacy_config["compat"] = False return legacy_config +def coco_legacy_config_map(benchmark, root): + images, _ = benchmark.new_raw_dataset.resources(benchmark.new_config) + return dict( + root=str(root / pathlib.Path(images.file_name).stem), + annFile=str( + root / "annotations" / f"{benchmark.variant}_{benchmark.new_config.split}{benchmark.new_config.year}.json" + ), + ) + + +def coco_prepare_legacy_root(benchmark, root): + images, annotations = benchmark.new_raw_dataset.resources(benchmark.new_config) + extract_archive(str(root / images.file_name)) + extract_archive(str(root / annotations.file_name)) + + DATASET_BENCHMARKS = [ DatasetBenchmark( "caltech101", @@ -453,8 +484,8 @@ def qmnist_legacy_config_map(config): DatasetBenchmark( "celeba", prepare_legacy_root=base_folder(), - legacy_config_map=lambda config: dict( - split="valid" if config.split == "val" else config.split, + legacy_config_map=lambda benchmark: dict( + split="valid" if benchmark.new_config.split == "val" else benchmark.new_config.split, # The new dataset always returns all annotations target_type=("attr", "identity", "bbox", "landmarks"), ), @@ -495,17 +526,37 @@ def qmnist_legacy_config_map(config): DatasetBenchmark( "sbd", legacy_cls=legacy_datasets.SBDataset, - legacy_config_map=lambda config: dict( - image_set=config.split, - mode="boundaries" if config.boundaries else "segmentation", + legacy_config_map=lambda benchmark: dict( + image_set=benchmark.new_config.split, + mode="boundaries" if benchmark.new_config.boundaries else "segmentation", ), - legacy_special_options_map=lambda config: dict( + legacy_special_options_map=lambda benchmark: dict( download=True, - transforms=JointTransform(PILToTensor(), torch.tensor if config.boundaries else PILToTensor()), + transforms=JointTransform( + PILToTensor(), torch.tensor if benchmark.new_config.boundaries else PILToTensor() + ), ), ), DatasetBenchmark("voc", legacy_cls=legacy_datasets.VOCDetection), DatasetBenchmark("imagenet", legacy_cls=legacy_datasets.ImageNet), + DatasetBenchmark( + "coco", + variant="instances", + legacy_cls=legacy_datasets.CocoDetection, + new_config=dict(split="train", annotations="instances"), + legacy_config_map=coco_legacy_config_map, + prepare_legacy_root=coco_prepare_legacy_root, + legacy_special_options_map=lambda benchmark: dict(transform=PILToTensor(), target_transform=None), + ), + DatasetBenchmark( + "coco", + variant="captions", + legacy_cls=legacy_datasets.CocoCaptions, + new_config=dict(split="train", annotations="captions"), + legacy_config_map=coco_legacy_config_map, + prepare_legacy_root=coco_prepare_legacy_root, + legacy_special_options_map=lambda benchmark: dict(transform=PILToTensor(), target_transform=None), + 
), ] @@ -517,6 +568,9 @@ def parse_args(argv=None): ) parser.add_argument("name", help="Name of the dataset to benchmark.") + parser.add_argument( + "--variant", help="Variant of the dataset. If omitted all available variants will be benchmarked." + ) parser.add_argument( "-n", @@ -591,6 +645,7 @@ def parse_args(argv=None): try: main( args.name, + variant=args.variant, legacy=args.legacy, new=args.new, start=args.start, diff --git a/torchvision/prototype/features/_bounding_box.py b/torchvision/prototype/features/_bounding_box.py index e225bf3df83..64ba449ae76 100644 --- a/torchvision/prototype/features/_bounding_box.py +++ b/torchvision/prototype/features/_bounding_box.py @@ -118,7 +118,7 @@ def guess_image_size(cls, data: torch.Tensor, *, format: BoundingBoxFormat) -> T if data.dtype.is_floating_point: w = w.ceil() h = h.ceil() - return int(h), int(w) + return int(h.max()), int(w.max()) @classmethod def from_parts( From a7a3675678cce66751050619d0c86c2ceafd7295 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Tue, 30 Nov 2021 20:42:29 +0000 Subject: [PATCH 05/23] Feature extraction default arguments - ops (#4810) making torchvision ops leaf nodes by default --- test/test_ops.py | 125 +++++++++++- torchvision/models/feature_extraction.py | 38 +++- torchvision/ops/poolers.py | 234 ++++++++++++++--------- 3 files changed, 303 insertions(+), 94 deletions(-) diff --git a/test/test_ops.py b/test/test_ops.py index c8e4e396c7e..d687e2e2952 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -7,12 +7,54 @@ import numpy as np import pytest import torch +import torch.fx from common_utils import needs_cuda, cpu_and_gpu, assert_equal from PIL import Image from torch import nn, Tensor from torch.autograd import gradcheck from torch.nn.modules.utils import _pair from torchvision import models, ops +from torchvision.models.feature_extraction import get_graph_node_names + + +class RoIOpTesterModuleWrapper(nn.Module): + def __init__(self, obj): + super().__init__() + self.layer = obj + self.n_inputs = 2 + + def forward(self, a, b): + self.layer(a, b) + + +class MultiScaleRoIAlignModuleWrapper(nn.Module): + def __init__(self, obj): + super().__init__() + self.layer = obj + self.n_inputs = 3 + + def forward(self, a, b, c): + self.layer(a, b, c) + + +class DeformConvModuleWrapper(nn.Module): + def __init__(self, obj): + super().__init__() + self.layer = obj + self.n_inputs = 3 + + def forward(self, a, b, c): + self.layer(a, b, c) + + +class StochasticDepthWrapper(nn.Module): + def __init__(self, obj): + super().__init__() + self.layer = obj + self.n_inputs = 1 + + def forward(self, a): + self.layer(a) class RoIOpTester(ABC): @@ -46,6 +88,15 @@ def test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, **kwar tol = 1e-3 if (x_dtype is torch.half or rois_dtype is torch.half) else 1e-5 torch.testing.assert_close(gt_y.to(y), y, rtol=tol, atol=tol) + @pytest.mark.parametrize("device", cpu_and_gpu()) + def test_is_leaf_node(self, device): + op_obj = self.make_obj(wrap=True).to(device=device) + graph_node_names = get_graph_node_names(op_obj) + + assert len(graph_node_names) == 2 + assert len(graph_node_names[0]) == len(graph_node_names[1]) + assert len(graph_node_names[0]) == 1 + op_obj.n_inputs + @pytest.mark.parametrize("seed", range(10)) @pytest.mark.parametrize("device", cpu_and_gpu()) @pytest.mark.parametrize("contiguous", (True, False)) @@ -91,6 +142,10 @@ def _helper_boxes_shape(self, func): def fn(*args, **kwargs): pass + @abstractmethod + def make_obj(*args, **kwargs): + pass + 
@abstractmethod def get_script_fn(*args, **kwargs): pass @@ -104,6 +159,10 @@ class TestRoiPool(RoIOpTester): def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs): return ops.RoIPool((pool_h, pool_w), spatial_scale)(x, rois) + def make_obj(self, pool_h=5, pool_w=5, spatial_scale=1, wrap=False): + obj = ops.RoIPool((pool_h, pool_w), spatial_scale) + return RoIOpTesterModuleWrapper(obj) if wrap else obj + def get_script_fn(self, rois, pool_size): scriped = torch.jit.script(ops.roi_pool) return lambda x: scriped(x, rois, pool_size) @@ -144,6 +203,10 @@ class TestPSRoIPool(RoIOpTester): def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs): return ops.PSRoIPool((pool_h, pool_w), 1)(x, rois) + def make_obj(self, pool_h=5, pool_w=5, spatial_scale=1, wrap=False): + obj = ops.PSRoIPool((pool_h, pool_w), spatial_scale) + return RoIOpTesterModuleWrapper(obj) if wrap else obj + def get_script_fn(self, rois, pool_size): scriped = torch.jit.script(ops.ps_roi_pool) return lambda x: scriped(x, rois, pool_size) @@ -223,6 +286,12 @@ def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, aligne (pool_h, pool_w), spatial_scale=spatial_scale, sampling_ratio=sampling_ratio, aligned=aligned )(x, rois) + def make_obj(self, pool_h=5, pool_w=5, spatial_scale=1, sampling_ratio=-1, aligned=False, wrap=False): + obj = ops.RoIAlign( + (pool_h, pool_w), spatial_scale=spatial_scale, sampling_ratio=sampling_ratio, aligned=aligned + ) + return RoIOpTesterModuleWrapper(obj) if wrap else obj + def get_script_fn(self, rois, pool_size): scriped = torch.jit.script(ops.roi_align) return lambda x: scriped(x, rois, pool_size) @@ -374,6 +443,10 @@ class TestPSRoIAlign(RoIOpTester): def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs): return ops.PSRoIAlign((pool_h, pool_w), spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)(x, rois) + def make_obj(self, pool_h=5, pool_w=5, spatial_scale=1, sampling_ratio=-1, wrap=False): + obj = ops.PSRoIAlign((pool_h, pool_w), spatial_scale=spatial_scale, sampling_ratio=sampling_ratio) + return RoIOpTesterModuleWrapper(obj) if wrap else obj + def get_script_fn(self, rois, pool_size): scriped = torch.jit.script(ops.ps_roi_align) return lambda x: scriped(x, rois, pool_size) @@ -422,12 +495,18 @@ def test_boxes_shape(self): class TestMultiScaleRoIAlign: + def make_obj(self, fmap_names=None, output_size=(7, 7), sampling_ratio=2, wrap=False): + if fmap_names is None: + fmap_names = ["0"] + obj = ops.poolers.MultiScaleRoIAlign(fmap_names, output_size, sampling_ratio) + return MultiScaleRoIAlignModuleWrapper(obj) if wrap else obj + def test_msroialign_repr(self): fmap_names = ["0"] output_size = (7, 7) sampling_ratio = 2 # Pass mock feature map names - t = ops.poolers.MultiScaleRoIAlign(fmap_names, output_size, sampling_ratio) + t = self.make_obj(fmap_names, output_size, sampling_ratio, wrap=False) # Check integrity of object __repr__ attribute expected_string = ( @@ -436,6 +515,15 @@ def test_msroialign_repr(self): ) assert repr(t) == expected_string + @pytest.mark.parametrize("device", cpu_and_gpu()) + def test_is_leaf_node(self, device): + op_obj = self.make_obj(wrap=True).to(device=device) + graph_node_names = get_graph_node_names(op_obj) + + assert len(graph_node_names) == 2 + assert len(graph_node_names[0]) == len(graph_node_names[1]) + assert len(graph_node_names[0]) == 1 + op_obj.n_inputs + class TestNMS: def _reference_nms(self, boxes, scores, iou_threshold): @@ -693,6 +781,21 
@@ def get_fn_args(self, device, contiguous, batch_sz, dtype): return x, weight, offset, mask, bias, stride, pad, dilation + def make_obj(self, in_channels=6, out_channels=2, kernel_size=(3, 2), groups=2, wrap=False): + obj = ops.DeformConv2d( + in_channels, out_channels, kernel_size, stride=(2, 1), padding=(1, 0), dilation=(2, 1), groups=groups + ) + return DeformConvModuleWrapper(obj) if wrap else obj + + @pytest.mark.parametrize("device", cpu_and_gpu()) + def test_is_leaf_node(self, device): + op_obj = self.make_obj(wrap=True).to(device=device) + graph_node_names = get_graph_node_names(op_obj) + + assert len(graph_node_names) == 2 + assert len(graph_node_names[0]) == len(graph_node_names[1]) + assert len(graph_node_names[0]) == 1 + op_obj.n_inputs + @pytest.mark.parametrize("device", cpu_and_gpu()) @pytest.mark.parametrize("contiguous", (True, False)) @pytest.mark.parametrize("batch_sz", (0, 33)) @@ -705,9 +808,9 @@ def test_forward(self, device, contiguous, batch_sz, dtype=None): groups = 2 tol = 2e-3 if dtype is torch.half else 1e-5 - layer = ops.DeformConv2d( - in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups - ).to(device=x.device, dtype=dtype) + layer = self.make_obj(in_channels, out_channels, kernel_size, groups, wrap=False).to( + device=x.device, dtype=dtype + ) res = layer(x, offset, mask) weight = layer.weight.data @@ -1200,6 +1303,20 @@ def test_stochastic_depth(self, seed, mode, p): elif p == 1: assert out.equal(torch.zeros_like(x)) + def make_obj(self, p, mode, wrap=False): + obj = ops.StochasticDepth(p, mode) + return StochasticDepthWrapper(obj) if wrap else obj + + @pytest.mark.parametrize("p", (0, 1)) + @pytest.mark.parametrize("mode", ["batch", "row"]) + def test_is_leaf_node(self, p, mode): + op_obj = self.make_obj(p, mode, wrap=True) + graph_node_names = get_graph_node_names(op_obj) + + assert len(graph_node_names) == 2 + assert len(graph_node_names[0]) == len(graph_node_names[1]) + assert len(graph_node_names[0]) == 1 + op_obj.n_inputs + class TestUtils: @pytest.mark.parametrize("norm_layer", [None, nn.BatchNorm2d, nn.LayerNorm]) diff --git a/torchvision/models/feature_extraction.py b/torchvision/models/feature_extraction.py index 0095f21f62b..0a2b597da23 100644 --- a/torchvision/models/feature_extraction.py +++ b/torchvision/models/feature_extraction.py @@ -1,11 +1,14 @@ +import inspect +import math import re import warnings from collections import OrderedDict from copy import deepcopy from itertools import chain -from typing import Dict, Callable, List, Union, Optional, Tuple +from typing import Dict, Callable, List, Union, Optional, Tuple, Any import torch +import torchvision from torch import fx from torch import nn from torch.fx.graph_module import _copy_attr @@ -172,8 +175,19 @@ def _warn_graph_differences(train_tracer: NodePathTracer, eval_tracer: NodePathT warnings.warn(msg + suggestion_msg) +def _get_leaf_modules_for_ops() -> List[type]: + members = inspect.getmembers(torchvision.ops) + result = [] + for _, obj in members: + if inspect.isclass(obj) and issubclass(obj, torch.nn.Module): + result.append(obj) + return result + + def get_graph_node_names( - model: nn.Module, tracer_kwargs: Dict = {}, suppress_diff_warning: bool = False + model: nn.Module, + tracer_kwargs: Optional[Dict[str, Any]] = None, + suppress_diff_warning: bool = False, ) -> Tuple[List[str], List[str]]: """ Dev utility to return node names in order of execution. 
See note on node @@ -198,6 +212,7 @@ def get_graph_node_names( tracer_kwargs (dict, optional): a dictionary of keywork arguments for ``NodePathTracer`` (they are eventually passed onto `torch.fx.Tracer `_). + By default it will be set to wrap and make leaf nodes all torchvision ops. suppress_diff_warning (bool, optional): whether to suppress a warning when there are discrepancies between the train and eval version of the graph. Defaults to False. @@ -211,6 +226,14 @@ def get_graph_node_names( >>> model = torchvision.models.resnet18() >>> train_nodes, eval_nodes = get_graph_node_names(model) """ + if tracer_kwargs is None: + tracer_kwargs = { + "autowrap_modules": ( + math, + torchvision.ops, + ), + "leaf_modules": _get_leaf_modules_for_ops(), + } is_training = model.training train_tracer = NodePathTracer(**tracer_kwargs) train_tracer.trace(model.train()) @@ -294,7 +317,7 @@ def create_feature_extractor( return_nodes: Optional[Union[List[str], Dict[str, str]]] = None, train_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None, eval_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None, - tracer_kwargs: Dict = {}, + tracer_kwargs: Optional[Dict[str, Any]] = None, suppress_diff_warning: bool = False, ) -> fx.GraphModule: """ @@ -353,6 +376,7 @@ def create_feature_extractor( tracer_kwargs (dict, optional): a dictionary of keywork arguments for ``NodePathTracer`` (which passes them onto it's parent class `torch.fx.Tracer `_). + By default it will be set to wrap and make leaf nodes all torchvision ops. suppress_diff_warning (bool, optional): whether to suppress a warning when there are discrepancies between the train and eval version of the graph. Defaults to False. @@ -397,6 +421,14 @@ def create_feature_extractor( >>> 'autowrap_functions': [leaf_function]}) """ + if tracer_kwargs is None: + tracer_kwargs = { + "autowrap_modules": ( + math, + torchvision.ops, + ), + "leaf_modules": _get_leaf_modules_for_ops(), + } is_training = model.training assert any( diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py index a0cd238dc75..05cf5e4032e 100644 --- a/torchvision/ops/poolers.py +++ b/torchvision/ops/poolers.py @@ -1,6 +1,8 @@ +import warnings from typing import Optional, List, Dict, Tuple, Union import torch +import torch.fx import torchvision from torch import nn, Tensor from torchvision.ops.boxes import box_area @@ -106,6 +108,126 @@ def _infer_scale(feature: Tensor, original_size: List[int]) -> float: return possible_scales[0] +@torch.fx.wrap +def _setup_scales( + features: List[Tensor], image_shapes: List[Tuple[int, int]], canonical_scale: int, canonical_level: int +) -> Tuple[List[float], LevelMapper]: + assert len(image_shapes) != 0 + max_x = 0 + max_y = 0 + for shape in image_shapes: + max_x = max(shape[0], max_x) + max_y = max(shape[1], max_y) + original_input_shape = (max_x, max_y) + + scales = [_infer_scale(feat, original_input_shape) for feat in features] + # get the levels in the feature map by leveraging the fact that the network always + # downsamples by a factor of 2 at each level. 
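+        # For example, a feature map at 1/4 of the input resolution has scale 0.25 and maps to level -log2(0.25) = 2.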
+ lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item() + lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item() + + map_levels = initLevelMapper( + int(lvl_min), + int(lvl_max), + canonical_scale=canonical_scale, + canonical_level=canonical_level, + ) + return scales, map_levels + + +@torch.fx.wrap +def _filter_input(x: Dict[str, Tensor], featmap_names: List[str]) -> List[Tensor]: + x_filtered = [] + for k, v in x.items(): + if k in featmap_names: + x_filtered.append(v) + return x_filtered + + +@torch.fx.wrap +def _multiscale_roi_align( + x_filtered: List[Tensor], + boxes: List[Tensor], + output_size: List[int], + sampling_ratio: int, + scales: Optional[List[float]], + mapper: Optional[LevelMapper], +) -> Tensor: + """ + Args: + x_filtered (List[Tensor]): List of input tensors. + boxes (List[Tensor[N, 4]]): boxes to be used to perform the pooling operation, in + (x1, y1, x2, y2) format and in the image reference size, not the feature map + reference. The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + output_size (Union[List[Tuple[int, int]], List[int]]): size of the output + sampling_ratio (int): sampling ratio for ROIAlign + scales (Optional[List[float]]): If None, scales will be automatically infered. Default value is None. + mapper (Optional[LevelMapper]): If none, mapper will be automatically infered. Default value is None. + Returns: + result (Tensor) + """ + assert scales is not None + assert mapper is not None + + num_levels = len(x_filtered) + rois = _convert_to_roi_format(boxes) + + if num_levels == 1: + return roi_align( + x_filtered[0], + rois, + output_size=output_size, + spatial_scale=scales[0], + sampling_ratio=sampling_ratio, + ) + + levels = mapper(boxes) + + num_rois = len(rois) + num_channels = x_filtered[0].shape[1] + + dtype, device = x_filtered[0].dtype, x_filtered[0].device + result = torch.zeros( + ( + num_rois, + num_channels, + ) + + output_size, + dtype=dtype, + device=device, + ) + + tracing_results = [] + for level, (per_level_feature, scale) in enumerate(zip(x_filtered, scales)): + idx_in_level = torch.where(levels == level)[0] + rois_per_level = rois[idx_in_level] + + result_idx_in_level = roi_align( + per_level_feature, + rois_per_level, + output_size=output_size, + spatial_scale=scale, + sampling_ratio=sampling_ratio, + ) + + if torchvision._is_tracing(): + tracing_results.append(result_idx_in_level.to(dtype)) + else: + # result and result_idx_in_level's dtypes are based on dtypes of different + # elements in x_filtered. x_filtered contains tensors output by different + # layers. When autocast is active, it may choose different dtypes for + # different layers' outputs. Therefore, we defensively match result's dtype + # before copying elements from result_idx_in_level in the following op. + # We need to cast manually (can't rely on autocast to cast for us) because + # the op acts on result in-place, and autocast only affects out-of-place ops. + result[idx_in_level] = result_idx_in_level.to(result.dtype) + + if torchvision._is_tracing(): + result = _onnx_merge_levels(levels, tracing_results) + + return result + + class MultiScaleRoIAlign(nn.Module): """ Multi-scale RoIAlign pooling, which is useful for detection with or without FPN. 
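Note on the hunk above: the helpers _setup_scales, _filter_input and _multiscale_roi_align are decorated with @torch.fx.wrap so that torch.fx symbolic tracing records them as single call_function nodes instead of tracing through their data-dependent control flow. The snippet below is only a minimal, self-contained sketch of that effect; the toy helper and module are illustrative and are not part of this patch:

import torch
import torch.fx

@torch.fx.wrap
def _pick_first_nonempty(tensors):
    # Data-dependent branching like this would break symbolic tracing if it were traced into.
    for t in tensors:
        if t.numel() > 0:
            return t
    return tensors[0]

class Toy(torch.nn.Module):
    def forward(self, a, b):
        return _pick_first_nonempty([a, b]) + 1

graph_module = torch.fx.symbolic_trace(Toy())
print(graph_module.graph)  # _pick_first_nonempty shows up as one call_function (leaf) node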
@@ -165,31 +287,24 @@ def __init__( self.canonical_scale = canonical_scale self.canonical_level = canonical_level - def setup_scales( + def convert_to_roi_format(self, boxes: List[Tensor]) -> Tensor: + # TODO: deprecate eventually + warnings.warn("`convert_to_roi_format` will no loger be public in future releases.", FutureWarning) + return _convert_to_roi_format(boxes) + + def infer_scale(self, feature: Tensor, original_size: List[int]) -> float: + # TODO: deprecate eventually + warnings.warn("`infer_scale` will no loger be public in future releases.", FutureWarning) + return _infer_scale(feature, original_size) + + def setup_setup_scales( self, features: List[Tensor], image_shapes: List[Tuple[int, int]], ) -> None: - assert len(image_shapes) != 0 - max_x = 0 - max_y = 0 - for shape in image_shapes: - max_x = max(shape[0], max_x) - max_y = max(shape[1], max_y) - original_input_shape = (max_x, max_y) - - scales = [_infer_scale(feat, original_input_shape) for feat in features] - # get the levels in the feature map by leveraging the fact that the network always - # downsamples by a factor of 2 at each level. - lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item() - lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item() - self.scales = scales - self.map_levels = initLevelMapper( - int(lvl_min), - int(lvl_max), - canonical_scale=self.canonical_scale, - canonical_level=self.canonical_level, - ) + # TODO: deprecate eventually + warnings.warn("`setup_setup_scales` will no loger be public in future releases.", FutureWarning) + self.scales, self.map_levels = _setup_scales(features, image_shapes, self.canonical_scale, self.canonical_level) def forward( self, @@ -210,76 +325,21 @@ def forward( Returns: result (Tensor) """ - x_filtered = [] - for k, v in x.items(): - if k in self.featmap_names: - x_filtered.append(v) - num_levels = len(x_filtered) - rois = _convert_to_roi_format(boxes) - if self.scales is None: - self.setup_scales(x_filtered, image_shapes) - - scales = self.scales - assert scales is not None - - if num_levels == 1: - return roi_align( - x_filtered[0], - rois, - output_size=self.output_size, - spatial_scale=scales[0], - sampling_ratio=self.sampling_ratio, + x_filtered = _filter_input(x, self.featmap_names) + if self.scales is None or self.map_levels is None: + self.scales, self.map_levels = _setup_scales( + x_filtered, image_shapes, self.canonical_scale, self.canonical_level ) - mapper = self.map_levels - assert mapper is not None - - levels = mapper(boxes) - - num_rois = len(rois) - num_channels = x_filtered[0].shape[1] - - dtype, device = x_filtered[0].dtype, x_filtered[0].device - result = torch.zeros( - ( - num_rois, - num_channels, - ) - + self.output_size, - dtype=dtype, - device=device, + return _multiscale_roi_align( + x_filtered, + boxes, + self.output_size, + self.sampling_ratio, + self.scales, + self.map_levels, ) - tracing_results = [] - for level, (per_level_feature, scale) in enumerate(zip(x_filtered, scales)): - idx_in_level = torch.where(levels == level)[0] - rois_per_level = rois[idx_in_level] - - result_idx_in_level = roi_align( - per_level_feature, - rois_per_level, - output_size=self.output_size, - spatial_scale=scale, - sampling_ratio=self.sampling_ratio, - ) - - if torchvision._is_tracing(): - tracing_results.append(result_idx_in_level.to(dtype)) - else: - # result and result_idx_in_level's dtypes are based on dtypes of different - # elements in x_filtered. x_filtered contains tensors output by different - # layers. 
When autocast is active, it may choose different dtypes for - # different layers' outputs. Therefore, we defensively match result's dtype - # before copying elements from result_idx_in_level in the following op. - # We need to cast manually (can't rely on autocast to cast for us) because - # the op acts on result in-place, and autocast only affects out-of-place ops. - result[idx_in_level] = result_idx_in_level.to(result.dtype) - - if torchvision._is_tracing(): - result = _onnx_merge_levels(levels, tracing_results) - - return result - def __repr__(self) -> str: return ( f"{self.__class__.__name__}(featmap_names={self.featmap_names}, " From 33123bee8554af85989bc7188bf3c7dc1ba5e8c6 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Wed, 1 Dec 2021 00:08:09 +0100 Subject: [PATCH 06/23] [FBcode->GH] remove unused requests functionality (#5014) Co-authored-by: Prabhat Roy --- torchvision/datasets/utils.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/torchvision/datasets/utils.py b/torchvision/datasets/utils.py index e3222eca41d..acaac029137 100644 --- a/torchvision/datasets/utils.py +++ b/torchvision/datasets/utils.py @@ -233,14 +233,6 @@ def download_file_from_google_drive(file_id: str, root: str, filename: Optional[ _save_response_content(itertools.chain((first_chunk,), content), fpath) -def _get_confirm_token(response: "requests.models.Response") -> Optional[str]: # type: ignore[name-defined] - for key, value in response.cookies.items(): - if key.startswith("download_warning"): - return value - - return None - - def _save_response_content( response_gen: Iterator[bytes], destination: str, # type: ignore[name-defined] From 51f015a7708ae20e672b795a5605b2550a090363 Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Tue, 30 Nov 2021 23:59:16 +0000 Subject: [PATCH 07/23] cover VideoReader --- torchvision/io/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/torchvision/io/__init__.py b/torchvision/io/__init__.py index 382e06fb4f2..1c91bbfb523 100644 --- a/torchvision/io/__init__.py +++ b/torchvision/io/__init__.py @@ -2,6 +2,7 @@ import torch +from ..utils import _log_api_usage_once from ._video_opt import ( Timebase, VideoMetaData, @@ -106,6 +107,7 @@ class VideoReader: """ def __init__(self, path: str, stream: str = "video", num_threads: int = 0) -> None: + _log_api_usage_once(self) if not _has_video_opt(): raise RuntimeError( "Not compiled with video_reader support, " From dedf489d35b7f91bef2e87e2d0638793a2c08c39 Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Wed, 1 Dec 2021 00:46:32 +0000 Subject: [PATCH 08/23] cover c++ APIs --- torchvision/csrc/io/image/cpu/decode_jpeg.cpp | 1 + torchvision/csrc/io/image/cpu/decode_png.cpp | 1 + torchvision/csrc/io/image/cpu/encode_jpeg.cpp | 1 + torchvision/csrc/io/image/cpu/encode_png.cpp | 1 + torchvision/csrc/io/image/cpu/read_write_file.cpp | 2 ++ torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp | 1 + torchvision/csrc/io/video/video.cpp | 1 + torchvision/csrc/io/video_reader/video_reader.cpp | 4 ++++ 8 files changed, 12 insertions(+) diff --git a/torchvision/csrc/io/image/cpu/decode_jpeg.cpp b/torchvision/csrc/io/image/cpu/decode_jpeg.cpp index c6e971c3b12..dc60f5e8f71 100644 --- a/torchvision/csrc/io/image/cpu/decode_jpeg.cpp +++ b/torchvision/csrc/io/image/cpu/decode_jpeg.cpp @@ -70,6 +70,7 @@ static void torch_jpeg_set_source_mgr( } // namespace torch::Tensor decode_jpeg(const torch::Tensor& data, ImageReadMode mode) { + C10_LOG_API_USAGE_ONCE("torchvision.io.decode_jpeg"); // Check that the input tensor dtype is uint8 
TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); // Check that the input tensor is 1-dimensional diff --git a/torchvision/csrc/io/image/cpu/decode_png.cpp b/torchvision/csrc/io/image/cpu/decode_png.cpp index 0df55daed68..0c33cbfadb2 100644 --- a/torchvision/csrc/io/image/cpu/decode_png.cpp +++ b/torchvision/csrc/io/image/cpu/decode_png.cpp @@ -23,6 +23,7 @@ torch::Tensor decode_png( const torch::Tensor& data, ImageReadMode mode, bool allow_16_bits) { + C10_LOG_API_USAGE_ONCE("torchvision.io.decode_png"); // Check that the input tensor dtype is uint8 TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); // Check that the input tensor is 1-dimensional diff --git a/torchvision/csrc/io/image/cpu/encode_jpeg.cpp b/torchvision/csrc/io/image/cpu/encode_jpeg.cpp index a8dbc7b2a28..be09694b28e 100644 --- a/torchvision/csrc/io/image/cpu/encode_jpeg.cpp +++ b/torchvision/csrc/io/image/cpu/encode_jpeg.cpp @@ -25,6 +25,7 @@ using JpegSizeType = size_t; using namespace detail; torch::Tensor encode_jpeg(const torch::Tensor& data, int64_t quality) { + C10_LOG_API_USAGE_ONCE("torchvision.io.encode_jpeg"); // Define compression structures and error handling struct jpeg_compress_struct cinfo {}; struct torch_jpeg_error_mgr jerr {}; diff --git a/torchvision/csrc/io/image/cpu/encode_png.cpp b/torchvision/csrc/io/image/cpu/encode_png.cpp index d28bad95890..655cf38ae26 100644 --- a/torchvision/csrc/io/image/cpu/encode_png.cpp +++ b/torchvision/csrc/io/image/cpu/encode_png.cpp @@ -63,6 +63,7 @@ void torch_png_write_data( } // namespace torch::Tensor encode_png(const torch::Tensor& data, int64_t compression_level) { + C10_LOG_API_USAGE_ONCE("torchvision.io.encode_png"); // Define compression structures and error handling png_structp png_write; png_infop info_ptr; diff --git a/torchvision/csrc/io/image/cpu/read_write_file.cpp b/torchvision/csrc/io/image/cpu/read_write_file.cpp index a0bb7df72d5..120ba34b65f 100644 --- a/torchvision/csrc/io/image/cpu/read_write_file.cpp +++ b/torchvision/csrc/io/image/cpu/read_write_file.cpp @@ -33,6 +33,7 @@ std::wstring utf8_decode(const std::string& str) { #endif torch::Tensor read_file(const std::string& filename) { + C10_LOG_API_USAGE_ONCE("torchvision.io.read_file"); #ifdef _WIN32 // According to // https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/stat-functions?view=vs-2019, @@ -76,6 +77,7 @@ torch::Tensor read_file(const std::string& filename) { } void write_file(const std::string& filename, torch::Tensor& data) { + C10_LOG_API_USAGE_ONCE("torchvision.io.write_file"); // Check that the input tensor is on CPU TORCH_CHECK(data.device() == torch::kCPU, "Input tensor should be on CPU"); diff --git a/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp b/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp index 68f63ced427..017fdebc9ef 100644 --- a/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp +++ b/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp @@ -33,6 +33,7 @@ torch::Tensor decode_jpeg_cuda( const torch::Tensor& data, ImageReadMode mode, torch::Device device) { + C10_LOG_API_USAGE_ONCE("torchvision.io.decode_jpeg_cuda"); TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); TORCH_CHECK( diff --git a/torchvision/csrc/io/video/video.cpp b/torchvision/csrc/io/video/video.cpp index d5a24398694..de7b557f7ae 100644 --- a/torchvision/csrc/io/video/video.cpp +++ b/torchvision/csrc/io/video/video.cpp @@ -157,6 +157,7 @@ void Video::_getDecoderParams( } // _get decoder params 
Video::Video(std::string videoPath, std::string stream, int64_t numThreads) { + C10_LOG_API_USAGE_ONCE("torchvision.io.Video"); // set number of threads global numThreads_ = numThreads; // parse stream information diff --git a/torchvision/csrc/io/video_reader/video_reader.cpp b/torchvision/csrc/io/video_reader/video_reader.cpp index 51b0750b431..3580387718d 100644 --- a/torchvision/csrc/io/video_reader/video_reader.cpp +++ b/torchvision/csrc/io/video_reader/video_reader.cpp @@ -583,6 +583,7 @@ torch::List read_video_from_memory( int64_t audioEndPts, int64_t audioTimeBaseNum, int64_t audioTimeBaseDen) { + C10_LOG_API_USAGE_ONCE("torchvision.io.read_video_from_memory"); return readVideo( false, input_video, @@ -627,6 +628,7 @@ torch::List read_video_from_file( int64_t audioEndPts, int64_t audioTimeBaseNum, int64_t audioTimeBaseDen) { + C10_LOG_API_USAGE_ONCE("torchvision.io.read_video_from_file"); torch::Tensor dummy_input_video = torch::ones({0}); return readVideo( true, @@ -653,10 +655,12 @@ torch::List read_video_from_file( } torch::List probe_video_from_memory(torch::Tensor input_video) { + C10_LOG_API_USAGE_ONCE("torchvision.io.probe_video_from_memory"); return probeVideo(false, input_video, ""); } torch::List probe_video_from_file(std::string videoPath) { + C10_LOG_API_USAGE_ONCE("torchvision.io.probe_video_from_file"); torch::Tensor dummy_input_video = torch::ones({0}); return probeVideo(true, dummy_input_video, videoPath); } From a370e79eb71a5120c7b8c58101ad326764167326 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Wed, 1 Dec 2021 10:56:51 +0000 Subject: [PATCH 09/23] Add bias parameter to ConvNormActivation (#5012) * Add bias parameter to ConvNormActivation * Update torchvision/ops/misc.py Co-authored-by: Vasilis Vryniotis Co-authored-by: Vasilis Vryniotis --- torchvision/ops/misc.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/torchvision/ops/misc.py b/torchvision/ops/misc.py index fac9a3570d6..392517cb772 100644 --- a/torchvision/ops/misc.py +++ b/torchvision/ops/misc.py @@ -116,6 +116,7 @@ class ConvNormActivation(torch.nn.Sequential): activation_layer (Callable[..., torch.nn.Module], optinal): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer wont be used. Default: ``torch.nn.ReLU`` dilation (int): Spacing between kernel elements. Default: 1 inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True`` + bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``. """ @@ -131,9 +132,12 @@ def __init__( activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU, dilation: int = 1, inplace: bool = True, + bias: Optional[bool] = None, ) -> None: if padding is None: padding = (kernel_size - 1) // 2 * dilation + if bias is None: + bias = norm_layer is None layers = [ torch.nn.Conv2d( in_channels, @@ -143,7 +147,7 @@ def __init__( padding, dilation=dilation, groups=groups, - bias=norm_layer is None, + bias=bias, ) ] if norm_layer is not None: From 0aa3717d7d93a08dd3eb30512f817135456fa3db Mon Sep 17 00:00:00 2001 From: Yuxin Wu Date: Wed, 1 Dec 2021 05:54:40 -0800 Subject: [PATCH 10/23] Change batched NMS threshold to choose for-loop version (#4990) According to the benchmark link https://github.com/pytorch/vision/issues/1311#issuecomment-781329339, for GPU the threshold should be higher (which is why detectron2 used 40k). 
This PR changes the threshold to be device dependent. Co-authored-by: Nicolas Hug Co-authored-by: Francisco Massa --- torchvision/ops/boxes.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py index 10a03a907e8..5ec46669be2 100644 --- a/torchvision/ops/boxes.py +++ b/torchvision/ops/boxes.py @@ -66,8 +66,7 @@ def batched_nms( _log_api_usage_once("torchvision.ops.batched_nms") # Benchmarks that drove the following thresholds are at # https://github.com/pytorch/vision/issues/1311#issuecomment-781329339 - # Ideally for GPU we'd use a higher threshold - if boxes.numel() > 4_000 and not torchvision._is_tracing(): + if boxes.numel() > (4000 if boxes.device.type == "cpu" else 20000) and not torchvision._is_tracing(): return _batched_nms_vanilla(boxes, scores, idxs, iou_threshold) else: return _batched_nms_coordinate_trick(boxes, scores, idxs, iou_threshold) From 874581cbe3277be9df8d4fddbc1962fedbf28ba7 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Wed, 1 Dec 2021 21:20:23 +0100 Subject: [PATCH 11/23] remove vanilla tensors from prototype datasets samples (#5018) --- .../prototype/datasets/_builtin/coco.py | 49 +++++++------------ torchvision/prototype/features/_feature.py | 2 +- .../prototype/transforms/_transform.py | 8 ++- 3 files changed, 27 insertions(+), 32 deletions(-) diff --git a/torchvision/prototype/datasets/_builtin/coco.py b/torchvision/prototype/datasets/_builtin/coco.py index 194cfa8df7d..0ba34167b51 100644 --- a/torchvision/prototype/datasets/_builtin/coco.py +++ b/torchvision/prototype/datasets/_builtin/coco.py @@ -32,23 +32,10 @@ getitem, path_accessor, ) -from torchvision.prototype.features import BoundingBox, Label -from torchvision.prototype.features._feature import DEFAULT +from torchvision.prototype.features import BoundingBox, Label, Feature from torchvision.prototype.utils._internal import FrozenMapping -class CocoLabel(Label): - super_category: Optional[str] - - @classmethod - def _parse_meta_data( - cls, - category: Optional[str] = DEFAULT, # type: ignore[assignment] - super_category: Optional[str] = DEFAULT, # type: ignore[assignment] - ) -> Dict[str, Tuple[Any, Any]]: - return dict(category=(category, None), super_category=(super_category, None)) - - class Coco(Dataset): def _make_info(self) -> DatasetInfo: name = "coco" @@ -111,27 +98,24 @@ def _decode_instances_anns(self, anns: List[Dict[str, Any]], image_meta: Dict[st categories = [self.info.categories[label] for label in labels] return dict( # TODO: create a segmentation feature - segmentations=torch.stack( - [ - self._segmentation_to_mask(ann["segmentation"], is_crowd=ann["iscrowd"], image_size=image_size) - for ann in anns - ] + segmentations=Feature( + torch.stack( + [ + self._segmentation_to_mask(ann["segmentation"], is_crowd=ann["iscrowd"], image_size=image_size) + for ann in anns + ] + ) ), - areas=torch.tensor([ann["area"] for ann in anns]), - crowds=torch.tensor([ann["iscrowd"] for ann in anns], dtype=torch.bool), + areas=Feature([ann["area"] for ann in anns]), + crowds=Feature([ann["iscrowd"] for ann in anns], dtype=torch.bool), bounding_boxes=BoundingBox( [ann["bbox"] for ann in anns], format="xywh", image_size=image_size, ), - labels=[ - CocoLabel( - label, - category=category, - super_category=self.info.extra.category_to_super_category[category], - ) - for label, category in zip(labels, categories) - ], + labels=Label(labels), + categories=categories, + super_categories=[self.info.extra.category_to_super_category[category] for 
category in categories], ann_ids=[ann["id"] for ann in anns], ) @@ -141,7 +125,12 @@ def _decode_captions_ann(self, anns: List[Dict[str, Any]], image_meta: Dict[str, ann_ids=[ann["id"] for ann in anns], ) - _ANN_DECODERS = OrderedDict([("instances", _decode_instances_anns), ("captions", _decode_captions_ann)]) + _ANN_DECODERS = OrderedDict( + [ + ("instances", _decode_instances_anns), + ("captions", _decode_captions_ann), + ] + ) _META_FILE_PATTERN = re.compile( fr"(?P({'|'.join(_ANN_DECODERS.keys())}))_(?P[a-zA-Z]+)(?P\d+)[.]json" diff --git a/torchvision/prototype/features/_feature.py b/torchvision/prototype/features/_feature.py index 81adea2ed82..cd52f1f80ad 100644 --- a/torchvision/prototype/features/_feature.py +++ b/torchvision/prototype/features/_feature.py @@ -12,7 +12,7 @@ class Feature(torch.Tensor): - _META_ATTRS: Set[str] + _META_ATTRS: Set[str] = set() _meta_data: Dict[str, Any] def __init_subclass__(cls): diff --git a/torchvision/prototype/transforms/_transform.py b/torchvision/prototype/transforms/_transform.py index 9fd07af1e77..8062ff0fad0 100644 --- a/torchvision/prototype/transforms/_transform.py +++ b/torchvision/prototype/transforms/_transform.py @@ -360,7 +360,13 @@ def _transform_recursively(self, sample: Any, *, params: Dict[str, Any]) -> Any: else: feature_type = type(sample) if not self.supports(feature_type): - if not issubclass(feature_type, features.Feature) or feature_type in self.NO_OP_FEATURE_TYPES: + if ( + not issubclass(feature_type, features.Feature) + # issubclass is not a strict check, but also allows the type checked against. Thus, we need to + # check it separately + or feature_type is features.Feature + or feature_type in self.NO_OP_FEATURE_TYPES + ): return sample raise TypeError( From 23ca7e7cd25589fc421fc95b564d83ebab06ece7 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Thu, 2 Dec 2021 09:56:15 +0000 Subject: [PATCH 12/23] Fix linebreak `dos-newlines` issue. 
(#5020) --- .../datasets/_builtin/coco.categories | 182 +++++++++--------- 1 file changed, 91 insertions(+), 91 deletions(-) diff --git a/torchvision/prototype/datasets/_builtin/coco.categories b/torchvision/prototype/datasets/_builtin/coco.categories index abe02d907e9..27e612f6d7d 100644 --- a/torchvision/prototype/datasets/_builtin/coco.categories +++ b/torchvision/prototype/datasets/_builtin/coco.categories @@ -1,91 +1,91 @@ -__background__,N/A -person,person -bicycle,vehicle -car,vehicle -motorcycle,vehicle -airplane,vehicle -bus,vehicle -train,vehicle -truck,vehicle -boat,vehicle -traffic light,outdoor -fire hydrant,outdoor -N/A,N/A -stop sign,outdoor -parking meter,outdoor -bench,outdoor -bird,animal -cat,animal -dog,animal -horse,animal -sheep,animal -cow,animal -elephant,animal -bear,animal -zebra,animal -giraffe,animal -N/A,N/A -backpack,accessory -umbrella,accessory -N/A,N/A -N/A,N/A -handbag,accessory -tie,accessory -suitcase,accessory -frisbee,sports -skis,sports -snowboard,sports -sports ball,sports -kite,sports -baseball bat,sports -baseball glove,sports -skateboard,sports -surfboard,sports -tennis racket,sports -bottle,kitchen -N/A,N/A -wine glass,kitchen -cup,kitchen -fork,kitchen -knife,kitchen -spoon,kitchen -bowl,kitchen -banana,food -apple,food -sandwich,food -orange,food -broccoli,food -carrot,food -hot dog,food -pizza,food -donut,food -cake,food -chair,furniture -couch,furniture -potted plant,furniture -bed,furniture -N/A,N/A -dining table,furniture -N/A,N/A -N/A,N/A -toilet,furniture -N/A,N/A -tv,electronic -laptop,electronic -mouse,electronic -remote,electronic -keyboard,electronic -cell phone,electronic -microwave,appliance -oven,appliance -toaster,appliance -sink,appliance -refrigerator,appliance -N/A,N/A -book,indoor -clock,indoor -vase,indoor -scissors,indoor -teddy bear,indoor -hair drier,indoor -toothbrush,indoor +__background__,N/A +person,person +bicycle,vehicle +car,vehicle +motorcycle,vehicle +airplane,vehicle +bus,vehicle +train,vehicle +truck,vehicle +boat,vehicle +traffic light,outdoor +fire hydrant,outdoor +N/A,N/A +stop sign,outdoor +parking meter,outdoor +bench,outdoor +bird,animal +cat,animal +dog,animal +horse,animal +sheep,animal +cow,animal +elephant,animal +bear,animal +zebra,animal +giraffe,animal +N/A,N/A +backpack,accessory +umbrella,accessory +N/A,N/A +N/A,N/A +handbag,accessory +tie,accessory +suitcase,accessory +frisbee,sports +skis,sports +snowboard,sports +sports ball,sports +kite,sports +baseball bat,sports +baseball glove,sports +skateboard,sports +surfboard,sports +tennis racket,sports +bottle,kitchen +N/A,N/A +wine glass,kitchen +cup,kitchen +fork,kitchen +knife,kitchen +spoon,kitchen +bowl,kitchen +banana,food +apple,food +sandwich,food +orange,food +broccoli,food +carrot,food +hot dog,food +pizza,food +donut,food +cake,food +chair,furniture +couch,furniture +potted plant,furniture +bed,furniture +N/A,N/A +dining table,furniture +N/A,N/A +N/A,N/A +toilet,furniture +N/A,N/A +tv,electronic +laptop,electronic +mouse,electronic +remote,electronic +keyboard,electronic +cell phone,electronic +microwave,appliance +oven,appliance +toaster,appliance +sink,appliance +refrigerator,appliance +N/A,N/A +book,indoor +clock,indoor +vase,indoor +scissors,indoor +teddy bear,indoor +hair drier,indoor +toothbrush,indoor From fe4ba3093857d2633886633c6a5017f523fbf138 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Thu, 2 Dec 2021 11:08:01 +0100 Subject: [PATCH 13/23] add pre-commit hook to fix line endings (#5021) --- .pre-commit-config.yaml | 2 + 
android/gradlew.bat | 168 +- .../datasets/_builtin/imagenet.categories | 2000 ++++++++--------- 3 files changed, 1086 insertions(+), 1084 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7f66a0de672..89f69bba52e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,6 +6,8 @@ repos: - id: check-toml - id: check-yaml exclude: packaging/.* + - id: mixed-line-ending + args: [--fix=lf] - id: end-of-file-fixer # - repo: https://github.com/asottile/pyupgrade diff --git a/android/gradlew.bat b/android/gradlew.bat index e95643d6a2c..f9553162f12 100644 --- a/android/gradlew.bat +++ b/android/gradlew.bat @@ -1,84 +1,84 @@ -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS= - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto init - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. -set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
+set DEFAULT_JVM_OPTS= + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/torchvision/prototype/datasets/_builtin/imagenet.categories b/torchvision/prototype/datasets/_builtin/imagenet.categories index 18e24b85311..7b6006ff57f 100644 --- a/torchvision/prototype/datasets/_builtin/imagenet.categories +++ b/torchvision/prototype/datasets/_builtin/imagenet.categories @@ -1,1000 +1,1000 @@ -tench,n01440764 -goldfish,n01443537 -great white shark,n01484850 -tiger shark,n01491361 -hammerhead,n01494475 -electric ray,n01496331 -stingray,n01498041 -cock,n01514668 -hen,n01514859 -ostrich,n01518878 -brambling,n01530575 -goldfinch,n01531178 -house finch,n01532829 -junco,n01534433 -indigo bunting,n01537544 -robin,n01558993 -bulbul,n01560419 -jay,n01580077 -magpie,n01582220 -chickadee,n01592084 -water ouzel,n01601694 -kite,n01608432 -bald eagle,n01614925 -vulture,n01616318 -great grey owl,n01622779 -European fire salamander,n01629819 -common newt,n01630670 -eft,n01631663 -spotted salamander,n01632458 -axolotl,n01632777 -bullfrog,n01641577 -tree frog,n01644373 -tailed frog,n01644900 -loggerhead,n01664065 -leatherback turtle,n01665541 -mud turtle,n01667114 -terrapin,n01667778 -box turtle,n01669191 -banded gecko,n01675722 -common iguana,n01677366 -American chameleon,n01682714 -whiptail,n01685808 -agama,n01687978 -frilled lizard,n01688243 -alligator lizard,n01689811 -Gila monster,n01692333 -green lizard,n01693334 -African chameleon,n01694178 -Komodo dragon,n01695060 -African crocodile,n01697457 -American alligator,n01698640 -triceratops,n01704323 -thunder snake,n01728572 -ringneck snake,n01728920 -hognose snake,n01729322 -green snake,n01729977 -king snake,n01734418 -garter snake,n01735189 -water snake,n01737021 -vine snake,n01739381 -night snake,n01740131 -boa constrictor,n01742172 -rock python,n01744401 -Indian cobra,n01748264 -green mamba,n01749939 -sea snake,n01751748 -horned viper,n01753488 
-diamondback,n01755581 -sidewinder,n01756291 -trilobite,n01768244 -harvestman,n01770081 -scorpion,n01770393 -black and gold garden spider,n01773157 -barn spider,n01773549 -garden spider,n01773797 -black widow,n01774384 -tarantula,n01774750 -wolf spider,n01775062 -tick,n01776313 -centipede,n01784675 -black grouse,n01795545 -ptarmigan,n01796340 -ruffed grouse,n01797886 -prairie chicken,n01798484 -peacock,n01806143 -quail,n01806567 -partridge,n01807496 -African grey,n01817953 -macaw,n01818515 -sulphur-crested cockatoo,n01819313 -lorikeet,n01820546 -coucal,n01824575 -bee eater,n01828970 -hornbill,n01829413 -hummingbird,n01833805 -jacamar,n01843065 -toucan,n01843383 -drake,n01847000 -red-breasted merganser,n01855032 -goose,n01855672 -black swan,n01860187 -tusker,n01871265 -echidna,n01872401 -platypus,n01873310 -wallaby,n01877812 -koala,n01882714 -wombat,n01883070 -jellyfish,n01910747 -sea anemone,n01914609 -brain coral,n01917289 -flatworm,n01924916 -nematode,n01930112 -conch,n01943899 -snail,n01944390 -slug,n01945685 -sea slug,n01950731 -chiton,n01955084 -chambered nautilus,n01968897 -Dungeness crab,n01978287 -rock crab,n01978455 -fiddler crab,n01980166 -king crab,n01981276 -American lobster,n01983481 -spiny lobster,n01984695 -crayfish,n01985128 -hermit crab,n01986214 -isopod,n01990800 -white stork,n02002556 -black stork,n02002724 -spoonbill,n02006656 -flamingo,n02007558 -little blue heron,n02009229 -American egret,n02009912 -bittern,n02011460 -crane,n02012849 -limpkin,n02013706 -European gallinule,n02017213 -American coot,n02018207 -bustard,n02018795 -ruddy turnstone,n02025239 -red-backed sandpiper,n02027492 -redshank,n02028035 -dowitcher,n02033041 -oystercatcher,n02037110 -pelican,n02051845 -king penguin,n02056570 -albatross,n02058221 -grey whale,n02066245 -killer whale,n02071294 -dugong,n02074367 -sea lion,n02077923 -Chihuahua,n02085620 -Japanese spaniel,n02085782 -Maltese dog,n02085936 -Pekinese,n02086079 -Shih-Tzu,n02086240 -Blenheim spaniel,n02086646 -papillon,n02086910 -toy terrier,n02087046 -Rhodesian ridgeback,n02087394 -Afghan hound,n02088094 -basset,n02088238 -beagle,n02088364 -bloodhound,n02088466 -bluetick,n02088632 -black-and-tan coonhound,n02089078 -Walker hound,n02089867 -English foxhound,n02089973 -redbone,n02090379 -borzoi,n02090622 -Irish wolfhound,n02090721 -Italian greyhound,n02091032 -whippet,n02091134 -Ibizan hound,n02091244 -Norwegian elkhound,n02091467 -otterhound,n02091635 -Saluki,n02091831 -Scottish deerhound,n02092002 -Weimaraner,n02092339 -Staffordshire bullterrier,n02093256 -American Staffordshire terrier,n02093428 -Bedlington terrier,n02093647 -Border terrier,n02093754 -Kerry blue terrier,n02093859 -Irish terrier,n02093991 -Norfolk terrier,n02094114 -Norwich terrier,n02094258 -Yorkshire terrier,n02094433 -wire-haired fox terrier,n02095314 -Lakeland terrier,n02095570 -Sealyham terrier,n02095889 -Airedale,n02096051 -cairn,n02096177 -Australian terrier,n02096294 -Dandie Dinmont,n02096437 -Boston bull,n02096585 -miniature schnauzer,n02097047 -giant schnauzer,n02097130 -standard schnauzer,n02097209 -Scotch terrier,n02097298 -Tibetan terrier,n02097474 -silky terrier,n02097658 -soft-coated wheaten terrier,n02098105 -West Highland white terrier,n02098286 -Lhasa,n02098413 -flat-coated retriever,n02099267 -curly-coated retriever,n02099429 -golden retriever,n02099601 -Labrador retriever,n02099712 -Chesapeake Bay retriever,n02099849 -German short-haired pointer,n02100236 -vizsla,n02100583 -English setter,n02100735 -Irish setter,n02100877 -Gordon setter,n02101006 -Brittany 
spaniel,n02101388 -clumber,n02101556 -English springer,n02102040 -Welsh springer spaniel,n02102177 -cocker spaniel,n02102318 -Sussex spaniel,n02102480 -Irish water spaniel,n02102973 -kuvasz,n02104029 -schipperke,n02104365 -groenendael,n02105056 -malinois,n02105162 -briard,n02105251 -kelpie,n02105412 -komondor,n02105505 -Old English sheepdog,n02105641 -Shetland sheepdog,n02105855 -collie,n02106030 -Border collie,n02106166 -Bouvier des Flandres,n02106382 -Rottweiler,n02106550 -German shepherd,n02106662 -Doberman,n02107142 -miniature pinscher,n02107312 -Greater Swiss Mountain dog,n02107574 -Bernese mountain dog,n02107683 -Appenzeller,n02107908 -EntleBucher,n02108000 -boxer,n02108089 -bull mastiff,n02108422 -Tibetan mastiff,n02108551 -French bulldog,n02108915 -Great Dane,n02109047 -Saint Bernard,n02109525 -Eskimo dog,n02109961 -malamute,n02110063 -Siberian husky,n02110185 -dalmatian,n02110341 -affenpinscher,n02110627 -basenji,n02110806 -pug,n02110958 -Leonberg,n02111129 -Newfoundland,n02111277 -Great Pyrenees,n02111500 -Samoyed,n02111889 -Pomeranian,n02112018 -chow,n02112137 -keeshond,n02112350 -Brabancon griffon,n02112706 -Pembroke,n02113023 -Cardigan,n02113186 -toy poodle,n02113624 -miniature poodle,n02113712 -standard poodle,n02113799 -Mexican hairless,n02113978 -timber wolf,n02114367 -white wolf,n02114548 -red wolf,n02114712 -coyote,n02114855 -dingo,n02115641 -dhole,n02115913 -African hunting dog,n02116738 -hyena,n02117135 -red fox,n02119022 -kit fox,n02119789 -Arctic fox,n02120079 -grey fox,n02120505 -tabby,n02123045 -tiger cat,n02123159 -Persian cat,n02123394 -Siamese cat,n02123597 -Egyptian cat,n02124075 -cougar,n02125311 -lynx,n02127052 -leopard,n02128385 -snow leopard,n02128757 -jaguar,n02128925 -lion,n02129165 -tiger,n02129604 -cheetah,n02130308 -brown bear,n02132136 -American black bear,n02133161 -ice bear,n02134084 -sloth bear,n02134418 -mongoose,n02137549 -meerkat,n02138441 -tiger beetle,n02165105 -ladybug,n02165456 -ground beetle,n02167151 -long-horned beetle,n02168699 -leaf beetle,n02169497 -dung beetle,n02172182 -rhinoceros beetle,n02174001 -weevil,n02177972 -fly,n02190166 -bee,n02206856 -ant,n02219486 -grasshopper,n02226429 -cricket,n02229544 -walking stick,n02231487 -cockroach,n02233338 -mantis,n02236044 -cicada,n02256656 -leafhopper,n02259212 -lacewing,n02264363 -dragonfly,n02268443 -damselfly,n02268853 -admiral,n02276258 -ringlet,n02277742 -monarch,n02279972 -cabbage butterfly,n02280649 -sulphur butterfly,n02281406 -lycaenid,n02281787 -starfish,n02317335 -sea urchin,n02319095 -sea cucumber,n02321529 -wood rabbit,n02325366 -hare,n02326432 -Angora,n02328150 -hamster,n02342885 -porcupine,n02346627 -fox squirrel,n02356798 -marmot,n02361337 -beaver,n02363005 -guinea pig,n02364673 -sorrel,n02389026 -zebra,n02391049 -hog,n02395406 -wild boar,n02396427 -warthog,n02397096 -hippopotamus,n02398521 -ox,n02403003 -water buffalo,n02408429 -bison,n02410509 -ram,n02412080 -bighorn,n02415577 -ibex,n02417914 -hartebeest,n02422106 -impala,n02422699 -gazelle,n02423022 -Arabian camel,n02437312 -llama,n02437616 -weasel,n02441942 -mink,n02442845 -polecat,n02443114 -black-footed ferret,n02443484 -otter,n02444819 -skunk,n02445715 -badger,n02447366 -armadillo,n02454379 -three-toed sloth,n02457408 -orangutan,n02480495 -gorilla,n02480855 -chimpanzee,n02481823 -gibbon,n02483362 -siamang,n02483708 -guenon,n02484975 -patas,n02486261 -baboon,n02486410 -macaque,n02487347 -langur,n02488291 -colobus,n02488702 -proboscis monkey,n02489166 -marmoset,n02490219 -capuchin,n02492035 -howler monkey,n02492660 
-titi,n02493509 -spider monkey,n02493793 -squirrel monkey,n02494079 -Madagascar cat,n02497673 -indri,n02500267 -Indian elephant,n02504013 -African elephant,n02504458 -lesser panda,n02509815 -giant panda,n02510455 -barracouta,n02514041 -eel,n02526121 -coho,n02536864 -rock beauty,n02606052 -anemone fish,n02607072 -sturgeon,n02640242 -gar,n02641379 -lionfish,n02643566 -puffer,n02655020 -abacus,n02666196 -abaya,n02667093 -academic gown,n02669723 -accordion,n02672831 -acoustic guitar,n02676566 -aircraft carrier,n02687172 -airliner,n02690373 -airship,n02692877 -altar,n02699494 -ambulance,n02701002 -amphibian,n02704792 -analog clock,n02708093 -apiary,n02727426 -apron,n02730930 -ashcan,n02747177 -assault rifle,n02749479 -backpack,n02769748 -bakery,n02776631 -balance beam,n02777292 -balloon,n02782093 -ballpoint,n02783161 -Band Aid,n02786058 -banjo,n02787622 -bannister,n02788148 -barbell,n02790996 -barber chair,n02791124 -barbershop,n02791270 -barn,n02793495 -barometer,n02794156 -barrel,n02795169 -barrow,n02797295 -baseball,n02799071 -basketball,n02802426 -bassinet,n02804414 -bassoon,n02804610 -bathing cap,n02807133 -bath towel,n02808304 -bathtub,n02808440 -beach wagon,n02814533 -beacon,n02814860 -beaker,n02815834 -bearskin,n02817516 -beer bottle,n02823428 -beer glass,n02823750 -bell cote,n02825657 -bib,n02834397 -bicycle-built-for-two,n02835271 -bikini,n02837789 -binder,n02840245 -binoculars,n02841315 -birdhouse,n02843684 -boathouse,n02859443 -bobsled,n02860847 -bolo tie,n02865351 -bonnet,n02869837 -bookcase,n02870880 -bookshop,n02871525 -bottlecap,n02877765 -bow,n02879718 -bow tie,n02883205 -brass,n02892201 -brassiere,n02892767 -breakwater,n02894605 -breastplate,n02895154 -broom,n02906734 -bucket,n02909870 -buckle,n02910353 -bulletproof vest,n02916936 -bullet train,n02917067 -butcher shop,n02927161 -cab,n02930766 -caldron,n02939185 -candle,n02948072 -cannon,n02950826 -canoe,n02951358 -can opener,n02951585 -cardigan,n02963159 -car mirror,n02965783 -carousel,n02966193 -carpenter's kit,n02966687 -carton,n02971356 -car wheel,n02974003 -cash machine,n02977058 -cassette,n02978881 -cassette player,n02979186 -castle,n02980441 -catamaran,n02981792 -CD player,n02988304 -cello,n02992211 -cellular telephone,n02992529 -chain,n02999410 -chainlink fence,n03000134 -chain mail,n03000247 -chain saw,n03000684 -chest,n03014705 -chiffonier,n03016953 -chime,n03017168 -china cabinet,n03018349 -Christmas stocking,n03026506 -church,n03028079 -cinema,n03032252 -cleaver,n03041632 -cliff dwelling,n03042490 -cloak,n03045698 -clog,n03047690 -cocktail shaker,n03062245 -coffee mug,n03063599 -coffeepot,n03063689 -coil,n03065424 -combination lock,n03075370 -computer keyboard,n03085013 -confectionery,n03089624 -container ship,n03095699 -convertible,n03100240 -corkscrew,n03109150 -cornet,n03110669 -cowboy boot,n03124043 -cowboy hat,n03124170 -cradle,n03125729 -construction crane,n03126707 -crash helmet,n03127747 -crate,n03127925 -crib,n03131574 -Crock Pot,n03133878 -croquet ball,n03134739 -crutch,n03141823 -cuirass,n03146219 -dam,n03160309 -desk,n03179701 -desktop computer,n03180011 -dial telephone,n03187595 -diaper,n03188531 -digital clock,n03196217 -digital watch,n03197337 -dining table,n03201208 -dishrag,n03207743 -dishwasher,n03207941 -disk brake,n03208938 -dock,n03216828 -dogsled,n03218198 -dome,n03220513 -doormat,n03223299 -drilling platform,n03240683 -drum,n03249569 -drumstick,n03250847 -dumbbell,n03255030 -Dutch oven,n03259280 -electric fan,n03271574 -electric guitar,n03272010 -electric locomotive,n03272562 -entertainment 
center,n03290653 -envelope,n03291819 -espresso maker,n03297495 -face powder,n03314780 -feather boa,n03325584 -file,n03337140 -fireboat,n03344393 -fire engine,n03345487 -fire screen,n03347037 -flagpole,n03355925 -flute,n03372029 -folding chair,n03376595 -football helmet,n03379051 -forklift,n03384352 -fountain,n03388043 -fountain pen,n03388183 -four-poster,n03388549 -freight car,n03393912 -French horn,n03394916 -frying pan,n03400231 -fur coat,n03404251 -garbage truck,n03417042 -gasmask,n03424325 -gas pump,n03425413 -goblet,n03443371 -go-kart,n03444034 -golf ball,n03445777 -golfcart,n03445924 -gondola,n03447447 -gong,n03447721 -gown,n03450230 -grand piano,n03452741 -greenhouse,n03457902 -grille,n03459775 -grocery store,n03461385 -guillotine,n03467068 -hair slide,n03476684 -hair spray,n03476991 -half track,n03478589 -hammer,n03481172 -hamper,n03482405 -hand blower,n03483316 -hand-held computer,n03485407 -handkerchief,n03485794 -hard disc,n03492542 -harmonica,n03494278 -harp,n03495258 -harvester,n03496892 -hatchet,n03498962 -holster,n03527444 -home theater,n03529860 -honeycomb,n03530642 -hook,n03532672 -hoopskirt,n03534580 -horizontal bar,n03535780 -horse cart,n03538406 -hourglass,n03544143 -iPod,n03584254 -iron,n03584829 -jack-o'-lantern,n03590841 -jean,n03594734 -jeep,n03594945 -jersey,n03595614 -jigsaw puzzle,n03598930 -jinrikisha,n03599486 -joystick,n03602883 -kimono,n03617480 -knee pad,n03623198 -knot,n03627232 -lab coat,n03630383 -ladle,n03633091 -lampshade,n03637318 -laptop,n03642806 -lawn mower,n03649909 -lens cap,n03657121 -letter opener,n03658185 -library,n03661043 -lifeboat,n03662601 -lighter,n03666591 -limousine,n03670208 -liner,n03673027 -lipstick,n03676483 -Loafer,n03680355 -lotion,n03690938 -loudspeaker,n03691459 -loupe,n03692522 -lumbermill,n03697007 -magnetic compass,n03706229 -mailbag,n03709823 -mailbox,n03710193 -maillot,n03710637 -tank suit,n03710721 -manhole cover,n03717622 -maraca,n03720891 -marimba,n03721384 -mask,n03724870 -matchstick,n03729826 -maypole,n03733131 -maze,n03733281 -measuring cup,n03733805 -medicine chest,n03742115 -megalith,n03743016 -microphone,n03759954 -microwave,n03761084 -military uniform,n03763968 -milk can,n03764736 -minibus,n03769881 -miniskirt,n03770439 -minivan,n03770679 -missile,n03773504 -mitten,n03775071 -mixing bowl,n03775546 -mobile home,n03776460 -Model T,n03777568 -modem,n03777754 -monastery,n03781244 -monitor,n03782006 -moped,n03785016 -mortar,n03786901 -mortarboard,n03787032 -mosque,n03788195 -mosquito net,n03788365 -motor scooter,n03791053 -mountain bike,n03792782 -mountain tent,n03792972 -mouse,n03793489 -mousetrap,n03794056 -moving van,n03796401 -muzzle,n03803284 -nail,n03804744 -neck brace,n03814639 -necklace,n03814906 -nipple,n03825788 -notebook,n03832673 -obelisk,n03837869 -oboe,n03838899 -ocarina,n03840681 -odometer,n03841143 -oil filter,n03843555 -organ,n03854065 -oscilloscope,n03857828 -overskirt,n03866082 -oxcart,n03868242 -oxygen mask,n03868863 -packet,n03871628 -paddle,n03873416 -paddlewheel,n03874293 -padlock,n03874599 -paintbrush,n03876231 -pajama,n03877472 -palace,n03877845 -panpipe,n03884397 -paper towel,n03887697 -parachute,n03888257 -parallel bars,n03888605 -park bench,n03891251 -parking meter,n03891332 -passenger car,n03895866 -patio,n03899768 -pay-phone,n03902125 -pedestal,n03903868 -pencil box,n03908618 -pencil sharpener,n03908714 -perfume,n03916031 -Petri dish,n03920288 -photocopier,n03924679 -pick,n03929660 -pickelhaube,n03929855 -picket fence,n03930313 -pickup,n03930630 -pier,n03933933 -piggy bank,n03935335 -pill 
bottle,n03937543 -pillow,n03938244 -ping-pong ball,n03942813 -pinwheel,n03944341 -pirate,n03947888 -pitcher,n03950228 -plane,n03954731 -planetarium,n03956157 -plastic bag,n03958227 -plate rack,n03961711 -plow,n03967562 -plunger,n03970156 -Polaroid camera,n03976467 -pole,n03976657 -police van,n03977966 -poncho,n03980874 -pool table,n03982430 -pop bottle,n03983396 -pot,n03991062 -potter's wheel,n03992509 -power drill,n03995372 -prayer rug,n03998194 -printer,n04004767 -prison,n04005630 -projectile,n04008634 -projector,n04009552 -puck,n04019541 -punching bag,n04023962 -purse,n04026417 -quill,n04033901 -quilt,n04033995 -racer,n04037443 -racket,n04039381 -radiator,n04040759 -radio,n04041544 -radio telescope,n04044716 -rain barrel,n04049303 -recreational vehicle,n04065272 -reel,n04067472 -reflex camera,n04069434 -refrigerator,n04070727 -remote control,n04074963 -restaurant,n04081281 -revolver,n04086273 -rifle,n04090263 -rocking chair,n04099969 -rotisserie,n04111531 -rubber eraser,n04116512 -rugby ball,n04118538 -rule,n04118776 -running shoe,n04120489 -safe,n04125021 -safety pin,n04127249 -saltshaker,n04131690 -sandal,n04133789 -sarong,n04136333 -sax,n04141076 -scabbard,n04141327 -scale,n04141975 -school bus,n04146614 -schooner,n04147183 -scoreboard,n04149813 -screen,n04152593 -screw,n04153751 -screwdriver,n04154565 -seat belt,n04162706 -sewing machine,n04179913 -shield,n04192698 -shoe shop,n04200800 -shoji,n04201297 -shopping basket,n04204238 -shopping cart,n04204347 -shovel,n04208210 -shower cap,n04209133 -shower curtain,n04209239 -ski,n04228054 -ski mask,n04229816 -sleeping bag,n04235860 -slide rule,n04238763 -sliding door,n04239074 -slot,n04243546 -snorkel,n04251144 -snowmobile,n04252077 -snowplow,n04252225 -soap dispenser,n04254120 -soccer ball,n04254680 -sock,n04254777 -solar dish,n04258138 -sombrero,n04259630 -soup bowl,n04263257 -space bar,n04264628 -space heater,n04265275 -space shuttle,n04266014 -spatula,n04270147 -speedboat,n04273569 -spider web,n04275548 -spindle,n04277352 -sports car,n04285008 -spotlight,n04286575 -stage,n04296562 -steam locomotive,n04310018 -steel arch bridge,n04311004 -steel drum,n04311174 -stethoscope,n04317175 -stole,n04325704 -stone wall,n04326547 -stopwatch,n04328186 -stove,n04330267 -strainer,n04332243 -streetcar,n04335435 -stretcher,n04336792 -studio couch,n04344873 -stupa,n04346328 -submarine,n04347754 -suit,n04350905 -sundial,n04355338 -sunglass,n04355933 -sunglasses,n04356056 -sunscreen,n04357314 -suspension bridge,n04366367 -swab,n04367480 -sweatshirt,n04370456 -swimming trunks,n04371430 -swing,n04371774 -switch,n04372370 -syringe,n04376876 -table lamp,n04380533 -tank,n04389033 -tape player,n04392985 -teapot,n04398044 -teddy,n04399382 -television,n04404412 -tennis ball,n04409515 -thatch,n04417672 -theater curtain,n04418357 -thimble,n04423845 -thresher,n04428191 -throne,n04429376 -tile roof,n04435653 -toaster,n04442312 -tobacco shop,n04443257 -toilet seat,n04447861 -torch,n04456115 -totem pole,n04458633 -tow truck,n04461696 -toyshop,n04462240 -tractor,n04465501 -trailer truck,n04467665 -tray,n04476259 -trench coat,n04479046 -tricycle,n04482393 -trimaran,n04483307 -tripod,n04485082 -triumphal arch,n04486054 -trolleybus,n04487081 -trombone,n04487394 -tub,n04493381 -turnstile,n04501370 -typewriter keyboard,n04505470 -umbrella,n04507155 -unicycle,n04509417 -upright,n04515003 -vacuum,n04517823 -vase,n04522168 -vault,n04523525 -velvet,n04525038 -vending machine,n04525305 -vestment,n04532106 -viaduct,n04532670 -violin,n04536866 -volleyball,n04540053 -waffle 
iron,n04542943 -wall clock,n04548280 -wallet,n04548362 -wardrobe,n04550184 -warplane,n04552348 -washbasin,n04553703 -washer,n04554684 -water bottle,n04557648 -water jug,n04560804 -water tower,n04562935 -whiskey jug,n04579145 -whistle,n04579432 -wig,n04584207 -window screen,n04589890 -window shade,n04590129 -Windsor tie,n04591157 -wine bottle,n04591713 -wing,n04592741 -wok,n04596742 -wooden spoon,n04597913 -wool,n04599235 -worm fence,n04604644 -wreck,n04606251 -yawl,n04612504 -yurt,n04613696 -web site,n06359193 -comic book,n06596364 -crossword puzzle,n06785654 -street sign,n06794110 -traffic light,n06874185 -book jacket,n07248320 -menu,n07565083 -plate,n07579787 -guacamole,n07583066 -consomme,n07584110 -hot pot,n07590611 -trifle,n07613480 -ice cream,n07614500 -ice lolly,n07615774 -French loaf,n07684084 -bagel,n07693725 -pretzel,n07695742 -cheeseburger,n07697313 -hotdog,n07697537 -mashed potato,n07711569 -head cabbage,n07714571 -broccoli,n07714990 -cauliflower,n07715103 -zucchini,n07716358 -spaghetti squash,n07716906 -acorn squash,n07717410 -butternut squash,n07717556 -cucumber,n07718472 -artichoke,n07718747 -bell pepper,n07720875 -cardoon,n07730033 -mushroom,n07734744 -Granny Smith,n07742313 -strawberry,n07745940 -orange,n07747607 -lemon,n07749582 -fig,n07753113 -pineapple,n07753275 -banana,n07753592 -jackfruit,n07754684 -custard apple,n07760859 -pomegranate,n07768694 -hay,n07802026 -carbonara,n07831146 -chocolate sauce,n07836838 -dough,n07860988 -meat loaf,n07871810 -pizza,n07873807 -potpie,n07875152 -burrito,n07880968 -red wine,n07892512 -espresso,n07920052 -cup,n07930864 -eggnog,n07932039 -alp,n09193705 -bubble,n09229709 -cliff,n09246464 -coral reef,n09256479 -geyser,n09288635 -lakeside,n09332890 -promontory,n09399592 -sandbar,n09421951 -seashore,n09428293 -valley,n09468604 -volcano,n09472597 -ballplayer,n09835506 -groom,n10148035 -scuba diver,n10565667 -rapeseed,n11879895 -daisy,n11939491 -yellow lady's slipper,n12057211 -corn,n12144580 -acorn,n12267677 -hip,n12620546 -buckeye,n12768682 -coral fungus,n12985857 -agaric,n12998815 -gyromitra,n13037406 -stinkhorn,n13040303 -earthstar,n13044778 -hen-of-the-woods,n13052670 -bolete,n13054560 -ear,n13133613 -toilet tissue,n15075141 +tench,n01440764 +goldfish,n01443537 +great white shark,n01484850 +tiger shark,n01491361 +hammerhead,n01494475 +electric ray,n01496331 +stingray,n01498041 +cock,n01514668 +hen,n01514859 +ostrich,n01518878 +brambling,n01530575 +goldfinch,n01531178 +house finch,n01532829 +junco,n01534433 +indigo bunting,n01537544 +robin,n01558993 +bulbul,n01560419 +jay,n01580077 +magpie,n01582220 +chickadee,n01592084 +water ouzel,n01601694 +kite,n01608432 +bald eagle,n01614925 +vulture,n01616318 +great grey owl,n01622779 +European fire salamander,n01629819 +common newt,n01630670 +eft,n01631663 +spotted salamander,n01632458 +axolotl,n01632777 +bullfrog,n01641577 +tree frog,n01644373 +tailed frog,n01644900 +loggerhead,n01664065 +leatherback turtle,n01665541 +mud turtle,n01667114 +terrapin,n01667778 +box turtle,n01669191 +banded gecko,n01675722 +common iguana,n01677366 +American chameleon,n01682714 +whiptail,n01685808 +agama,n01687978 +frilled lizard,n01688243 +alligator lizard,n01689811 +Gila monster,n01692333 +green lizard,n01693334 +African chameleon,n01694178 +Komodo dragon,n01695060 +African crocodile,n01697457 +American alligator,n01698640 +triceratops,n01704323 +thunder snake,n01728572 +ringneck snake,n01728920 +hognose snake,n01729322 +green snake,n01729977 +king snake,n01734418 +garter snake,n01735189 +water snake,n01737021 +vine 
snake,n01739381 +night snake,n01740131 +boa constrictor,n01742172 +rock python,n01744401 +Indian cobra,n01748264 +green mamba,n01749939 +sea snake,n01751748 +horned viper,n01753488 +diamondback,n01755581 +sidewinder,n01756291 +trilobite,n01768244 +harvestman,n01770081 +scorpion,n01770393 +black and gold garden spider,n01773157 +barn spider,n01773549 +garden spider,n01773797 +black widow,n01774384 +tarantula,n01774750 +wolf spider,n01775062 +tick,n01776313 +centipede,n01784675 +black grouse,n01795545 +ptarmigan,n01796340 +ruffed grouse,n01797886 +prairie chicken,n01798484 +peacock,n01806143 +quail,n01806567 +partridge,n01807496 +African grey,n01817953 +macaw,n01818515 +sulphur-crested cockatoo,n01819313 +lorikeet,n01820546 +coucal,n01824575 +bee eater,n01828970 +hornbill,n01829413 +hummingbird,n01833805 +jacamar,n01843065 +toucan,n01843383 +drake,n01847000 +red-breasted merganser,n01855032 +goose,n01855672 +black swan,n01860187 +tusker,n01871265 +echidna,n01872401 +platypus,n01873310 +wallaby,n01877812 +koala,n01882714 +wombat,n01883070 +jellyfish,n01910747 +sea anemone,n01914609 +brain coral,n01917289 +flatworm,n01924916 +nematode,n01930112 +conch,n01943899 +snail,n01944390 +slug,n01945685 +sea slug,n01950731 +chiton,n01955084 +chambered nautilus,n01968897 +Dungeness crab,n01978287 +rock crab,n01978455 +fiddler crab,n01980166 +king crab,n01981276 +American lobster,n01983481 +spiny lobster,n01984695 +crayfish,n01985128 +hermit crab,n01986214 +isopod,n01990800 +white stork,n02002556 +black stork,n02002724 +spoonbill,n02006656 +flamingo,n02007558 +little blue heron,n02009229 +American egret,n02009912 +bittern,n02011460 +crane,n02012849 +limpkin,n02013706 +European gallinule,n02017213 +American coot,n02018207 +bustard,n02018795 +ruddy turnstone,n02025239 +red-backed sandpiper,n02027492 +redshank,n02028035 +dowitcher,n02033041 +oystercatcher,n02037110 +pelican,n02051845 +king penguin,n02056570 +albatross,n02058221 +grey whale,n02066245 +killer whale,n02071294 +dugong,n02074367 +sea lion,n02077923 +Chihuahua,n02085620 +Japanese spaniel,n02085782 +Maltese dog,n02085936 +Pekinese,n02086079 +Shih-Tzu,n02086240 +Blenheim spaniel,n02086646 +papillon,n02086910 +toy terrier,n02087046 +Rhodesian ridgeback,n02087394 +Afghan hound,n02088094 +basset,n02088238 +beagle,n02088364 +bloodhound,n02088466 +bluetick,n02088632 +black-and-tan coonhound,n02089078 +Walker hound,n02089867 +English foxhound,n02089973 +redbone,n02090379 +borzoi,n02090622 +Irish wolfhound,n02090721 +Italian greyhound,n02091032 +whippet,n02091134 +Ibizan hound,n02091244 +Norwegian elkhound,n02091467 +otterhound,n02091635 +Saluki,n02091831 +Scottish deerhound,n02092002 +Weimaraner,n02092339 +Staffordshire bullterrier,n02093256 +American Staffordshire terrier,n02093428 +Bedlington terrier,n02093647 +Border terrier,n02093754 +Kerry blue terrier,n02093859 +Irish terrier,n02093991 +Norfolk terrier,n02094114 +Norwich terrier,n02094258 +Yorkshire terrier,n02094433 +wire-haired fox terrier,n02095314 +Lakeland terrier,n02095570 +Sealyham terrier,n02095889 +Airedale,n02096051 +cairn,n02096177 +Australian terrier,n02096294 +Dandie Dinmont,n02096437 +Boston bull,n02096585 +miniature schnauzer,n02097047 +giant schnauzer,n02097130 +standard schnauzer,n02097209 +Scotch terrier,n02097298 +Tibetan terrier,n02097474 +silky terrier,n02097658 +soft-coated wheaten terrier,n02098105 +West Highland white terrier,n02098286 +Lhasa,n02098413 +flat-coated retriever,n02099267 +curly-coated retriever,n02099429 +golden retriever,n02099601 +Labrador retriever,n02099712 
+Chesapeake Bay retriever,n02099849 +German short-haired pointer,n02100236 +vizsla,n02100583 +English setter,n02100735 +Irish setter,n02100877 +Gordon setter,n02101006 +Brittany spaniel,n02101388 +clumber,n02101556 +English springer,n02102040 +Welsh springer spaniel,n02102177 +cocker spaniel,n02102318 +Sussex spaniel,n02102480 +Irish water spaniel,n02102973 +kuvasz,n02104029 +schipperke,n02104365 +groenendael,n02105056 +malinois,n02105162 +briard,n02105251 +kelpie,n02105412 +komondor,n02105505 +Old English sheepdog,n02105641 +Shetland sheepdog,n02105855 +collie,n02106030 +Border collie,n02106166 +Bouvier des Flandres,n02106382 +Rottweiler,n02106550 +German shepherd,n02106662 +Doberman,n02107142 +miniature pinscher,n02107312 +Greater Swiss Mountain dog,n02107574 +Bernese mountain dog,n02107683 +Appenzeller,n02107908 +EntleBucher,n02108000 +boxer,n02108089 +bull mastiff,n02108422 +Tibetan mastiff,n02108551 +French bulldog,n02108915 +Great Dane,n02109047 +Saint Bernard,n02109525 +Eskimo dog,n02109961 +malamute,n02110063 +Siberian husky,n02110185 +dalmatian,n02110341 +affenpinscher,n02110627 +basenji,n02110806 +pug,n02110958 +Leonberg,n02111129 +Newfoundland,n02111277 +Great Pyrenees,n02111500 +Samoyed,n02111889 +Pomeranian,n02112018 +chow,n02112137 +keeshond,n02112350 +Brabancon griffon,n02112706 +Pembroke,n02113023 +Cardigan,n02113186 +toy poodle,n02113624 +miniature poodle,n02113712 +standard poodle,n02113799 +Mexican hairless,n02113978 +timber wolf,n02114367 +white wolf,n02114548 +red wolf,n02114712 +coyote,n02114855 +dingo,n02115641 +dhole,n02115913 +African hunting dog,n02116738 +hyena,n02117135 +red fox,n02119022 +kit fox,n02119789 +Arctic fox,n02120079 +grey fox,n02120505 +tabby,n02123045 +tiger cat,n02123159 +Persian cat,n02123394 +Siamese cat,n02123597 +Egyptian cat,n02124075 +cougar,n02125311 +lynx,n02127052 +leopard,n02128385 +snow leopard,n02128757 +jaguar,n02128925 +lion,n02129165 +tiger,n02129604 +cheetah,n02130308 +brown bear,n02132136 +American black bear,n02133161 +ice bear,n02134084 +sloth bear,n02134418 +mongoose,n02137549 +meerkat,n02138441 +tiger beetle,n02165105 +ladybug,n02165456 +ground beetle,n02167151 +long-horned beetle,n02168699 +leaf beetle,n02169497 +dung beetle,n02172182 +rhinoceros beetle,n02174001 +weevil,n02177972 +fly,n02190166 +bee,n02206856 +ant,n02219486 +grasshopper,n02226429 +cricket,n02229544 +walking stick,n02231487 +cockroach,n02233338 +mantis,n02236044 +cicada,n02256656 +leafhopper,n02259212 +lacewing,n02264363 +dragonfly,n02268443 +damselfly,n02268853 +admiral,n02276258 +ringlet,n02277742 +monarch,n02279972 +cabbage butterfly,n02280649 +sulphur butterfly,n02281406 +lycaenid,n02281787 +starfish,n02317335 +sea urchin,n02319095 +sea cucumber,n02321529 +wood rabbit,n02325366 +hare,n02326432 +Angora,n02328150 +hamster,n02342885 +porcupine,n02346627 +fox squirrel,n02356798 +marmot,n02361337 +beaver,n02363005 +guinea pig,n02364673 +sorrel,n02389026 +zebra,n02391049 +hog,n02395406 +wild boar,n02396427 +warthog,n02397096 +hippopotamus,n02398521 +ox,n02403003 +water buffalo,n02408429 +bison,n02410509 +ram,n02412080 +bighorn,n02415577 +ibex,n02417914 +hartebeest,n02422106 +impala,n02422699 +gazelle,n02423022 +Arabian camel,n02437312 +llama,n02437616 +weasel,n02441942 +mink,n02442845 +polecat,n02443114 +black-footed ferret,n02443484 +otter,n02444819 +skunk,n02445715 +badger,n02447366 +armadillo,n02454379 +three-toed sloth,n02457408 +orangutan,n02480495 +gorilla,n02480855 +chimpanzee,n02481823 +gibbon,n02483362 +siamang,n02483708 +guenon,n02484975 
+patas,n02486261 +baboon,n02486410 +macaque,n02487347 +langur,n02488291 +colobus,n02488702 +proboscis monkey,n02489166 +marmoset,n02490219 +capuchin,n02492035 +howler monkey,n02492660 +titi,n02493509 +spider monkey,n02493793 +squirrel monkey,n02494079 +Madagascar cat,n02497673 +indri,n02500267 +Indian elephant,n02504013 +African elephant,n02504458 +lesser panda,n02509815 +giant panda,n02510455 +barracouta,n02514041 +eel,n02526121 +coho,n02536864 +rock beauty,n02606052 +anemone fish,n02607072 +sturgeon,n02640242 +gar,n02641379 +lionfish,n02643566 +puffer,n02655020 +abacus,n02666196 +abaya,n02667093 +academic gown,n02669723 +accordion,n02672831 +acoustic guitar,n02676566 +aircraft carrier,n02687172 +airliner,n02690373 +airship,n02692877 +altar,n02699494 +ambulance,n02701002 +amphibian,n02704792 +analog clock,n02708093 +apiary,n02727426 +apron,n02730930 +ashcan,n02747177 +assault rifle,n02749479 +backpack,n02769748 +bakery,n02776631 +balance beam,n02777292 +balloon,n02782093 +ballpoint,n02783161 +Band Aid,n02786058 +banjo,n02787622 +bannister,n02788148 +barbell,n02790996 +barber chair,n02791124 +barbershop,n02791270 +barn,n02793495 +barometer,n02794156 +barrel,n02795169 +barrow,n02797295 +baseball,n02799071 +basketball,n02802426 +bassinet,n02804414 +bassoon,n02804610 +bathing cap,n02807133 +bath towel,n02808304 +bathtub,n02808440 +beach wagon,n02814533 +beacon,n02814860 +beaker,n02815834 +bearskin,n02817516 +beer bottle,n02823428 +beer glass,n02823750 +bell cote,n02825657 +bib,n02834397 +bicycle-built-for-two,n02835271 +bikini,n02837789 +binder,n02840245 +binoculars,n02841315 +birdhouse,n02843684 +boathouse,n02859443 +bobsled,n02860847 +bolo tie,n02865351 +bonnet,n02869837 +bookcase,n02870880 +bookshop,n02871525 +bottlecap,n02877765 +bow,n02879718 +bow tie,n02883205 +brass,n02892201 +brassiere,n02892767 +breakwater,n02894605 +breastplate,n02895154 +broom,n02906734 +bucket,n02909870 +buckle,n02910353 +bulletproof vest,n02916936 +bullet train,n02917067 +butcher shop,n02927161 +cab,n02930766 +caldron,n02939185 +candle,n02948072 +cannon,n02950826 +canoe,n02951358 +can opener,n02951585 +cardigan,n02963159 +car mirror,n02965783 +carousel,n02966193 +carpenter's kit,n02966687 +carton,n02971356 +car wheel,n02974003 +cash machine,n02977058 +cassette,n02978881 +cassette player,n02979186 +castle,n02980441 +catamaran,n02981792 +CD player,n02988304 +cello,n02992211 +cellular telephone,n02992529 +chain,n02999410 +chainlink fence,n03000134 +chain mail,n03000247 +chain saw,n03000684 +chest,n03014705 +chiffonier,n03016953 +chime,n03017168 +china cabinet,n03018349 +Christmas stocking,n03026506 +church,n03028079 +cinema,n03032252 +cleaver,n03041632 +cliff dwelling,n03042490 +cloak,n03045698 +clog,n03047690 +cocktail shaker,n03062245 +coffee mug,n03063599 +coffeepot,n03063689 +coil,n03065424 +combination lock,n03075370 +computer keyboard,n03085013 +confectionery,n03089624 +container ship,n03095699 +convertible,n03100240 +corkscrew,n03109150 +cornet,n03110669 +cowboy boot,n03124043 +cowboy hat,n03124170 +cradle,n03125729 +construction crane,n03126707 +crash helmet,n03127747 +crate,n03127925 +crib,n03131574 +Crock Pot,n03133878 +croquet ball,n03134739 +crutch,n03141823 +cuirass,n03146219 +dam,n03160309 +desk,n03179701 +desktop computer,n03180011 +dial telephone,n03187595 +diaper,n03188531 +digital clock,n03196217 +digital watch,n03197337 +dining table,n03201208 +dishrag,n03207743 +dishwasher,n03207941 +disk brake,n03208938 +dock,n03216828 +dogsled,n03218198 +dome,n03220513 +doormat,n03223299 +drilling 
platform,n03240683 +drum,n03249569 +drumstick,n03250847 +dumbbell,n03255030 +Dutch oven,n03259280 +electric fan,n03271574 +electric guitar,n03272010 +electric locomotive,n03272562 +entertainment center,n03290653 +envelope,n03291819 +espresso maker,n03297495 +face powder,n03314780 +feather boa,n03325584 +file,n03337140 +fireboat,n03344393 +fire engine,n03345487 +fire screen,n03347037 +flagpole,n03355925 +flute,n03372029 +folding chair,n03376595 +football helmet,n03379051 +forklift,n03384352 +fountain,n03388043 +fountain pen,n03388183 +four-poster,n03388549 +freight car,n03393912 +French horn,n03394916 +frying pan,n03400231 +fur coat,n03404251 +garbage truck,n03417042 +gasmask,n03424325 +gas pump,n03425413 +goblet,n03443371 +go-kart,n03444034 +golf ball,n03445777 +golfcart,n03445924 +gondola,n03447447 +gong,n03447721 +gown,n03450230 +grand piano,n03452741 +greenhouse,n03457902 +grille,n03459775 +grocery store,n03461385 +guillotine,n03467068 +hair slide,n03476684 +hair spray,n03476991 +half track,n03478589 +hammer,n03481172 +hamper,n03482405 +hand blower,n03483316 +hand-held computer,n03485407 +handkerchief,n03485794 +hard disc,n03492542 +harmonica,n03494278 +harp,n03495258 +harvester,n03496892 +hatchet,n03498962 +holster,n03527444 +home theater,n03529860 +honeycomb,n03530642 +hook,n03532672 +hoopskirt,n03534580 +horizontal bar,n03535780 +horse cart,n03538406 +hourglass,n03544143 +iPod,n03584254 +iron,n03584829 +jack-o'-lantern,n03590841 +jean,n03594734 +jeep,n03594945 +jersey,n03595614 +jigsaw puzzle,n03598930 +jinrikisha,n03599486 +joystick,n03602883 +kimono,n03617480 +knee pad,n03623198 +knot,n03627232 +lab coat,n03630383 +ladle,n03633091 +lampshade,n03637318 +laptop,n03642806 +lawn mower,n03649909 +lens cap,n03657121 +letter opener,n03658185 +library,n03661043 +lifeboat,n03662601 +lighter,n03666591 +limousine,n03670208 +liner,n03673027 +lipstick,n03676483 +Loafer,n03680355 +lotion,n03690938 +loudspeaker,n03691459 +loupe,n03692522 +lumbermill,n03697007 +magnetic compass,n03706229 +mailbag,n03709823 +mailbox,n03710193 +maillot,n03710637 +tank suit,n03710721 +manhole cover,n03717622 +maraca,n03720891 +marimba,n03721384 +mask,n03724870 +matchstick,n03729826 +maypole,n03733131 +maze,n03733281 +measuring cup,n03733805 +medicine chest,n03742115 +megalith,n03743016 +microphone,n03759954 +microwave,n03761084 +military uniform,n03763968 +milk can,n03764736 +minibus,n03769881 +miniskirt,n03770439 +minivan,n03770679 +missile,n03773504 +mitten,n03775071 +mixing bowl,n03775546 +mobile home,n03776460 +Model T,n03777568 +modem,n03777754 +monastery,n03781244 +monitor,n03782006 +moped,n03785016 +mortar,n03786901 +mortarboard,n03787032 +mosque,n03788195 +mosquito net,n03788365 +motor scooter,n03791053 +mountain bike,n03792782 +mountain tent,n03792972 +mouse,n03793489 +mousetrap,n03794056 +moving van,n03796401 +muzzle,n03803284 +nail,n03804744 +neck brace,n03814639 +necklace,n03814906 +nipple,n03825788 +notebook,n03832673 +obelisk,n03837869 +oboe,n03838899 +ocarina,n03840681 +odometer,n03841143 +oil filter,n03843555 +organ,n03854065 +oscilloscope,n03857828 +overskirt,n03866082 +oxcart,n03868242 +oxygen mask,n03868863 +packet,n03871628 +paddle,n03873416 +paddlewheel,n03874293 +padlock,n03874599 +paintbrush,n03876231 +pajama,n03877472 +palace,n03877845 +panpipe,n03884397 +paper towel,n03887697 +parachute,n03888257 +parallel bars,n03888605 +park bench,n03891251 +parking meter,n03891332 +passenger car,n03895866 +patio,n03899768 +pay-phone,n03902125 +pedestal,n03903868 +pencil box,n03908618 +pencil 
sharpener,n03908714 +perfume,n03916031 +Petri dish,n03920288 +photocopier,n03924679 +pick,n03929660 +pickelhaube,n03929855 +picket fence,n03930313 +pickup,n03930630 +pier,n03933933 +piggy bank,n03935335 +pill bottle,n03937543 +pillow,n03938244 +ping-pong ball,n03942813 +pinwheel,n03944341 +pirate,n03947888 +pitcher,n03950228 +plane,n03954731 +planetarium,n03956157 +plastic bag,n03958227 +plate rack,n03961711 +plow,n03967562 +plunger,n03970156 +Polaroid camera,n03976467 +pole,n03976657 +police van,n03977966 +poncho,n03980874 +pool table,n03982430 +pop bottle,n03983396 +pot,n03991062 +potter's wheel,n03992509 +power drill,n03995372 +prayer rug,n03998194 +printer,n04004767 +prison,n04005630 +projectile,n04008634 +projector,n04009552 +puck,n04019541 +punching bag,n04023962 +purse,n04026417 +quill,n04033901 +quilt,n04033995 +racer,n04037443 +racket,n04039381 +radiator,n04040759 +radio,n04041544 +radio telescope,n04044716 +rain barrel,n04049303 +recreational vehicle,n04065272 +reel,n04067472 +reflex camera,n04069434 +refrigerator,n04070727 +remote control,n04074963 +restaurant,n04081281 +revolver,n04086273 +rifle,n04090263 +rocking chair,n04099969 +rotisserie,n04111531 +rubber eraser,n04116512 +rugby ball,n04118538 +rule,n04118776 +running shoe,n04120489 +safe,n04125021 +safety pin,n04127249 +saltshaker,n04131690 +sandal,n04133789 +sarong,n04136333 +sax,n04141076 +scabbard,n04141327 +scale,n04141975 +school bus,n04146614 +schooner,n04147183 +scoreboard,n04149813 +screen,n04152593 +screw,n04153751 +screwdriver,n04154565 +seat belt,n04162706 +sewing machine,n04179913 +shield,n04192698 +shoe shop,n04200800 +shoji,n04201297 +shopping basket,n04204238 +shopping cart,n04204347 +shovel,n04208210 +shower cap,n04209133 +shower curtain,n04209239 +ski,n04228054 +ski mask,n04229816 +sleeping bag,n04235860 +slide rule,n04238763 +sliding door,n04239074 +slot,n04243546 +snorkel,n04251144 +snowmobile,n04252077 +snowplow,n04252225 +soap dispenser,n04254120 +soccer ball,n04254680 +sock,n04254777 +solar dish,n04258138 +sombrero,n04259630 +soup bowl,n04263257 +space bar,n04264628 +space heater,n04265275 +space shuttle,n04266014 +spatula,n04270147 +speedboat,n04273569 +spider web,n04275548 +spindle,n04277352 +sports car,n04285008 +spotlight,n04286575 +stage,n04296562 +steam locomotive,n04310018 +steel arch bridge,n04311004 +steel drum,n04311174 +stethoscope,n04317175 +stole,n04325704 +stone wall,n04326547 +stopwatch,n04328186 +stove,n04330267 +strainer,n04332243 +streetcar,n04335435 +stretcher,n04336792 +studio couch,n04344873 +stupa,n04346328 +submarine,n04347754 +suit,n04350905 +sundial,n04355338 +sunglass,n04355933 +sunglasses,n04356056 +sunscreen,n04357314 +suspension bridge,n04366367 +swab,n04367480 +sweatshirt,n04370456 +swimming trunks,n04371430 +swing,n04371774 +switch,n04372370 +syringe,n04376876 +table lamp,n04380533 +tank,n04389033 +tape player,n04392985 +teapot,n04398044 +teddy,n04399382 +television,n04404412 +tennis ball,n04409515 +thatch,n04417672 +theater curtain,n04418357 +thimble,n04423845 +thresher,n04428191 +throne,n04429376 +tile roof,n04435653 +toaster,n04442312 +tobacco shop,n04443257 +toilet seat,n04447861 +torch,n04456115 +totem pole,n04458633 +tow truck,n04461696 +toyshop,n04462240 +tractor,n04465501 +trailer truck,n04467665 +tray,n04476259 +trench coat,n04479046 +tricycle,n04482393 +trimaran,n04483307 +tripod,n04485082 +triumphal arch,n04486054 +trolleybus,n04487081 +trombone,n04487394 +tub,n04493381 +turnstile,n04501370 +typewriter keyboard,n04505470 +umbrella,n04507155 +unicycle,n04509417 
+upright,n04515003 +vacuum,n04517823 +vase,n04522168 +vault,n04523525 +velvet,n04525038 +vending machine,n04525305 +vestment,n04532106 +viaduct,n04532670 +violin,n04536866 +volleyball,n04540053 +waffle iron,n04542943 +wall clock,n04548280 +wallet,n04548362 +wardrobe,n04550184 +warplane,n04552348 +washbasin,n04553703 +washer,n04554684 +water bottle,n04557648 +water jug,n04560804 +water tower,n04562935 +whiskey jug,n04579145 +whistle,n04579432 +wig,n04584207 +window screen,n04589890 +window shade,n04590129 +Windsor tie,n04591157 +wine bottle,n04591713 +wing,n04592741 +wok,n04596742 +wooden spoon,n04597913 +wool,n04599235 +worm fence,n04604644 +wreck,n04606251 +yawl,n04612504 +yurt,n04613696 +web site,n06359193 +comic book,n06596364 +crossword puzzle,n06785654 +street sign,n06794110 +traffic light,n06874185 +book jacket,n07248320 +menu,n07565083 +plate,n07579787 +guacamole,n07583066 +consomme,n07584110 +hot pot,n07590611 +trifle,n07613480 +ice cream,n07614500 +ice lolly,n07615774 +French loaf,n07684084 +bagel,n07693725 +pretzel,n07695742 +cheeseburger,n07697313 +hotdog,n07697537 +mashed potato,n07711569 +head cabbage,n07714571 +broccoli,n07714990 +cauliflower,n07715103 +zucchini,n07716358 +spaghetti squash,n07716906 +acorn squash,n07717410 +butternut squash,n07717556 +cucumber,n07718472 +artichoke,n07718747 +bell pepper,n07720875 +cardoon,n07730033 +mushroom,n07734744 +Granny Smith,n07742313 +strawberry,n07745940 +orange,n07747607 +lemon,n07749582 +fig,n07753113 +pineapple,n07753275 +banana,n07753592 +jackfruit,n07754684 +custard apple,n07760859 +pomegranate,n07768694 +hay,n07802026 +carbonara,n07831146 +chocolate sauce,n07836838 +dough,n07860988 +meat loaf,n07871810 +pizza,n07873807 +potpie,n07875152 +burrito,n07880968 +red wine,n07892512 +espresso,n07920052 +cup,n07930864 +eggnog,n07932039 +alp,n09193705 +bubble,n09229709 +cliff,n09246464 +coral reef,n09256479 +geyser,n09288635 +lakeside,n09332890 +promontory,n09399592 +sandbar,n09421951 +seashore,n09428293 +valley,n09468604 +volcano,n09472597 +ballplayer,n09835506 +groom,n10148035 +scuba diver,n10565667 +rapeseed,n11879895 +daisy,n11939491 +yellow lady's slipper,n12057211 +corn,n12144580 +acorn,n12267677 +hip,n12620546 +buckeye,n12768682 +coral fungus,n12985857 +agaric,n12998815 +gyromitra,n13037406 +stinkhorn,n13040303 +earthstar,n13044778 +hen-of-the-woods,n13052670 +bolete,n13054560 +ear,n13133613 +toilet tissue,n15075141 From ba299e8f52737448f1cbb9fffd25bf3d6e7ceef7 Mon Sep 17 00:00:00 2001 From: kbozas Date: Thu, 2 Dec 2021 17:39:17 +0000 Subject: [PATCH 14/23] support amp training for video classification models (#5023) * support amp training for video classification models * Removed extra empty line and used scaler instead of args.amp as function argument * apply formating to pass lint tests Co-authored-by: Konstantinos Bozas --- references/video_classification/train.py | 54 +++++++++--------------- 1 file changed, 19 insertions(+), 35 deletions(-) diff --git a/references/video_classification/train.py b/references/video_classification/train.py index 1f363f57dad..0cd88e8022f 100644 --- a/references/video_classification/train.py +++ b/references/video_classification/train.py @@ -12,19 +12,13 @@ from torch.utils.data.dataloader import default_collate from torchvision.datasets.samplers import DistributedSampler, UniformClipSampler, RandomClipSampler -try: - from apex import amp -except ImportError: - amp = None - - try: from torchvision.prototype import models as PM except ImportError: PM = None -def train_one_epoch(model, criterion, 
optimizer, lr_scheduler, data_loader, device, epoch, print_freq, apex=False): +def train_one_epoch(model, criterion, optimizer, lr_scheduler, data_loader, device, epoch, print_freq, scaler=None): model.train() metric_logger = utils.MetricLogger(delimiter=" ") metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}")) @@ -34,16 +28,19 @@ def train_one_epoch(model, criterion, optimizer, lr_scheduler, data_loader, devi for video, target in metric_logger.log_every(data_loader, print_freq, header): start_time = time.time() video, target = video.to(device), target.to(device) - output = model(video) - loss = criterion(output, target) + with torch.cuda.amp.autocast(enabled=scaler is not None): + output = model(video) + loss = criterion(output, target) optimizer.zero_grad() - if apex: - with amp.scale_loss(loss, optimizer) as scaled_loss: - scaled_loss.backward() + + if scaler is not None: + scaler.scale(loss).backward() + scaler.step(optimizer) + scaler.update() else: loss.backward() - optimizer.step() + optimizer.step() acc1, acc5 = utils.accuracy(output, target, topk=(1, 5)) batch_size = video.shape[0] @@ -101,11 +98,6 @@ def collate_fn(batch): def main(args): if args.weights and PM is None: raise ImportError("The prototype module couldn't be found. Please install the latest torchvision nightly.") - if args.apex and amp is None: - raise RuntimeError( - "Failed to import apex. Please install apex from https://www.github.com/nvidia/apex " - "to enable mixed-precision training." - ) if args.output_dir: utils.mkdir(args.output_dir) @@ -224,9 +216,7 @@ def main(args): lr = args.lr * args.world_size optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=args.momentum, weight_decay=args.weight_decay) - - if args.apex: - model, optimizer = amp.initialize(model, optimizer, opt_level=args.apex_opt_level) + scaler = torch.cuda.amp.GradScaler() if args.amp else None # convert scheduler to be per iteration, not per epoch, for warmup that lasts # between different epochs @@ -267,6 +257,8 @@ def main(args): optimizer.load_state_dict(checkpoint["optimizer"]) lr_scheduler.load_state_dict(checkpoint["lr_scheduler"]) args.start_epoch = checkpoint["epoch"] + 1 + if args.amp: + scaler.load_state_dict(checkpoint["scaler"]) if args.test_only: evaluate(model, criterion, data_loader_test, device=device) @@ -277,9 +269,7 @@ def main(args): for epoch in range(args.start_epoch, args.epochs): if args.distributed: train_sampler.set_epoch(epoch) - train_one_epoch( - model, criterion, optimizer, lr_scheduler, data_loader, device, epoch, args.print_freq, args.apex - ) + train_one_epoch(model, criterion, optimizer, lr_scheduler, data_loader, device, epoch, args.print_freq, scaler) evaluate(model, criterion, data_loader_test, device=device) if args.output_dir: checkpoint = { @@ -289,6 +279,8 @@ def main(args): "epoch": epoch, "args": args, } + if args.amp: + checkpoint["scaler"] = scaler.state_dict() utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth")) utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth")) @@ -363,17 +355,6 @@ def parse_args(): action="store_true", ) - # Mixed precision training parameters - parser.add_argument("--apex", action="store_true", help="Use apex for mixed precision training") - parser.add_argument( - "--apex-opt-level", - default="O1", - type=str, - help="For apex mixed precision training" - "O0 for FP32 training, O1 for mixed precision training." 
- "For further detail, see https://github.com/NVIDIA/apex/tree/master/examples/imagenet", - ) - # distributed training parameters parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes") parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training") @@ -381,6 +362,9 @@ def parse_args(): # Prototype models only parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load") + # Mixed precision training parameters + parser.add_argument("--amp", action="store_true", help="Use torch.cuda.amp for mixed precision training") + args = parser.parse_args() return args From dcf5dc8747b94f70d1a3f1557df440fb567af95d Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Fri, 3 Dec 2021 16:22:33 +0100 Subject: [PATCH 15/23] fix `fromfile` on windows (#4980) * exclude windows from mmap * add comment for windows * appease mypy --- .../prototype/datasets/utils/_internal.py | 36 ++++++++++++------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/torchvision/prototype/datasets/utils/_internal.py b/torchvision/prototype/datasets/utils/_internal.py index 3db10183f68..c4b91b4a14b 100644 --- a/torchvision/prototype/datasets/utils/_internal.py +++ b/torchvision/prototype/datasets/utils/_internal.py @@ -8,6 +8,7 @@ import os.path import pathlib import pickle +import platform from typing import BinaryIO from typing import ( Sequence, @@ -260,6 +261,11 @@ def _make_sharded_datapipe(root: str, dataset_size: int) -> IterDataPipe: return dp +def _read_mutable_buffer_fallback(file: BinaryIO, count: int, item_size: int) -> bytearray: + # A plain file.read() will give a read-only bytes, so we convert it to bytearray to make it mutable + return bytearray(file.read(-1 if count == -1 else count * item_size)) + + def fromfile( file: BinaryIO, *, @@ -293,20 +299,24 @@ def fromfile( item_size = (torch.finfo if dtype.is_floating_point else torch.iinfo)(dtype).bits // 8 np_dtype = byte_order + char + str(item_size) - # PyTorch does not support tensors with underlying read-only memory. In case - # - the file has a .fileno(), - # - the file was opened for updating, i.e. 'r+b' or 'w+b', - # - the file is seekable - # we can avoid copying the data for performance. Otherwise we fall back to simply .read() the data and copy it to - # a mutable location afterwards. buffer: Union[memoryview, bytearray] - try: - buffer = memoryview(mmap.mmap(file.fileno(), 0))[file.tell() :] - # Reading from the memoryview does not advance the file cursor, so we have to do it manually. - file.seek(*(0, io.SEEK_END) if count == -1 else (count * item_size, io.SEEK_CUR)) - except (PermissionError, io.UnsupportedOperation): - # A plain file.read() will give a read-only bytes, so we convert it to bytearray to make it mutable - buffer = bytearray(file.read(-1 if count == -1 else count * item_size)) + if platform.system() != "Windows": + # PyTorch does not support tensors with underlying read-only memory. In case + # - the file has a .fileno(), + # - the file was opened for updating, i.e. 'r+b' or 'w+b', + # - the file is seekable + # we can avoid copying the data for performance. Otherwise we fall back to simply .read() the data and copy it + # to a mutable location afterwards. + try: + buffer = memoryview(mmap.mmap(file.fileno(), 0))[file.tell() :] + # Reading from the memoryview does not advance the file cursor, so we have to do it manually. 
+ file.seek(*(0, io.SEEK_END) if count == -1 else (count * item_size, io.SEEK_CUR)) + except (PermissionError, io.UnsupportedOperation): + buffer = _read_mutable_buffer_fallback(file, count, item_size) + else: + # On Windows just trying to call mmap.mmap() on a file that does not support it, may corrupt the internal state + # so no data can be read afterwards. Thus, we simply ignore the possible speed-up. + buffer = _read_mutable_buffer_fallback(file, count, item_size) # We cannot use torch.frombuffer() directly, since it only supports the native byte order of the system. Thus, we # read the data with np.frombuffer() with the correct byte order and convert it to the native one with the From a3d87195d4dac60e7bbed711a5cc3f8db3378d64 Mon Sep 17 00:00:00 2001 From: Prabhat Roy Date: Sun, 5 Dec 2021 12:15:47 +0000 Subject: [PATCH 16/23] Skip test_roi_align_aligned (#5029) --- test/test_onnx.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_onnx.py b/test/test_onnx.py index 830699ab5ee..b49e0e24c8e 100644 --- a/test/test_onnx.py +++ b/test/test_onnx.py @@ -141,6 +141,7 @@ def test_roi_align(self): model = ops.RoIAlign((5, 5), 1, -1) self.run_model(model, [(x, single_roi)]) + @pytest.mark.skip(reason="ROIAlign with aligned=True is not supported in ONNX, but will be supported in opset 16.") def test_roi_align_aligned(self): x = torch.rand(1, 1, 10, 10, dtype=torch.float32) single_roi = torch.tensor([[0, 1.5, 1.5, 3, 3]], dtype=torch.float32) From 9b57de6c6a60082cf54a414ead5b5e68ccb44010 Mon Sep 17 00:00:00 2001 From: Prabhat Roy Date: Sun, 5 Dec 2021 22:02:28 +0000 Subject: [PATCH 17/23] Add missing space in error message (#5031) --- torchvision/io/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torchvision/io/__init__.py b/torchvision/io/__init__.py index 382e06fb4f2..8ee832f43d7 100644 --- a/torchvision/io/__init__.py +++ b/torchvision/io/__init__.py @@ -110,7 +110,7 @@ def __init__(self, path: str, stream: str = "video", num_threads: int = 0) -> No raise RuntimeError( "Not compiled with video_reader support, " + "to enable video_reader support, please install " - + "ffmpeg (version 4.2 is currently supported) and" + + "ffmpeg (version 4.2 is currently supported) and " + "build torchvision from source." 
) self._c = torch.classes.torchvision.Video(path, stream, num_threads) From 01ffb3ae9f9cc3a445bcaabf787251a05e67c7a7 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Mon, 6 Dec 2021 11:26:27 +0000 Subject: [PATCH 18/23] Add RAFT model for optical flow (#5022) --- docs/source/models.rst | 15 +- .../ModelTester.test_raft_large_expect.pkl | Bin 0 -> 46827 bytes .../ModelTester.test_raft_small_expect.pkl | Bin 0 -> 46827 bytes test/test_models.py | 35 +- torchvision/models/__init__.py | 1 + torchvision/models/optical_flow/__init__.py | 1 + torchvision/models/optical_flow/_utils.py | 45 ++ torchvision/models/optical_flow/raft.py | 659 ++++++++++++++++++ 8 files changed, 752 insertions(+), 4 deletions(-) create mode 100644 test/expect/ModelTester.test_raft_large_expect.pkl create mode 100644 test/expect/ModelTester.test_raft_small_expect.pkl create mode 100644 torchvision/models/optical_flow/__init__.py create mode 100644 torchvision/models/optical_flow/_utils.py create mode 100644 torchvision/models/optical_flow/raft.py diff --git a/docs/source/models.rst b/docs/source/models.rst index dbb1400e11e..ee8503a0857 100644 --- a/docs/source/models.rst +++ b/docs/source/models.rst @@ -7,7 +7,7 @@ Models and pre-trained weights The ``torchvision.models`` subpackage contains definitions of models for addressing different tasks, including: image classification, pixelwise semantic segmentation, object detection, instance segmentation, person -keypoint detection and video classification. +keypoint detection, video classification, and optical flow. .. note :: Backward compatibility is guaranteed for loading a serialized @@ -798,3 +798,16 @@ ResNet (2+1)D :template: function.rst torchvision.models.video.r2plus1d_18 + +Optical flow +============ + +Raft +---- + +.. autosummary:: + :toctree: generated/ + :template: function.rst + + torchvision.models.optical_flow.raft_large + torchvision.models.optical_flow.raft_small diff --git a/test/expect/ModelTester.test_raft_large_expect.pkl b/test/expect/ModelTester.test_raft_large_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..a6aad285f59c5159ce39affbddd71a4d49c72ba2 GIT binary patch literal 46827 zcmZ^~byQSe{4R`2*%Bfu0xBRPC<>yW?B`AZED*QN;@*Y93G-ebG#+?fMp?Ebe`P*3OG?!r~$*Y4T9&2?9wwL3R$*t5-b{8F(yFL3X_3S%-$$SMx=ZqSS6yfgw)0X^S zKluQeVgK7FpI|(9rrQ7SbN_!J_=vpm>x3`5DtsVg>I+4^z3|!T3)Pan_;+D1z9fmD zc`y~ZRVhecDnfd2DrWnMP_Qu-8xD$K+n$Jo#fhl+lYmYAlhD~C2_{bxp*%JTU-u?q z^~MbRmy(Xdt?5YWpMiMC3}lVSfZ^&)BsOMXc3c{=(^F8Bl8X4uR9MNS!gQ?&>x@KL zJXM4xKBf4bQG^_nASeUIeH3RJ>10!>PNe(6A}NuKA^C z9#)Eg9;L$dB{+Av1k(@R!;d4y*s`Yu`;uE>{-hbs7A;sbwHfbhn(ldW zVX`j^$JK9Qi{5pF%4Olkrc5{~-o!iUbsP#BhHrg`L3gDFDuRc@smE|!%o~o9rNhzW zGz?eY_+qo$Uc}D!hY9z^0t!v2SV`UhGfBAX^dstr6k( zp;R~u-}j)^0xm{LI9Ha8xWGgN9z2`_9ju^~GHy*FNmtw9EsvrN1= zkco4(8Q6R;1Ae|~FgY(m=8ZJC3i!N}OM~)(G@LgT;b~tHa{HIUXIUwn`juh*pJFUW zDP9$qVcWA(oS9sPOUGKEFs}t+=UVYj;E=)a7RbDB#Rj`pJlHNA$KAy2{Ojl^cN6DE zWkK$Uz?lp-hgVU)IP<_Cxx#bSTctu?oQgY>MHqcUgnWMy6vwCHr_dL|1RS0x z;_0L$v<*qZtF1{06?Elgl88plWE9NHgvVL|hvOOWcgnz`W0@%Zl>v>246J>biJ|L7 zC>FH!<##G{@28^3A`QL;spy<5Lfm{2Zp|si&Hg1&k1NH^5hd8*Sc)~1N}zh97*j1u zf!1$WbgmgsmbK#Iz!pq8+=}7DTT$83jMA%Z@c42A&zE1vO#zR9Rar22bQ3;*v*28H z10KJ#5EL>Dd-kbgW2y!oM+`@J?-6jHHv$t%hGNnFVQ_Z$!@{#Zs0iJQLBsvwHO~)* zIezdLG&f%A2ZMzo7;H=Zf1A^mR2&fSI$kZpog)G^{;Aj{Ny0w?pFR5$@kS{LclHRJ zDM^BbeiHNqd}jB{M9h&4Tse^ei=mltc$9%jgReujFar*2GGTpJ1ha@#jP^`L;M-JW zxrxwUFC8}pZDreu@W`eNxz++MEk&5CS^_VDOJ<1zR_jXeqpA#EcUv$ZtpzS?nqe5; z0&Tq(d`}j9C#VH`N47z4T_*a)XW{R}ENpyp9lgS@Lr>-=^1fu@99_o{(_v_|*TC%D 
zp;&flC`QT;M?ilKjM5s0w130!%5*Q@@x2(e!ViTXd|`Us7p>FxBIW2_997#3g&!h_ zbW_n*o(jM26lBd##qX+A^xi2#(S=m}Ihc&DB}vfsOhW3sM6B+agtQ-tXb?Q8pU{^s z&BU|h3>;Pxp3^l09XtcGo@Su`^h|tNo`KuhX;>a8@aL)shZ9rb-JS{`!S_=>iVzi- zifyy*!FNXqDkhcUL|8GFx|U#*X9;xXl)!ydDW;dSqT8$-#_r)+h!}Srxp!~CH!=%Fw=)o!B4Bjo2CmG^z=0W>cyVSZG8br|?yWiw-&KQ` z%rGSE)j&hTa9s8AL&r2bO!TqOz+J(k z{~gGH9A#kSwG50?&A>GQXa5sv=oE5s$o_QvJS)QZGlKtr7PwiTf~ONoQQ%pI_s2`H zcu5(u1pZ7pUW^S!rBIq)ihlE2q3qI%+GQ;m_p}9RCCylHz8SVrE!a4{6{en9knhNX zhQOPSDK}9!JqtPY*P-Bl9VG&{!Y>X*-|i9ctQ?NiBO~y|bvRTW4M(qzq1ZckI37Rq z!-?ZQ2*~ll{j9wh>+X+%Wqz16+Ygs-?M3%K5!#lgVs3Q`Dwd>Tn6(HlTZBGc5ljW$ znK=r1)tZPRtwdbaOU4{=5_UBu!u)9>Qoba@WkDt`R;A<3>2&lr%fu^pA=jT}z`ai< z+{`oaI3Nvi0yk>~%r8h%5!;c9x)CC%)`;MzEW#=GQjD5b3insVc)7L=Ht$L>S6Yl? z?~7r$tpo!+nz7Zh8PZ$d@H43mD>YkTHMbcnb~U4ENehMk5o)>5+Q-7B5$Gy4Q_{^*>Dimw+~}{OehR`97OEiP#hi-iV4d@g{%)o0}sMn z#rK%u_DY2OMA_0@9$0I;JUhw`4$Xjy}*-f!1{@9J^ zyl&h`k)ivk-B{G#g(FQ}n0uuY(LvqNzL0=P?=In8LlUNrNPt;<0v6dOVAb~|P{$>h zl>cPjBYv`B{eG|tix#$RPBT0A?JK*pyM-zBYGPK?s}cXE5>tz+v2Sn{YQw6qtfCSf z*J@yNq8eXxyU`|KvV4{d{hQeh^>xBAryHC9>qerg43#|={Iww!HB-{iGgpMYerX7O zErNN42#bWQzGiv|Jq!-Q_){q2rX2*UIfP4#Ls6j-3aeFzk(m>M>WzUIp&Wq4ErB@w zUkJAL3W5EqAYAVV#_rE?DBmB8GIkO7`d&hyR~&x1#^a^YMO>I22b1G6bk(2>jYi$D zsOiSK*lrB*mZ8?PPB=Qs(D4}wxX_V+Z(#|zy*&=DotJRnL;?66Pc{vzW2p znM+nPyV}2vi5tGLwLMyx-|VkU{JN28#8jcprv@?is*zJ!g#|U$c+pq|wNq7C?pTc? z0gu(+x*^%#4Zo!_bTpzH2@AT>&!ZbIySnh>vw+Dj5q`>upz&S=ufu5ws1h)X5nHaQ+b<+D&c$UF#H?NB^(4#mvmV5q$a!X6Hu zYZq+Sbi-jqH<}*{_yi~5YH=K%yi0)p<^)7ujKiqomtfqKfN65yS^k|*EGhgGv-_`= zW!(M7KKL}T;`c4g;mdb+<#!Df>T2-GzY;<3Dj}y)joFS>c)GqCKmMzM-qtRh59mU_ zE!`NgSB6xD<{2nM3odqJV_+8+3$;oD7vXfJP|vLs>bXP_=w2!Y*rs9jnpE^Xd=UD7 z4k08v6rWT>VW)Wr$NwGxJwJ@9r~??Z$sd=;1!KnR5JY$dL-tWHj^_uV%r6AmvGU#iPJK9`_f<;$qZA{8PMuDTVPE|KRFeA_t<9+yQxnF!^FL$Nd`6h~bTA;CWs zXD%Luo;Vb)_J=Tb(LvgYy{XE(gsx?qyog(l%y^QXy>{KI%C$R$8^ z&1HDFTtcO7JOaPO10jj{GAkYtGn!e`lxDVgb~76_{1ems`;C2RX<}LXKC?5un^;R_ zH7ew5P+eLLjjC$IjI4&5R}D_@twCOH6=d!T96i>F%`0W7Tj1=&hEA*$eCM~h3?tUe~O?coF9|^&v-!?V-^W~ia3ZbpAW)x%OUi8dI+kGp%BG|LQ!~s z@!3Jln-+lgPQkc0GYI$o1q)-&Kx`cxfSn@(ace~|ENm~JQ$8MjXU9UVB_2}EczCS1 z2ra?4^;P3>P|*6#O)}K(*oC2j7Yu$ULn|!1(7SgRG6l>&hsR>`@dPv+O2o#FL^zpV zLa*y_D4L&$nKJR%IIoet{rr_>HZ(Im%|@pB_&w9ZduIHym9^BgvOw(`%rvY*iCrZI znN>rvs0y>4t1&gRN*I?`LPoV4a~)*po}LVioghPpddpC{z^5~D-7xI#LN2A@=8!a8 zcq>AW7eY<OvqgwFLe+1;Kc85VpJx#*_2$SX_J_A&cVCSbstAo;V!39*g|)I2;R% z!w^du+H}4P^Qjxzy}IBW+>P#vF8IsHP}Mm>Z_^S{`u7q}<_g|^EMCa7GyQ{Lm)0b6h){@V$u{N*dW8<#y(LrI{VdZDB*Jt5G+v3X-}?{O+p8zq{3V zJ+}%L$yK&8XlxoZ2Sp+p#4UJ6cwdZyq# zFH=#^M3@;W;G>v^#JEEk7=H+-s)QW-E9gncviAZ$3)Y81bH^drxCUX)#r>dV`?2*@ z5Zvnh@m)O_bG8PeP&Eh*7|)e-W2bs}!qCE}$_0xoa06Ci5y~ zI(}J~<}2yZC=FfGc&AJ2RdgvfNtgCd&?Wgw9qPF@8tHZ?(S9i!hG(PEum4GGN{R*z zi^Bc(C{!&S3&R%^pfcV7Plg%d*;PX*RU6@W+Zf1)j6>s-`^p7*E4cbx0e&iLggu((=(pk*T^eAB;Jo4DM)FKw19flqW;n2+v zhehvjd{m9V2Du3I3qFoyuW+1J>chTI8^pZjR9I<|B6D(AWa|g_XT#rL{`5CQ^w) z#;QTm8OZ5YENmE^yP8I1;lBF)aSgK3IGNUmxKMLy|qfm1v8jViT z_;oW1c73AY_Bk5=)W)NB{aDD{9)qxlW1((fh~8O-2pl{H!*7pAXGbyHysDC|98ObAj8tb z)R=S8Vu|;>`I4ZT#gf_1b0w#gZ6xw)vm}F0EtM#iEt0qtDpBEb73vUpBez4D>R&07 z$|hxcX{k*24=R(jz?G;N9csU~?nn`(abWRHCJ zt<7@5Iyq({H(zqQWw}J}^cuf7YeS54!YKLyr~-{R?l6CYMLiI5{R7 z9@SB}{y7R0Pe!3rBN}%npTx^P(b&Ag2y254q1s~{#J>zN{iFe=&N4!UyAiUj4RFP; znziTNW9=ptOlmSATOI+g6Jf~C3`g4K6KH)N2FthMn7lh2w-*j&2Q*Zeddy$T!U8$gW6=;+ za=68^XY){|{kP6CA$y}_La%j_n3PEppCz`E(I*#4jNjWxd|$4Txb(4;xJ^{1d5e_k z&Q}%s`AdcJQ1BD$O6CFEe6Nr#bSq_! 
z?Zs^2h$5CWte6c{2#4vK2((-ZLuGLUra43)|6e#N5P>an;jnxD#`1N50yB>r%eEaI z!ZgahT1xu&WdGjxWtHV0Ec3U{ki5YfiC)}7$;8Nwl3?qFlKfaZN&FHAiJI6}GNV$N zygHR>%Nu1HC3sGypE8-rsL-Ws6|x$xOydPDO?)w$LS%JmiGYhy(`X8~t3%}zb;;dB zhtBy%;nS}ugq@CpM!%Ez_)^f;v1t4!c=gNu(Re*-EYv0$L9$XG=WGp8e#-#kevXCN z7z3oX7+{z~Av@{)fayh)vMQ(h?5WyAcA~zFxyzQba}yu3CFw=%!uCQY>RG`429&V) zz!G-rNhvE(DP%`Q_n7JZ2$TT(N17^icb9NGEwfSwv6xHUmOqT!EcfhmmP~lPLQ>>mAyJ)TE1BKlDET_UM)LXM zd`WBRJjtR^WeVa-G|pCu?vE7svt5~b3wa+Y_;u4<6}n%oL$1a;v^rOZ%C?TC-kv(t z^hcKzTXg6}ybg7QM#0x08aD@>#1xw-Jl++BLE>nr2)Kn@iN>V)W8rYc5cI$Z4uY~cz+~^_Gc(O1rZ41r@#F~b=@A&*CjuE6VL0>i1dh3c<5Fcf7SHU- zIxar9RLW6g8V(JXFuJF zOIJ$zwXc_aJ8VvsPo~nwFH_0&^i(okZcaxRS&+e83#t*0wHY=vXs``E-D6EtX4+8Q z3Tt{LV?$||ZAg8wHF;hS!wnXOaZ^vAC_fB$ggjaCGYr$GgyY)sF!;^tV2A(wU^`QP zv0u0Uu)D*4vyHC5n2A^h+57&mu%t=sd8#(MvR;#&QPF2L>$O;s`dGH;yg7THHJaUM z{v=-D`B!`)v{QWj!e_DD*miNF_7icn>tC_ygg7<8*S|bxi+3c2cBa6E6 zfoX65%<_(XVoN(+`JE#>c&)!14>;n^^{PC$x~VJQGjJQ9aL<*KZh3Qz<3Qf{ygIpcl)mY2$et zVlg(f?xz)fbh4(8ldY-wi4Bdnv8L?pVHi{%j)L|ul=Tin>E1BtAq>v05pbOm28Yi- z*v#`iP*TzZnSKAU1Al(9-mUFy=4=_bYPGZNXGgQIoyKfY&xve`$7nWUoeq1xax|MV zV>qRqC&iL_b2iFutxT;`6DxV@Q%5A zHZjqi&rGb;$ZU(gu&YBpv+#i)Jani#FH3acEBo%^zK*Wk(!rCDsMx`OA9UebKUZ^& ztE>6r(W|-1`(=E!cr_nTyMljTx|%O~WXBH)m~@&`8<}SCO1)t0*#o3%( zE6piqsttAdSX0SiYg%q==%vOTyr05e~1UF!XCWf$za# z$Vm%BrqHkY={Hk7{f{Y+kU`G6f2?xOZ-&(}*xB-zxjFn}@s%d*So}D4X2>Kq=$syF z-KWEHx((R9h2z+eu2D?+zap_)|Ci!LGv10d*VKt!7S)O)-oF!fWVMK8s7+iH@t)0( zXk-avzOc~Xk4!1&6Wf~J#CpVjWyQicTr*}XzhJ+EPasb|{rGl%`qp+H(d5aQi3j%> z@5V1YS-~UVz*iR9@gS!)eCA|(zG3|uez9#8e;K!ed(=!Lzs=L=SCK|1%Z~iXPX70YB@ORj*B$?{aG`JG(Ka^O`47A3Z@|9&)n*x| zG+F0!U3SXdl-*ro%yd>7vh_PBv#Dzu#iv!iic$Sdyq&!icU9JjuZfG{W7kv)u(PJ19zxq~O`CG9XwWljVt&^2b&eHT9}`QD zTk%$ttxPduGoo~viIpK6cv`^c-)Q#g)EE{k`7RcnXb=x<>>>Fy@Risp`JH%b^AmBb z^%wE1+)v`p-;J!JwUITNzGR1Ie_%(lzA)Fk&&+Vy2NqWc7p6uj1C#~QwN7!-IYDYe^b1k1PUCBFQ?6}T%N8akYihCWkpx8PK zlDTO?L;Ni$LCu0h<`%@XrqK^;b2=GlMY}J}qcug=2lBqxr%K=z5*NIQKBD5xnTtzA)r|{>Qew`pFjk`OB77w=(&L4)$u-U-o9{ zA7&fX&c@a1vuo!@u(FY6Eb#jncA)nN7EwQuDJq+=9l83f+5VweW2lTo^6IVlTR^k8 z-=!|`-1gVvoYSAh8i8$M&o#|#$L6o>Q}sKR?fsG6scK@?$)DM6={q)6Md<(F#oO1p z^Xm_{^2yJ)@cMcWUa!83%U<2g)8Fn93WnvpSEB<@YhJ^5-gDw^JDs@PSO>oItv%0W z%lX163zGdXl`OVRr5t8K8yn22+1`TsO%-@#G?iYIS<#}NHk5G0ni>UPE}3Ob?*nZ} z#@U(%4Yi?>c_+a7hGT}{IqEv$&=)+XUhwGH-C@v-48xU@AB+|MWkZksV%hII+1%y7 znXJb@c7I_zd$s*HyLDKfiAIiRci-r<)u!WE&t7BL*Sosx$Z$P2%V7lT82(IbY+5J& z*RM_dU8PLSwpNP`bAE`=AMX;2+T|r527F)v3qP}ceHxkJ%ulSK?juu}_>SEW@~qyb zk?GZM;)gf7^P8J?@>%ISxUu&RE@!r#D=B(#?a7`zYU4WoW7-<7T5ikL<5u$6OLkm2 zU?tD`yN0XhtmZc-SWx|gsr2RORFWAmjSL@}Q&NOESsgPcZA%Lp^v0Te?W{;?nH9a5 zH;)<&tZCa98`@B6O>wiWX^U()+PhB(>**&TAN4<)3d2%izF|T33Ah~ygUo?{?C{`U z?DpqocDUp(t1$b=qI5gioe6)KZ|{FBY}-U;&^??5DGg?q!?jtDwZ^Ph{&;2@Fpjx& z7&4884dRPKYsH=)yT$Hbeu^~yng8(*Y?kLowo%!QKPcPDw@Wrag_jW7a{9p?o73;#M-d)XyC9dLG zlI8qxPX~TF%z@7|UdeYQ*zq6x*7AP~?CH}%JDL}5N1Igb2>Et&)6jwTZMCPP2kj|X z-k17tA1dweCS{Qixy|vW%sOuhN%bX%RBzhsCPz3XN8^9V(xz%T+Ug@mzb)mcq)3j6 zE#&A)N&%)i7GUl00{rM%0NI-b$URwr8TN&6;RV7xq91fKeetV)H=dgNK)rMivIqMi zvw1fvLcB4qID_4>y~!+YXRvwInXDA)EH5^V&9+Hp{^!%!;Luh!KJ_~r68?vEEp1~9 z>VL7oYv0&~)lE!a?FXygc$r7-y37ZSzsjwqUgg`1uJX^R$z0m^GVhWm^L_HMye%S@ z?|&G_U2uu#6kp;4`X})6@_0VPIgxLiYflHp+R@47_SBJXM=B}y6n4{&=Bqf6wweR+ z6mL@3^reDgAIcr!O=V(lvZ(N;9ooKh$J?7~?d7O+lRWhn$x+V`Ihr{`j%+;SNOhGw zRS%Y<`4$D(8e520i3QM?E5v8#{<3L9x6VwDenusgcn*}~Ws*3Yhk z#eV(9YAxH@CA&tZUe>~lwqE6ivRC=a-^u(%Vk&>qmdyY0t9+NtH9prlncv@YiO-rD z&ttSM@y#zU@9x= z^y-9wi;IB625;(@>_e{(deQD~FFLx?ht#fl)9NE~lrve5tlr6yx~m*Xm>jLhk|U>c za-@5@t}*X(Po&*e1ss4|^x)@@=w4$bVKP>1g6 z*TPiqv@p|zAMD%UHWug9%6dg5^R<(%@SHza`R>7&`6;<8{O7l8eEqQ$-goI0t~x%R 
zFJG6$M@A=bvkeJ+rBfnrOufWoV&eEBc>$-LjugAdk*f;`UfjWMX;KgG>BfLIUsU8_%&TfrrN=@O_i*>E~1j(%k1jW&$1uXW7&AyLMEc?m(TE zf)^QkQ&5yQZR_+Vzk6O3pXE(6*9g8d%bRTMy=mbhIeHo(N1MLOlFMCL5{ZR#b2(}$ zlqKm;Il3HJfaoCw7`Cbq^Trh7^z{N1?I^?-;ka~W0S3hFhLhME74?2tu*XkWzxTr9 z@jlpS?F;RgJ=lHz8jJj%%3cjfV?Q^guoqQnY?J*}Hfz6#4aiJq{vUp_vz0CEV%S%< zPWwAsr`yg}pA-1g`IY@W_=7o2O5v0AuJ9rr$FC$L@k=LDxcl6z{FG)gUvu~x*XS9? z(_<6)%qL0QYFRSBrxwq%C71ZUHOYL0-X(4u?m*_r4rEtjPq70WXwqy4>Q`k?tt%Xe z`q)!oPcPE7@}h0eys7V00V7{83fSUJonw7S{k9jmWy?|GS6RC4Cr5+aWJ$lL94$AN zqi^N1lx`+R<#P*gwOjCPWV>vRvnNipeW);@V ze)VrPYEM~% z>`9?+4b4#YrfLC;Vl5wuh2&)~(EVMk|Z(YGqnNu1&2;nXi+1R^nAY+2I@z7`C7QeYy&;s81ox1btc66kaj zV$sglOMbBTCtKLyfr8dHwX#X~zcb@Mm-)V#DZF6hHU9cSDmQh$%w@hN@|`6~{OI}1 z+@MR)lS%@2xg5uh3=?^db`rNc5YMlxCGc%Wllb5|ce1Kx z{FKN4d&WOHJm;tDp7Y03p7S0)&$*Y?D_%LngKi&nBO2^MU;N!kWvT})2zRHfPFjZ0){!B!AuNPOj1HmKhT1f6tTycsY17-sTI9^M>G&pX zIx9PpHZIYk9*e5*(zObGZ7Y#EU0B1uQH6KEDiI>=6H%+Kf?~}X1eKmcpO|ylKQjh$ zDQB_LIR<81&taMDSv+1f5yy^9h26zT*geh^&$moMMe8JCzquLCG)#nK*$ zUD!Mxi-YHM;9{qP-rM!CPDvNHZb~?rin;vwyL?+^K3C*9LMLK70 z;6aXSLMVS=2%RkrrYiy_@74y>j@n>)6&ONU{X^*AL2dFmtWCjOi_)95=tr@@op3FB zZmUf{ZfnsD-71`VSB2s|RVa(9M4@dJoc*g{c)1Dz1yv|YKa2bmF{p2e!5f)#nD;pb zKE`J;;LusztUrq#0tU8=O>p)0WW0KA4DIa`;dIj!GTJ6M{mv9Q3&-Ht4;_ex8X#!2 zE`sjp!M#Ht*=KdpHb@sMjqdV!GxE5LmW20soXab{^0@w5&X4Cx`O5e_zWZkbKl1b? zcii}ryB>bYZ>GKEyO+J-gB2TizUMRk!+IzEcHT)pQrzhKBX??$3f$S}PES-lXz*1} z(hxAvyB5IKM#Y7 zb8y{q4*tn8P*F0$;d$e+;=jr0Z)Xa-xh7CdFhkkI@mOA90;`xY!rlcvEV(fXc3bo? zzlSaY0`(w2S`XiX$6=bzE#6Z(haXSi{BT?z53#t-ub$51`OkBBy(8zQWp8*;(Hp+N z>5(7=6 zvv67vgO72hhSl!BD@;+MGYJt(Cg5G-Wc0JwgX=^+yvWx@uVZ7uzK(%m z)mRvGjuG~F>EixjDW9p5$1CP!^N2?|ys@71f&*f%U6RAs&6aTK#HakY&MV&Y<0~F9 z6NH{4n0IUg43N#hoJQk$*^*(ZBYXox3i`gu^##~#8OvIku=4yFNf zgQ@>q0iTH>B#I6uhu(r$?+T$)=Y#3Gi#9F!qe(9{waL0tlS)5nQi({5%%*5jQim2@ z39dx1-PPzlzX~GnYTOqzHNC46;-giVD`?F#>kLXK$Drc(S;U`=!Exa{VRa0$51fNX zz!?}wOprc$BDSoagdAbrHF5Gpba%(vS7_5;UgW^lV zy2w|39K9%LEnUj{J&^DVQ**dYjhKHH&S?kd)BES}r}BkdFX$P6U-XKr{(Qj?l)T}y zvtIL}_?KKi<0aqt=_wC6?ny^8h3sl^Bejnnk&pi*?DdgI_t8UczCzxvIhtQiD zA+)hQn97CouYW^mxp01Zzi@QaqDX&jy0%^LXKigdkgGTZOZ6Rp?kIc#WX1cJs5i+W#ChC!WQT=jTv6=`6|w%-m-i zq5mlpB#S3wWXvSgUKQ|pGYQ8_O`(`Q8QTWyL1CFL#$VCHS$$nJx{pO!vL2Ss*F}7Y zF7y`M<%c43xn#AJKeoKXYY*q~B}?=8=0BW&_$lRA1Z{1x6g+v$6JC=4oIBAguAlUr zPk;TKU+!+;4bwbm5_cz)LU(G?+DUdF-09*ecdAHnC)tZ0L_)4#E()emUxTUHDwq=f z2^zZzMnQ76tXLIpcefaqD6Hxw5g|n(~{F#^lEh#f&@PvvY`rj ziZ!rWUJbv@O7t@j{CZs#5`tqetThHM+s|O**cfbi6N97!=V9m}Xl~j$Oo}pr-hoM| z^fyMg-6U9uO)%iADc)@`7I--cqn?bx__w1`AsPk0E?op3&_~NYT}15DL%r!(c-H0d zvhSQ<-kQ(vJ}KmLPUi9s``i3WbS`hbpUXv;p7Yp0FZk|^$jvva`rTsxHiSc}Z@j%eEH052$5cJ2Xxv++LvGvFbATqY+yd`3=ss9jEa)>T1Ta7kXeD?~wRVXh$U z8Kfxf-0DVK)pk(g7dP5}#f?;VyHTB{8*TXRCXB`0seWWIEqoJ1)#HOn!#s%2BbeUZ z2_iSMU~*CkCaVrZTHt3y7k?R2m8}t7n_)z_U`2DZu&cLVPPJz?{L6XkQYIC}B-M z;&v3K9gM__9#N32j>fB`2uyc&!or<)aF=(&axHt@(R0Fr2uH-Pa)R+SNAy|th%esr zkT>`~;72!9bE*1$?(^*-S9N*7XW5o>joS*+lkerFb2JpBEtBP>OUo3bhvq0qqmIZ+ zhxJyJKANzDtarFmc9I*-p6f=HMecNOy*oM1a-&gC+-ReLz?~k!v|wKly%K!7$T5f< z<^|EsCqZO!Gl(?S8`1Yq2;X9Mc~l# zI(S^Kh3=v{-1t<3?7p=K^C*C)z?(Y)ZzRH4Kk{upR$j}8*K1+Uv40`v)^h8F><9;UCpVG_#T~8(Gd@ynDzW-G0P>Wmj3;^!K(K6)1YpqWB=%yeo(<-w&cbqk`#P zj}V%>Dwvchmq6>oz$wl3e>XM9Th_(^!QZ*u<$wu^4;MImPhIC|AJx<5fqxN78 zzI>1do7ZCXNk`m|Sc|v}CmhYY z$6pmb;LxMVuQ{*MzP#;!$HlLNH1)*?uEEj)kOLv3<3 zZ&7;4Wh5p1l;1<{7xswHyh)1SG~n!Z7_%rJ;Xe-S+Bc@QaC z1yMv`5N#_7qK88Sf1YDVXXhJIw4))p_B5pW-{UAo@SX9kUlE-MuvOz`CM!hBGBR6a7NEFc!fn^ zU$&sLlMyHzABkpRpU9?-Q5d~B3VXLW!NI^01M=45)46pxAacZozD}_5Sc~Gj4w$m0 zmY0=PaF+jor<^M152jRc<%TL=9bV2uE2{b18aXLDq$us#p&( 
zRzZ6Asho6Gt~)LMyMx-wchJ5G?iBJ*@F0OtmDX;wG}D!u@`9*#U@)mZ3!?VFL8O)? zc=HLNb}kE|4lQ9URxysOD-5aqh7skj5O6tdNS{_3(pMqZ`foL&)pzSKba5?Irqtqt zdL6b}*1_&W9o`8#n^<3i4%Gr|(-!6vdlus3vjQwxmXALJ3egx=h@#3uq<2JMX>cTL zGNZr+O?|r;iHS=ik#{2!%X&tkWZyb$dA}C+ZuVGaxE5#cIiY;3111l2Kzi#MY!17} zck7pPyP1#pkahQYkj_K?BC4FP{9eJ$S|0Lm4FYGrDM;lNKf8J=*I*A@kg*B4-EvaB-tFp$aGG4sDjM81y{YN7~O zxJN@{`9K(d3K%IWNME-pNIRy>OHT(WNM8+8lny_lAbmVRQM#~L zQF`F{6goH2jLeLukV&{1Wk#D(>6)puZGjmbIc-M6M{85f1ua_Vt3@AXX;J3U5%jE4 z7=+B)zt(OX8)+Ew^s*uoUetwj|GEmY4GnT1Gtcv%DV{Vj1Fl#`lQB|JzOGd9x2|;MA!Dh_orzL; zO=GE?*F@3jL{-P4K3OrwH1CJy%=(S%i(rp5$;s3!~~s{ zs8C*l=G=vterSN?Z@Zaf_zz9V9qDMv{#k=0dEfg<44QN#wV`Sf?ff{)lWAKm^X$?s z=hmFCJbwOyrOvUdmL5}rEk$MFmRD!^SXP<@O|N7jlnx&>QR?9}NjfWaf;7x|k~BPfqV&*TV`&PXD3z`-qaHO= z=$Dll9afz}OZH3^yk-hD1e;N#(G=P|Uz-$!Il+auHR#Y}ZGws>Ia_Md2VqXE!9|M{ zqgNtt*a|FZumW3S3#VU;;k;)7o<^+@w)rfD`lUXS5DOK_?p@}RQEY%jVm44RGgVvC z^ryeXW`>I7lj>Q^h{IXydi3@xQHcc*JVont9YHMVTd{UQl#Zj-c5>6Wc%IGxTvrBA{WA;EiA}uYX`S^ot26%Ov`18&HcW%@AS08j=9V;07Cfh1x;Ckk zOvm=JR5dP0W>kAcTD|m(Fva<$;N#FNblTo3Wchy(;=_Lli!EOX5nuiaizH^^=U0Qp zp8A8uf4iHDowp1Ye|wsX!%1@m^+Uw^f5wQzw_1zyZj2IV@39i~I$4XUnpWb|084Rv zlBKw=fi$&i96GPEf&VWX9LyaD*NkxpthT|)eq-_S^*{_dX@HlhhG^<)fME*;;^WSN zP+c~_mD2+e*~OhTwoGR&!(5rq0Y^6a)l8=U-jx|GabcGi&0>WUjfLA!I|~oO2M8KP zJp`Z6gN1^-#zIF&4MEaiAUrCFk*#ssFMD%qv#d2CSQh+nk?hb<7um>#v9jRC7}@-E ziA?Xfj;wLaJE^zyn$&3hU#Z)ki_#rOOl5B>JIeN?wF*H`-wF%a2SI&Kv+!$In{Y+^ zF2uBZExdGZ72JNBi{6Fi;t#_?VuGKAIOvas_(ncNe7enCJjKk#yd;VEXP=emeac## zlR8=~_%=#hb=y)LRADVD1WU29$Oh+#H(gC^5Z7fqu1vPU`{CnowW|$+OU7cP$v{ZQ z4n#<}0nT(W!W6|o*i;ULWD#lXias8naApCAr!&>^>1^N4+3Z*HG&U;7mF0LkGp$E% zY~e>EA#sehkZP$X3^Mysr8?b982)yM;JVsS5H{%xH2Re7*t|tnUAI{#8JREpcx#)i zVa!xnzSSm~_l7W;YUn+wywOy)wyCSkH6}u4q1RC+@lBK_{(DktR0r9kZoh=Pvs#1; zqf|mw&ND&pZHu6D?YEFEGz%R9e+eU|3>7L zh3HW%5j)meiId7K#UtOW#PRE`#N&Id#BrupqU{oiX!6qrj&Wl#$aE~`^s_<0?Qs|t zFb*!-<6!@BG}cWq!i1rQu+KGs2gR_|Q3hDjht|Z>44}$2L|LsH+ve}ami3*(PFl`j zF}IxA81BN1e$8T?9o^ZA(W zD@vvxut(O3rO8qku9eA;M#|=gq|4@;rpT_Gbdsq&HDznnhsq+lzL2VBo|d}JPm>0F z&yrbG8OuV#p9!t3MObsQS*Re54QcmP=(_5&a83S681ng>&~$LH7N_?JYDOTB8isp3e4juf!jGsK}Kh7fFw5Ll&;-A4xEp0y$R4=}=h)j-UBI-R}r zcW0;F7O)N(v)It*GuWrSg(~vc}^7q=^S8zCV2}41f1psCnHi$h&?Q%(PX)+-q-y=$;>i9g{4? 
zCzWPm`T{di|C*WjWUIM&qj8W}>SiX+OEDL7?X1K(_pHRhUnOFdm!)`XC*2Ps4_aU) zy1H45&%V88bIh7pp!HkU#r+L?(W8mg9eKx^cD!cw(M`}*{eQyWe>V%-y2MsTsPX48J*Os|7N@S7uo^}BrGX>h5qVPd(U(W68d{IrZ;JW|4s zeLlg1cbD=0WheNNfn^;3p5$SxOSrcpg^!++$k%*N;(kdfTq7oluauf`{pS2hNJjR+er)$brg?pbri35auR1nIEwbY9YsG4iFiwk4qu5_k}MIs zERcx0BQ3>nA4_o#9Ut17*l^AN*dL!aRHNI(hHZMwR!x0NHTrLuq}MyPxh#RzMaQ%3 zbtqZ(LQ*yH=F);gidF>CRGKP;rXSaWCavPuKE6H|zPHss^s` zAgs34@ef}0e8JKCeD1VK2Z#a<3r(31* zhiB6HfnKTHqcMeV4N2!O8&dfEr^#GBshn?{bdtYbbdp>3E8`B5G9I2<&Q)E@_!P}j ze$TXlciPs#$4@1Ede!mMYwLLYnEQOXNj*<4s^#5JI*F(0_=h=(Uyp zc@ACACVXyMibHE8qB>y`y2?_Vx7Ski{v{EoZ;*)MGl{5?^NvY2eV|(7H_Y+JTjplk z#5P*KVRHNTOnp-m^D&8McUDk-p-nvNk`u?Aa}t>RX&f6v^Jt5>oy_Xj7oF&aRyA z)GOwD4wv!WQK>wC0C~-vL_Vc5m7iRl%HMxU;|=4J`GadIy!X9wo^Mdbm-i~=Pi)J0 zV|5w7cCehEv?<{)f0uHhsev18ujl?PH9SbIp6{z{;5W}V@DBU#@s9DQBLB=o=#%jfl;E7jg{D8jg|Pmzm=#pLL%B9mxvE!R$}mH zi5S%39UDLX9ecX*Ei3Kxo>}<6WhY*}VFoYWu#Dt4Z282UZ13E7_Rpa>rlpm@tn*@7 zvHC97Y3?qjH8qwkDm*QyKRPPN#+M7beijQaXO;^cwaNt{^{nu|`G}wqT*gngFX8FB zW&BB>65f4D2{)Qo&O7ca~bx?q+7@DzpLja zy7&2|kM;aNLl@EfoTE5qh?8hE(n-{^cM=~JI*A*DoW!s$&SJs7QKId?#G4V8;(r~j z#P+EYv6%F>ZI7kcqj8j&EP2QB;@_}$o!_wRU2j>z`M1oMbT)W#6I0vynn@@(zW1mY z_L1_OZC}T+qR+AHMz18MOiExwPbaYj`%Vl0bto6~;|~j_qfQF1ZdVA`-yRiuoGTC( zEiDvY(tBTTaFVxfEaNR1Wqka|~BAv_0 zpZf+R@di~Am)Ml?C6(p8VPqLU>r>9{j+FD@bEQ1lsgx)FE#)P1>bS;{1|B+6dJBnKVj$*-hXK~LUM{!%TqxkTpgJ>?&e2eB; zPacjE*=kEMcebUt%feFhsg#K3WfF0;y!!^W*|VuN?S zV|G_#*%-oPM^qx)dpm}ew(Vk(6vHg2Zs)FDJgc`A1jWHpLH$&oFzfb7p=@!b@aSHV zkh-@*m={+rq?w-NUoMpK4!$S2ZbKP2&MW6*AD8mSeM@=dBKnO>()o*oRPOAQ$gADc zc<$OX-t|@r|K^vWoH>L=t3U!w~SYxDCh5T%J>V?skcKJZ(ehsPtK|5 z^8fDf5fdAD^~-v$t*Yh2zSeQ+=z9M3k(0Qzo3l8aIOCe>Br08;#QN0^V(te=vGa3B zas6;hu~)67n7T~z|JW!mNJOJ>nqLrR1+Of{J7s;@m*st#;vYTMX@D;KM^~3kyspP0 zBz@U1LtVDR%tv@K$X9ruwM>Zo86Y^f4-tB6tPmW$W(pC*eW>1UHxED$bGE`H`CmN#?1qb=OxhKk=QZRUyg&D`v9JrBH6 z&z;O3@CwZaZr=4iH}h}c)2ti#?KAbfqedey5}xw@CeQgqwMHK1_l)1@^^Ct;(a0b8 zH}cLp&Ag#V#p_Oe=97(8Jkqd*TYge;>PyD2M5s8_^u-&M14N@M1I4R7^+h+cfnut! 
zzGxvCAUf?DAU>Pl%JZ6A`MPO8`R4bnTzcpSFPQa%hj_K|A)i|L=v{r;t`B{fZ3jIj z3nHw(=&{mlJ$8GwF6$Gf%fih91gD+u!esXV!Or*}VaRr0;qs_|1Y71O{5=pTZ2FVI zlXyDs_I?jH{I#2J>b{qIJ=x3qJlM_kw`A~G^%lNsN;BWSvYA)UYvwl=xA2e)&3vVq zif{R<;=8l%^MTLm_|n_;{0EI+x~kph#w+i05C3|8|1D|ecOySRIVWRfja*s$l*8^B zAK3XB&u^lfl<-C#T-CyRHGJZeja2;Q9Toqp-%NED%{=hKC!Ut7;=a=dimLGgMAz#B zM3ce#qS+LEas5wyF=F}v@rbRy=y$P|`waQP?bLtp;oraUK@WfMj!i%K51UrrOZz9c z-O-l?)#$M*i7vapN{{s$ug4Z1@5d&s@5}1G5ucU>3F)JJh0fst!o>9Df_P+^U_LTP z2zQz%90>Fi)(zat=Z5X&i5K_qNsIS!ZI`{gs5OIE$@lP`t1@`IOU-=aJ{A8j^%D=O zZ{{{>&HSiB#d~dU=8L~J^RZ+1QJ(0aH#J^b*_z5MLoJ^b72J$x};OByn` zcMajPr#N9_=Ds3dH?tIT)$5PKS8*Zk=F+L z-RD8a>UsaIPr2HeXMEz=XFOwUBfk*T$b*&>k20R|3fWU`x#JTz(`ezj?V9-urs5yG zRs8aHp@qs)jaFVF8Axl^y75d zoU1E^$^Je`}pdMdwJ&@8C=^t zgL`yV@^PwrvHqG9cbj^O&a-#n~mJBeJ?Xd##o0Wqs#vGk;J<;}8CRTR%2*N{3NQ(O4J4)i_h@1Q^HCf3JKxHG4{7DW)4ucM4SMXrQC+t6Zy)AMy=$-P z>oO~|e(aftF7<-y%f{pc2r7HI(5`N&5Px~OF!+49(4)XZ$n3UI_@!_cB5v*Fn+x{v zAPe&8=)F97t60WJA=>D-oqdGws48?iRX`R<~kHtG;V$3>qj(mx6o$(c1{bA zr}+Fwr=Ewm)boQb4cwxroPqx?4hrA3!DmFO{Z zjebmfx*nTNyctZ`ICj-zu0eV%C_GTGDh(3Uwk;L%LV|_R*?xjlZ=R4F87L(FS|tP> z&)}QMqestI@{eyac+~Sfd~NqVys;hO6u+0VpC9>`w<uOEW^OXQg`c|J z%;PEkrtYri$Bi5Ksk#T;{8$4&^sJt%do*z82Mzq_A>vY}r+oeKMsB_8Isehx$nTNH ze6BU}jz1dt0@9rNtB?EujTaQ2pSe2Wzy9(k?y1?#ONKV{Llc|1t#p8BxIth1T{TcV zw^m;qICp^f;KcxOrIx;UcZt3zT~9H6SR2<^@RPSaY2~Rpt=wb553bnsgMTG`MI5M+ zX5V=%h3te>Qu$7r?Z%}#sWH-sYf@>%t?g0=A%uloTg#3buVZyX!q~2gjm$VNgnbFz z#H`0{WQ%s#V&foN_~ea;TedBFOYG1;*cL_I?NA+Khkar8h`2Eo$97J|lwtPRyV)LV ztL>>q(E-<-9pGc?ittotXzX^zB)JPj7Z((ix}a053*NtXp`2P{xQ;VM;&fw-PBX@a zJ;wNBYJySiP4W4l3D#~gL<~28)j+t$8;ZHyf{l*>oCYeKUj3R~#h+Ys|M8DP+4%Ap-XYr);sAs$qRL_-5D zsp>GYR>zOa?O`-Z1DF464{hZsw)WF0R`R-nNeG+EZKs%C<0*EmcO@H|af&tm*)GMt zT&d2eGO5HqPAd98k`|lM{z=(=X@pL_)N1ltcB#)sc5(0)w!LyKQx4w1awn{3gHkrK z^kJKr{!}}xyJZX2TPg0?+Tx#7J2+po!z&Lvv|ne3qw5`@{ly+fs2*I;*&f~cl*x-sC(JpX5Fbx;2Trk$x6&bf(G5)&?2B}=2b-@I6z9x8o#}xN_nh?)S zC|}(KMH@}9Ey4sXGbo?
YL7(wVySiLsH!as(1Q)Y;_?`VCzuLdd`)R9VcA^RQG zapr_N*1Kw8>>hP&zNwD-8Vvn;OOb9a{>M>B^AjR}^4tOB9$KPOk?7wOc`+prUeW?Rl z9EnR#j!^oz;_oA8bQfGO+Qb!Z8Ls%p#TBPMI-}sYD;%$w!0wJQ8b+AlLX-(^^)SWz zPbP5uZH$evrs&_<2v2s?{!_UjCjT}>GI6MSsuAYK8K5zda*yX|;I^MSWWnk)BSxRQ{(}Oos}VxuSv%x>L!LXH>G?))lP#vkI2@xkc*z@P<^a zvB3t{{G}4ZV^VyolGFu#M;bN$LX=9sjBxw&m*E33A! zx0~%Sa;+_HhS}nnksX#<+2Mg;hdr8h*rBq+UEbrZRy#K^)46Mz%iYb)Y@QT) z)Stq@(hk<3`T5 zHZa2JN&`%wJ@Eem3~=j~0k#?&0`ZhfvD6SJ*K6R0o;q?#S4)Pd^u~9o&+*?%^Amwk6es-d@Mvw}i3bSJpCaiAZQ3bj6qqWln5r%dR$P(lh4hvMYb8k~iBVFu zUVEjI8&OhzVLkij@_Ke?;W{RSY-PhHQg6$TYpF;1dUo&nW@a+U4%-85ak5}MY`aP^ z=e`|0?%Trhu`Pb}w!=en2Mnm1iZ^akQB8VUbHf2zLmhC)*a4pn9Pp}xE8a9YlV+UZ z*h2AL=8Dne%MT4*Aq{jvm@RqpEMv^RXoTK&Ch#Gw{;V}YW~2#5JDT7;GsJ$;08MKR zP)OJF9#ehH14E?TG{lWzhPd0Jj<=iJqi1@1yd+KO>uBI+lsX2eYryiCIv$x;vZa5> zhZ9e-8-}OY)$tYVd}t+G)~Aw%-ao}u#m+Ksr^Pb2O|xZU?OK_*ewxh4yGiO6+F6$P zs*h}i^({8M#(^hRFj_QImfdHYOwzZLOf*iG>1~RZsrmZLa8pgD8m%i6 z&(yF!JMOSI7jLsNjT+WYx0*e9Rn7iTZ|MEq@33FtFo&in5Xwa?hgmBABqc z({Z~Zyv8gBIQ=DGzvohVN@HSy9!n(EYZHR6?6!X{|;NC z(ZC9So>`)&hZTzYSRrvFlX-XeCT)B>OBON5T;|<#ybMzo$Py(BWH9%Zsif8HYW^J- zzvwP&9dMgfe!a^gP42Pw`>UBp{$2JiFa@GR3fdn{LF3RAG=-(0b9M@xiAx^6Qjns^ zKo?~OzU5?~E;9pH_GM7q$-r>J%rqeb?T;%V4N#)_h7uhJqwf2axF;y_{HYQ-b;PCb zk!XAx30c=D^lFJjj4Be}KSiRNaB6ou5|y7~ko+VDMUJsB-y4I?M`JL*T`aPG$KbhL zEZEvr)boMr{_=wH59Pc!pQpMK^G=+-p*UiVZ^f2~)V0RpBUZ4dL#?Y7I<;Bi z`YtDoq8R))D;9^SUQ}{v6+Ye! zLe0Zf=v_&g(hJ7=Evpbg`OJsTSR-b?C8BaHVWDA-zJAu!6UrLbBv$zQ*%C(U7t2(e zr^zBTPuYpTzewGNTgxPKTxD);U1S#LGi8H^++mw!)hzp;8rIRcmRbFzI?{GE?COMj z?1fe>6J1l_CQm_X_Y_QAmjW~Lp1b=}XpEYIM5MrdVFpeU4ws`ckds6hg=9c$X9jM> zWWbNEjS7{pD^_CNTGCXu5)o$!n>|Vx7b#J6Uy1G6kx*VCjp;<8``1YH&WVInHwym{ zf2>GvxjM1XAn!T!PYhlxjlqgPG1!q8Lw(a?@W3M$gTx@{-X}~NR$=zSU})N`g1cET zvZ%f`W$`N1j+0CWZ%y$rBG_sEMeO<%4%%(c7?zQZam;ysv@}IRS z7&#;bw-v;vBXq>4V48Odx?aveI^j`VlmQE$41C>^fje=;8`9m91*ES?C4vc~yz@$! zkuPVhQ=(~;5+#Jsn=(pOXrwl0jtO{(1*M0vRPs9v;&^z>lqD##ZF}n9&2pcJ3F=eL} zrjy_Fz9B(soE4hMqkk{8gql@%S@p5$vf%F+qZ?WF@s@Z9mYL>a0bVU3?Yci%pr=W6K3JOSLXU7m`?~(!1 zmv&PI_DV8vuZHj-9(`Fvd?NpOx-pv0pWN|X_Q=5JGCHDMKAqeMU(#kGgz z!;&bB-VljzW>I+35Q$ztB9Tg1X(mS^#Uln=%wnN8I~J3E5{GhP;P^NO^9ZM?DY3Xr zb)soht2KRh5FVbQy!U*n3-DNl(cMR&|Eow?#}a?iA~CKq z62{~`)bIlW`L*r4NF2TzgZx*72YIv~`A?iSVe%n{;zbO!x5l6*brtqd4f0eoit|*D zwAv^bMij?N{t~YYg7L@K3Kg}M(A#c_|Jo#YLow{yq|umdX^n$&ifjA(3pU<5g5&*e zg2ir4!NWyMFe)A{xQ-hvG~65_Ty3mo$M06NYZSjW8CA32>uOp4hP!Od&KkB;QpW&%!Z{EaV1c z;iW1U`(Njxx?LWvLFCc+G8d(nb78BS2lvKY>@0{w*vUBjjg7-5I_8wb!K5e-Px{59 z`C1&7F3!h&{e1kKn2+nr^U>^)kF%EfnAw*07hy?y_IwYMIHSyDa$6J=WHdb^tfsV=BVpZBh>Q zX64|1ZVoOq+JM zn^it4*XKiXSw4Ie`EXy5kCFf8Q{2f%^7A~HSSMlR;3S;3NW!1~N%+-?&KDEmDo?^F z=Ok=d;)yFoUhrDy1>3ueAs_37ZWBE5X@oa)FM6VEqn4l?PkgD;6uOog3W?pdg+o7k z3Ul;&2&uDmgsv$y%ynWN3p!oLDsyXCz|i|lYS_SvU)Hc?L3PxpHwXU}<=`Xn=xR+4 z*1sf-N^;Pd_>+1(2m2RhL1v$YBi*vlJt_-Xbk7a4U?$6=^{Xr_xSES4LkX8(x$xOf z*i`0XA93n%&peD{dFXI44kwF=GkfDu^F9uP5659yWE^rU<1jfR4qDdvFdLSSuvz&S zp`DLR!pkBiAAbn1UF-7kk+_pvn}kK1l5pcrB9@L%!c+Gogmq3r4P6_kyb(3j6VrEi z0SCOGbHoet2Y8|LZBN8RdEw%#QG(k05rRbXXI1vE9zyHrL4ulEAHl*?BJ5bxLy%fE zu(M&0*)au;5B;ew`(!nnGUyH)u&~QuH`_kmJ8LM9Qa(xfjQx&c{&HP`sSjo zH3z2_W#P|^EQCzTg5Ja|d|I4^F=kn)+d#TImx&5xE)E^a#q6Jt(kO|7KaS0c)b1{M>&vrm{pkvd0alN`{ZMgZa$)d z>AW!yOMd3zz&}aoo{|LDZb{hII|)Z8CSfme$yJhs85K!L9O8|c!#$zhfwa}+g;jP- zpmO&{f~6OV?|7p6(9fzB>w5@SI=Tq1Pet(F}& ztYu-L_nGa5yX^b0de&Oiz#Q9ZnPyH7-q+`#&+QzHCQOds$ibqz9K^KCMMGW=%6ep> z*HqG&O%~J$qvNBp5JYF)fmvuY%EBziJOp0MMV^?8gy3AHf6T@9++37g$c5&TJUpHl z2lLiA%$*#MTRQPL8x#kf#yGrt7l-)+<4{zd4|%tIB)R9&dTkyS?9azw=X|O=%0tZW zd?@=6K8urZEjI}V1IT;o5;4><3GHSi;k!{1&Qi}Kop3MA8tjdWlfA%$Jn>w^3x&C! 
zDE+$_YM$D{oIgE#+esV!BLtv-m?*h~z%&=JX;?@P0T&g5_wIsDhJNQAurO|$8R}U#&fWNIFr~R3$7EgFjty| zDWeIO!CCNjC-0%>HiU=Oh&;?Ynu{Kna&fv>9!hWI0`gomP07RNQE~9gCyo7$!^IzQ zsMr#RA1QI@Dv5{3usB+`&4)et^98kh_%6@GY2@SA=6vX!%Y#KhJ~m5}(0NA^3c{1n zYk3lk4kTfiYZ4~8B*EuGBG&xtNj)OGFyM|C+NFBp@@5Y_%<@F$zuxd&;EB=qj0IQ9 zMJ^lMPO#bAM{tboF6@Z26-M{(A3-dN-!MGpA zGDGt1!@1bonhSlx=u%-W7M#e%n4Db7zs|#RX&&mn#z8GP4wlp6(D76p^pC}1+IRBj z{c%`Bxb3@}2dvCT{=R%5EFTFhACZKWTc3Q)wabUr+9YJmNkZ2%i7?Vlf}%?jR(42& zqm1}7H3^4idg0StFW5iy#M`0dJ&!%{E#3=H$hYt1cw+t@W5N4c+ zPR_%6tvnh7=EC4$E?kQ!?o&)pn@9e$Bo61vgRCf~`y8V9-ZdUYpD3P@*EpTY$0~<> zh>`g?LHy}MxcE2ZVUKz~UOFUU$gf1|g_VT$$CEI2UlJa?PsBaa+^5+|#CK1sNB2Ze zdr$l!J>^gI#7^?;mU-T|=H`Xd9u_=uf+>GdWXcWf2J;cqO?mL6;e67p;XE;G824E} zi%l3Yn++_O&kkRm#TcE7ug_;5pQp3Bjf>dDnrK`*5shyh6o@+;jXPn{Xegy;=b~}G zBpREqDWG{lfsUHnarTJOP8;2#r`ceYM{7%4*xCG4D zmw&gWsbSN z%yr{p)_&|)=+m;ZG(}oKCaEM0l|Dv(%cQjUSh(_%5Xb3-}kzKBUS%w0;rYK;0 zT!Dzw3Umljz^lV{s&!RhJ#lA3MFLXt69B^ML2Ck@UP!>f+X>iwD*>a*2y^&+TQFJbkQqp_UcqkR_zT8g5PnnXBljKehSU!scsRj?H}tgN-)yb9 zd88$my)@%mD^2;13&VL^u0F4yxR6ab;z8d7SjfIO`!JL8x$MKqMeN?PCCt=y9vf6n zShQDQ;N@s^A}{(ILgxtL5&f3t>}Yrscf1K7$#e3aA_Z#c{%U{%ia!eU*sDO$#{~Ql z5)kO0fTHRI^nI0pn3@DkOh`b`e+kGTFAE`_6wXzkQHaJ1r)c;Sm!=a>f6)2Vt3vo6 zFQhf3L)bC65T&HG56OjSk`}^Es|eE`5^n-BDF+}EFV(Uzvq1?Hzf8$AzvMq93M73_z<$!!eB#mb^9eXa_iG5J z*b{`$>jYTDM#IlQ0YCD$hfWG8KSaZ6N;G1Qk!KTLq4|aIB7a^uzYzL!3n5)z2*txf zIMO|n6yb?QCf?5>-yv-|#%IEwaQZnU6NLve(cq?pR#OOZG8EN6Lop#b6w)>SV17vm ziXMdFsBS1mmJj3CI+^iK9wxkXE^`2!qw+#rMc}&L1Q_T~C1CwgfDtXIi<$E#l4`(r$@YG**(o z?c7V+x*CmkniLnw|GjldQ=@+bs{#ZW|8hNAtS5LB)m&Z|6)_-`Xq zKF83E$M3h`A-9I|`u9WmSi>Rw+r5QMW04noHPVaeS_iTbmlm?OIn&vm`1x#1=wfzp zANlbZ1%f9isQ-@w=Ln--q_3mo5kq#z8HSg z56JPw$br7la`l7t7&$um%kex;4*L;u?ADeeyRV%15Qysea=45O$KEyJ_~;pqb2RSw zsu_;W!^80)mHGrmKnMfmh<_@lycIbjPRVg>zZ{1g1EKsVhZU_mz9$|?O)|0M zG>vU)l{ol&AM&^oX&aPykdX$@)HIxVn1=N40r6Tz;kDopn0Gjs)Vx8~VRg z1z@ZY0MRl45z7Lx;&&h}9t!|}NBvnBW%HjS5Ac?d{oEvv@F~yc#y9i1=l%m+ZMnLg zo3T-qSoPs7E&fZ@CLKC$#MRI0ICQDf35%2N`0I;ge_u?S=L;V@KR8_R#p2Jt7(LSm z80m-cT5_EKHxLhW<>-Gu5ZSeX&?9`DZU(^ndms){kL1&X!{A*SilS>_Snm)HwvKvC z&kw__iQy=)mBTG75a)l&F{xaRZO7zz=qJa{`GHuzS&mVJ&tuy6Xpkszr^G!JF`U6r4`IMgs7-HURm10?~V903K)t zBFs4e!O;Owix0p<;>#n4gFK=pn;&Y;a%g>#qv|4YCtHrMg!`OiIi8p+q1dfNHtoUcn<_DgaB^Nn9D1XKgw}26>Jdjw z)1mfX8g>z1JQk(HPAeS=gjL(CGz7PkqbArNJ6`+4TQ3m1>H{#i-XD$o0&rR<5EsOQ z{PGggQIGxn@XTCZv?ZT6TIcg)|KxMGLwUS$q^3;oVT4R=SnL_KJ?yMXHlj*Z*SQKY zVNz942U+uAKV0|p!-(^~I2P>-)k7b=yzhg~UHo8F?289Ma{MuoBjHRS1}-NaTnmKr z%s}d$6NpK~<8dRxu(&D=6{M>qAq>lk!*Fdw82qUxcX~+}-gR4w8`G8|?Xn#58FCEN zm*a9xAdZB|@v$(FKDD|J<4lM*v_D;7OM8s}?xX&}bU%sK`0prD>zz)qC>{T9A$?6w zN2ht|IQA$FH@BrDy+3KqIS>hf0kB^mfXx#E5&KOJjqZVXKajY7&>tV?Wb?5rbGh@l zL%g@%LC%YF_>H)1KH)(=e^a955hgqBjQXCCMnq{ygF8%-sl`{E2~Lidx_!}=sr|ZU zyY;;<&XeB^+UAG4PJZ}N;EReVUo?67VQs805=qDRq=C?WC&%_zf#^^8m=y%l-VL3L z0?~nZGNB?2f6T*C-6b3umOJP4ujt8UUh}lKH zHky3NArtGm&>l!vB`#L)L-jS<^Pusc@uPiceNOX;&1qN>mk#I3bTocQ$DNWiw2cd^h4^&0GK(;VKyTG^(Fz>`j0=7^ZZeGz#nx1d3@caJYLa|&wG>KIFHTa zxwiQ{%I+Xn6lC$@VTQ8coeN|U^#yi~b3aIRQVeAg>K0Or^N<=vmr2``_qgZy;o^8- z%y%Gdx%pucaj6IQML#t^`rd9JJj6hl2!XJK95>A6G`9?dS}!?H6JF1WFH@<{w%fTd zXlx5d^2RWXH3`SF1LQ^2EBN~kIob}$ae=(*NfiCrbF&!J_>G*a%9gn=yDK9e}M|Py6HX;pU?fvnv zkbF6UyvHj5F=zd-?TtS=oeqQ*;gn51>N|bO;eYD)^WW3+={r8Tymi)hSX zjqZGxV)xRsMl*+iuk|)O%LcS?@xFvuYzhoz=SRi$hMnnDfII9m#{{zVpSs zA%3`}?MLt34~n)x^rYX>MtBT88HmsWfv^c7ZP4${{}_n7Z^KYp9)^geq%WF7e~Ax+ zl}8vVH-%%!h;S_IvlJ18>l`)m^d>rIk>qGD39U05=l??p`(@( zy{wfuRINmHw@gSH((z|xIvhgMaBBqN;+GC}ieK}-llRmTm%;oq(bl?HLq1%4m+b)~mU7X9!wDWkM;rYB_XCBx1n#aB0g`X8~?2u-Q z^Q6H$0%Q>}(Nd!-)mgO!xKqqanKfQ5Oq#d-0-%Mf}g=*}VMgVt%7(2A{mb 
zoyWz`=PBB=_{qcx=)Zje_3B`#sGNYDg$zGiCZNwXh9^D>enNYdtCDf-N-|!LjfZdF zc$A%w#{jo@xSWYc=FWH&T#biX-vnsTx>%?2o{%U!Xr0OfF&C&Gf7xPm)bYg9i5{3g zqrF&d(oS?euP(~1+KCq)w-f(Y+IPn_l|}0+U>yZ))X=P`j5-QY(K-85!3s$T37`aJ z92M-21?#Az*Z>mR=DmC0yZ8S4;%|F$4(C9! zertVe@3q!mU+mYqBFWxPoJzM7zjY+8mF3ZV<-!r%{hNvp=@7v;4+$r3h-kjuS<4?? zkZ^VzLrAfN(fM>-C!ziuiRSwZ3kNauUrF+*qo+Dn`reKthdFxWHP>TE~J(J^FXmqe+Aw?WXIodn8?JqK8f2U?eJnk=G{}Uh%=GB3YHU4@TWRL2w-qjPS|n z@Lg-hLCK8TAj;WNPJ0>E9rb@@#*8YG&txMOXBZLki1LXkM(oQmVq`xfM%dGMY{dF* z2CQvGebYO|z(W1fi*+&RyVpQ|X%z($o>WaY&?Zl_)HezS2t%z-5D;{sK5s^7I zVoEDpG2lWJKRQg!Zys0ipBIeawdaTMZ7tQjaDt8(<$C<}L65U(dTed1$1^uQyxZ#W)=7_y(RwKT^zi7VM_sKR zbA0reg7msbofRn!yP9O{XVE$fQld>b*Y##a11$yS{BWGgndu@i&b)Lc<-2w#6M zoR1g~$rm;KiP!!*f>*E8)4F6ZkI*u7+(|ZdM?wbC45~?=Y7R;`{X)Y2V37Ss;KqDC zO3&(0`>P%|8$=L4ogSSVMZi3Y;G~-MCbc%p?pTU ziiaNw1Yi?{(yAUG)||Ure=g+FeATjI>u7H#HGt-yqTJgY)>N&lfKwcJmAngib?h|!n%p% zLNPcu73I{1M8mb00gcCx#IlpoSa&xX+t){9b#OFBZ;VD+7h5r9zpWU4%2q^OvJ<_` zwqnjjTXE4^Puw)u74Lo+!tXl-bLS=z+}=^mSGFI*OHCRsZ3^RqD~Iv&R#Xmqh+#}1 zL%rV_{;mt`KF!c1h45!Q!`KUYOg*K;2KxwD7L$w!gNg`~_IHnf@}Ul`H|we1UQawU zI@F};VE0@{^=*3O|3va4+;-X-gpYHB@sx&5vw_4#5)3VZvA-%94Qhgrej*+A5$TvZ z&y3M0$jRSzS$d@IA#$NSEe$n&5Yd^7kpxh}~nr z>oNnlH5%^+jKG#ZqS1e041WI3fNqQJL~=4=&GNcp?gCrUUu!FJF4hyZ`)tHq!lvxm zq5Qy!NWOD^6o2zc&4;`S<9DZx;JruaxktosJ}Qo3^(hJMA4+)lM1s!1P&|}$%8QOs z40iP+kaUi)WT74lO7)mlCj!>3B#-aNN6BPY+jZn;^>{;g+?C{1`$30Al{&<9(Ia+< z9%Z*^-w$E&SMp;OMM22y9}J)A!Gx*7m>3X@_Y=(+)hr!pzUhc1oq0tzwJXbvCWJ|S zznD=q&V>83jX2uUhzFC5Xnnv4Tr%RsR3k?1Fv5~xKz4)yLkN%8{BFSLBhd&PYXAn2 zygC}Ni~M-T>pJ4gk-DPi1Y1#IXDgickxmV>72`eZMA|hSx9(8$-LuvFQF%Y^*CB*| zdo+S~{Ue-TtkUq$gg@OrNpLzO!CL|v`T!2460YhQMjvEUzpKa4Z#vw+PdKw!hsUk; z`16z=>r?bt;uwMTu6o$-Cb^i&*OMQ+NjP=39_bh9QLtPWr)xSojZeoMvZZuwI$ZXeu~cP7k2PlKO3e6Y2HDm;Bf@7Iarz?p zdK#K6BNWX|=;dyLiAQ6LE%C=}j>eQ&vZ=GtNHvZ`TDurb``dtD3f;w)JXev}z)hGp zxQf@VZer#Ih3Mp^5FKB*i^k7spSG063on`Z;6JkXoWD}JO*7iRZ_VVn;pyDgLrEM( z0oXlG3CptpJkTid+ae{dDV3;`s)W8G6passVmS}R;|rmX!$J|#Rs~NQNzX%ZzfTlG zbE7alH41(wqHy+16doo=qE22EmVAta=CcZezEfkNP~qlV70xVEL)Tr6#QSP|NK<2_ zeGU};awsO4gSa+1I6XZF3!Y{p@KH9-*QXlmFe4mk4m8`@Nc*jfXc%wA3c{l^K}L)v zU++jZJg{E`a7Txb89Jl~=#WGhxx_yL(n1}c&v6r>&aT44Qz6(QSAkA$;)tJ{_#s*$ zOt%%nZC3{WOz%U%hpN!PBNTruQKA2VP_z$Gp{u(J6T3yBL!;pkt)mcgHVQZ5 zqtI|;6!wv9ha5aH7EwMOWyG!G?P&n91b z`j2qbAs=cpQ->;=pEh5i!)~~WdA&Wv=~ZsRyUIdS?yIA(^-WDb)%5qFA8rhQMBJD5?{ijuzy_?^0q`F zV{H`dU#g(`qC&_!HJlvOI2fVE(_}RsdZ`iniyHSyS7toUhIUd8&J|`OqIC}3m*n7= z?{bjkorAzwBc}glMBFGNrYtlfY`hUQFyh_4)!0eLwu%UptqVtLbvVuv4op0wL*@@U z^!ZIkIc>tpH6G&V0uS+Mf~)xO%}vOU-Gy$tyLj7#_OeDOg#SSkzw{=Be|?<6TlLT6 z_l>!H$e?WA;A}cS@;H^BC7t;&q$eg%RN{3{B~Duc5W6z~UXzpvYSR;rEmW9PUj_71 zVQ+0H%E?adkuFu0(Y5uV7_%f2Q)WbBj@NLSb3|e9l}Oyqi9(+Zk*M(@S*=#XCXZzD zw+fHVYK(VPqu*vV6d`K(&eh;Uqa0}OX5%ND9JHv)hSR2ObbOhO7Ay=mK!`jrMLG4y>1jx|#uYm^G1^;DQLJrsH7 zP^6OX#Lf)Gy}zPxA~FgWY$NfhcO-N-qfkk>(`0`nPD{ga_CD#~Q8ilk($Ibs4N??p z)UHsY%QOw{*{gAt^k%tF4u0&CjW6%BQBL);NmK*cYkM}9Q=ROTXe0KHrg`%+6I71m z=i3|c<3_@x#wK{v9D18iI3|#d<+h2ywkbLs>aByeHXQvngkze64()2)g<`F{_;B7` zTygXeqgS|#DQDe9S(t}tncyle{*}z{My=*MKBx17pj5tFW8^2VrSinyX}qS8Y>oO$ zx^C!6TY{9xS<(|7q@M8JsHA;5O8C7|LaqtHw)9X8xEqR>WM3g8$-W{&(LG89??$0; zER4d!lqg&(jf6ZQ5<#_*NKr@OlZj+h6-9j+34?#qz=tsCadQpMch%r%j2dqas_~_V z8rctWF!)6_?bpf1kOkR@Zb`NI+X;tSEo=^2KFmhHC)xO5XF|M8{{7Ty=*h3E_t1PX(FmVlBkVdF@hi=b$kwL$=urF@oj0U;>Ifa}!BdEyCReemu^ZWwyVzFYCR*FN3sb1Oc%dUde>;O0 zd`sg4?bG>KB8f6?O5?mZi+?vGowrF#<*)85A(0PT+qWmqP@mg3&y<)a1>pQK(yIYV z$~T81=}jp5?jygxBNWnuP|Vb+aL`GG#dTB|`7IJ3PedW>a1>MrqHxcJe3xGoo?VN? 
zwg-{O?xMzv*D6e1sK)!3YA7vg3?~d~GF^@KG<=8TKs2NM3OBMb)-?w^s1Cgc;q}Af zZ2U>})$0x$kw^YLbEFaX$iGkF z=5YwSI}SgMj>EF2<1pSP9z~&X7#I|X10O9gY`38MeG493wBSXu1*2zJVE@H}g7FsW zt!u@6uT146WmJ#a!5fMYZLk@>K0 zFs4roPNW%NtQUi{MKLf;PJn~+Y}|gCfb}FNWnco@JWN30^#u6#O~CO^ama5GhniOL z@Un}8s(Bpj65^1jrTj-Piv2sS#VDf%y%t*V$9W6+FAL`0u^_L(TC}J4lSofi6dNhd zYsBkUG%x&tYC~v!G3uKUUiD2dzY3#Vj~2VWBdqBYhApeK_^B!kM_sh|=%IzjN_Www zu9x_o`gklG=pn|p^%M_pc?g>?3Q-;JAvQ+tw;fyfvyh#9 z@4W3irky|TcL=~xeE?2Tzt%nz15lL_0M8!&m{aPH#9*p-JY;|_D2C!@2Dn@^py03p z?7TR1tB{v_%7ap))z=dr!2r#3l7t@sxRcv{~=%A)P#zmCcGI> zwQ5QeMwA-Sy@3hbFC5dGhT(@S8k4lh3Dn|jSQzdM(c-{;E%E4i3G-BMvG}ID7}wuZ z=wEsWt+$6LUgssg)q04mfq6V;!v>D19sG~1?R@Cl0)A)UEnX>5 zWENDb6A-eP@;bT%{IDbex@Kf=RJ#;U`7_VTI3#6^gE}S-JHDqpNPo(47L3F1bp7Td zT3bxEU}2U8x6BrdA4f7;Z$a#H3tZZ)#X^e-M>m*|@z98Z?j{tfO}N+Agx&TuFRC=+ z$Zjp(d1{efPYWqei>0)_I2)-&rO?7}Z5Se3D#V^*50UuROL+e5EzYYv#1r!E)N@`; z8ch9_-frc(dz-nNVLSh)?ndf0vW4g7=kfUVn|YM`c5d0_4^vqH);kBFMMVI8cU^Y)#m;o8lbxsMc|Z22R()uy&{p z1#iQ!(Hw@L^BRP848!!-8pI9o6y5523iCTp(MRbex-9n;pMyQb_PJhS)@*lSS+wzg z=T+a9Zs8C7D9_q(6R%_1%=1TV;mFP7yLC!b#V8Se#~-CP{gK3!=+__sHxBwEIWz$N z^(byIIR<$Yw6V`#C1`4>v(`hw%BPk+|Hom{S;r<(I#o%KeFKQ8IsRn3;r2n!KV!t@{=aa89?>1kkV z@iAdSkO{fd!(bN@hCPvC2nY;AhfQJl;H*QZSS@j(Xpw)$Q|u}96vL-^iLn-9AYF)*vQO?4#tBY72=_1}9b`lBg+=O3>i}=#!u{6Hyj&yfT zjkI2VEE!C9rPcBS>DR_LrIh@;(#*4M*yqLWOc&INbqVufE0=k)%6Kn!<~J`^TjIqg z%rr5@9O}(kozAwsGg3c66I0o)X1y-0W;^$;W{2D|S*x$<>`v=kc5?O_W;m9~oZZ*3 zdAl=+UnPt6eP<8d8hZ@h+#IDd9FUmn09Bbij*YTMveFT+@3`U8epe{`JuxBF9i^%6 z=vmD!JN`uaWdBSm$ofml|Lc|%l-_}zwNU@bTkTj0b7yNuv}dmq+p%S( zo=owVC%d^OjTNM?Vh7)v80s3?kzHnXZ%P{L|I)}(n;Tj8@j2{xP#XI_J&Wa!%4Awa zHXHgin>oMBVEx8svBhr9@Xw&8D9>@g!b}I87LHIibHvtP?9p?UBc{Amp!5efd~Tq? zwU+K^vOs~ipWR`pc0*2v0!i^TY>uUdb?;ck=01JSGQO)}iq6%nR9VBORKH-R#-5^u zgO7;+;w4PYy#=rF7B=0zMbc9*(f0>$@xHZ-*j()-mc+S;CR?3F#&IVx^0bp!H_=7x zk8u&x6y?(0KPse7TZ$xA?=q=&XQ_1c)&pt6tB2BqR!^jI$F}Uo>JDu4!B#ADNIP~j z#E0!DQLx2QTeg6B@hmQ87O^0eO;)dB>og{|@NgXurB6Vz>g)V{2G^i)yx4|C~kKtYI(r*08}&RV*#AhDE-7 z!R}r25q<4j3y+CjA|l8~G&XpPcB$SXhIGg7vA2kr;4F?TYAHJ1a}iZtoJCNTt9aDZ zP26ehECxPnCEEObT{2y%l;#{PmzHn8C9PR{LqbxCbi3s`a9=~|*2v!CS4N<-VS z>{@p=W~w&}T;(BXW5&M0GWE@;6DfZoz@=pIPwGDkFt=m&BH97W3TG;HqlpoiI z9qHMg)rs|H3G+SJ#w}jVT0a3dd4amrid(%k1{GjDOv!Am%dZ6)5HcNSkl zokfz5lSta^Bz)RBi#>;(#UT%8p|vZO{0}~oV!u3;8ulrbt_*o3tykZXCLFpgRR-QC zesK?WkNL1~FI%%6Hok1cJWtkLrC^PZc(A_pJlVp*DeQHLkqsSWWFN{?SzC`3wzY9G zbGn_vY)2W{x9kkIYI!!ByfKpnthTV>^E26IOFC;~CCn~MXJ?MtW9n2#XhR&~Mz(e1 zcL!MaH^rcjO>w)i142n(W^7a7c6$%V6$;E`3XJu3#qGszSn|6Y%y+0qdf5v$Jn$v! 
z@%jZD`b#x?u;CdiTu{SYW>&EcdER0=yu=jlDPncr;^;>&;cMd~8qqP~yrW%wL3TyU2 zsy%yCy65D_RNr|qMJpfnjAT@Ap%>dR&Wknp(wf~)^JGiLnwcTm#N$Edo>6 z;qJ*SsDYV1%QdkdpXRay*_o_!P8K_vl*z88tYHPq(iwW^u%FX1*~hWXVEox0ztnL= zdWt=9&Nl^R3{eu_45y}%d~7^$u(vy!ZgrT<64PdRixW@0#N!@bVyMbnC_Z|N zXKv2o>}A4d(jn2rS=71eBKA>^qd)0Ta5rb+b^DQYtEgQ1ePfkmKk2FDIscy2X2LBg z;!TA#_S}7`Xs0*xi*3ispLAp!4-sdppC{WnQNh@t_Uv+?4|}aNveiw~+0Dodc70D8 zyHK#2S>~p&c9BNrxpp-hYm>qDHCoFq2IaCJCtBFJ{55R-lXNyAKbtLTo5hR;O`(r# zhVIVI@kf(pSbB(L^WGkte{;mi;SN|IFP~7xS2$Jh+lMN+F|CX@7+%W1RXyd?Gv9yx4<+%Ci{|CF~>p44ZjwN8tZ z$4g8V$6bz2usTLwJD%Ejigj?pd~0e!N9#7{5c$g?;_1+b{HF+9?A~>9L8>MQnO9EaQ69vp6&AuXCbF_te}^gHTtXXRt&$YfgszEM=`c-8N1BxI9T- zw|$kYjmnoJCnd`tG`3cZooAh7JYXH{tF?aUG0i${ozD7pO{KM=cA_=0o|l~cAy_^- zBuwsh9hbv0=^O6N+eCC@9zA~kZpPW|4 z=b6j+KBsc7EUn-#JSurf>m<3oDMxk~pC&t%t(2vR6|!yS8oAk*B)MQwvOHV6O#U$; zN4~txEYDlMO#XG*61o15S@O@DSIHfYuaqkf4zT(?Ibgk=X|nbj7HExH-`cvb-+F7W zWLSHJ^{`%E?=NTH7$Rp(W%BiOU)lS(pL{t|EuW8-h+pjo*~7j>sxq9C98X=8Vme)y z?iOB^T8%m>)o6;PT>I-%>60+Rb~U@@pkWV&MX>XpI<~Sx%`UtQV-Bu`kr zS{@m-N-k)hBJUf#NY)gSOe#|4Yo`)r|AWbLc9T_dN4FL7wDQ%mZ)HpCj-A2QY~?Rj z$Em*7>$a<`f{(R+=*6tDj!Ui6p8LpEWu4>&PQmhmp?%cZ(&P1x3=Gmm6^>q+vXnlS1C`Ne!is}lZOm#eg|vY4+qUdX%p7V>VVp72BI72MOafmyr&=UnyU>oGK6VO_8G-rOGXzCCUpr)Ahn% z*_wOdY;bru z+f%J$El+CM2U93JFig!}eJ$p8!-{#os>?k1RWV;o9M?v|=a8vIJiBQjpWd~Cf2&`? z??smLwOOTndG{wgv12)J{8Mm9pKPRq}-C zCb^?`vb^|Qw%qzuiu|c+h1`sMSMHOga>2<&IdY^?_H~$T9b@ySwX%4IRl~&t1q<&@`9@|N0u^1k6cW&bRt?9#TQ+%Vo>od)m#ZWD)dril#+}ARH zrIw8gCoCqu&8BW@wuJ1-@kl5$9}Hu;owRJ|5iMJKrI_FEP{Oa)hs!0@>Pb;;FS~p{NdD3>T;4LP ztK6hF*JgnVu=~~$ZDYoX39Y4Z>Cch3rGXPt$*nAD=L`Yr@#+GsaJyGO$0ZGvfd6 z)uZvhpWw>+IK2)1y?=*hSlxdQ|MxTey!V4k&HleV`H9Z{>(gDk{m*Cr>!)(~Z_jED{=aAcLl&B0_3Qin$4#EL-~Zp+ZU5ul te;yD2$N5$I75~2Ixd#6pR~`EI|2lQ*($Ea6SO4Glw)F4c`~T0q{|m0@9qIr8 literal 0 HcmV?d00001 diff --git a/test/expect/ModelTester.test_raft_small_expect.pkl b/test/expect/ModelTester.test_raft_small_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..bd0ee65add948ac71f29e43f17e52c5f16f258e5 GIT binary patch literal 46827 zcmZ^Kbx_rR6eUPXsz`|nNJ%$hz`F+!B%}ldR2u0{gYE{A?nXiER=~h6Y!ncTpJHHO z7yI4W**|u7mYFw?d3@g+zW06Zz2}~DuDvw}CmS0tFWdj~Gly*+n^%-iKwzAormt6w zmqtWzh`EUno7?~KlQPRECY-rS!y`5(FeIAKBg)S^HZa83BgQW*Iy}lFPKPg&ZRt`M z9_I6-nJc~g{UX`_*IW3^L&Cjc*8Q&+`66ezurt?aMRK@sGtWM;zFsS|B01NxsmDZe zIasqTwPv@TWo>W0CMJ@5Df8;u6fx^H*6cBnJUKo-;UOV@J~4seVbOecQNDgre!gab zJ~4ceyvmM|vtuIp93%NP?5%ltefHg5xQF@l|M{^0|K7O}+k*e&&Na2nO~n4M`~KfS znB&S%>V+(_IvY)gewER=`Mq>Y<1&q{c}DNO|4@=KKcZfXVsu0pYlddy&>#nlVt$jz z<`48{^>aF_@sKLTuTku&%M?}eoPz!SkzTz34)QF3W|R_oNA#d(VS=s$cBt-lLd`B~ z)F+ytVaswf+i9W2PzkNFi_yU+iq2_%%+$@sK5iknN6drJV=t3O(nGEqR<0H| zDY>)gLVf}r7^$M<=p$sycZU`?Pto^UF7(*VMaC!}HU)Dc;PY>i6#77ym7mep)%Quo z>oPswaf*)aK2D4I?onFM2a5IM!fsI!JQVV{VF4&jqj;9}e@ox4ORBrTx$zESf{qsSUy${S3+R{nmcnkGiJ4?1t$7%HQ4E=Xh0IFkLc*^&W0$o4Sbmucl{C1CcR$ro>WhZFi zvu^rU*+o~zr z%u|F#gfybpOQ3$$TqFkyLvnN;4%v%g?FC7^T($_w-7=7rmdC-JN?6OS3a&gY)LJcr z4!<$3Tr-1bfi-^EIHE|<6$&>!aJpy zxq9N;f^~?MG{YBheME~X;@f<2#E9_XKe^BJT;?84EILXF?Z*9FCu@z*Y3^kuAD1NQ z;Y~F5ristP}P3Sbqa_XhHfm zYOrml-i?FQ7cfa~nH>1s_nkDOUsH(EFztDAg^7&CqO+5t^7n4IW@$EFY z^&ExmcuL~Qe`st#0J*M`=EtvdJLB$gVyy=qxt3?XC zGR44VAc~Ab;^5U@fP&tI5LS~$*=|{gE6Jm_O%XCnlwrS51Bd1rKr+q4zN$Xr3sNC>T*^an48hpuMu6^_jXq>>&Xuu+9u#kLUT&# zY4tJMFE>WY!+(he-qVK8Q3@}*L0Pe9sBY6A+IQd`lU-CrYetObw$k9T8eLH)iDsKk3v-=xQ?4aq1*Pr1>DW_yYc%A)D?Hj1u4N1u+4 zQ`qf)^gTuhM^hIerbrF7SM?y-t&Pfj4Ty!PVwcTQ%u!ch>c$cXOf5$KO(~?zlEPH( zLTsM97}u(#VJIVu&dwze)s#oZQ3ZT9QbOH5HQact2lk0o(0gSD|MxbinRdeUpPMi< z;|m?`P+08`#rpq(;k_^j;f?{=k+TK4eZJWB#T(5kUfAd9f&Z>J!Ti%2bV(S2kF*i$ 
[GIT binary patch payload omitted: base85-encoded contents of a newly added binary file, not human-readable]
diff --git a/test/test_models.py index b5500ef08b4..2e0ed783849 100644 --- a/test/test_models.py +++ b/test/test_models.py @@ -93,7 +93,7 @@ def _get_expected_file(name=None): return expected_file -def _assert_expected(output, name, prec): +def _assert_expected(output, name, prec=None, atol=None, rtol=None): """Test that a python value matches the recorded contents of a file based on a "check" name. The value must be pickable with `torch.save`.
This file @@ -110,10 +110,11 @@ def _assert_expected(output, name, prec): MAX_PICKLE_SIZE = 50 * 1000 # 50 KB binary_size = os.path.getsize(expected_file) if binary_size > MAX_PICKLE_SIZE: - raise RuntimeError(f"The output for {filename}, is larger than 50kb") + raise RuntimeError(f"The output for {filename} is larger than 50kb - got {binary_size} bytes") else: expected = torch.load(expected_file) - rtol = atol = prec + rtol = rtol or prec # keeping prec param for legacy reasons, but it could ideally be removed + atol = atol or prec torch.testing.assert_close(output, expected, rtol=rtol, atol=atol, check_dtype=False) @@ -818,5 +819,33 @@ def test_detection_model_trainable_backbone_layers(model_fn, disable_weight_load assert n_trainable_params == _model_tests_values[model_name]["n_trn_params_per_layer"] +@needs_cuda +@pytest.mark.parametrize("model_builder", (models.optical_flow.raft_large, models.optical_flow.raft_small)) +@pytest.mark.parametrize("scripted", (False, True)) +def test_raft(model_builder, scripted): + + torch.manual_seed(0) + + # We need very small images, otherwise the pickle size would exceed the 50KB limit + # As a result we need to override the correlation pyramid to not downsample + # too much, otherwise we would get nan values (effective H and W would be + # reduced to 1) + corr_block = models.optical_flow.raft.CorrBlock(num_levels=2, radius=2) + + model = model_builder(corr_block=corr_block).eval().to("cuda") + if scripted: + model = torch.jit.script(model) + + bs = 1 + img1 = torch.rand(bs, 3, 80, 72).cuda() + img2 = torch.rand(bs, 3, 80, 72).cuda() + + preds = model(img1, img2) + flow_pred = preds[-1] + # Tolerance is fairly high, but there are 2 * H * W outputs to check + # The .pkl files were generated on the AWS cluster; on the CI it looks like the results are slightly different + _assert_expected(flow_pred, name=model_builder.__name__, atol=1e-2, rtol=1) + + if __name__ == "__main__": pytest.main([__file__]) diff --git a/torchvision/models/__init__.py index 516e47feb19..c9d11f88f01 100644 --- a/torchvision/models/__init__.py +++ b/torchvision/models/__init__.py @@ -12,6 +12,7 @@ from .regnet import * from . import detection from . import feature_extraction +from . import optical_flow from . import quantization from . import segmentation from .
import video diff --git a/torchvision/models/optical_flow/__init__.py b/torchvision/models/optical_flow/__init__.py new file mode 100644 index 00000000000..9dd32f25dec --- /dev/null +++ b/torchvision/models/optical_flow/__init__.py @@ -0,0 +1 @@ +from .raft import RAFT, raft_large, raft_small diff --git a/torchvision/models/optical_flow/_utils.py b/torchvision/models/optical_flow/_utils.py new file mode 100644 index 00000000000..693b3f14009 --- /dev/null +++ b/torchvision/models/optical_flow/_utils.py @@ -0,0 +1,45 @@ +from typing import Optional + +import torch +import torch.nn.functional as F +from torch import Tensor + + +def grid_sample(img: Tensor, absolute_grid: Tensor, mode: str = "bilinear", align_corners: Optional[bool] = None): + """Same as torch's grid_sample, with absolute pixel coordinates instead of normalized coordinates.""" + h, w = img.shape[-2:] + + xgrid, ygrid = absolute_grid.split([1, 1], dim=-1) + xgrid = 2 * xgrid / (w - 1) - 1 + ygrid = 2 * ygrid / (h - 1) - 1 + normalized_grid = torch.cat([xgrid, ygrid], dim=-1) + + return F.grid_sample(img, normalized_grid, mode=mode, align_corners=align_corners) + + +def make_coords_grid(batch_size: int, h: int, w: int): + coords = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij") + coords = torch.stack(coords[::-1], dim=0).float() + return coords[None].repeat(batch_size, 1, 1, 1) + + +def upsample_flow(flow, up_mask: Optional[Tensor] = None): + """Upsample flow by a factor of 8. + + If up_mask is None we just interpolate. + If up_mask is specified, we upsample using a convex combination of its weights. See paper page 8 and appendix B. + Note that in appendix B the picture assumes a downsample factor of 4 instead of 8. + """ + batch_size, _, h, w = flow.shape + new_h, new_w = h * 8, w * 8 + + if up_mask is None: + return 8 * F.interpolate(flow, size=(new_h, new_w), mode="bilinear", align_corners=True) + + up_mask = up_mask.view(batch_size, 1, 9, 8, 8, h, w) + up_mask = torch.softmax(up_mask, dim=2) # "convex" == weights sum to 1 + + upsampled_flow = F.unfold(8 * flow, kernel_size=3, padding=1).view(batch_size, 2, 9, 1, 1, h, w) + upsampled_flow = torch.sum(up_mask * upsampled_flow, dim=2) + + return upsampled_flow.permute(0, 1, 4, 2, 5, 3).reshape(batch_size, 2, new_h, new_w) diff --git a/torchvision/models/optical_flow/raft.py b/torchvision/models/optical_flow/raft.py new file mode 100644 index 00000000000..02705a7ebdb --- /dev/null +++ b/torchvision/models/optical_flow/raft.py @@ -0,0 +1,659 @@ +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from torch.nn.modules.batchnorm import BatchNorm2d +from torch.nn.modules.instancenorm import InstanceNorm2d +from torchvision.ops import ConvNormActivation + +from ._utils import grid_sample, make_coords_grid, upsample_flow + + +__all__ = ( + "RAFT", + "raft_large", + "raft_small", +) + + +class ResidualBlock(nn.Module): + """Slightly modified Residual block with extra relu and biases.""" + + def __init__(self, in_channels, out_channels, *, norm_layer, stride=1): + super().__init__() + + # Note regarding bias=True: + # Usually we can pass bias=False in conv layers followed by a norm layer. + # But in the RAFT training reference, the BatchNorm2d layers are only activated for the first dataset, + # and frozen for the rest of the training process (i.e. set as eval()). The bias term is thus still useful + # for the rest of the datasets. 
Technically, we could remove the bias for other norm layers like Instance norm + # because these aren't frozen, but we don't bother (also, we woudn't be able to load the original weights). + self.convnormrelu1 = ConvNormActivation( + in_channels, out_channels, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True + ) + self.convnormrelu2 = ConvNormActivation( + out_channels, out_channels, norm_layer=norm_layer, kernel_size=3, bias=True + ) + + if stride == 1: + self.downsample = nn.Identity() + else: + self.downsample = ConvNormActivation( + in_channels, + out_channels, + norm_layer=norm_layer, + kernel_size=1, + stride=stride, + bias=True, + activation_layer=None, + ) + + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + y = x + y = self.convnormrelu1(y) + y = self.convnormrelu2(y) + + x = self.downsample(x) + + return self.relu(x + y) + + +class BottleneckBlock(nn.Module): + """Slightly modified BottleNeck block (extra relu and biases)""" + + def __init__(self, in_channels, out_channels, *, norm_layer, stride=1): + super(BottleneckBlock, self).__init__() + + # See note in ResidualBlock for the reason behind bias=True + self.convnormrelu1 = ConvNormActivation( + in_channels, out_channels // 4, norm_layer=norm_layer, kernel_size=1, bias=True + ) + self.convnormrelu2 = ConvNormActivation( + out_channels // 4, out_channels // 4, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True + ) + self.convnormrelu3 = ConvNormActivation( + out_channels // 4, out_channels, norm_layer=norm_layer, kernel_size=1, bias=True + ) + self.relu = nn.ReLU(inplace=True) + + if stride == 1: + self.downsample = nn.Identity() + else: + self.downsample = ConvNormActivation( + in_channels, + out_channels, + norm_layer=norm_layer, + kernel_size=1, + stride=stride, + bias=True, + activation_layer=None, + ) + + def forward(self, x): + y = x + y = self.convnormrelu1(y) + y = self.convnormrelu2(y) + y = self.convnormrelu3(y) + + x = self.downsample(x) + + return self.relu(x + y) + + +class FeatureEncoder(nn.Module): + """The feature encoder, used both as the actual feature encoder, and as the context encoder. + + It must downsample its input by 8. 
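With the default block configuration below, the overall /8 factor comes from three stride-2 stages: the initial 7x7 convolution and the first blocks of ``layer2`` and ``layer3`` (2 * 2 * 2 = 8).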
+ """ + + def __init__(self, *, block=ResidualBlock, layers=(64, 64, 96, 128, 256), norm_layer=nn.BatchNorm2d): + super().__init__() + + assert len(layers) == 5 + + # See note in ResidualBlock for the reason behind bias=True + self.convnormrelu = ConvNormActivation(3, layers[0], norm_layer=norm_layer, kernel_size=7, stride=2, bias=True) + + self.layer1 = self._make_2_blocks(block, layers[0], layers[1], norm_layer=norm_layer, first_stride=1) + self.layer2 = self._make_2_blocks(block, layers[1], layers[2], norm_layer=norm_layer, first_stride=2) + self.layer3 = self._make_2_blocks(block, layers[2], layers[3], norm_layer=norm_layer, first_stride=2) + + self.conv = nn.Conv2d(layers[3], layers[4], kernel_size=1) + + self._init_weights() + + def _init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_2_blocks(self, block, in_channels, out_channels, norm_layer, first_stride): + block1 = block(in_channels, out_channels, norm_layer=norm_layer, stride=first_stride) + block2 = block(out_channels, out_channels, norm_layer=norm_layer, stride=1) + return nn.Sequential(block1, block2) + + def forward(self, x): + x = self.convnormrelu(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + + x = self.conv(x) + + return x + + +class MotionEncoder(nn.Module): + """The motion encoder, part of the update block. + + Takes the current predicted flow and the correlation features as input and returns an encoded version of these. + """ + + def __init__(self, *, in_channels_corr, corr_layers=(256, 192), flow_layers=(128, 64), out_channels=128): + super().__init__() + + assert len(flow_layers) == 2 + assert len(corr_layers) in (1, 2) + + self.convcorr1 = ConvNormActivation(in_channels_corr, corr_layers[0], norm_layer=None, kernel_size=1) + if len(corr_layers) == 2: + self.convcorr2 = ConvNormActivation(corr_layers[0], corr_layers[1], norm_layer=None, kernel_size=3) + else: + self.convcorr2 = nn.Identity() + + self.convflow1 = ConvNormActivation(2, flow_layers[0], norm_layer=None, kernel_size=7) + self.convflow2 = ConvNormActivation(flow_layers[0], flow_layers[1], norm_layer=None, kernel_size=3) + + # out_channels - 2 because we cat the flow (2 channels) at the end + self.conv = ConvNormActivation( + corr_layers[-1] + flow_layers[-1], out_channels - 2, norm_layer=None, kernel_size=3 + ) + + self.out_channels = out_channels + + def forward(self, flow, corr_features): + corr = self.convcorr1(corr_features) + corr = self.convcorr2(corr) + + flow_orig = flow + flow = self.convflow1(flow) + flow = self.convflow2(flow) + + corr_flow = torch.cat([corr, flow], dim=1) + corr_flow = self.conv(corr_flow) + return torch.cat([corr_flow, flow_orig], dim=1) + + +class ConvGRU(nn.Module): + """Convolutional Gru unit.""" + + def __init__(self, *, input_size, hidden_size, kernel_size, padding): + super().__init__() + self.convz = nn.Conv2d(hidden_size + input_size, hidden_size, kernel_size=kernel_size, padding=padding) + self.convr = nn.Conv2d(hidden_size + input_size, hidden_size, kernel_size=kernel_size, padding=padding) + self.convq = nn.Conv2d(hidden_size + input_size, hidden_size, kernel_size=kernel_size, padding=padding) + + def forward(self, h, x): + hx = torch.cat([h, x], dim=1) + z = torch.sigmoid(self.convz(hx)) + r = 
torch.sigmoid(self.convr(hx)) + q = torch.tanh(self.convq(torch.cat([r * h, x], dim=1))) + h = (1 - z) * h + z * q + return h + + +def _pass_through_h(h, _): + # Declared here for torchscript + return h + + +class RecurrentBlock(nn.Module): + """Recurrent block, part of the update block. + + Takes the current hidden state and the concatenation of (motion encoder output, context) as input. + Returns an updated hidden state. + """ + + def __init__(self, *, input_size, hidden_size, kernel_size=((1, 5), (5, 1)), padding=((0, 2), (2, 0))): + super().__init__() + + assert len(kernel_size) == len(padding) + assert len(kernel_size) in (1, 2) + + self.convgru1 = ConvGRU( + input_size=input_size, hidden_size=hidden_size, kernel_size=kernel_size[0], padding=padding[0] + ) + if len(kernel_size) == 2: + self.convgru2 = ConvGRU( + input_size=input_size, hidden_size=hidden_size, kernel_size=kernel_size[1], padding=padding[1] + ) + else: + self.convgru2 = _pass_through_h + + self.hidden_size = hidden_size + + def forward(self, h, x): + h = self.convgru1(h, x) + h = self.convgru2(h, x) + return h + + +class FlowHead(nn.Module): + """Flow head, part of the update block. + + Takes the hidden state of the recurrent unit as input, and outputs the predicted "delta flow". + """ + + def __init__(self, *, in_channels, hidden_size): + super().__init__() + self.conv1 = nn.Conv2d(in_channels, hidden_size, 3, padding=1) + self.conv2 = nn.Conv2d(hidden_size, 2, 3, padding=1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + return self.conv2(self.relu(self.conv1(x))) + + +class UpdateBlock(nn.Module): + """The update block which contains the motion encoder, the recurrent block, and the flow head. + + It must expose a ``hidden_state_size`` attribute which is the hidden state size of its recurrent block. + """ + + def __init__(self, *, motion_encoder, recurrent_block, flow_head): + super().__init__() + self.motion_encoder = motion_encoder + self.recurrent_block = recurrent_block + self.flow_head = flow_head + + self.hidden_state_size = recurrent_block.hidden_size + + def forward(self, hidden_state, context, corr_features, flow): + motion_features = self.motion_encoder(flow, corr_features) + x = torch.cat([context, motion_features], dim=1) + + hidden_state = self.recurrent_block(hidden_state, x) + delta_flow = self.flow_head(hidden_state) + return hidden_state, delta_flow + + +class MaskPredictor(nn.Module): + """Mask predictor to be used when upsampling the predicted flow. + + It takes the hidden state of the recurrent unit as input and outputs the mask. + This is not used in the raft-small model. + """ + + def __init__(self, *, in_channels, hidden_size, multiplier=0.25): + super().__init__() + self.convrelu = ConvNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3) + # 8 * 8 * 9 because the predicted flow is downsampled by 8, from the downsampling of the initial FeatureEncoder + # and we interpolate with all 9 surrounding neighbors. See paper and appendix B. + self.conv = nn.Conv2d(hidden_size, 8 * 8 * 9, 1, padding=0) + + # In the original code, they use a factor of 0.25 to "downweight the gradients" of that branch. + # See e.g. https://github.com/princeton-vl/RAFT/issues/119#issuecomment-953950419 + # or https://github.com/princeton-vl/RAFT/issues/24. + # It doesn't seem to affect epe significantly and can likely be set to 1. 
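# For reference: ``upsample_flow`` reshapes this 8 * 8 * 9 output to (N, 1, 9, 8, 8, H, W) and
# softmaxes over the 9 neighbor weights before taking the convex combination.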
+ self.multiplier = multiplier + + def forward(self, x): + x = self.convrelu(x) + x = self.conv(x) + return self.multiplier * x + + +class CorrBlock(nn.Module): + """The correlation block. + + Creates a correlation pyramid with ``num_levels`` levels from the outputs of the feature encoder, + and then indexes from this pyramid to create correlation features. + The "indexing" of a given centroid pixel x' is done by concatenating its surrounding neighbors that + are within a ``radius``, according to the infinity norm (see paper section 3.2). + Note: typo in the paper, it should be infinity norm, not 1-norm. + """ + + def __init__(self, *, num_levels: int = 4, radius: int = 4): + super().__init__() + self.num_levels = num_levels + self.radius = radius + + self.corr_pyramid: List[Tensor] = [torch.tensor(0)] # useless, but torchscript is otherwise confused :') + + # The neighborhood of a centroid pixel x' is {x' + delta, ||delta||_inf <= radius} + # so it's a square surrounding x', and its sides have a length of 2 * radius + 1 + # The paper claims that it's ||.||_1 instead of ||.||_inf but it's a typo: + # https://github.com/princeton-vl/RAFT/issues/122 + self.out_channels = num_levels * (2 * radius + 1) ** 2 + + def build_pyramid(self, fmap1, fmap2): + """Build the correlation pyramid from two feature maps. + + The correlation volume is first computed as the dot product of each pair (pixel_in_fmap1, pixel_in_fmap2) + The last 2 dimensions of the correlation volume are then pooled num_levels times at different resolutions + to build the correlation pyramid. + """ + + torch._assert(fmap1.shape == fmap2.shape, "Input feature maps should have the same shapes") + corr_volume = self._compute_corr_volume(fmap1, fmap2) + + batch_size, h, w, num_channels, _, _ = corr_volume.shape # _, _ = h, w + corr_volume = corr_volume.reshape(batch_size * h * w, num_channels, h, w) + self.corr_pyramid = [corr_volume] + for _ in range(self.num_levels - 1): + corr_volume = F.avg_pool2d(corr_volume, kernel_size=2, stride=2) + self.corr_pyramid.append(corr_volume) + + def index_pyramid(self, centroids_coords): + """Return correlation features by indexing from the pyramid.""" + neighborhood_side_len = 2 * self.radius + 1 # see note in __init__ about out_channels + di = torch.linspace(-self.radius, self.radius, neighborhood_side_len) + dj = torch.linspace(-self.radius, self.radius, neighborhood_side_len) + delta = torch.stack(torch.meshgrid(di, dj, indexing="ij"), dim=-1).to(centroids_coords.device) + delta = delta.view(1, neighborhood_side_len, neighborhood_side_len, 2) + + batch_size, _, h, w = centroids_coords.shape # _ = 2 + centroids_coords = centroids_coords.permute(0, 2, 3, 1).reshape(batch_size * h * w, 1, 1, 2) + + indexed_pyramid = [] + for corr_volume in self.corr_pyramid: + sampling_coords = centroids_coords + delta # end shape is (batch_size * h * w, side_len, side_len, 2) + indexed_corr_volume = grid_sample(corr_volume, sampling_coords, align_corners=True, mode="bilinear").view( + batch_size, h, w, -1 + ) + indexed_pyramid.append(indexed_corr_volume) + centroids_coords = centroids_coords / 2 + + corr_features = torch.cat(indexed_pyramid, dim=-1).permute(0, 3, 1, 2).contiguous() + + expected_output_shape = (batch_size, self.out_channels, h, w) + torch._assert( + corr_features.shape == expected_output_shape, + f"Output shape of index pyramid is incorrect. 
Should be {expected_output_shape}, got {corr_features.shape}", + ) + + return corr_features + + def _compute_corr_volume(self, fmap1, fmap2): + batch_size, num_channels, h, w = fmap1.shape + fmap1 = fmap1.view(batch_size, num_channels, h * w) + fmap2 = fmap2.view(batch_size, num_channels, h * w) + + corr = torch.matmul(fmap1.transpose(1, 2), fmap2) + corr = corr.view(batch_size, h, w, 1, h, w) + return corr / torch.sqrt(torch.tensor(num_channels)) + + +class RAFT(nn.Module): + def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block, mask_predictor=None): + """RAFT model from + `RAFT: Recurrent All Pairs Field Transforms for Optical Flow `_. + + args: + feature_encoder (nn.Module): The feature encoder. It must downsample the input by 8. + Its input is the concatenation of ``image1`` and ``image2``. + context_encoder (nn.Module): The context encoder. It must downsample the input by 8. + Its input is ``image1``. As in the original implementation, its output will be split into 2 parts: + + - one part will be used as the actual "context", passed to the recurrent unit of the ``update_block`` + - one part will be used to initialize the hidden state of the of the recurrent unit of + the ``update_block`` + + These 2 parts are split according to the ``hidden_state_size`` of the ``update_block``, so the output + of the ``context_encoder`` must be strictly greater than ``hidden_state_size``. + + corr_block (nn.Module): The correlation block, which creates a correlation pyramid from the output of the + ``feature_encoder``, and then indexes from this pyramid to create correlation features. It must expose + 2 methods: + + - a ``build_pyramid`` method that takes ``feature_map_1`` and ``feature_map_2`` as input (these are the + output of the ``feature_encoder``). + - a ``index_pyramid`` method that takes the coordinates of the centroid pixels as input, and returns + the correlation features. See paper section 3.2. + + It must expose an ``out_channels`` attribute. + + update_block (nn.Module): The update block, which contains the motion encoder, the recurrent unit, and the + flow head. It takes as input the hidden state of its recurrent unit, the context, the correlation + features, and the current predicted flow. It outputs an updated hidden state, and the ``delta_flow`` + prediction (see paper appendix A). It must expose a ``hidden_state_size`` attribute. + mask_predictor (nn.Module, optional): Predicts the mask that will be used to upsample the predicted flow. + The output channel must be 8 * 8 * 9 - see paper section 3.3, and Appendix B. + If ``None`` (default), the flow is upsampled using interpolation. 
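A minimal usage sketch (shapes are illustrative only; a CUDA device is assumed because the coordinate grids are currently created with ``.cuda()``, and H and W must be divisible by 8)::

            model = raft_small().eval().to("cuda")
            img1 = torch.rand(1, 3, 80, 72).cuda()
            img2 = torch.rand(1, 3, 80, 72).cuda()
            flow_predictions = model(img1, img2, num_flow_updates=12)
            flow = flow_predictions[-1]  # most refined estimate, shape (1, 2, 80, 72)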
+ """ + super().__init__() + + self.feature_encoder = feature_encoder + self.context_encoder = context_encoder + self.corr_block = corr_block + self.update_block = update_block + + self.mask_predictor = mask_predictor + + if not hasattr(self.update_block, "hidden_state_size"): + raise ValueError("The update_block parameter should expose a 'hidden_state_size' attribute.") + + def forward(self, image1, image2, num_flow_updates: int = 12): + + batch_size, _, h, w = image1.shape + torch._assert((h, w) == image2.shape[-2:], "input images should have the same shape") + torch._assert((h % 8 == 0) and (w % 8 == 0), "input image H and W should be divisible by 8") + + fmaps = self.feature_encoder(torch.cat([image1, image2], dim=0)) + fmap1, fmap2 = torch.chunk(fmaps, chunks=2, dim=0) + torch._assert(fmap1.shape[-2:] == (h // 8, w // 8), "The feature encoder should downsample H and W by 8") + + self.corr_block.build_pyramid(fmap1, fmap2) + + context_out = self.context_encoder(image1) + torch._assert(context_out.shape[-2:] == (h // 8, w // 8), "The context encoder should downsample H and W by 8") + + # As in the original paper, the actual output of the context encoder is split in 2 parts: + # - one part is used to initialize the hidden state of the recurent units of the update block + # - the rest is the "actual" context. + hidden_state_size = self.update_block.hidden_state_size + out_channels_context = context_out.shape[1] - hidden_state_size + torch._assert( + out_channels_context > 0, + f"The context encoder outputs {context_out.shape[1]} channels, but it should have at strictly more than" + f"hidden_state={hidden_state_size} channels", + ) + hidden_state, context = torch.split(context_out, [hidden_state_size, out_channels_context], dim=1) + hidden_state = torch.tanh(hidden_state) + context = F.relu(context) + + coords0 = make_coords_grid(batch_size, h // 8, w // 8).cuda() + coords1 = make_coords_grid(batch_size, h // 8, w // 8).cuda() + + flow_predictions = [] + for _ in range(num_flow_updates): + coords1 = coords1.detach() # Don't backpropagate gradients through this branch, see paper + corr_features = self.corr_block.index_pyramid(centroids_coords=coords1) + + flow = coords1 - coords0 + hidden_state, delta_flow = self.update_block(hidden_state, context, corr_features, flow) + + coords1 = coords1 + delta_flow + + up_mask = None if self.mask_predictor is None else self.mask_predictor(hidden_state) + upsampled_flow = upsample_flow(flow=(coords1 - coords0), up_mask=up_mask) + flow_predictions.append(upsampled_flow) + + return flow_predictions + + +def _raft( + *, + # Feature encoder + feature_encoder_layers, + feature_encoder_block, + feature_encoder_norm_layer, + # Context encoder + context_encoder_layers, + context_encoder_block, + context_encoder_norm_layer, + # Correlation block + corr_block_num_levels, + corr_block_radius, + # Motion encoder + motion_encoder_corr_layers, + motion_encoder_flow_layers, + motion_encoder_out_channels, + # Recurrent block + recurrent_block_hidden_state_size, + recurrent_block_kernel_size, + recurrent_block_padding, + # Flow Head + flow_head_hidden_size, + # Mask predictor + use_mask_predictor, + **kwargs, +): + feature_encoder = kwargs.pop("feature_encoder", None) or FeatureEncoder( + block=feature_encoder_block, layers=feature_encoder_layers, norm_layer=feature_encoder_norm_layer + ) + context_encoder = kwargs.pop("context_encoder", None) or FeatureEncoder( + block=context_encoder_block, layers=context_encoder_layers, norm_layer=context_encoder_norm_layer + ) + + 
corr_block = kwargs.pop("corr_block", None) or CorrBlock(num_levels=corr_block_num_levels, radius=corr_block_radius) + + update_block = kwargs.pop("update_block", None) + if update_block is None: + motion_encoder = MotionEncoder( + in_channels_corr=corr_block.out_channels, + corr_layers=motion_encoder_corr_layers, + flow_layers=motion_encoder_flow_layers, + out_channels=motion_encoder_out_channels, + ) + + # See comments in forward pass of RAFT class about why we split the output of the context encoder + out_channels_context = context_encoder_layers[-1] - recurrent_block_hidden_state_size + recurrent_block = RecurrentBlock( + input_size=motion_encoder.out_channels + out_channels_context, + hidden_size=recurrent_block_hidden_state_size, + kernel_size=recurrent_block_kernel_size, + padding=recurrent_block_padding, + ) + + flow_head = FlowHead(in_channels=recurrent_block_hidden_state_size, hidden_size=flow_head_hidden_size) + + update_block = UpdateBlock(motion_encoder=motion_encoder, recurrent_block=recurrent_block, flow_head=flow_head) + + mask_predictor = kwargs.pop("mask_predictor", None) + if mask_predictor is None and use_mask_predictor: + mask_predictor = MaskPredictor( + in_channels=recurrent_block_hidden_state_size, + hidden_size=256, + multiplier=0.25, # See comment in MaskPredictor about this + ) + + return RAFT( + feature_encoder=feature_encoder, + context_encoder=context_encoder, + corr_block=corr_block, + update_block=update_block, + mask_predictor=mask_predictor, + **kwargs, # not really needed, all params should be consumed by now + ) + + +def raft_large(*, pretrained=False, progress=True, **kwargs): + """RAFT model from + `RAFT: Recurrent All Pairs Field Transforms for Optical Flow `_. + + Args: + pretrained (bool): TODO not implemented yet + progress (bool): If True, displays a progress bar of the download to stderr + kwargs (dict): Parameters that will be passed to the :class:`~torchvision.models.optical_flow.RAFT` class + to override any default. + + Returns: + nn.Module: The model. + """ + + if pretrained: + raise ValueError("Pretrained weights aren't available yet") + + return _raft( + # Feature encoder + feature_encoder_layers=(64, 64, 96, 128, 256), + feature_encoder_block=ResidualBlock, + feature_encoder_norm_layer=InstanceNorm2d, + # Context encoder + context_encoder_layers=(64, 64, 96, 128, 256), + context_encoder_block=ResidualBlock, + context_encoder_norm_layer=BatchNorm2d, + # Correlation block + corr_block_num_levels=4, + corr_block_radius=4, + # Motion encoder + motion_encoder_corr_layers=(256, 192), + motion_encoder_flow_layers=(128, 64), + motion_encoder_out_channels=128, + # Recurrent block + recurrent_block_hidden_state_size=128, + recurrent_block_kernel_size=((1, 5), (5, 1)), + recurrent_block_padding=((0, 2), (2, 0)), + # Flow head + flow_head_hidden_size=256, + # Mask predictor + use_mask_predictor=True, + **kwargs, + ) + + +def raft_small(*, pretrained=False, progress=True, **kwargs): + """RAFT "small" model from + `RAFT: Recurrent All Pairs Field Transforms for Optical Flow `_. + + Args: + pretrained (bool): TODO not implemented yet + progress (bool): If True, displays a progress bar of the download to stderr + kwargs (dict): Parameters that will be passed to the :class:`~torchvision.models.optical_flow.RAFT` class + to override any default. + + Returns: + nn.Module: The model. 
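A sketch of overriding one of the default building blocks through ``kwargs``, mirroring what the test suite does (the values here are illustrative only)::

        corr_block = CorrBlock(num_levels=2, radius=2)
        model = raft_small(corr_block=corr_block)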
+ + """ + + if pretrained: + raise ValueError("Pretrained weights aren't available yet") + + return _raft( + # Feature encoder + feature_encoder_layers=(32, 32, 64, 96, 128), + feature_encoder_block=BottleneckBlock, + feature_encoder_norm_layer=InstanceNorm2d, + # Context encoder + context_encoder_layers=(32, 32, 64, 96, 160), + context_encoder_block=BottleneckBlock, + context_encoder_norm_layer=None, + # Correlation block + corr_block_num_levels=4, + corr_block_radius=3, + # Motion encoder + motion_encoder_corr_layers=(96,), + motion_encoder_flow_layers=(64, 32), + motion_encoder_out_channels=82, + # Recurrent block + recurrent_block_hidden_state_size=96, + recurrent_block_kernel_size=(3,), + recurrent_block_padding=(1,), + # Flow head + flow_head_hidden_size=128, + # Mask predictor + use_mask_predictor=False, + **kwargs, + ) From 93b26da3623c9134d64f026a234d97caab0675f2 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Mon, 6 Dec 2021 18:01:06 +0100 Subject: [PATCH 19/23] adding PR template (#4586) * adding PR template * update * Update PULL_REQUEST_TEMPLATE.md * Update PULL_REQUEST_TEMPLATE.md * Update .github/PULL_REQUEST_TEMPLATE.md Co-authored-by: Nicolas Hug Co-authored-by: Nicolas Hug --- .github/PULL_REQUEST_TEMPLATE.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 .github/PULL_REQUEST_TEMPLATE.md diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..f267cc7da50 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1 @@ + From c610fbd760d345c96729441240c6d00fa5ad8f24 Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Mon, 29 Nov 2021 23:42:26 +0000 Subject: [PATCH 20/23] add api usage log for io --- torchvision/io/image.py | 11 +++++++++++ torchvision/io/video.py | 4 ++++ 2 files changed, 15 insertions(+) diff --git a/torchvision/io/image.py b/torchvision/io/image.py index f835565016c..dd1801d6bd6 100644 --- a/torchvision/io/image.py +++ b/torchvision/io/image.py @@ -3,6 +3,7 @@ import torch from .._internally_replaced_utils import _get_extension_path +from ..utils import _log_api_usage_once try: @@ -41,6 +42,7 @@ def read_file(path: str) -> torch.Tensor: Returns: data (Tensor) """ + _log_api_usage_once("torchvision.io.read_file") data = torch.ops.image.read_file(path) return data @@ -54,6 +56,7 @@ def write_file(filename: str, data: torch.Tensor) -> None: filename (str): the path to the file to be written data (Tensor): the contents to be written to the output file """ + _log_api_usage_once("torchvision.io.write_file") torch.ops.image.write_file(filename, data) @@ -74,6 +77,7 @@ def decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGE Returns: output (Tensor[image_channels, image_height, image_width]) """ + _log_api_usage_once("torchvision.io.decode_png") output = torch.ops.image.decode_png(input, mode.value, False) return output @@ -93,6 +97,7 @@ def encode_png(input: torch.Tensor, compression_level: int = 6) -> torch.Tensor: Tensor[1]: A one dimensional int8 tensor that contains the raw bytes of the PNG file. """ + _log_api_usage_once("torchvision.io.encode_png") output = torch.ops.image.encode_png(input, compression_level) return output @@ -109,6 +114,7 @@ def write_png(input: torch.Tensor, filename: str, compression_level: int = 6): compression_level (int): Compression factor for the resulting file, it must be a number between 0 and 9. 
Default: 6 """ + _log_api_usage_once("torchvision.io.write_png") output = encode_png(input, compression_level) write_file(filename, output) @@ -137,6 +143,7 @@ def decode_jpeg( Returns: output (Tensor[image_channels, image_height, image_width]) """ + _log_api_usage_once("torchvision.io.decode_jpeg") device = torch.device(device) if device.type == "cuda": output = torch.ops.image.decode_jpeg_cuda(input, mode.value, device) @@ -160,6 +167,7 @@ def encode_jpeg(input: torch.Tensor, quality: int = 75) -> torch.Tensor: output (Tensor[1]): A one dimensional int8 tensor that contains the raw bytes of the JPEG file. """ + _log_api_usage_once("torchvision.io.encode_jpeg") if quality < 1 or quality > 100: raise ValueError("Image quality should be a positive number between 1 and 100") @@ -178,6 +186,7 @@ def write_jpeg(input: torch.Tensor, filename: str, quality: int = 75): quality (int): Quality of the resulting JPEG file, it must be a number between 1 and 100. Default: 75 """ + _log_api_usage_once("torchvision.io.write_jpeg") output = encode_jpeg(input, quality) write_file(filename, output) @@ -201,6 +210,7 @@ def decode_image(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHAN Returns: output (Tensor[image_channels, image_height, image_width]) """ + _log_api_usage_once("torchvision.io.decode_image") output = torch.ops.image.decode_image(input, mode.value) return output @@ -221,6 +231,7 @@ def read_image(path: str, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torc Returns: output (Tensor[image_channels, image_height, image_width]) """ + _log_api_usage_once("torchvision.io.read_image") data = read_file(path) return decode_image(data, mode) diff --git a/torchvision/io/video.py b/torchvision/io/video.py index 0ddd60a4586..cdb426d6d09 100644 --- a/torchvision/io/video.py +++ b/torchvision/io/video.py @@ -9,6 +9,7 @@ import numpy as np import torch +from ..utils import _log_api_usage_once from . import _video_opt @@ -77,6 +78,7 @@ def write_video( audio_codec (str): the name of the audio codec, i.e. "mp3", "aac", etc. audio_options (Dict): dictionary containing options to be passed into the PyAV audio stream """ + _log_api_usage_once("torchvision.io.write_video") _check_av_available() video_array = torch.as_tensor(video_array, dtype=torch.uint8).numpy() @@ -256,6 +258,7 @@ def read_video( aframes (Tensor[K, L]): the audio frames, where `K` is the number of channels and `L` is the number of points info (Dict): metadata for the video and audio. 
Can contain the fields video_fps (float) and audio_fps (int) """ + _log_api_usage_once("torchvision.io.read_video") from torchvision import get_video_backend @@ -374,6 +377,7 @@ def read_video_timestamps(filename: str, pts_unit: str = "pts") -> Tuple[List[in video_fps (float, optional): the frame rate for the video """ + _log_api_usage_once("torchvision.io.read_video_timestamps") from torchvision import get_video_backend if get_video_backend() != "pyav": From d227affadd1add0c61ce11fe395ae0b0303d3ff3 Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Tue, 30 Nov 2021 23:59:16 +0000 Subject: [PATCH 21/23] cover VideoReader --- torchvision/io/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/torchvision/io/__init__.py b/torchvision/io/__init__.py index 8ee832f43d7..f2ae6dff51e 100644 --- a/torchvision/io/__init__.py +++ b/torchvision/io/__init__.py @@ -2,6 +2,7 @@ import torch +from ..utils import _log_api_usage_once from ._video_opt import ( Timebase, VideoMetaData, @@ -106,6 +107,7 @@ class VideoReader: """ def __init__(self, path: str, stream: str = "video", num_threads: int = 0) -> None: + _log_api_usage_once(self) if not _has_video_opt(): raise RuntimeError( "Not compiled with video_reader support, " From ed7513d25308340c0f25d7f8ea8212eecc08841b Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Wed, 1 Dec 2021 00:46:32 +0000 Subject: [PATCH 22/23] cover c++ APIs --- torchvision/csrc/io/image/cpu/decode_jpeg.cpp | 1 + torchvision/csrc/io/image/cpu/decode_png.cpp | 1 + torchvision/csrc/io/image/cpu/encode_jpeg.cpp | 1 + torchvision/csrc/io/image/cpu/encode_png.cpp | 1 + torchvision/csrc/io/image/cpu/read_write_file.cpp | 2 ++ torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp | 1 + torchvision/csrc/io/video/video.cpp | 1 + torchvision/csrc/io/video_reader/video_reader.cpp | 4 ++++ 8 files changed, 12 insertions(+) diff --git a/torchvision/csrc/io/image/cpu/decode_jpeg.cpp b/torchvision/csrc/io/image/cpu/decode_jpeg.cpp index c6e971c3b12..dc60f5e8f71 100644 --- a/torchvision/csrc/io/image/cpu/decode_jpeg.cpp +++ b/torchvision/csrc/io/image/cpu/decode_jpeg.cpp @@ -70,6 +70,7 @@ static void torch_jpeg_set_source_mgr( } // namespace torch::Tensor decode_jpeg(const torch::Tensor& data, ImageReadMode mode) { + C10_LOG_API_USAGE_ONCE("torchvision.io.decode_jpeg"); // Check that the input tensor dtype is uint8 TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); // Check that the input tensor is 1-dimensional diff --git a/torchvision/csrc/io/image/cpu/decode_png.cpp b/torchvision/csrc/io/image/cpu/decode_png.cpp index 0df55daed68..0c33cbfadb2 100644 --- a/torchvision/csrc/io/image/cpu/decode_png.cpp +++ b/torchvision/csrc/io/image/cpu/decode_png.cpp @@ -23,6 +23,7 @@ torch::Tensor decode_png( const torch::Tensor& data, ImageReadMode mode, bool allow_16_bits) { + C10_LOG_API_USAGE_ONCE("torchvision.io.decode_png"); // Check that the input tensor dtype is uint8 TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); // Check that the input tensor is 1-dimensional diff --git a/torchvision/csrc/io/image/cpu/encode_jpeg.cpp b/torchvision/csrc/io/image/cpu/encode_jpeg.cpp index a8dbc7b2a28..be09694b28e 100644 --- a/torchvision/csrc/io/image/cpu/encode_jpeg.cpp +++ b/torchvision/csrc/io/image/cpu/encode_jpeg.cpp @@ -25,6 +25,7 @@ using JpegSizeType = size_t; using namespace detail; torch::Tensor encode_jpeg(const torch::Tensor& data, int64_t quality) { + C10_LOG_API_USAGE_ONCE("torchvision.io.encode_jpeg"); // Define compression structures and error handling 
struct jpeg_compress_struct cinfo {}; struct torch_jpeg_error_mgr jerr {}; diff --git a/torchvision/csrc/io/image/cpu/encode_png.cpp b/torchvision/csrc/io/image/cpu/encode_png.cpp index d28bad95890..655cf38ae26 100644 --- a/torchvision/csrc/io/image/cpu/encode_png.cpp +++ b/torchvision/csrc/io/image/cpu/encode_png.cpp @@ -63,6 +63,7 @@ void torch_png_write_data( } // namespace torch::Tensor encode_png(const torch::Tensor& data, int64_t compression_level) { + C10_LOG_API_USAGE_ONCE("torchvision.io.encode_png"); // Define compression structures and error handling png_structp png_write; png_infop info_ptr; diff --git a/torchvision/csrc/io/image/cpu/read_write_file.cpp b/torchvision/csrc/io/image/cpu/read_write_file.cpp index a0bb7df72d5..120ba34b65f 100644 --- a/torchvision/csrc/io/image/cpu/read_write_file.cpp +++ b/torchvision/csrc/io/image/cpu/read_write_file.cpp @@ -33,6 +33,7 @@ std::wstring utf8_decode(const std::string& str) { #endif torch::Tensor read_file(const std::string& filename) { + C10_LOG_API_USAGE_ONCE("torchvision.io.read_file"); #ifdef _WIN32 // According to // https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/stat-functions?view=vs-2019, @@ -76,6 +77,7 @@ torch::Tensor read_file(const std::string& filename) { } void write_file(const std::string& filename, torch::Tensor& data) { + C10_LOG_API_USAGE_ONCE("torchvision.io.write_file"); // Check that the input tensor is on CPU TORCH_CHECK(data.device() == torch::kCPU, "Input tensor should be on CPU"); diff --git a/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp b/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp index 68f63ced427..017fdebc9ef 100644 --- a/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp +++ b/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp @@ -33,6 +33,7 @@ torch::Tensor decode_jpeg_cuda( const torch::Tensor& data, ImageReadMode mode, torch::Device device) { + C10_LOG_API_USAGE_ONCE("torchvision.io.decode_jpeg_cuda"); TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); TORCH_CHECK( diff --git a/torchvision/csrc/io/video/video.cpp b/torchvision/csrc/io/video/video.cpp index d5a24398694..de7b557f7ae 100644 --- a/torchvision/csrc/io/video/video.cpp +++ b/torchvision/csrc/io/video/video.cpp @@ -157,6 +157,7 @@ void Video::_getDecoderParams( } // _get decoder params Video::Video(std::string videoPath, std::string stream, int64_t numThreads) { + C10_LOG_API_USAGE_ONCE("torchvision.io.Video"); // set number of threads global numThreads_ = numThreads; // parse stream information diff --git a/torchvision/csrc/io/video_reader/video_reader.cpp b/torchvision/csrc/io/video_reader/video_reader.cpp index 51b0750b431..3580387718d 100644 --- a/torchvision/csrc/io/video_reader/video_reader.cpp +++ b/torchvision/csrc/io/video_reader/video_reader.cpp @@ -583,6 +583,7 @@ torch::List read_video_from_memory( int64_t audioEndPts, int64_t audioTimeBaseNum, int64_t audioTimeBaseDen) { + C10_LOG_API_USAGE_ONCE("torchvision.io.read_video_from_memory"); return readVideo( false, input_video, @@ -627,6 +628,7 @@ torch::List read_video_from_file( int64_t audioEndPts, int64_t audioTimeBaseNum, int64_t audioTimeBaseDen) { + C10_LOG_API_USAGE_ONCE("torchvision.io.read_video_from_file"); torch::Tensor dummy_input_video = torch::ones({0}); return readVideo( true, @@ -653,10 +655,12 @@ torch::List read_video_from_file( } torch::List probe_video_from_memory(torch::Tensor input_video) { + C10_LOG_API_USAGE_ONCE("torchvision.io.probe_video_from_memory"); return probeVideo(false, input_video, ""); } 
torch::List probe_video_from_file(std::string videoPath) { + C10_LOG_API_USAGE_ONCE("torchvision.io.probe_video_from_file"); torch::Tensor dummy_input_video = torch::ones({0}); return probeVideo(true, dummy_input_video, videoPath); } From f3aba201c2dedc1f608a9b108467e510e1f23a32 Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Mon, 6 Dec 2021 22:48:01 +0000 Subject: [PATCH 23/23] add _cpp suffix to c++ APIs --- torchvision/csrc/io/image/cpu/decode_jpeg.cpp | 2 +- torchvision/csrc/io/image/cpu/decode_png.cpp | 2 +- torchvision/csrc/io/image/cpu/encode_jpeg.cpp | 2 +- torchvision/csrc/io/image/cpu/encode_png.cpp | 2 +- torchvision/csrc/io/image/cpu/read_write_file.cpp | 4 ++-- torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp | 2 +- torchvision/csrc/io/video/video.cpp | 2 +- torchvision/csrc/io/video_reader/video_reader.cpp | 8 ++++---- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/torchvision/csrc/io/image/cpu/decode_jpeg.cpp b/torchvision/csrc/io/image/cpu/decode_jpeg.cpp index dc60f5e8f71..0167ed70a64 100644 --- a/torchvision/csrc/io/image/cpu/decode_jpeg.cpp +++ b/torchvision/csrc/io/image/cpu/decode_jpeg.cpp @@ -70,7 +70,7 @@ static void torch_jpeg_set_source_mgr( } // namespace torch::Tensor decode_jpeg(const torch::Tensor& data, ImageReadMode mode) { - C10_LOG_API_USAGE_ONCE("torchvision.io.decode_jpeg"); + C10_LOG_API_USAGE_ONCE("torchvision.io.decode_jpeg_cpp"); // Check that the input tensor dtype is uint8 TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); // Check that the input tensor is 1-dimensional diff --git a/torchvision/csrc/io/image/cpu/decode_png.cpp b/torchvision/csrc/io/image/cpu/decode_png.cpp index 0c33cbfadb2..8ab0fed205c 100644 --- a/torchvision/csrc/io/image/cpu/decode_png.cpp +++ b/torchvision/csrc/io/image/cpu/decode_png.cpp @@ -23,7 +23,7 @@ torch::Tensor decode_png( const torch::Tensor& data, ImageReadMode mode, bool allow_16_bits) { - C10_LOG_API_USAGE_ONCE("torchvision.io.decode_png"); + C10_LOG_API_USAGE_ONCE("torchvision.io.decode_png_cpp"); // Check that the input tensor dtype is uint8 TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); // Check that the input tensor is 1-dimensional diff --git a/torchvision/csrc/io/image/cpu/encode_jpeg.cpp b/torchvision/csrc/io/image/cpu/encode_jpeg.cpp index be09694b28e..739783919ae 100644 --- a/torchvision/csrc/io/image/cpu/encode_jpeg.cpp +++ b/torchvision/csrc/io/image/cpu/encode_jpeg.cpp @@ -25,7 +25,7 @@ using JpegSizeType = size_t; using namespace detail; torch::Tensor encode_jpeg(const torch::Tensor& data, int64_t quality) { - C10_LOG_API_USAGE_ONCE("torchvision.io.encode_jpeg"); + C10_LOG_API_USAGE_ONCE("torchvision.io.encode_jpeg_cpp"); // Define compression structures and error handling struct jpeg_compress_struct cinfo {}; struct torch_jpeg_error_mgr jerr {}; diff --git a/torchvision/csrc/io/image/cpu/encode_png.cpp b/torchvision/csrc/io/image/cpu/encode_png.cpp index 655cf38ae26..ca308f357ff 100644 --- a/torchvision/csrc/io/image/cpu/encode_png.cpp +++ b/torchvision/csrc/io/image/cpu/encode_png.cpp @@ -63,7 +63,7 @@ void torch_png_write_data( } // namespace torch::Tensor encode_png(const torch::Tensor& data, int64_t compression_level) { - C10_LOG_API_USAGE_ONCE("torchvision.io.encode_png"); + C10_LOG_API_USAGE_ONCE("torchvision.io.encode_png_cpp"); // Define compression structures and error handling png_structp png_write; png_infop info_ptr; diff --git a/torchvision/csrc/io/image/cpu/read_write_file.cpp 
b/torchvision/csrc/io/image/cpu/read_write_file.cpp index 120ba34b65f..b1d1a48c4b9 100644 --- a/torchvision/csrc/io/image/cpu/read_write_file.cpp +++ b/torchvision/csrc/io/image/cpu/read_write_file.cpp @@ -33,7 +33,7 @@ std::wstring utf8_decode(const std::string& str) { #endif torch::Tensor read_file(const std::string& filename) { - C10_LOG_API_USAGE_ONCE("torchvision.io.read_file"); + C10_LOG_API_USAGE_ONCE("torchvision.io.read_file_cpp"); #ifdef _WIN32 // According to // https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/stat-functions?view=vs-2019, @@ -77,7 +77,7 @@ torch::Tensor read_file(const std::string& filename) { } void write_file(const std::string& filename, torch::Tensor& data) { - C10_LOG_API_USAGE_ONCE("torchvision.io.write_file"); + C10_LOG_API_USAGE_ONCE("torchvision.io.write_file_cpp"); // Check that the input tensor is on CPU TORCH_CHECK(data.device() == torch::kCPU, "Input tensor should be on CPU"); diff --git a/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp b/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp index 017fdebc9ef..37674d2b44d 100644 --- a/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp +++ b/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp @@ -33,7 +33,7 @@ torch::Tensor decode_jpeg_cuda( const torch::Tensor& data, ImageReadMode mode, torch::Device device) { - C10_LOG_API_USAGE_ONCE("torchvision.io.decode_jpeg_cuda"); + C10_LOG_API_USAGE_ONCE("torchvision.io.decode_jpeg_cuda_cpp"); TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); TORCH_CHECK( diff --git a/torchvision/csrc/io/video/video.cpp b/torchvision/csrc/io/video/video.cpp index de7b557f7ae..ea4d31628e6 100644 --- a/torchvision/csrc/io/video/video.cpp +++ b/torchvision/csrc/io/video/video.cpp @@ -157,7 +157,7 @@ void Video::_getDecoderParams( } // _get decoder params Video::Video(std::string videoPath, std::string stream, int64_t numThreads) { - C10_LOG_API_USAGE_ONCE("torchvision.io.Video"); + C10_LOG_API_USAGE_ONCE("torchvision.io.Video_cpp"); // set number of threads global numThreads_ = numThreads; // parse stream information diff --git a/torchvision/csrc/io/video_reader/video_reader.cpp b/torchvision/csrc/io/video_reader/video_reader.cpp index 3580387718d..6b1c70d0bed 100644 --- a/torchvision/csrc/io/video_reader/video_reader.cpp +++ b/torchvision/csrc/io/video_reader/video_reader.cpp @@ -583,7 +583,7 @@ torch::List read_video_from_memory( int64_t audioEndPts, int64_t audioTimeBaseNum, int64_t audioTimeBaseDen) { - C10_LOG_API_USAGE_ONCE("torchvision.io.read_video_from_memory"); + C10_LOG_API_USAGE_ONCE("torchvision.io.read_video_from_memory_cpp"); return readVideo( false, input_video, @@ -628,7 +628,7 @@ torch::List read_video_from_file( int64_t audioEndPts, int64_t audioTimeBaseNum, int64_t audioTimeBaseDen) { - C10_LOG_API_USAGE_ONCE("torchvision.io.read_video_from_file"); + C10_LOG_API_USAGE_ONCE("torchvision.io.read_video_from_file_cpp"); torch::Tensor dummy_input_video = torch::ones({0}); return readVideo( true, @@ -655,12 +655,12 @@ torch::List read_video_from_file( } torch::List probe_video_from_memory(torch::Tensor input_video) { - C10_LOG_API_USAGE_ONCE("torchvision.io.probe_video_from_memory"); + C10_LOG_API_USAGE_ONCE("torchvision.io.probe_video_from_memory_cpp"); return probeVideo(false, input_video, ""); } torch::List probe_video_from_file(std::string videoPath) { - C10_LOG_API_USAGE_ONCE("torchvision.io.probe_video_from_file"); + C10_LOG_API_USAGE_ONCE("torchvision.io.probe_video_from_file_cpp"); torch::Tensor dummy_input_video = 
torch::ones({0}); return probeVideo(true, dummy_input_video, videoPath); }
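
For reference, a minimal usage sketch of the `raft_large`/`raft_small` builders defined earlier in this series. It is not part of any patch above; it assumes the builders are exposed as `torchvision.models.optical_flow.raft_small`, that inputs are float batches of shape (N, 3, H, W) with H and W divisible by 8, and that the model's forward returns the list of iteratively refined flow predictions.

```python
# Illustrative only; module path and forward-return convention are assumptions,
# not something this patch series specifies.
import torch
from torchvision.models.optical_flow import raft_small

model = raft_small(pretrained=False).eval()  # pretrained=True raises for now (see docstring)

img1 = torch.rand(1, 3, 128, 128)  # H and W must be divisible by 8
img2 = torch.rand(1, 3, 128, 128)

with torch.no_grad():
    flows = model(img1, img2)      # list of (N, 2, H, W) flow estimates, coarse to refined

print(flows[-1].shape)             # final flow prediction, e.g. torch.Size([1, 2, 128, 128])
```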
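
The Python patches above import `_log_api_usage_once` from `torchvision.utils` and call it with either a string key (the `torchvision.io` functions) or an object (`self` in `VideoReader.__init__`); the C++ patches use the existing `C10_LOG_API_USAGE_ONCE` macro, with a `_cpp` suffix added in the last patch so Python and C++ entry points are counted separately. A hypothetical sketch of what such a helper could look like is below; the real implementation in `torchvision/utils.py` may differ, and only `torch._C._log_api_usage_once` is taken as given.

```python
# Hypothetical helper sketch, not the actual torchvision implementation.
import torch

def _log_api_usage_once(obj) -> None:
    if isinstance(obj, str):
        # The io functions pass ready-made keys such as "torchvision.io.read_file".
        name = obj
    else:
        # VideoReader passes `self`, so derive the key from the object's class.
        name = f"{obj.__class__.__module__}.{obj.__class__.__name__}"
    # Forward a one-shot usage event to PyTorch's built-in API-usage logger.
    torch._C._log_api_usage_once(name)
```

Because the underlying logger records each key at most once per process, the added calls are effectively free on repeated invocations.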
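
From a caller's perspective the instrumented I/O functions behave exactly as before; a short round-trip sketch (illustrative, not part of the patches):

```python
# Each instrumented function emits its usage key (e.g. "torchvision.io.encode_png")
# once per process; the return values and semantics are unchanged.
import torch
from torchvision.io import decode_png, encode_png, read_file, write_file

img = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)      # CHW uint8 image
write_file("roundtrip.png", encode_png(img))                     # encode + write to disk
decoded = decode_png(read_file("roundtrip.png"))                 # read raw bytes + decode
assert torch.equal(decoded, img)                                 # PNG is lossless
```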