diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000000..0390ed26d2 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,34 @@ +version: 2.1 + +# this allows you to use CircleCI's dynamic configuration feature +setup: true + +# the path-filtering orb is required to continue a pipeline based on +# the path of an updated fileset +orbs: + path-filtering: circleci/path-filtering@0.1.2 + +workflows: + # the always-run workflow is always triggered, regardless of the pipeline parameters. + always-run: + jobs: + # the path-filtering/filter job determines which pipeline + # parameters to update. + - path-filtering/filter: + name: check-updated-files + # 3-column, whitespace-delimited mapping. One mapping per + # line: + # + mapping: | + mmseg/.* lint_only false + requirements/.* lint_only false + tests/.* lint_only false + tools/.* lint_only false + configs/.* lint_only false + .circleci/.* lint_only false + base-revision: dev-1.x + # this is the path of the configuration we should trigger once + # path filtering and pipeline parameter value updates are + # complete. In this case, we are using the parent dynamic + # configuration itself. + config-path: .circleci/test.yml diff --git a/.circleci/docker/Dockerfile b/.circleci/docker/Dockerfile new file mode 100644 index 0000000000..b1d40e0e14 --- /dev/null +++ b/.circleci/docker/Dockerfile @@ -0,0 +1,12 @@ + +ARG PYTORCH="1.8.1" +ARG CUDA="10.2" +ARG CUDNN="7" + +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel + +# To fix GPG key error when running apt-get update +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + +RUN apt-get update && apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx diff --git a/.circleci/test.yml b/.circleci/test.yml new file mode 100644 index 0000000000..414e3c4ced --- /dev/null +++ b/.circleci/test.yml @@ -0,0 +1,190 @@ +version: 2.1 + +# the default pipeline parameters, which will be updated according to +# the results of the path-filtering orb +parameters: + lint_only: + type: boolean + default: true + +jobs: + lint: + docker: + - image: cimg/python:3.7.4 + steps: + - checkout + - run: + name: Install pre-commit hook + command: | + pip install pre-commit + pre-commit install + - run: + name: Linting + command: pre-commit run --all-files + - run: + name: Check docstring coverage + command: | + pip install interrogate + interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 75 mmseg + build_cpu: + parameters: + # The python version must match available image tags in + # https://circleci.com/developer/images/image/cimg/python + python: + type: string + torch: + type: string + torchvision: + type: string + docker: + - image: cimg/python:<< parameters.python >> + resource_class: large + steps: + - checkout + - run: + name: Install Libraries + command: | + sudo apt-get update + sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5 + - run: + name: Configure Python & pip + command: | + pip install --upgrade pip + pip install wheel + - run: + name: Install PyTorch + command: | + python -V + pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f 
https://download.pytorch.org/whl/torch_stable.html + - run: + name: Install mmseg dependencies + command: | + pip install git+https://github.com/open-mmlab/mmengine.git@main + pip install -U openmim + mim install 'mmcv>=2.0.0rc3' + pip install git+https://github.com/open-mmlab/mmclassification@dev-1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + pip install -r requirements/tests.txt -r requirements/optional.txt + - run: + name: Build and install + command: | + pip install -e . + - run: + name: Skip timm unittests and generate coverage report + command: | + python -m coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py + python -m coverage xml + python -m coverage report -m + build_cuda: + parameters: + torch: + type: string + cuda: + type: enum + enum: ["10.1", "10.2", "11.1"] + cudnn: + type: integer + default: 7 + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + # docker_layer_caching: true + resource_class: gpu.nvidia.small + steps: + - checkout + - run: + # Cloning repos in VM since Docker doesn't have access to the private key + name: Clone Repos + command: | + git clone -b main --depth 1 https://github.com/open-mmlab/mmengine.git /home/circleci/mmengine + git clone -b dev-1.x --depth 1 https://github.com/open-mmlab/mmclassification.git /home/circleci/mmclassification + git clone -b dev-3.x --depth 1 https://github.com/open-mmlab/mmdetection.git /home/circleci/mmdetection + - run: + name: Build Docker image + command: | + docker build .circleci/docker -t mmseg:gpu --build-arg PYTORCH=<< parameters.torch >> --build-arg CUDA=<< parameters.cuda >> --build-arg CUDNN=<< parameters.cudnn >> + docker run --gpus all -t -d -v /home/circleci/project:/mmseg -v /home/circleci/mmengine:/mmengine -v /home/circleci/mmclassification:/mmclassification -v /home/circleci/mmdetection:/mmdetection -w /mmseg --name mmseg mmseg:gpu + - run: + name: Install mmseg dependencies + command: | + docker exec mmseg pip install -e /mmengine + docker exec mmseg pip install -U openmim + docker exec mmseg mim install 'mmcv>=2.0.0rc3' + docker exec mmseg pip install -e /mmclassification + docker exec mmseg pip install -e /mmdetection + docker exec mmseg pip install -r requirements/tests.txt -r requirements/optional.txt + - run: + name: Build and install + command: | + docker exec mmseg pip install -e . 
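+      # run the unit tests inside the GPU container, skipping the timm backbone tests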
+ - run: + name: Run unittests but skip timm unittests + command: | + docker exec mmseg pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py +workflows: + pr_stage_lint: + when: << pipeline.parameters.lint_only >> + jobs: + - lint: + name: lint + filters: + branches: + ignore: + - dev-1.x + - 1.x + - master + pr_stage_test: + when: + not: + << pipeline.parameters.lint_only >> + jobs: + - lint: + name: lint + filters: + branches: + ignore: + - dev-1.x + - 1.x + - master + - build_cpu: + name: minimum_version_cpu + torch: 1.6.0 + torchvision: 0.7.0 + python: 3.6.9 # The lowest python 3.6.x version available on CircleCI images + requires: + - lint + - build_cpu: + name: maximum_version_cpu + torch: 1.13.0 + torchvision: 0.14.0 + python: 3.9.0 + requires: + - minimum_version_cpu + - hold: + type: approval + requires: + - maximum_version_cpu + - build_cuda: + name: mainstream_version_gpu + torch: 1.8.1 + # Use double quotation mark to explicitly specify its type + # as string instead of number + cuda: "10.2" + requires: + - hold + merge_stage_test: + when: + not: + << pipeline.parameters.lint_only >> + jobs: + - build_cuda: + name: minimum_version_gpu + torch: 1.6.0 + # Use double quotation mark to explicitly specify its type + # as string instead of number + cuda: "10.1" + filters: + branches: + only: + - dev-1.x + - 1.x + - master diff --git a/.dev/batch_test_list.py b/.dev/batch_test_list.py new file mode 100644 index 0000000000..c4fd8f97e4 --- /dev/null +++ b/.dev/batch_test_list.py @@ -0,0 +1,133 @@ +# yapf: disable +# Inference Speed is tested on NVIDIA V100 +hrnet = [ + dict( + config='configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py', + checkpoint='fcn_hr18s_512x512_160k_ade20k_20200614_214413-870f65ac.pth', # noqa + eval='mIoU', + metric=dict(mIoU=33.0), + ), + dict( + config='configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py', + checkpoint='fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth', # noqa + eval='mIoU', + metric=dict(mIoU=76.31), + ), + dict( + config='configs/hrnet/fcn_hr48_512x512_160k_ade20k.py', + checkpoint='fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth', + eval='mIoU', + metric=dict(mIoU=42.02), + ), + dict( + config='configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py', + checkpoint='fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth', # noqa + eval='mIoU', + metric=dict(mIoU=80.65), + ), +] +pspnet = [ + dict( + config='configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py', + checkpoint='pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth', # noqa + eval='mIoU', + metric=dict(mIoU=78.55), + ), + dict( + config='configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py', + checkpoint='pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth', # noqa + eval='mIoU', + metric=dict(mIoU=79.76), + ), + dict( + config='configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py', + checkpoint='pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth', # noqa + eval='mIoU', + metric=dict(mIoU=44.39), + ), + dict( + config='configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py', + checkpoint='pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth', # noqa + eval='mIoU', + metric=dict(mIoU=42.48), + ), +] +resnest = [ + dict( + config='configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py', + checkpoint='pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth', # noqa + eval='mIoU', + metric=dict(mIoU=45.44), + ), + dict( + 
config='configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py', + checkpoint='pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth', # noqa + eval='mIoU', + metric=dict(mIoU=78.57), + ), +] +fastscnn = [ + dict( + config='configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py', + checkpoint='fast_scnn_8x4_160k_lr0.12_cityscapes-0cec9937.pth', + eval='mIoU', + metric=dict(mIoU=70.96), + ) +] +deeplabv3plus = [ + dict( + config='configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py', # noqa + checkpoint='deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth', # noqa + eval='mIoU', + metric=dict(mIoU=80.98), + ), + dict( + config='configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py', # noqa + checkpoint='deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth', # noqa + eval='mIoU', + metric=dict(mIoU=80.97), + ), + dict( + config='configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py', # noqa + checkpoint='deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth', # noqa + eval='mIoU', + metric=dict(mIoU=80.09), + ), + dict( + config='configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py', # noqa + checkpoint='deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth', # noqa + eval='mIoU', + metric=dict(mIoU=79.83), + ), +] +vit = [ + dict( + config='configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py', + checkpoint='upernet_vit-b16_ln_mln_512x512_160k_ade20k-f444c077.pth', + eval='mIoU', + metric=dict(mIoU=47.73), + ), + dict( + config='configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py', + checkpoint='upernet_deit-s16_ln_mln_512x512_160k_ade20k-c0cd652f.pth', + eval='mIoU', + metric=dict(mIoU=43.52), + ), +] +fp16 = [ + dict( + config='configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py', # noqa + checkpoint='deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920-f1104f4b.pth', # noqa + eval='mIoU', + metric=dict(mIoU=80.46), + ) +] +swin = [ + dict( + config='configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py', # noqa + checkpoint='upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', # noqa + eval='mIoU', + metric=dict(mIoU=44.41), + ) +] +# yapf: enable diff --git a/.dev/batch_train_list.txt b/.dev/batch_train_list.txt new file mode 100644 index 0000000000..17d19932e6 --- /dev/null +++ b/.dev/batch_train_list.txt @@ -0,0 +1,19 @@ +configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py +configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py +configs/hrnet/fcn_hr48_512x512_160k_ade20k.py +configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py +configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py +configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py +configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py +configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py +configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py +configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py +configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py +configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py +configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py +configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py +configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py +configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py 
+configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py +configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py +configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py diff --git a/.dev/benchmark_evaluation.sh b/.dev/benchmark_evaluation.sh new file mode 100755 index 0000000000..68dc272dad --- /dev/null +++ b/.dev/benchmark_evaluation.sh @@ -0,0 +1,41 @@ +PARTITION=$1 +CHECKPOINT_DIR=$2 + +echo 'configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr18s_512x512_160k_ade20k configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py $CHECKPOINT_DIR/fcn_hr18s_512x512_160k_ade20k_20200614_214413-870f65ac.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr18s_512x512_160k_ade20k --cfg-options dist_params.port=28171 & +echo 'configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr18s_512x1024_160k_cityscapes configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py $CHECKPOINT_DIR/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr18s_512x1024_160k_cityscapes --cfg-options dist_params.port=28172 & +echo 'configs/hrnet/fcn_hr48_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr48_512x512_160k_ade20k configs/hrnet/fcn_hr48_512x512_160k_ade20k.py $CHECKPOINT_DIR/fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr48_512x512_160k_ade20k --cfg-options dist_params.port=28173 & +echo 'configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr48_512x1024_160k_cityscapes configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py $CHECKPOINT_DIR/fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr48_512x1024_160k_cityscapes --cfg-options dist_params.port=28174 & +echo 'configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r50-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r50-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28175 & +echo 'configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r101-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r101-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28176 & +echo 'configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r101-d8_512x512_160k_ade20k configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py $CHECKPOINT_DIR/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r101-d8_512x512_160k_ade20k --cfg-options dist_params.port=28177 & +echo 'configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh 
$PARTITION pspnet_r50-d8_512x512_160k_ade20k configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py $CHECKPOINT_DIR/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r50-d8_512x512_160k_ade20k --cfg-options dist_params.port=28178 & +echo 'configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_s101-d8_512x512_160k_ade20k configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py $CHECKPOINT_DIR/pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_s101-d8_512x512_160k_ade20k --cfg-options dist_params.port=28179 & +echo 'configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_s101-d8_512x1024_80k_cityscapes configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_s101-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28180 & +echo 'configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fast_scnn_lr0.12_8x4_160k_cityscapes configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py $CHECKPOINT_DIR/fast_scnn_8x4_160k_lr0.12_cityscapes-0cec9937.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fast_scnn_lr0.12_8x4_160k_cityscapes --cfg-options dist_params.port=28181 & +echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r101-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r101-d8_769x769_80k_cityscapes --cfg-options dist_params.port=28182 & +echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r101-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r101-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28183 & +echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r50-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r50-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28184 & +echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r50-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r50-d8_769x769_80k_cityscapes 
--cfg-options dist_params.port=28185 & +echo 'configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION upernet_vit-b16_ln_mln_512x512_160k_ade20k configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py $CHECKPOINT_DIR/upernet_vit-b16_ln_mln_512x512_160k_ade20k-f444c077.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/upernet_vit-b16_ln_mln_512x512_160k_ade20k --cfg-options dist_params.port=28186 & +echo 'configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION upernet_deit-s16_ln_mln_512x512_160k_ade20k configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py $CHECKPOINT_DIR/upernet_deit-s16_ln_mln_512x512_160k_ade20k-c0cd652f.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/upernet_deit-s16_ln_mln_512x512_160k_ade20k --cfg-options dist_params.port=28187 & +echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes-cc58bc8d.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes --cfg-options dist_params.port=28188 & +echo 'configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py $CHECKPOINT_DIR/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K --cfg-options dist_params.port=28189 & diff --git a/.dev/benchmark_inference.py b/.dev/benchmark_inference.py new file mode 100644 index 0000000000..b17c144aed --- /dev/null +++ b/.dev/benchmark_inference.py @@ -0,0 +1,149 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import hashlib +import logging +import os +import os.path as osp +import warnings +from argparse import ArgumentParser + +import requests +from mmengine import Config + +from mmseg.apis import inference_model, init_model, show_result_pyplot +from mmseg.utils import get_root_logger + +# ignore warnings when segmentors inference +warnings.filterwarnings('ignore') + + +def download_checkpoint(checkpoint_name, model_name, config_name, collect_dir): + """Download checkpoint and check if hash code is true.""" + url = f'https://download.openmmlab.com/mmsegmentation/v0.5/{model_name}/{config_name}/{checkpoint_name}' # noqa + + r = requests.get(url) + assert r.status_code != 403, f'{url} Access denied.' + + with open(osp.join(collect_dir, checkpoint_name), 'wb') as code: + code.write(r.content) + + true_hash_code = osp.splitext(checkpoint_name)[0].split('-')[1] + + # check hash code + with open(osp.join(collect_dir, checkpoint_name), 'rb') as fp: + sha256_cal = hashlib.sha256() + sha256_cal.update(fp.read()) + cur_hash_code = sha256_cal.hexdigest()[:8] + + assert true_hash_code == cur_hash_code, f'{url} download failed, ' + 'incomplete downloaded file or url invalid.' 
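+    # if the short hash does not match, remove the incomplete download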
+ + if cur_hash_code != true_hash_code: + os.remove(osp.join(collect_dir, checkpoint_name)) + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint_root', help='Checkpoint file root path') + parser.add_argument( + '-i', '--img', default='demo/demo.png', help='Image file') + parser.add_argument('-a', '--aug', action='store_true', help='aug test') + parser.add_argument('-m', '--model-name', help='model name to inference') + parser.add_argument( + '-s', '--show', action='store_true', help='show results') + parser.add_argument( + '-d', '--device', default='cuda:0', help='Device used for inference') + args = parser.parse_args() + return args + + +def inference(config_name, checkpoint, args, logger=None): + cfg = Config.fromfile(config_name) + if args.aug: + if 'flip' in cfg.data.test.pipeline[ + 1] and 'img_scale' in cfg.data.test.pipeline[1]: + cfg.data.test.pipeline[1].img_ratios = [ + 0.5, 0.75, 1.0, 1.25, 1.5, 1.75 + ] + cfg.data.test.pipeline[1].flip = True + else: + if logger is not None: + logger.error(f'{config_name}: unable to start aug test') + else: + print(f'{config_name}: unable to start aug test', flush=True) + + model = init_model(cfg, checkpoint, device=args.device) + # test a single image + result = inference_model(model, args.img) + + # show the results + if args.show: + show_result_pyplot(model, args.img, result) + return result + + +# Sample test whether the inference code is correct +def main(args): + config = Config.fromfile(args.config) + + if not os.path.exists(args.checkpoint_root): + os.makedirs(args.checkpoint_root, 0o775) + + # test single model + if args.model_name: + if args.model_name in config: + model_infos = config[args.model_name] + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + config_name = model_info['config'].strip() + print(f'processing: {config_name}', flush=True) + checkpoint = osp.join(args.checkpoint_root, + model_info['checkpoint'].strip()) + try: + # build the model from a config file and a checkpoint file + inference(config_name, checkpoint, args) + except Exception: + print(f'{config_name} test failed!') + continue + return + else: + raise RuntimeError('model name input error.') + + # test all model + logger = get_root_logger( + log_file='benchmark_inference_image.log', log_level=logging.ERROR) + + for model_name in config: + model_infos = config[model_name] + + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + print('processing: ', model_info['config'], flush=True) + config_path = model_info['config'].strip() + config_name = osp.splitext(osp.basename(config_path))[0] + checkpoint_name = model_info['checkpoint'].strip() + checkpoint = osp.join(args.checkpoint_root, checkpoint_name) + + # ensure checkpoint exists + try: + if not osp.exists(checkpoint): + download_checkpoint(checkpoint_name, model_name, + config_name.rstrip('.py'), + args.checkpoint_root) + except Exception: + logger.error(f'{checkpoint_name} download error') + continue + + # test model inference with checkpoint + try: + # build the model from a config file and a checkpoint file + inference(config_path, checkpoint, args, logger) + except Exception as e: + logger.error(f'{config_path} " : {repr(e)}') + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/.dev/benchmark_train.sh b/.dev/benchmark_train.sh new file mode 100755 index 0000000000..cde47a0a57 --- 
/dev/null +++ b/.dev/benchmark_train.sh @@ -0,0 +1,40 @@ +PARTITION=$1 + +echo 'configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr18s_512x512_160k_ade20k configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24727 --work-dir work_dirs/hrnet/fcn_hr18s_512x512_160k_ade20k >/dev/null & +echo 'configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr18s_512x1024_160k_cityscapes configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24728 --work-dir work_dirs/hrnet/fcn_hr18s_512x1024_160k_cityscapes >/dev/null & +echo 'configs/hrnet/fcn_hr48_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr48_512x512_160k_ade20k configs/hrnet/fcn_hr48_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24729 --work-dir work_dirs/hrnet/fcn_hr48_512x512_160k_ade20k >/dev/null & +echo 'configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr48_512x1024_160k_cityscapes configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24730 --work-dir work_dirs/hrnet/fcn_hr48_512x1024_160k_cityscapes >/dev/null & +echo 'configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r50-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24731 --work-dir work_dirs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes >/dev/null & +echo 'configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r101-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24732 --work-dir work_dirs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes >/dev/null & +echo 'configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r101-d8_512x512_160k_ade20k configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24733 --work-dir work_dirs/pspnet/pspnet_r101-d8_512x512_160k_ade20k >/dev/null & +echo 'configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r50-d8_512x512_160k_ade20k configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24734 --work-dir work_dirs/pspnet/pspnet_r50-d8_512x512_160k_ade20k >/dev/null & +echo 'configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_s101-d8_512x512_160k_ade20k configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24735 --work-dir work_dirs/resnest/pspnet_s101-d8_512x512_160k_ade20k >/dev/null & +echo 'configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION 
pspnet_s101-d8_512x1024_80k_cityscapes configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24736 --work-dir work_dirs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes >/dev/null & +echo 'configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fast_scnn_lr0.12_8x4_160k_cityscapes configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24737 --work-dir work_dirs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes >/dev/null & +echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r101-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24738 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes >/dev/null & +echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r101-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24739 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes >/dev/null & +echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r50-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24740 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes >/dev/null & +echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r50-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24741 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes >/dev/null & +echo 'configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION upernet_vit-b16_ln_mln_512x512_160k_ade20k configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24742 --work-dir work_dirs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k >/dev/null & +echo 'configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION upernet_deit-s16_ln_mln_512x512_160k_ade20k configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24743 --work-dir work_dirs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k >/dev/null & +echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24744 --work-dir 
work_dirs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes >/dev/null & +echo 'configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24745 --work-dir work_dirs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K >/dev/null & diff --git a/.dev/check_urls.py b/.dev/check_urls.py new file mode 100644 index 0000000000..58a1354ba5 --- /dev/null +++ b/.dev/check_urls.py @@ -0,0 +1,100 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging +import os +from argparse import ArgumentParser + +import requests +import yaml as yml + +from mmseg.utils import get_root_logger + + +def check_url(url): + """Check url response status. + + Args: + url (str): url needed to check. + + Returns: + int, bool: status code and check flag. + """ + flag = True + r = requests.head(url) + status_code = r.status_code + if status_code == 403 or status_code == 404: + flag = False + + return status_code, flag + + +def parse_args(): + parser = ArgumentParser('url valid check.') + parser.add_argument( + '-m', + '--model-name', + type=str, + help='Select the model needed to check') + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + model_name = args.model_name + + # yml path generate. + # If model_name is not set, script will check all of the models. + if model_name is not None: + yml_list = [(model_name, f'configs/{model_name}/{model_name}.yml')] + else: + # check all + yml_list = [(x, f'configs/{x}/{x}.yml') for x in os.listdir('configs/') + if x != '_base_'] + + logger = get_root_logger(log_file='url_check.log', log_level=logging.ERROR) + + for model_name, yml_path in yml_list: + # Default yaml loader unsafe. 
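+        # each configs/<model>/<model>.yml metafile lists its checkpoints under the 'Models' key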
+ model_infos = yml.load(open(yml_path), Loader=yml.CLoader)['Models'] + for model_info in model_infos: + config_name = model_info['Name'] + checkpoint_url = model_info['Weights'] + # checkpoint url check + status_code, flag = check_url(checkpoint_url) + if flag: + logger.info(f'checkpoint | {config_name} | {checkpoint_url} | ' + f'{status_code} valid') + else: + logger.error( + f'checkpoint | {config_name} | {checkpoint_url} | ' + f'{status_code} | error') + # log_json check + checkpoint_name = checkpoint_url.split('/')[-1] + model_time = '-'.join(checkpoint_name.split('-')[:-1]).replace( + f'{config_name}_', '') + # two style of log_json name + # use '_' to link model_time (will be deprecated) + log_json_url_1 = f'https://download.openmmlab.com/mmsegmentation/v0.5/{model_name}/{config_name}/{config_name}_{model_time}.log.json' # noqa + status_code_1, flag_1 = check_url(log_json_url_1) + # use '-' to link model_time + log_json_url_2 = f'https://download.openmmlab.com/mmsegmentation/v0.5/{model_name}/{config_name}/{config_name}-{model_time}.log.json' # noqa + status_code_2, flag_2 = check_url(log_json_url_2) + if flag_1 or flag_2: + if flag_1: + logger.info( + f'log.json | {config_name} | {log_json_url_1} | ' + f'{status_code_1} | valid') + else: + logger.info( + f'log.json | {config_name} | {log_json_url_2} | ' + f'{status_code_2} | valid') + else: + logger.error( + f'log.json | {config_name} | {log_json_url_1} & ' + f'{log_json_url_2} | {status_code_1} & {status_code_2} | ' + 'error') + + +if __name__ == '__main__': + main() diff --git a/.dev/clean_models.py b/.dev/clean_models.py deleted file mode 100644 index c9ac2acbc0..0000000000 --- a/.dev/clean_models.py +++ /dev/null @@ -1,125 +0,0 @@ -import argparse -import glob -import json -import os -import os.path as osp - -import mmcv - -# build schedule look-up table to automatically find the final model -SCHEDULES_LUT = { - '20ki': 20000, - '40ki': 40000, - '60ki': 60000, - '80ki': 80000, - '160ki': 160000 -} -RESULTS_LUT = ['mIoU', 'mAcc', 'aAcc'] - - -def get_final_iter(config): - iter_num = SCHEDULES_LUT[config.split('_')[-2]] - return iter_num - - -def get_final_results(log_json_path, iter_num): - result_dict = dict() - with open(log_json_path, 'r') as f: - for line in f.readlines(): - log_line = json.loads(line) - if 'mode' not in log_line.keys(): - continue - - if log_line['mode'] == 'train' and log_line['iter'] == iter_num: - result_dict['memory'] = log_line['memory'] - - if log_line['iter'] == iter_num: - result_dict.update({ - key: log_line[key] - for key in RESULTS_LUT if key in log_line - }) - return result_dict - - -def parse_args(): - parser = argparse.ArgumentParser(description='Gather benchmarked models') - parser.add_argument( - 'root', - type=str, - help='root path of benchmarked models to be gathered') - parser.add_argument( - 'config', - type=str, - help='root path of benchmarked configs to be gathered') - - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - models_root = args.root - config_name = args.config - - # find all models in the root directory to be gathered - raw_configs = list(mmcv.scandir(config_name, '.py', recursive=True)) - - # filter configs that is not trained in the experiments dir - used_configs = [] - for raw_config in raw_configs: - work_dir = osp.splitext(osp.basename(raw_config))[0] - if osp.exists(osp.join(models_root, work_dir)): - used_configs.append(work_dir) - print(f'Find {len(used_configs)} models to be gathered') - - # find final_ckpt and log file for trained 
each config - # and parse the best performance - model_infos = [] - for used_config in used_configs: - exp_dir = osp.join(models_root, used_config) - # check whether the exps is finished - final_iter = get_final_iter(used_config) - final_model = 'iter_{}.pth'.format(final_iter) - model_path = osp.join(exp_dir, final_model) - - # skip if the model is still training - if not osp.exists(model_path): - print(f'{used_config} not finished yet') - continue - - # get logs - log_json_path = glob.glob(osp.join(exp_dir, '*.log.json'))[0] - log_txt_path = glob.glob(osp.join(exp_dir, '*.log'))[0] - model_performance = get_final_results(log_json_path, final_iter) - - if model_performance is None: - print(f'{used_config} does not have performance') - continue - - model_time = osp.split(log_txt_path)[-1].split('.')[0] - model_infos.append( - dict( - config=used_config, - results=model_performance, - iters=final_iter, - model_time=model_time, - log_json_path=osp.split(log_json_path)[-1])) - - # publish model for each checkpoint - for model in model_infos: - - model_name = osp.split(model['config'])[-1].split('.')[0] - - model_name += '_' + model['model_time'] - for checkpoints in mmcv.scandir( - osp.join(models_root, model['config']), suffix='.pth'): - if checkpoints.endswith(f"iter_{model['iters']}.pth" - ) or checkpoints.endswith('latest.pth'): - continue - print('removing {}'.format( - osp.join(models_root, model['config'], checkpoints))) - os.remove(osp.join(models_root, model['config'], checkpoints)) - - -if __name__ == '__main__': - main() diff --git a/.dev/gather_benchmark_evaluation_results.py b/.dev/gather_benchmark_evaluation_results.py new file mode 100644 index 0000000000..fec83f133a --- /dev/null +++ b/.dev/gather_benchmark_evaluation_results.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
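+# Gather the benchmark evaluation jsons under the work dir and diff them against the metrics recorded in the config.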
+import argparse +import glob +import os.path as osp + +from mmengine import Config +from mmengine.fileio import dump, load + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Gather benchmarked model evaluation results') + parser.add_argument('config', help='test config file path') + parser.add_argument( + 'root', + type=str, + help='root path of benchmarked models to be gathered') + parser.add_argument( + '--out', + type=str, + default='benchmark_evaluation_info.json', + help='output path of gathered metrics and compared ' + 'results to be stored') + + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + root_path = args.root + metrics_out = args.out + result_dict = {} + + cfg = Config.fromfile(args.config) + + for model_key in cfg: + model_infos = cfg[model_key] + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + previous_metrics = model_info['metric'] + config = model_info['config'].strip() + fname, _ = osp.splitext(osp.basename(config)) + + # Load benchmark evaluation json + metric_json_dir = osp.join(root_path, fname) + if not osp.exists(metric_json_dir): + print(f'{metric_json_dir} not existed.') + continue + + json_list = glob.glob(osp.join(metric_json_dir, '*.json')) + if len(json_list) == 0: + print(f'There is no eval json in {metric_json_dir}.') + continue + + log_json_path = list(sorted(json_list))[-1] + metric = load(log_json_path) + if config not in metric.get('config', {}): + print(f'{config} not included in {log_json_path}') + continue + + # Compare between new benchmark results and previous metrics + differential_results = dict() + new_metrics = dict() + for record_metric_key in previous_metrics: + if record_metric_key not in metric['metric']: + raise KeyError('record_metric_key not exist, please ' + 'check your config') + old_metric = previous_metrics[record_metric_key] + new_metric = round(metric['metric'][record_metric_key] * 100, + 2) + + differential = new_metric - old_metric + flag = '+' if differential > 0 else '-' + differential_results[ + record_metric_key] = f'{flag}{abs(differential):.2f}' + new_metrics[record_metric_key] = new_metric + + result_dict[config] = dict( + differential=differential_results, + previous=previous_metrics, + new=new_metrics) + + if metrics_out: + dump(result_dict, metrics_out, indent=4) + print('===================================') + for config_name, metrics in result_dict.items(): + print(config_name, metrics) + print('===================================') diff --git a/.dev/gather_benchmark_train_results.py b/.dev/gather_benchmark_train_results.py new file mode 100644 index 0000000000..f801a0dde5 --- /dev/null +++ b/.dev/gather_benchmark_train_results.py @@ -0,0 +1,100 @@ +import argparse +import glob +import os.path as osp + +from gather_models import get_final_results +from mmengine import Config +from mmengine.fileio import dump + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Gather benchmarked models train results') + parser.add_argument('config', help='test config file path') + parser.add_argument( + 'root', + type=str, + help='root path of benchmarked models to be gathered') + parser.add_argument( + '--out', + type=str, + default='benchmark_train_info.json', + help='output path of gathered metrics to be stored') + + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + root_path = args.root + metrics_out = args.out + + evaluation_cfg = 
Config.fromfile(args.config) + + result_dict = {} + for model_key in evaluation_cfg: + model_infos = evaluation_cfg[model_key] + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + config = model_info['config'] + + # benchmark train dir + model_name = osp.split(osp.dirname(config))[1] + config_name = osp.splitext(osp.basename(config))[0] + exp_dir = osp.join(root_path, model_name, config_name) + if not osp.exists(exp_dir): + print(f'{config} hasn\'t {exp_dir}') + continue + + # parse config + cfg = Config.fromfile(config) + total_iters = cfg.runner.max_iters + exp_metric = cfg.evaluation.metric + if not isinstance(exp_metric, list): + exp_metrics = [exp_metric] + + # determine whether total_iters ckpt exists + ckpt_path = f'iter_{total_iters}.pth' + if not osp.exists(osp.join(exp_dir, ckpt_path)): + print(f'{config} hasn\'t {ckpt_path}') + continue + + # only the last log json counts + log_json_path = list( + sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1] + + # extract metric value + model_performance = get_final_results(log_json_path, total_iters) + if model_performance is None: + print(f'log file error: {log_json_path}') + continue + + differential_results = dict() + old_results = dict() + new_results = dict() + for metric_key in model_performance: + if metric_key in ['mIoU']: + metric = round(model_performance[metric_key] * 100, 2) + old_metric = model_info['metric'][metric_key] + old_results[metric_key] = old_metric + new_results[metric_key] = metric + differential = metric - old_metric + flag = '+' if differential > 0 else '-' + differential_results[ + metric_key] = f'{flag}{abs(differential):.2f}' + result_dict[config] = dict( + differential_results=differential_results, + old_results=old_results, + new_results=new_results, + ) + + # 4 save or print results + if metrics_out: + dump(result_dict, metrics_out, indent=4) + print('===================================') + for config_name, metrics in result_dict.items(): + print(config_name, metrics) + print('===================================') diff --git a/.dev/gather_models.py b/.dev/gather_models.py index 1899195d7d..fe6c3901c8 100644 --- a/.dev/gather_models.py +++ b/.dev/gather_models.py @@ -1,18 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. import argparse import glob +import hashlib import json import os import os.path as osp import shutil -import subprocess -import mmcv import torch +from mmengine import Config +from mmengine.fileio import dump +from mmengine.utils import mkdir_or_exist, scandir # build schedule look-up table to automatically find the final model RESULTS_LUT = ['mIoU', 'mAcc', 'aAcc'] +def calculate_file_sha256(file_path): + """calculate file sha256 hash code.""" + with open(file_path, 'rb') as fp: + sha256_cal = hashlib.sha256() + sha256_cal.update(fp.read()) + return sha256_cal.hexdigest() + + def process_checkpoint(in_file, out_file): checkpoint = torch.load(in_file, map_location='cpu') # remove optimizer for smaller file size @@ -21,10 +32,17 @@ def process_checkpoint(in_file, out_file): # if it is necessary to remove some sensitive data in checkpoint['meta'], # add the code here. torch.save(checkpoint, out_file) - sha = subprocess.check_output(['sha256sum', out_file]).decode() - final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) - subprocess.Popen(['mv', out_file, final_file]) - return final_file + # The hash code calculation and rename command differ on different system + # platform. 
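+    # append the first 8 hex chars of the checkpoint's sha256 to the published file name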
+ sha = calculate_file_sha256(out_file) + final_file = out_file.rstrip('.pth') + f'-{sha[:8]}.pth' + os.rename(out_file, final_file) + + # Remove prefix and suffix + final_file_name = osp.split(final_file)[1] + final_file_name = osp.splitext(final_file_name)[0] + + return final_file_name def get_final_iter(config): @@ -35,40 +53,43 @@ def get_final_iter(config): def get_final_results(log_json_path, iter_num): result_dict = dict() - with open(log_json_path, 'r') as f: + last_iter = 0 + with open(log_json_path) as f: for line in f.readlines(): log_line = json.loads(line) if 'mode' not in log_line.keys(): continue - if log_line['mode'] == 'train' and log_line['iter'] == iter_num: - result_dict['memory'] = log_line['memory'] - - if log_line['iter'] == iter_num: + # When evaluation, the 'iter' of new log json is the evaluation + # steps on single gpu. + flag1 = ('aAcc' in log_line) or (log_line['mode'] == 'val') + flag2 = (last_iter == iter_num - 50) or (last_iter == iter_num) + if flag1 and flag2: result_dict.update({ key: log_line[key] for key in RESULTS_LUT if key in log_line }) return result_dict + last_iter = log_line['iter'] + def parse_args(): parser = argparse.ArgumentParser(description='Gather benchmarked models') parser.add_argument( - 'root', - type=str, - help='root path of benchmarked models to be gathered') + '-f', '--config-name', type=str, help='Process the selected config.') parser.add_argument( - 'config', + '-w', + '--work-dir', + default='work_dirs/', type=str, - help='root path of benchmarked configs to be gathered') + help='Ckpt storage root folder of benchmarked models to be gathered.') parser.add_argument( - 'out_dir', + '-c', + '--collect-dir', + default='work_dirs/gather', type=str, - help='output path of gathered models to be stored') - parser.add_argument('out_file', type=str, help='the output json file name') - parser.add_argument( - '--filter', type=str, nargs='+', default=[], help='config filter') + help='Ckpt collect root folder of gathered models.') parser.add_argument( '--all', action='store_true', help='whether include .py and .log') @@ -78,37 +99,33 @@ def parse_args(): def main(): args = parse_args() - models_root = args.root - models_out = args.out_dir - config_name = args.config - mmcv.mkdir_or_exist(models_out) + work_dir = args.work_dir + collect_dir = args.collect_dir + selected_config_name = args.config_name + mkdir_or_exist(collect_dir) # find all models in the root directory to be gathered - raw_configs = list(mmcv.scandir(config_name, '.py', recursive=True)) + raw_configs = list(scandir('./configs', '.py', recursive=True)) # filter configs that is not trained in the experiments dir used_configs = [] for raw_config in raw_configs: - work_dir = osp.splitext(osp.basename(raw_config))[0] - if osp.exists(osp.join(models_root, work_dir)): - used_configs.append((work_dir, raw_config)) + config_name = osp.splitext(osp.basename(raw_config))[0] + if osp.exists(osp.join(work_dir, config_name)): + if (selected_config_name is None + or selected_config_name == config_name): + used_configs.append(raw_config) print(f'Find {len(used_configs)} models to be gathered') # find final_ckpt and log file for trained each config # and parse the best performance model_infos = [] - for used_config, raw_config in used_configs: - bypass = True - for p in args.filter: - if p in used_config: - bypass = False - break - if bypass: - continue - exp_dir = osp.join(models_root, used_config) + for used_config in used_configs: + config_name = osp.splitext(osp.basename(used_config))[0] + 
exp_dir = osp.join(work_dir, config_name) # check whether the exps is finished final_iter = get_final_iter(used_config) - final_model = 'iter_{}.pth'.format(final_iter) + final_model = f'iter_{final_iter}.pth' model_path = osp.join(exp_dir, final_model) # skip if the model is still training @@ -133,8 +150,7 @@ def main(): model_time = osp.split(log_json_path)[-1].split('.')[0] model_infos.append( dict( - config=used_config, - raw_config=raw_config, + config_name=config_name, results=model_performance, iters=final_iter, model_time=model_time, @@ -143,13 +159,12 @@ def main(): # publish model for each checkpoint publish_model_infos = [] for model in model_infos: - model_publish_dir = osp.join(models_out, - model['raw_config'].rstrip('.py')) - model_name = osp.split(model['config'])[-1].split('.')[0] + config_name = model['config_name'] + model_publish_dir = osp.join(collect_dir, config_name) publish_model_path = osp.join(model_publish_dir, - model_name + '_' + model['model_time']) - trained_model_path = osp.join(models_root, model['config'], + config_name + '_' + model['model_time']) + trained_model_path = osp.join(work_dir, config_name, 'iter_{}.pth'.format(model['iters'])) if osp.exists(model_publish_dir): for file in os.listdir(model_publish_dir): @@ -162,35 +177,36 @@ def main(): print(f'dir {model_publish_dir} exists, no model found') else: - mmcv.mkdir_or_exist(model_publish_dir) + mkdir_or_exist(model_publish_dir) # convert model final_model_path = process_checkpoint(trained_model_path, publish_model_path) model['model_path'] = final_model_path - new_json_path = f'{model_name}-{model["log_json_path"]}' + new_json_path = f'{config_name}_{model["log_json_path"]}' # copy log shutil.copy( - osp.join(models_root, model['config'], model['log_json_path']), + osp.join(work_dir, config_name, model['log_json_path']), osp.join(model_publish_dir, new_json_path)) + if args.all: new_txt_path = new_json_path.rstrip('.json') shutil.copy( - osp.join(models_root, model['config'], + osp.join(work_dir, config_name, model['log_json_path'].rstrip('.json')), osp.join(model_publish_dir, new_txt_path)) if args.all: # copy config to guarantee reproducibility - raw_config = osp.join(config_name, model['raw_config']) - mmcv.Config.fromfile(raw_config).dump( + raw_config = osp.join('./configs', f'{config_name}.py') + Config.fromfile(raw_config).dump( osp.join(model_publish_dir, osp.basename(raw_config))) publish_model_infos.append(model) models = dict(models=publish_model_infos) - mmcv.dump(models, osp.join(models_out, args.out_file)) + dump(models, osp.join(collect_dir, 'model_infos.json'), indent=4) if __name__ == '__main__': diff --git a/.dev/generate_benchmark_evaluation_script.py b/.dev/generate_benchmark_evaluation_script.py new file mode 100644 index 0000000000..bdc5a8fca0 --- /dev/null +++ b/.dev/generate_benchmark_evaluation_script.py @@ -0,0 +1,114 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
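+# Turn the model list config into .dev/benchmark_evaluation.sh: one slurm_test.sh command per checkpoint, each on its own dist port.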
+import argparse +import os.path as osp + +from mmengine import Config + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert benchmark test model list to script') + parser.add_argument('config', help='test config file path') + parser.add_argument('--port', type=int, default=28171, help='dist port') + parser.add_argument( + '--work-dir', + default='work_dirs/benchmark_evaluation', + help='the dir to save metric') + parser.add_argument( + '--out', + type=str, + default='.dev/benchmark_evaluation.sh', + help='path to save model benchmark script') + + args = parser.parse_args() + return args + + +def process_model_info(model_info, work_dir): + config = model_info['config'].strip() + fname, _ = osp.splitext(osp.basename(config)) + job_name = fname + checkpoint = model_info['checkpoint'].strip() + work_dir = osp.join(work_dir, fname) + if not isinstance(model_info['eval'], list): + evals = [model_info['eval']] + else: + evals = model_info['eval'] + eval = ' '.join(evals) + return dict( + config=config, + job_name=job_name, + checkpoint=checkpoint, + work_dir=work_dir, + eval=eval) + + +def create_test_bash_info(commands, model_test_dict, port, script_name, + partition): + config = model_test_dict['config'] + job_name = model_test_dict['job_name'] + checkpoint = model_test_dict['checkpoint'] + work_dir = model_test_dict['work_dir'] + eval = model_test_dict['eval'] + + echo_info = f'\necho \'{config}\' &' + commands.append(echo_info) + commands.append('\n') + + command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \ + f'CPUS_PER_TASK=2 {script_name} ' + + command_info += f'{partition} ' + command_info += f'{job_name} ' + command_info += f'{config} ' + command_info += f'$CHECKPOINT_DIR/{checkpoint} ' + + command_info += f'--eval {eval} ' + command_info += f'--work-dir {work_dir} ' + command_info += f'--cfg-options dist_params.port={port} ' + command_info += '&' + + commands.append(command_info) + + +def main(): + args = parse_args() + if args.out: + out_suffix = args.out.split('.')[-1] + assert args.out.endswith('.sh'), \ + f'Expected out file path suffix is .sh, but get .{out_suffix}' + + commands = [] + partition_name = 'PARTITION=$1' + commands.append(partition_name) + commands.append('\n') + + checkpoint_root = 'CHECKPOINT_DIR=$2' + commands.append(checkpoint_root) + commands.append('\n') + + script_name = osp.join('tools', 'slurm_test.sh') + port = args.port + work_dir = args.work_dir + + cfg = Config.fromfile(args.config) + + for model_key in cfg: + model_infos = cfg[model_key] + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + print('processing: ', model_info['config']) + model_test_dict = process_model_info(model_info, work_dir) + create_test_bash_info(commands, model_test_dict, port, script_name, + '$PARTITION') + port += 1 + + command_str = ''.join(commands) + if args.out: + with open(args.out, 'w') as f: + f.write(command_str + '\n') + + +if __name__ == '__main__': + main() diff --git a/.dev/generate_benchmark_train_script.py b/.dev/generate_benchmark_train_script.py new file mode 100644 index 0000000000..4e6fa181ff --- /dev/null +++ b/.dev/generate_benchmark_train_script.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
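+# Turn the config list txt into .dev/benchmark_train.sh: one slurm_train.sh command per config, each on its own dist port.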
+import argparse +import os.path as osp + +# Default using 4 gpu when training +config_8gpu_list = [ + 'configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py', # noqa + 'configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py', + 'configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py', +] + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert benchmark model json to script') + parser.add_argument( + 'txt_path', type=str, help='txt path output by benchmark_filter') + parser.add_argument('--port', type=int, default=24727, help='dist port') + parser.add_argument( + '--out', + type=str, + default='.dev/benchmark_train.sh', + help='path to save model benchmark script') + + args = parser.parse_args() + return args + + +def create_train_bash_info(commands, config, script_name, partition, port): + cfg = config.strip() + + # print cfg name + echo_info = f'echo \'{cfg}\' &' + commands.append(echo_info) + commands.append('\n') + + _, model_name = osp.split(osp.dirname(cfg)) + config_name, _ = osp.splitext(osp.basename(cfg)) + # default setting + if cfg in config_8gpu_list: + command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \ + f'CPUS_PER_TASK=2 {script_name} ' + else: + command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \ + f'CPUS_PER_TASK=2 {script_name} ' + command_info += f'{partition} ' + command_info += f'{config_name} ' + command_info += f'{cfg} ' + command_info += f'--cfg-options ' \ + f'checkpoint_config.max_keep_ckpts=1 ' \ + f'dist_params.port={port} ' + command_info += f'--work-dir work_dirs/{model_name}/{config_name} ' + # Let the script shut up + command_info += '>/dev/null &' + + commands.append(command_info) + commands.append('\n') + + +def main(): + args = parse_args() + if args.out: + out_suffix = args.out.split('.')[-1] + assert args.out.endswith('.sh'), \ + f'Expected out file path suffix is .sh, but get .{out_suffix}' + + root_name = './tools' + script_name = osp.join(root_name, 'slurm_train.sh') + port = args.port + partition_name = 'PARTITION=$1' + + commands = [] + commands.append(partition_name) + commands.append('\n') + commands.append('\n') + + with open(args.txt_path) as f: + model_cfgs = f.readlines() + for i, cfg in enumerate(model_cfgs): + create_train_bash_info(commands, cfg, script_name, '$PARTITION', + port) + port += 1 + + command_str = ''.join(commands) + if args.out: + with open(args.out, 'w') as f: + f.write(command_str) + + +if __name__ == '__main__': + main() diff --git a/.dev/generate_table.py b/.dev/generate_table.py deleted file mode 100644 index 25142caee0..0000000000 --- a/.dev/generate_table.py +++ /dev/null @@ -1,152 +0,0 @@ -import argparse -import csv -import glob -import json -import os.path as osp -from collections import OrderedDict - -import mmcv - -# build schedule look-up table to automatically find the final model -RESULTS_LUT = ['mIoU', 'mAcc', 'aAcc'] - - -def get_final_iter(config): - iter_num = config.split('_')[-2] - assert iter_num.endswith('ki') - return int(iter_num[:-2]) * 1000 - - -def get_final_results(log_json_path, iter_num): - result_dict = dict() - with open(log_json_path, 'r') as f: - for line in f.readlines(): - log_line = json.loads(line) - if 'mode' not in log_line.keys(): - continue - - if log_line['mode'] == 'train' and log_line[ - 'iter'] == iter_num - 50: - result_dict['memory'] = log_line['memory'] - - if log_line['iter'] == iter_num: - result_dict.update({ - key: log_line[key] * 100 - for key in RESULTS_LUT if key in log_line - }) - return result_dict - - -def 
get_total_time(log_json_path, iter_num): - - def convert(seconds): - hour = seconds // 3600 - seconds %= 3600 - minutes = seconds // 60 - seconds %= 60 - - return f'{hour:d}:{minutes:2d}:{seconds:2d}' - - time_dict = dict() - with open(log_json_path, 'r') as f: - last_iter = 0 - total_sec = 0 - for line in f.readlines(): - log_line = json.loads(line) - if 'mode' not in log_line.keys(): - continue - - if log_line['mode'] == 'train': - cur_iter = log_line['iter'] - total_sec += (cur_iter - last_iter) * log_line['time'] - last_iter = cur_iter - time_dict['time'] = convert(int(total_sec)) - - return time_dict - - -def parse_args(): - parser = argparse.ArgumentParser(description='Gather benchmarked models') - parser.add_argument( - 'root', - type=str, - help='root path of benchmarked models to be gathered') - parser.add_argument( - 'config', - type=str, - help='root path of benchmarked configs to be gathered') - parser.add_argument( - 'out', type=str, help='output path of gathered models to be stored') - - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - models_root = args.root - models_out = args.out - config_name = args.config - mmcv.mkdir_or_exist(models_out) - - # find all models in the root directory to be gathered - raw_configs = list(mmcv.scandir(config_name, '.py', recursive=True)) - - # filter configs that is not trained in the experiments dir - exp_dirs = [] - for raw_config in raw_configs: - work_dir = osp.splitext(osp.basename(raw_config))[0] - if osp.exists(osp.join(models_root, work_dir)): - exp_dirs.append(work_dir) - print(f'Find {len(exp_dirs)} models to be gathered') - - # find final_ckpt and log file for trained each config - # and parse the best performance - model_infos = [] - for work_dir in exp_dirs: - exp_dir = osp.join(models_root, work_dir) - # check whether the exps is finished - final_iter = get_final_iter(work_dir) - final_model = 'iter_{}.pth'.format(final_iter) - model_path = osp.join(exp_dir, final_model) - - # skip if the model is still training - if not osp.exists(model_path): - print(f'{model_path} not finished yet') - continue - - # get logs - log_json_path = glob.glob(osp.join(exp_dir, '*.log.json'))[0] - model_performance = get_final_results(log_json_path, final_iter) - - if model_performance is None: - continue - - head = work_dir.split('_')[0] - backbone = work_dir.split('_')[1] - crop_size = work_dir.split('_')[-3] - dataset = work_dir.split('_')[-1] - model_info = OrderedDict( - head=head, - backbone=backbone, - crop_size=crop_size, - dataset=dataset, - iters=f'{final_iter//1000}ki') - model_info.update(model_performance) - model_time = get_total_time(log_json_path, final_iter) - model_info.update(model_time) - model_info['config'] = work_dir - model_infos.append(model_info) - - with open( - osp.join(models_out, 'models_table.csv'), 'w', - newline='') as csvfile: - writer = csv.writer( - csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL) - writer.writerow(model_infos[0].keys()) - for model_info in model_infos: - writer.writerow(model_info.values()) - - -if __name__ == '__main__': - main() diff --git a/.dev/log_collector/example_config.py b/.dev/log_collector/example_config.py new file mode 100644 index 0000000000..bc2b4d6c1d --- /dev/null +++ b/.dev/log_collector/example_config.py @@ -0,0 +1,18 @@ +work_dir = '../../work_dirs' +metric = 'mIoU' + +# specify the log files we would like to collect in `log_items` +log_items = [ + 'segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup', + 
'segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr', + 'segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr', + 'segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr' +] +# or specify ignore_keywords, then the folders whose name contain +# `'segformer'` won't be collected +# ignore_keywords = ['segformer'] + +# should not include metric +other_info_keys = ['mAcc'] +markdown_file = 'markdowns/lr_in_trans.json.md' +json_file = 'jsons/trans_in_cnn.json' diff --git a/.dev/log_collector/log_collector.py b/.dev/log_collector/log_collector.py new file mode 100644 index 0000000000..0c2ff61880 --- /dev/null +++ b/.dev/log_collector/log_collector.py @@ -0,0 +1,143 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import datetime +import json +import os +import os.path as osp +from collections import OrderedDict + +from utils import load_config + +# automatically collect all the results + +# The structure of the directory: +# ├── work-dir +# │ ├── config_1 +# │ │ ├── time1.log.json +# │ │ ├── time2.log.json +# │ │ ├── time3.log.json +# │ │ ├── time4.log.json +# │ ├── config_2 +# │ │ ├── time5.log.json +# │ │ ├── time6.log.json +# │ │ ├── time7.log.json +# │ │ ├── time8.log.json + + +def parse_args(): + parser = argparse.ArgumentParser(description='extract info from log.json') + parser.add_argument('config_dir') + args = parser.parse_args() + return args + + +def has_keyword(name: str, keywords: list): + for a_keyword in keywords: + if a_keyword in name: + return True + return False + + +def main(): + args = parse_args() + cfg = load_config(args.config_dir) + work_dir = cfg['work_dir'] + metric = cfg['metric'] + log_items = cfg.get('log_items', []) + ignore_keywords = cfg.get('ignore_keywords', []) + other_info_keys = cfg.get('other_info_keys', []) + markdown_file = cfg.get('markdown_file', None) + json_file = cfg.get('json_file', None) + + if json_file and osp.split(json_file)[0] != '': + os.makedirs(osp.split(json_file)[0], exist_ok=True) + if markdown_file and osp.split(markdown_file)[0] != '': + os.makedirs(osp.split(markdown_file)[0], exist_ok=True) + + assert not (log_items and ignore_keywords), \ + 'log_items and ignore_keywords cannot be specified at the same time' + assert metric not in other_info_keys, \ + 'other_info_keys should not contain metric' + + if ignore_keywords and isinstance(ignore_keywords, str): + ignore_keywords = [ignore_keywords] + if other_info_keys and isinstance(other_info_keys, str): + other_info_keys = [other_info_keys] + if log_items and isinstance(log_items, str): + log_items = [log_items] + + if not log_items: + log_items = [ + item for item in sorted(os.listdir(work_dir)) + if not has_keyword(item, ignore_keywords) + ] + + experiment_info_list = [] + for config_dir in log_items: + preceding_path = os.path.join(work_dir, config_dir) + log_list = [ + item for item in os.listdir(preceding_path) + if item.endswith('.log.json') + ] + log_list = sorted( + log_list, + key=lambda time_str: datetime.datetime.strptime( + time_str, '%Y%m%d_%H%M%S.log.json')) + val_list = [] + last_iter = 0 + for log_name in log_list: + with open(os.path.join(preceding_path, log_name)) as f: + # ignore the info line + f.readline() + all_lines = f.readlines() + val_list.extend([ + json.loads(line) for line in all_lines + if json.loads(line)['mode'] == 'val' + ]) + for index in range(len(all_lines) - 1, -1, -1): + line_dict = json.loads(all_lines[index]) + if line_dict['mode'] == 'train': + last_iter = max(last_iter, line_dict['iter']) + break + + new_log_dict = dict( + 
method=config_dir, metric_used=metric, last_iter=last_iter) + for index, log in enumerate(val_list, 1): + new_ordered_dict = OrderedDict() + new_ordered_dict['eval_index'] = index + new_ordered_dict[metric] = log[metric] + for key in other_info_keys: + if key in log: + new_ordered_dict[key] = log[key] + val_list[index - 1] = new_ordered_dict + + assert len(val_list) >= 1, \ + f"work dir {config_dir} doesn't contain any evaluation." + new_log_dict['last eval'] = val_list[-1] + new_log_dict['best eval'] = max(val_list, key=lambda x: x[metric]) + experiment_info_list.append(new_log_dict) + print(f'{config_dir} is processed') + + if json_file: + with open(json_file, 'w') as f: + json.dump(experiment_info_list, f, indent=4) + + if markdown_file: + lines_to_write = [] + for index, log in enumerate(experiment_info_list, 1): + lines_to_write.append( + f"|{index}|{log['method']}|{log['best eval'][metric]}" + f"|{log['best eval']['eval_index']}|" + f"{log['last eval'][metric]}|" + f"{log['last eval']['eval_index']}|{log['last_iter']}|\n") + with open(markdown_file, 'w') as f: + f.write(f'|exp_num|method|{metric} best|best index|' + f'{metric} last|last index|last iter num|\n') + f.write('|:---:|:---:|:---:|:---:|:---:|:---:|:---:|\n') + f.writelines(lines_to_write) + + print('processed successfully') + + +if __name__ == '__main__': + main() diff --git a/.dev/log_collector/readme.md b/.dev/log_collector/readme.md new file mode 100644 index 0000000000..4a8b9b6bd0 --- /dev/null +++ b/.dev/log_collector/readme.md @@ -0,0 +1,144 @@ +# Log Collector + +## Function + +Automatically collect logs and write the result in a json file or markdown file. + +If there are several `.log.json` files in one folder, Log Collector assumes that the `.log.json` files other than the first one are resume from the preceding `.log.json` file. Log Collector returns the result considering all `.log.json` files. + +## Usage: + +To use log collector, you need to write a config file to configure the log collector first. + +For example: + +example_config.py: + +```python +# The work directory that contains folders that contains .log.json files. +work_dir = '../../work_dirs' +# The metric used to find the best evaluation. +metric = 'mIoU' + +# **Don't specify the log_items and ignore_keywords at the same time.** +# Specify the log files we would like to collect in `log_items`. +# The folders specified should be the subdirectories of `work_dir`. +log_items = [ + 'segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup', + 'segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr', + 'segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr', + 'segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr' +] +# Or specify `ignore_keywords`. The folders whose name contain one +# of the keywords in the `ignore_keywords` list(e.g., `'segformer'`) +# won't be collected. +# ignore_keywords = ['segformer'] + +# Other log items in .log.json that you want to collect. +# should not include metric. +other_info_keys = ["mAcc"] +# The output markdown file's name. +markdown_file ='markdowns/lr_in_trans.json.md' +# The output json file's name. (optional) +json_file = 'jsons/trans_in_cnn.json' +``` + +The structure of the work-dir directory should be like: + +```text +├── work-dir +│ ├── folder1 +│ │ ├── time1.log.json +│ │ ├── time2.log.json +│ │ ├── time3.log.json +│ │ ├── time4.log.json +│ ├── folder2 +│ │ ├── time5.log.json +│ │ ├── time6.log.json +│ │ ├── time7.log.json +│ │ ├── time8.log.json +``` + +Then , cd to the log collector folder. 
+ +Now you can run log_collector.py by using command: + +```bash +python log_collector.py ./example_config.py +``` + +The output markdown file is like: + +| exp_num | method | mIoU best | best index | mIoU last | last index | last iter num | +| :-----: | :-----------------------------------------------------: | :-------: | :--------: | :-------: | :--------: | :-----------: | +| 1 | segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup | 0.2776 | 10 | 0.2776 | 10 | 160000 | +| 2 | segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr | 0.2802 | 10 | 0.2802 | 10 | 160000 | +| 3 | segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr | 0.4943 | 11 | 0.4943 | 11 | 160000 | +| 4 | segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr | 0.4883 | 11 | 0.4883 | 11 | 160000 | + +The output json file is like: + +```json +[ + { + "method": "segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup", + "metric_used": "mIoU", + "last_iter": 160000, + "last eval": { + "eval_index": 10, + "mIoU": 0.2776, + "mAcc": 0.3779 + }, + "best eval": { + "eval_index": 10, + "mIoU": 0.2776, + "mAcc": 0.3779 + } + }, + { + "method": "segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr", + "metric_used": "mIoU", + "last_iter": 160000, + "last eval": { + "eval_index": 10, + "mIoU": 0.2802, + "mAcc": 0.3764 + }, + "best eval": { + "eval_index": 10, + "mIoU": 0.2802, + "mAcc": 0.3764 + } + }, + { + "method": "segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr", + "metric_used": "mIoU", + "last_iter": 160000, + "last eval": { + "eval_index": 11, + "mIoU": 0.4943, + "mAcc": 0.6097 + }, + "best eval": { + "eval_index": 11, + "mIoU": 0.4943, + "mAcc": 0.6097 + } + }, + { + "method": "segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr", + "metric_used": "mIoU", + "last_iter": 160000, + "last eval": { + "eval_index": 11, + "mIoU": 0.4883, + "mAcc": 0.6061 + }, + "best eval": { + "eval_index": 11, + "mIoU": 0.4883, + "mAcc": 0.6061 + } + } +] +``` diff --git a/.dev/log_collector/utils.py b/.dev/log_collector/utils.py new file mode 100644 index 0000000000..848516a783 --- /dev/null +++ b/.dev/log_collector/utils.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# modified from https://github.dev/open-mmlab/mmcv +import os.path as osp +import sys +from importlib import import_module + + +def load_config(cfg_dir: str) -> dict: + assert cfg_dir.endswith('.py') + root_path, file_name = osp.split(cfg_dir) + temp_module = osp.splitext(file_name)[0] + sys.path.insert(0, root_path) + mod = import_module(temp_module) + sys.path.pop(0) + cfg_dict = { + k: v + for k, v in mod.__dict__.items() if not k.startswith('__') + } + del sys.modules[temp_module] + return cfg_dict diff --git a/.dev/md2yml.py b/.dev/md2yml.py new file mode 100755 index 0000000000..fc9c67e470 --- /dev/null +++ b/.dev/md2yml.py @@ -0,0 +1,317 @@ +#!/usr/bin/env python + +# Copyright (c) OpenMMLab. All rights reserved. +# This tool is used to update model-index.yml which is required by MIM, and +# will be automatically called as a pre-commit hook. The updating will be +# triggered if any change of model information (.md files in configs/) has been +# detected before a commit. 
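# A hedged sketch of the model-index.yml structure this script maintains; the entry
# below is illustrative only (collection name, paper reference, config name and
# metric value are assumptions, not generated output):
#
#   Collections:
#   - Name: FCN
#     Metadata:
#       Training Data:
#       - Cityscapes
#     Paper:
#       URL: https://arxiv.org/abs/1411.4038
#       Title: Fully Convolutional Networks for Semantic Segmentation
#     README: configs/fcn/README.md
#   Models:
#   - Name: fcn_r50-d8_512x1024_40k_cityscapes
#     In Collection: FCN
#     Results:
#       Task: Semantic Segmentation
#       Dataset: Cityscapes
#       Metrics:
#         mIoU: 72.25
#     Config: configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py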
+ +import glob +import os +import os.path as osp +import re +import sys + +from lxml import etree +from mmengine.fileio import dump + +MMSEG_ROOT = osp.dirname(osp.dirname(osp.dirname(__file__))) + +COLLECTIONS = [ + 'ANN', 'APCNet', 'BiSeNetV1', 'BiSeNetV2', 'CCNet', 'CGNet', 'DANet', + 'DeepLabV3', 'DeepLabV3+', 'DMNet', 'DNLNet', 'DPT', 'EMANet', 'EncNet', + 'ERFNet', 'FastFCN', 'FastSCNN', 'FCN', 'GCNet', 'ICNet', 'ISANet', 'KNet', + 'NonLocalNet', 'OCRNet', 'PointRend', 'PSANet', 'PSPNet', 'Segformer', + 'Segmenter', 'FPN', 'SETR', 'STDC', 'UNet', 'UPerNet' +] +COLLECTIONS_TEMP = [] + + +def dump_yaml_and_check_difference(obj, filename, sort_keys=False): + """Dump object to a yaml file, and check if the file content is different + from the original. + + Args: + obj (any): The python object to be dumped. + filename (str): YAML filename to dump the object to. + sort_keys (str); Sort key by dictionary order. + Returns: + Bool: If the target YAML file is different from the original. + """ + + str_dump = dump(obj, None, file_format='yaml', sort_keys=sort_keys) + if osp.isfile(filename): + file_exists = True + with open(filename, encoding='utf-8') as f: + str_orig = f.read() + else: + file_exists = False + str_orig = None + + if file_exists and str_orig == str_dump: + is_different = False + else: + is_different = True + with open(filename, 'w', encoding='utf-8') as f: + f.write(str_dump) + + return is_different + + +def parse_md(md_file): + """Parse .md file and convert it to a .yml file which can be used for MIM. + + Args: + md_file (str): Path to .md file. + Returns: + Bool: If the target YAML file is different from the original. + """ + collection_name = osp.split(osp.dirname(md_file))[1] + configs = os.listdir(osp.dirname(md_file)) + + collection = dict( + Name=collection_name, + Metadata={'Training Data': []}, + Paper={ + 'URL': '', + 'Title': '' + }, + README=md_file, + Code={ + 'URL': '', + 'Version': '' + }) + collection.update({'Converted From': {'Weights': '', 'Code': ''}}) + models = [] + datasets = [] + paper_url = None + paper_title = None + code_url = None + code_version = None + repo_url = None + + # To avoid re-counting number of backbone model in OpenMMLab, + # if certain model in configs folder is backbone whose name is already + # recorded in MMClassification, then the `COLLECTION` dict of this model + # in MMSegmentation should be deleted, and `In Collection` in `Models` + # should be set with head or neck of this config file. + is_backbone = None + + with open(md_file, encoding='UTF-8') as md: + lines = md.readlines() + i = 0 + current_dataset = '' + while i < len(lines): + line = lines[i].strip() + # In latest README.md the title and url are in the third line. + if i == 2: + paper_url = lines[i].split('](')[1].split(')')[0] + paper_title = lines[i].split('](')[0].split('[')[1] + if len(line) == 0: + i += 1 + continue + elif line[:3] == 'Before you create a PR, make sure that your code lints and is formatted by yapf. +> Before you create a PR, make sure that your code lints and is formatted by yapf. ### C++ and CUDA + We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). 
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 3ba13e0cec..aa982e548b 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1 +1,6 @@ blank_issues_enabled: false + +contact_links: + - name: MMSegmentation Documentation + url: https://mmsegmentation.readthedocs.io + about: Check the docs and FAQ to see if your question is already answered. diff --git a/.github/ISSUE_TEMPLATE/error-report.md b/.github/ISSUE_TEMPLATE/error-report.md index 1b129c1574..807781c6a5 100644 --- a/.github/ISSUE_TEMPLATE/error-report.md +++ b/.github/ISSUE_TEMPLATE/error-report.md @@ -4,12 +4,12 @@ about: Create a report to help us improve title: '' labels: '' assignees: '' - --- Thanks for your error report and we appreciate it a lot. **Checklist** + 1. I have searched related issues but cannot get the expected help. 2. The bug has not been fixed in the latest version. @@ -17,25 +17,32 @@ Thanks for your error report and we appreciate it a lot. A clear and concise description of what the bug is. **Reproduction** + 1. What command or script did you run? -``` -A placeholder for the command. -``` + + ```none + A placeholder for the command. + ``` + 2. Did you make any modifications on the code or config? Did you understand what you have modified? + 3. What dataset did you use? **Environment** -1. Please run `python mmseg/utils/collect_env.py` to collect necessary environment infomation and paste it here. +1. Please run `python mmseg/utils/collect_env.py` to collect necessary environment information and paste it here. 2. You may add additional information that may be helpful for locating the problem, such as - - How you installed PyTorch [e.g., pip, conda, source] - - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.) + - How you installed PyTorch \[e.g., pip, conda, source\] + - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.) **Error traceback** + If applicable, paste the error traceback here. -``` + +```none A placeholder for traceback. ``` **Bug fix** + If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated! diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 33f9d5f235..7e3b855fe6 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -4,15 +4,14 @@ about: Suggest an idea for this project title: '' labels: '' assignees: '' - --- -**Describe the feature** +# Describe the feature **Motivation** A clear and concise description of the motivation of the feature. -Ex1. It is inconvenient when [....]. -Ex2. There is a recent paper [....], which is very helpful for [....]. +Ex1. It is inconvenient when \[....\]. +Ex2. There is a recent paper \[....\], which is very helpful for \[....\]. **Related resources** If there is an official code release or third-party implementations, please also provide the information here, which would be very helpful.
diff --git a/.github/ISSUE_TEMPLATE/general_questions.md b/.github/ISSUE_TEMPLATE/general_questions.md index b5a6451a6c..f02dd63a80 100644 --- a/.github/ISSUE_TEMPLATE/general_questions.md +++ b/.github/ISSUE_TEMPLATE/general_questions.md @@ -4,5 +4,4 @@ about: Ask general questions to get help title: '' labels: '' assignees: '' - --- diff --git a/.github/ISSUE_TEMPLATE/reimplementation_questions.md b/.github/ISSUE_TEMPLATE/reimplementation_questions.md new file mode 100644 index 0000000000..63e4c3b3ab --- /dev/null +++ b/.github/ISSUE_TEMPLATE/reimplementation_questions.md @@ -0,0 +1,69 @@ +--- +name: Reimplementation Questions +about: Ask about questions during model reimplementation +title: '' +labels: reimplementation +assignees: '' +--- + +If you feel we have helped you, give us a STAR! :satisfied: + +**Notice** + +There are several common situations in the reimplementation issues as below + +1. Reimplement a model in the model zoo using the provided configs +2. Reimplement a model in the model zoo on other datasets (e.g., custom datasets) +3. Reimplement a custom model but all the components are implemented in MMSegmentation +4. Reimplement a custom model with new modules implemented by yourself + +There are several things to do for different cases as below. + +- For cases 1 & 3, please follow the steps in the following sections thus we could help to quickly identify the issue. +- For cases 2 & 4, please understand that we are not able to do much help here because we usually do not know the full code, and the users should be responsible for the code they write. +- One suggestion for cases 2 & 4 is that the users should first check whether the bug lies in the self-implemented code or the original code. For example, users can first make sure that the same model runs well on supported datasets. If you still need help, please describe what you have done and what you obtain in the issue, and follow the steps in the following sections, and try as clear as possible so that we can better help you. + +**Checklist** + +1. I have searched related issues but cannot get the expected help. +2. The issue has not been fixed in the latest version. + +**Describe the issue** + +A clear and concise description of the problem you meet and what you have done. + +**Reproduction** + +1. What command or script did you run? + +``` +A placeholder for the command. +``` + +2. What config dir you run? + +``` +A placeholder for the config. +``` + +3. Did you make any modifications to the code or config? Did you understand what you have modified? +4. What dataset did you use? + +**Environment** + +1. Please run `PYTHONPATH=${PWD}:$PYTHONPATH python mmseg/utils/collect_env.py` to collect the necessary environment information and paste it here. +2. You may add an addition that may be helpful for locating the problem, such as + 1. How you installed PyTorch \[e.g., pip, conda, source\] + 2. Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.) + +**Results** + +If applicable, paste the related results here, e.g., what you expect and what you get. + +``` +A placeholder for results comparison +``` + +**Issue fix** + +If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated! 
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..09d5305ede --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,25 @@ +Thanks for your contribution and we appreciate it a lot. The following instructions would make your pull request more healthy and more easily get feedback. If you do not understand some items, don't worry, just make the pull request and seek help from maintainers. + +## Motivation + +Please describe the motivation of this PR and the goal you want to achieve through this PR. + +## Modification + +Please briefly describe what modification is made in this PR. + +## BC-breaking (Optional) + +Does the modification introduce changes that break the backward-compatibility of the downstream repos? +If so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR. + +## Use cases (Optional) + +If this PR introduces a new feature, it is better to list some use cases here, and update the documentation. + +## Checklist + +1. Pre-commit or other linting tools are used to fix the potential lint issues. +2. The modification is covered by complete unit tests. If not, please add more unit test to ensure the correctness. +3. If the modification has potential influence on downstream projects, this PR should be tested with downstream projects, like MMDet or MMDet3D. +4. The documentation has been modified accordingly, like docstring or example tutorials. diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index 68afd6e5ec..0000000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,101 +0,0 @@ -# This workflow will install Python dependencies, run tests and lint with a variety of Python versions -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions - -name: build - -on: [push, pull_request] - -jobs: - - lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.7 - uses: actions/setup-python@v1 - with: - python-version: 3.7 - - name: Install linting dependencies - run: | - python -m pip install --upgrade pip - pip install flake8 isort==4.3.21 yapf interrogate - - name: Lint with flake8 - run: flake8 . 
- - name: Lint with isort - run: isort --recursive --check-only --diff mmseg/ tests/ examples/ - - name: Format python codes with yapf - run: yapf -r -d mmseg/ tests/ examples/ - - name: Check docstring - run: interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --exclude mmseg/ops --ignore-regex "__repr__" --fail-under 80 mmseg - - build: - env: - CUDA: 10.1.105-1 - CUDA_SHORT: 10.1 - UBUNTU_VERSION: ubuntu1804 - FORCE_CUDA: 1 - MMCV_CUDA_ARGS: -gencode=arch=compute_61,code=sm_61 - runs-on: ubuntu-latest - strategy: - matrix: - python-version: [3.6, 3.7] - torch: [1.3.0+cpu, 1.5.0+cpu] - include: - - torch: 1.3.0+cpu - torchvision: 0.4.2+cpu - - torch: 1.5.0+cpu - torchvision: 0.6.0+cpu - - torch: 1.5.0+cpu - torchvision: 0.6.0+cpu - python-version: 3.8 - - torch: 1.5.0+cu101 - torchvision: 0.6.0+cu101 - python-version: 3.7 - - steps: - - uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - name: Install CUDA - if: ${{matrix.torch == '1.5.0+cu101'}} - run: | - export INSTALLER=cuda-repo-${UBUNTU_VERSION}_${CUDA}_amd64.deb - wget http://developer.download.nvidia.com/compute/cuda/repos/${UBUNTU_VERSION}/x86_64/${INSTALLER} - sudo dpkg -i ${INSTALLER} - wget https://developer.download.nvidia.com/compute/cuda/repos/${UBUNTU_VERSION}/x86_64/7fa2af80.pub - sudo apt-key add 7fa2af80.pub - sudo apt update -qq - sudo apt install -y cuda-${CUDA_SHORT/./-} cuda-cufft-dev-${CUDA_SHORT/./-} - sudo apt clean - export CUDA_HOME=/usr/local/cuda-${CUDA_SHORT} - export LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${CUDA_HOME}/include:${LD_LIBRARY_PATH} - export PATH=${CUDA_HOME}/bin:${PATH} - sudo apt-get install -y ninja-build - - name: Install Pillow - if: ${{matrix.torchvision == '0.4.2+cpu'}} - run: pip install Pillow==6.2.2 - - name: Install PyTorch - run: pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html - - name: Install mmseg dependencies - run: | - pip install mmcv-full==latest+torch${{matrix.torch}} -f https://openmmlab.oss-accelerate.aliyuncs.com/mmcv/dist/index.html - pip install -r requirements.txt - - name: Build and install - run: rm -rf .eggs && pip install -e . 
- - name: Run unittests and generate coverage report - run: | - coverage run --branch --source mmseg -m pytest tests/ - coverage xml - coverage report -m --omit="mmseg/utils/*","mmseg/apis/*" - # Only upload coverage report for python3.7 && pytorch1.5 - - name: Upload coverage to Codecov - if: ${{matrix.torch == '1.5.0+cu101' && matrix.python-version == '3.7'}} - uses: codecov/codecov-action@v1.0.10 - with: - file: ./coverage.xml - flags: unittests - env_vars: OS,PYTHON - name: codecov-umbrella - fail_ci_if_error: false diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 0000000000..ab64085cba --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,26 @@ +name: deploy + +on: push + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build-n-publish: + runs-on: ubuntu-latest + if: startsWith(github.event.ref, 'refs/tags') + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.7 + uses: actions/setup-python@v2 + with: + python-version: 3.7 + - name: Build MMSegmentation + run: | + pip install wheel + python setup.py sdist bdist_wheel + - name: Publish distribution to PyPI + run: | + pip install twine + twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 0000000000..97cfda589a --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,27 @@ +name: lint + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + lint: + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.7 + uses: actions/setup-python@v2 + with: + python-version: 3.7 + - name: Install pre-commit hook + run: | + python -m pip install pre-commit + pre-commit install + - name: Linting + run: pre-commit run --all-files + - name: Check docstring coverage + run: | + python -m pip install interrogate + interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 75 mmseg diff --git a/.github/workflows/merge_stage_test.yml b/.github/workflows/merge_stage_test.yml new file mode 100644 index 0000000000..1ed57f5bea --- /dev/null +++ b/.github/workflows/merge_stage_test.yml @@ -0,0 +1,264 @@ +name: merge_stage_test + +on: + push: + paths-ignore: + - 'README.md' + - 'README_zh-CN.md' + - 'docs/**' + - 'demo/**' + - '.dev_scripts/**' + - '.circleci/**' + - 'projects/**' + branches: + - dev-1.x + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build_cpu_py: + runs-on: ubuntu-18.04 + strategy: + matrix: + python-version: [3.6, 3.8, 3.9] + torch: [1.8.1] + include: + - torch: 1.8.1 + torchvision: 0.9.1 + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: | + pip install pip --upgrade + pip install wheel + - name: Install Pillow + run: pip install Pillow==6.2.2 + if: ${{matrix.torchvision == '0.4.2'}} + - name: Install PyTorch + run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html + - name: Install mmseg dependencies + run: | + python -V + pip install -U openmim + pip install git+https://github.com/open-mmlab/mmengine.git + mim install 'mmcv>=2.0.0rc3' + pip install 
git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + - name: Install unittest dependencies + run: pip install -r requirements/tests.txt -r requirements/optional.txt + - name: Build and install + run: rm -rf .eggs && pip install -e . + - name: Run unittests and generate coverage report + run: | + pip install timm + coverage run --branch --source mmseg -m pytest tests/ + coverage xml + coverage report -m + build_cpu_pt: + runs-on: ubuntu-18.04 + strategy: + matrix: + python-version: [3.7] + torch: [1.6.0, 1.7.1, 1.8.1, 1.9.1, 1.10.1, 1.11.0, 1.12.1, 1.13.0] + include: + - torch: 1.6.0 + torchvision: 0.7.0 + - torch: 1.7.1 + torchvision: 0.8.2 + - torch: 1.8.1 + torchvision: 0.9.1 + - torch: 1.9.1 + torchvision: 0.10.1 + - torch: 1.10.1 + torchvision: 0.11.2 + - torch: 1.11.0 + torchvision: 0.12.0 + - torch: 1.12.1 + torchvision: 0.13.0 + - torch: 1.13.0 + torchvision: + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: | + pip install pip --upgrade + pip install wheel + - name: Install Pillow + run: pip install Pillow==6.2.2 + - name: Install PyTorch + run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html + - name: Install mmseg dependencies + run: | + python -V + pip install -U openmim + pip install git+https://github.com/open-mmlab/mmengine.git + mim install 'mmcv>=2.0.0rc3' + pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + - name: Install unittest dependencies + run: pip install -r requirements/tests.txt -r requirements/optional.txt + - name: Build and install + run: rm -rf .eggs && pip install -e . 
+ - name: Run unittests and generate coverage report + # timm from v0.6.11 requires torch>=1.7 + if: ${{matrix.torch >= '1.7.0'}} + run: | + pip install timm + coverage run --branch --source mmseg -m pytest tests/ + coverage xml + coverage report -m + - name: Skip timm unittests and generate coverage report + run: | + coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py + coverage xml + coverage report -m + # Only upload coverage report for python3.7 && pytorch1.8.1 without timm + - name: Upload coverage to Codecov + if: ${{matrix.torch == '1.8.1' && matrix.python-version == '3.7'}} + uses: codecov/codecov-action@v2 + with: + files: ./coverage.xml + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: false + + build_cu102: + runs-on: ubuntu-18.04 + container: + image: pytorch/pytorch:1.8.1-cuda10.2-cudnn7-devel + strategy: + matrix: + python-version: [3.7] + include: + - torch: 1.8.1 + cuda: 10.2 + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: | + pip install pip --upgrade + pip install wheel + - name: Fetch GPG keys + run: | + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + - name: Install Python-dev + run: apt-get update && apt-get install -y python${{matrix.python-version}}-dev + if: ${{matrix.python-version != 3.9}} + - name: Install system dependencies + run: | + apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 + - name: Install mmseg dependencies + run: | + python -V + pip install -U openmim + pip install git+https://github.com/open-mmlab/mmengine.git + mim install 'mmcv>=2.0.0rc3' + pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + - name: Install unittest dependencies + run: pip install -r requirements/tests.txt -r requirements/optional.txt + - name: Build and install + run: | + python setup.py check -m -s + TORCH_CUDA_ARCH_LIST=7.0 pip install -e . 
+ build_cu116: + runs-on: ubuntu-18.04 + container: + image: pytorch/pytorch:1.13.0-cuda11.6-cudnn8-devel + strategy: + matrix: + python-version: [3.7] + include: + - torch: 1.13.0 + cuda: 11.6 + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: | + pip install pip --upgrade + pip install wheel + - name: Fetch GPG keys + run: | + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + - name: Install Python-dev + run: apt-get update && apt-get install -y python${{matrix.python-version}}-dev + if: ${{matrix.python-version != 3.9}} + - name: Install system dependencies + run: | + apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 + - name: Install mmseg dependencies + run: | + python -V + pip install -U openmim + pip install git+https://github.com/open-mmlab/mmengine.git + mim install 'mmcv>=2.0.0rc3' + pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + - name: Install unittest dependencies + run: pip install -r requirements/tests.txt -r requirements/optional.txt + - name: Build and install + run: | + python setup.py check -m -s + TORCH_CUDA_ARCH_LIST=7.0 pip install -e . + build_windows: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [windows-2022] + python: [3.7] + platform: [cpu, cu111] + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: | + python -m pip install pip --upgrade + pip install wheel + - name: Install OpenCV + run: pip install opencv-python>=3 + - name: Install PyTorch + run: pip install torch==1.8.1+${{matrix.platform}} torchvision==0.9.1+${{matrix.platform}} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html + - name: Install mmseg dependencies + run: | + python -V + pip install -U openmim + pip install git+https://github.com/open-mmlab/mmengine.git + mim install 'mmcv>=2.0.0rc3' + pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + - name: Install unittest dependencies + run: pip install -r requirements/tests.txt -r requirements/optional.txt + - name: Build and install + run: pip install -e . 
+ - name: Run unittests + run: | + pip install timm + coverage run --branch --source mmseg -m pytest tests/ --ignore tests\test_models\test_forward.py tests\test_models\test_backbones\test_beit.py + - name: Generate coverage report + run: | + coverage xml + coverage report -m diff --git a/.github/workflows/pr_stage_test.yml b/.github/workflows/pr_stage_test.yml new file mode 100644 index 0000000000..370eb8a042 --- /dev/null +++ b/.github/workflows/pr_stage_test.yml @@ -0,0 +1,151 @@ +name: pr_stage_test + +on: + pull_request: + paths-ignore: + - 'README.md' + - 'README_zh-CN.md' + - 'docs/**' + - 'demo/**' + - '.dev_scripts/**' + - '.circleci/**' + - 'projects/**' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build_cpu: + runs-on: ubuntu-18.04 + strategy: + matrix: + python-version: [3.7] + include: + - torch: 1.8.1 + torchvision: 0.9.1 + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: | + pip install pip --upgrade + # Install wheel for source distribution build. https://github.com/pypa/pip/blob/main/src/pip/_internal/wheel_builder.py#L94 + pip install wheel + - name: Install Pillow + run: pip install Pillow==6.2.2 + if: ${{matrix.torchvision == '0.4.2'}} + - name: Install PyTorch + run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html + - name: Install other dependencies + run: | + pip install -U openmim + pip install git+https://github.com/open-mmlab/mmengine.git + mim install 'mmcv>=2.0.0rc3' + pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + - name: Install unittest dependencies + run: pip install -r requirements/tests.txt -r requirements/optional.txt + - name: Build and install + run: rm -rf .eggs && pip install -e . 
+ - name: Run unittests and generate coverage report + run: | + coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py + coverage xml + coverage report -m + # Upload coverage report for python3.7 && pytorch1.8.1 cpu without timm + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1.0.14 + with: + file: ./coverage.xml + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: false + + build_cu102: + runs-on: ubuntu-18.04 + container: + image: pytorch/pytorch:1.8.1-cuda10.2-cudnn7-devel + strategy: + matrix: + python-version: [3.7] + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: | + pip install pip --upgrade + pip install wheel + - name: Fetch GPG keys + run: | + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + - name: Install Python-dev + run: apt-get update && apt-get install -y python${{matrix.python-version}}-dev + if: ${{matrix.python-version != 3.9}} + - name: Install system dependencies + run: | + apt-get update + apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev + - name: Install mmseg dependencies + run: | + python -V + pip install -U openmim + pip install git+https://github.com/open-mmlab/mmengine.git + mim install 'mmcv>=2.0.0rc3' + pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + - name: Install unittest dependencies + run: pip install -r requirements/tests.txt -r requirements/optional.txt + - name: Build and install + run: | + python setup.py check -m -s + TORCH_CUDA_ARCH_LIST=7.0 pip install -e . + build_windows: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [windows-2022] + python: [3.7] + platform: [cpu, cu111] + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: | + python -m pip install pip --upgrade + pip install wheel + - name: Install OpenCV + run: pip install opencv-python>=3 + - name: Install PyTorch + run: pip install torch==1.8.1+${{matrix.platform}} torchvision==0.9.1+${{matrix.platform}} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html + - name: Install mmseg dependencies + run: | + python -V + pip install -U openmim + pip install git+https://github.com/open-mmlab/mmengine.git + mim install 'mmcv>=2.0.0rc3' + pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + - name: Install unittest dependencies + run: pip install -r requirements/tests.txt -r requirements/optional.txt + - name: Build and install + run: pip install -e . 
+ - name: Run unittests + run: | + pip install timm + coverage run --branch --source mmseg -m pytest tests/ --ignore tests\test_models\test_forward.py tests\test_models\test_backbones\test_beit.py + - name: Generate coverage report + run: | + coverage xml + coverage report -m diff --git a/.github/workflows/test_mim.yml b/.github/workflows/test_mim.yml new file mode 100644 index 0000000000..390fcf87d7 --- /dev/null +++ b/.github/workflows/test_mim.yml @@ -0,0 +1,45 @@ + +name: test-mim + +on: + push: + paths: + - 'model-index.yml' + - 'configs/**' + + pull_request: + paths: + - 'model-index.yml' + - 'configs/**' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build_cpu: + runs-on: ubuntu-18.04 + strategy: + matrix: + python-version: [3.7] + torch: [1.8.0] + include: + - torch: 1.8.0 + torch_version: torch1.8 + torchvision: 0.9.0 + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: pip install pip --upgrade + - name: Install PyTorch + run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html + - name: Install openmim + run: pip install openmim + - name: Build and install + run: rm -rf .eggs && mim install -e . + - name: test commands of mim + run: mim search mmsegmentation>=1.0.0rc0 diff --git a/.gitignore b/.gitignore index 77824a97a7..787d13ec67 100644 --- a/.gitignore +++ b/.gitignore @@ -64,7 +64,8 @@ instance/ .scrapy # Sphinx documentation -docs/_build/ +docs/en/_build/ +docs/zh_cn/_build/ # PyBuilder target/ @@ -89,6 +90,7 @@ venv/ ENV/ env.bak/ venv.bak/ +.DS_Store # Spyder project settings .spyderproject @@ -103,7 +105,6 @@ venv.bak/ # mypy .mypy_cache/ -mmseg/version.py data .vscode .idea @@ -113,6 +114,7 @@ data *.pkl.json *.log.json work_dirs/ +mmseg/.mim # Pytorch *.pth diff --git a/.owners.yml b/.owners.yml new file mode 100644 index 0000000000..b850b09507 --- /dev/null +++ b/.owners.yml @@ -0,0 +1,10 @@ +assign: + strategy: + # random + # daily-shift-based + round-robin + assignees: + - MeowZheng + - MengzhangLI + - linfangjian01 + - xiaoachen98 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9e6d30895b..03b537683a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,22 +1,18 @@ repos: - - repo: https://gitlab.com/pycqa/flake8.git - rev: 3.8.3 + - repo: https://github.com/PyCQA/flake8 + rev: 5.0.4 hooks: - id: flake8 - - repo: https://github.com/asottile/seed-isort-config - rev: v2.2.0 - hooks: - - id: seed-isort-config - - repo: https://github.com/timothycrosley/isort - rev: 4.3.21 + - repo: https://github.com/PyCQA/isort + rev: 5.10.1 hooks: - id: isort - repo: https://github.com/pre-commit/mirrors-yapf - rev: v0.30.0 + rev: v0.32.0 hooks: - id: yapf - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.1.0 + rev: v4.3.0 hooks: - id: trailing-whitespace - id: check-yaml @@ -28,8 +24,42 @@ repos: args: ["--remove"] - id: mixed-line-ending args: ["--fix=lf"] + - repo: https://github.com/executablebooks/mdformat + rev: 0.7.9 + hooks: + - id: mdformat + args: ["--number"] + additional_dependencies: + - mdformat-openmmlab + - mdformat_frontmatter + - linkify-it-py + - repo: https://github.com/codespell-project/codespell + rev: v2.2.1 + hooks: + - id: codespell - repo: https://github.com/myint/docformatter rev: v1.3.1 hooks: - id: 
docformatter args: ["--in-place", "--wrap-descriptions", "79"] + - repo: local + hooks: + - id: update-model-index + name: update-model-index + description: Collect model information and update model-index.yml + entry: .dev/md2yml.py + additional_dependencies: [mmengine, lxml, opencv-python] + language: python + files: ^configs/.*\.md$ + require_serial: true + - repo: https://github.com/asottile/pyupgrade + rev: v3.0.0 + hooks: + - id: pyupgrade + args: ["--py36-plus"] + - repo: https://github.com/open-mmlab/pre-commit-hooks + rev: v0.2.0 # Use the rev to fix revision + hooks: + - id: check-algo-readme + - id: check-copyright + args: ["mmseg", "tools", "tests", "demo"] # the dir_to_check with expected directory to check diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000000..6cfbf5d310 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,9 @@ +version: 2 + +formats: all + +python: + version: 3.7 + install: + - requirements: requirements/docs.txt + - requirements: requirements/readthedocs.txt diff --git a/.style.yapf b/.style.yapf deleted file mode 100644 index 286a3f1d7a..0000000000 --- a/.style.yapf +++ /dev/null @@ -1,4 +0,0 @@ -[style] -BASED_ON_STYLE = pep8 -BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true -SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000000..cfd7cab05d --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,8 @@ +cff-version: 1.2.0 +message: "If you use this software, please cite it as below." +authors: + - name: "MMSegmentation Contributors" +title: "OpenMMLab Semantic Segmentation Toolbox and Benchmark" +date-released: 2020-07-10 +url: "https://github.com/open-mmlab/mmsegmentation" +license: Apache-2.0 diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000..e307d81817 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,4 @@ +include requirements/*.txt +include mmseg/.mim/model-index.yml +recursive-include mmseg/.mim/configs *.py *.yml +recursive-include mmseg/.mim/tools *.py *.sh diff --git a/README.md b/README.md index 9e7cf39b3f..056f9029b1 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,44 @@
+<!-- README banner (HTML markup lost in this view): OpenMMLab logo image; navigation links to the OpenMMLab website ("HOT") and the OpenMMLab platform ("TRY IT OUT") -->
+ +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/mmsegmentation)](https://pypi.org/project/mmsegmentation/) +[![PyPI](https://img.shields.io/pypi/v/mmsegmentation)](https://pypi.org/project/mmsegmentation) +[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmsegmentation.readthedocs.io/en/1.x/) +[![badge](https://github.com/open-mmlab/mmsegmentation/workflows/build/badge.svg)](https://github.com/open-mmlab/mmsegmentation/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmsegmentation/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmsegmentation) +[![license](https://img.shields.io/github/license/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/blob/1.x/LICENSE) +[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/issues) +[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/issues) + +Documentation: https://mmsegmentation.readthedocs.io/en/1.x/ + +English | [简体中文](README_zh-CN.md) ## Introduction MMSegmentation is an open source semantic segmentation toolbox based on PyTorch. It is a part of the OpenMMLab project. -The master branch works with **PyTorch 1.3 to 1.5**. +The 1.x branch works with **PyTorch 1.6+**. ![demo image](resources/seg_demo.gif) @@ -29,41 +60,107 @@ The master branch works with **PyTorch 1.3 to 1.5**. The training speed is faster than or comparable to other codebases. -## License +## What's New -This project is released under the [Apache 2.0 license](LICENSE). +v1.0.0rc2 was released in 6/12/2022. +Please refer to [changelog.md](docs/en/notes/changelog.md) for details and release history. -## Benchmark and model zoo +- Support MaskFormer and Mask2Former ([#2215](https://github.com/open-mmlab/mmsegmentation/pull/2215), [2255](https://github.com/open-mmlab/mmsegmentation/pull/2255)) -Results and models are available in the [model zoo](docs/model_zoo.md). +## Installation -Supported backbones: -- [x] ResNet -- [x] ResNeXt -- [x] HRNet +Please refer to [get_started.md](docs/en/get_started.md#installation) for installation and [dataset_prepare.md](docs/en/user_guides/2_dataset_prepare.md#prepare-datasets) for dataset preparation. -Supported methods: -- [x] [FCN](configs/fcn) -- [x] [PSPNet](configs/pspnet) -- [x] [DeepLabV3](configs/deeplabv3) -- [x] [PSANet](configs/psanet) -- [x] [DeepLabV3+](configs/deeplabv3plus) -- [x] [UPerNet](configs/upernet) -- [x] [NonLocal Net](configs/nonlocal_net) -- [x] [CCNet](configs/ccnet) -- [x] [DANet](configs/danet) -- [x] [GCNet](configs/gcnet) -- [x] [ANN](configs/ann) -- [x] [OCRNet](configs/ocrnet) +## Get Started -## Installation +Please see [Overview](docs/en/overview.md) for the general introduction of MMSegmentation. -Please refer to [INSTALL.md](docs/install.md) for installation and dataset preparation. +Please see [user guides](https://mmsegmentation.readthedocs.io/en/1.x/user_guides/index.html#) for the basic usage of MMSegmentation. +There are also [advanced tutorials](https://mmsegmentation.readthedocs.io/en/dev-1.x/advanced_guides/index.html) for in-depth understanding of mmseg design and implementation . -## Get Started +A Colab tutorial is also provided. You may preview the notebook [here](demo/MMSegmentation_Tutorial.ipynb) or directly [run](https://colab.research.google.com/github/open-mmlab/mmsegmentation/blob/1.x/demo/MMSegmentation_Tutorial.ipynb) on Colab. 
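For quick reference, the snippet below is a minimal sketch of an installation flow that mirrors the dependency steps used by the CI workflows added in this diff (openmim, mmengine installed from source, `mmcv>=2.0.0rc3`); the branch name is an assumption, and `get_started.md` remains the authoritative guide:

```bash
pip install -U openmim
pip install git+https://github.com/open-mmlab/mmengine.git
mim install 'mmcv>=2.0.0rc3'
git clone -b 1.x https://github.com/open-mmlab/mmsegmentation.git
cd mmsegmentation
pip install -e .
```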
+ +To migrate from MMSegmentation 0.x, please refer to [migration](docs/en/migration.md). -Please see [getting_started.md](docs/getting_started.md) for the basic usage of MMSegmentation. -There are also tutorials for [adding new dataset](docs/tutorials/new_dataset.md), [designing data pipeline](docs/tutorials/data_pipeline.md), and [adding new modules](docs/tutorials/new_modules.md). +## Benchmark and model zoo + +Results and models are available in the [model zoo](docs/en/model_zoo.md). + +Supported backbones: + +- [x] ResNet (CVPR'2016) +- [x] ResNeXt (CVPR'2017) +- [x] [HRNet (CVPR'2019)](configs/hrnet) +- [x] [ResNeSt (ArXiv'2020)](configs/resnest) +- [x] [MobileNetV2 (CVPR'2018)](configs/mobilenet_v2) +- [x] [MobileNetV3 (ICCV'2019)](configs/mobilenet_v3) +- [x] [Vision Transformer (ICLR'2021)](configs/vit) +- [x] [Swin Transformer (ICCV'2021)](configs/swin) +- [x] [Twins (NeurIPS'2021)](configs/twins) +- [x] [BEiT (ICLR'2022)](configs/beit) +- [x] [ConvNeXt (CVPR'2022)](configs/convnext) +- [x] [MAE (CVPR'2022)](configs/mae) +- [x] [PoolFormer (CVPR'2022)](configs/poolformer) + +Supported methods: + +- [x] [FCN (CVPR'2015/TPAMI'2017)](configs/fcn) +- [x] [ERFNet (T-ITS'2017)](configs/erfnet) +- [x] [UNet (MICCAI'2016/Nat. Methods'2019)](configs/unet) +- [x] [PSPNet (CVPR'2017)](configs/pspnet) +- [x] [DeepLabV3 (ArXiv'2017)](configs/deeplabv3) +- [x] [BiSeNetV1 (ECCV'2018)](configs/bisenetv1) +- [x] [PSANet (ECCV'2018)](configs/psanet) +- [x] [DeepLabV3+ (CVPR'2018)](configs/deeplabv3plus) +- [x] [UPerNet (ECCV'2018)](configs/upernet) +- [x] [ICNet (ECCV'2018)](configs/icnet) +- [x] [NonLocal Net (CVPR'2018)](configs/nonlocal_net) +- [x] [EncNet (CVPR'2018)](configs/encnet) +- [x] [Semantic FPN (CVPR'2019)](configs/sem_fpn) +- [x] [DANet (CVPR'2019)](configs/danet) +- [x] [APCNet (CVPR'2019)](configs/apcnet) +- [x] [EMANet (ICCV'2019)](configs/emanet) +- [x] [CCNet (ICCV'2019)](configs/ccnet) +- [x] [DMNet (ICCV'2019)](configs/dmnet) +- [x] [ANN (ICCV'2019)](configs/ann) +- [x] [GCNet (ICCVW'2019/TPAMI'2020)](configs/gcnet) +- [x] [FastFCN (ArXiv'2019)](configs/fastfcn) +- [x] [Fast-SCNN (ArXiv'2019)](configs/fastscnn) +- [x] [ISANet (ArXiv'2019/IJCV'2021)](configs/isanet) +- [x] [OCRNet (ECCV'2020)](configs/ocrnet) +- [x] [DNLNet (ECCV'2020)](configs/dnlnet) +- [x] [PointRend (CVPR'2020)](configs/point_rend) +- [x] [CGNet (TIP'2020)](configs/cgnet) +- [x] [BiSeNetV2 (IJCV'2021)](configs/bisenetv2) +- [x] [STDC (CVPR'2021)](configs/stdc) +- [x] [SETR (CVPR'2021)](configs/setr) +- [x] [DPT (ArXiv'2021)](configs/dpt) +- [x] [Segmenter (ICCV'2021)](configs/segmenter) +- [x] [SegFormer (NeurIPS'2021)](configs/segformer) +- [x] [K-Net (NeurIPS'2021)](configs/knet) +- [x] [MaskFormer (NeurIPS'2021)](configs/maskformer) +- [x] [Mask2Former (CVPR'2022)](configs/mask2former) + +Supported datasets: + +- [x] [Cityscapes](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#cityscapes) +- [x] [PASCAL VOC](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#pascal-voc) +- [x] [ADE20K](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#ade20k) +- [x] [Pascal Context](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#pascal-context) +- [x] [COCO-Stuff 10k](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#coco-stuff-10k) +- [x] [COCO-Stuff 164k](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#coco-stuff-164k)
+- [x] [CHASE_DB1](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#chase-db1) +- [x] [DRIVE](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#drive) +- [x] [HRF](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#hrf) +- [x] [STARE](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#stare) +- [x] [Dark Zurich](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#dark-zurich) +- [x] [Nighttime Driving](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#nighttime-driving) +- [x] [LoveDA](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#loveda) +- [x] [Potsdam](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#isprs-potsdam) +- [x] [Vaihingen](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#isprs-vaihingen) +- [x] [iSAID](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/user_guides/2_dataset_prepare.md#isaid) + +Please refer to [FAQ](docs/en/notes/faq.md) for frequently asked questions. ## Contributing @@ -76,21 +173,44 @@ We wish that the toolbox and benchmark could serve the growing research community by providing a flexible as well as standardized toolkit to reimplement existing methods and develop their own new semantic segmentation methods. -Many thanks to Ruobing Han ([@drcut](https://github.com/drcut)), Xiaoming Ma([@aishangmaxiaoming](https://github.com/aishangmaxiaoming)), Shiguang Wang ([@sunnyxiaohu](https://github.com/sunnyxiaohu)) for deployment support. - ## Citation -If you use this toolbox or benchmark in your research, please cite this project. +If you find this project useful in your research, please consider citing: -``` +```bibtex @misc{mmseg2020, - author={Xu, Jiarui and Chen, Kai and Lin, Dahua}, - title={{MMSegmenation}}, - howpublished={\url{https://github.com/open-mmlab/mmsegmentation}}, - year={2020} + title={{MMSegmentation}: OpenMMLab Semantic Segmentation Toolbox and Benchmark}, + author={MMSegmentation Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmsegmentation}}, + year={2020} } ``` -## Contact +## License + +This project is released under the [Apache 2.0 license](LICENSE). -This repo is currently maintained by Jiarui Xu ([@xvjiarui](https://github.com/xvjiarui)), Kai Chen ([@hellock](http://github.com/hellock)). +## Projects in OpenMMLab + +- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models. +- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision. +- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages. +- [MMEval](https://github.com/open-mmlab/mmeval): A unified evaluation library for multiple machine learning libraries. +- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark. +- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark. +- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
+- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark. +- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark. +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark. +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox. +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark. +- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark. +- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark. +- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark. +- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark. +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark. +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark. +- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark. +- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox. +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox. +- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab Model Deployment Framework. diff --git a/README_zh-CN.md b/README_zh-CN.md new file mode 100644 index 0000000000..72abc867a9 --- /dev/null +++ b/README_zh-CN.md @@ -0,0 +1,228 @@ +
+ +
 
+
+ OpenMMLab 官网 + + + HOT + + +      + OpenMMLab 开放平台 + + + TRY IT OUT + + +
+
 
+
+
+ +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/mmsegmentation)](https://pypi.org/project/mmsegmentation/) +[![PyPI](https://img.shields.io/pypi/v/mmsegmentation)](https://pypi.org/project/mmsegmentation) +[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmsegmentation.readthedocs.io/zh_CN/1.x/) +[![badge](https://github.com/open-mmlab/mmsegmentation/workflows/build/badge.svg)](https://github.com/open-mmlab/mmsegmentation/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmsegmentation/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmsegmentation) +[![license](https://img.shields.io/github/license/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/blob/1.x/LICENSE) +[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/issues) +[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/issues) + +文档: https://mmsegmentation.readthedocs.io/zh_CN/latest + +[English](README.md) | 简体中文 + +## 简介 + +MMSegmentation 是一个基于 PyTorch 的语义分割开源工具箱。它是 OpenMMLab 项目的一部分。 + +1.x 分支代码目前支持 PyTorch 1.6 以上的版本。 + +![示例图片](resources/seg_demo.gif) + +### 主要特性 + +- **统一的基准平台** + + 我们将各种各样的语义分割算法集成到了一个统一的工具箱,进行基准测试。 + +- **模块化设计** + + MMSegmentation 将分割框架解耦成不同的模块组件,通过组合不同的模块组件,用户可以便捷地构建自定义的分割模型。 + +- **丰富的即插即用的算法和模型** + + MMSegmentation 支持了众多主流的和最新的分割算法,例如 PSPNet,DeepLabV3,PSANet,DeepLabV3+ 等。 + +- **速度快** + + 训练速度比其他语义分割代码库更快或者相当。 + +## 更新日志 + +最新版本 v1.0.0rc2 在 2022.12.6 发布。 +如果想了解更多版本更新细节和历史信息,请阅读[更新日志](docs/en/notes/changelog.md)。 + +## 安装 + +请参考[快速入门文档](docs/zh_cn/get_started.md#installation)进行安装,参考[数据集准备](docs/zh_cn/user_guides/2_dataset_prepare.md)处理数据。 + +## 快速入门 + +请参考[概述](docs/zh_cn/overview.md)对 MMSegmentation 进行初步了解。 + +请参考[用户指南](https://mmsegmentation.readthedocs.io/zh_CN/1.x/user_guides/index.html)了解 mmseg 的基本使用,以及[进阶指南](https://mmsegmentation.readthedocs.io/zh_CN/1.x/advanced_guides/index.html)深入了解 mmseg 设计和代码实现。 + +同时,我们提供了 Colab 教程。你可以在[这里](demo/MMSegmentation_Tutorial.ipynb)浏览教程,或者直接在 Colab 上[运行](https://colab.research.google.com/github/open-mmlab/mmsegmentation/blob/1.x/demo/MMSegmentation_Tutorial.ipynb)。 + +若需要将0.x版本的代码迁移至新版,请参考[迁移文档](docs/zh_cn/migration.md)。 + +## 基准测试和模型库 + +测试结果和模型可以在[模型库](docs/zh_cn/model_zoo.md)中找到。 + +已支持的骨干网络: + +- [x] ResNet (CVPR'2016) +- [x] ResNeXt (CVPR'2017) +- [x] [HRNet (CVPR'2019)](configs/hrnet) +- [x] [ResNeSt (ArXiv'2020)](configs/resnest) +- [x] [MobileNetV2 (CVPR'2018)](configs/mobilenet_v2) +- [x] [MobileNetV3 (ICCV'2019)](configs/mobilenet_v3) +- [x] [Vision Transformer (ICLR'2021)](configs/vit) +- [x] [Swin Transformer (ICCV'2021)](configs/swin) +- [x] [Twins (NeurIPS'2021)](configs/twins) +- [x] [BEiT (ICLR'2022)](configs/beit) +- [x] [ConvNeXt (CVPR'2022)](configs/convnext) +- [x] [MAE (CVPR'2022)](configs/mae) +- [x] [PoolFormer (CVPR'2022)](configs/poolformer) + +已支持的算法: + +- [x] [FCN (CVPR'2015/TPAMI'2017)](configs/fcn) +- [x] [ERFNet (T-ITS'2017)](configs/erfnet) +- [x] [UNet (MICCAI'2016/Nat. 
Methods'2019)](configs/unet) +- [x] [PSPNet (CVPR'2017)](configs/pspnet) +- [x] [DeepLabV3 (ArXiv'2017)](configs/deeplabv3) +- [x] [BiSeNetV1 (ECCV'2018)](configs/bisenetv1) +- [x] [PSANet (ECCV'2018)](configs/psanet) +- [x] [DeepLabV3+ (CVPR'2018)](configs/deeplabv3plus) +- [x] [UPerNet (ECCV'2018)](configs/upernet) +- [x] [ICNet (ECCV'2018)](configs/icnet) +- [x] [NonLocal Net (CVPR'2018)](configs/nonlocal_net) +- [x] [EncNet (CVPR'2018)](configs/encnet) +- [x] [Semantic FPN (CVPR'2019)](configs/sem_fpn) +- [x] [DANet (CVPR'2019)](configs/danet) +- [x] [APCNet (CVPR'2019)](configs/apcnet) +- [x] [EMANet (ICCV'2019)](configs/emanet) +- [x] [CCNet (ICCV'2019)](configs/ccnet) +- [x] [DMNet (ICCV'2019)](configs/dmnet) +- [x] [ANN (ICCV'2019)](configs/ann) +- [x] [GCNet (ICCVW'2019/TPAMI'2020)](configs/gcnet) +- [x] [FastFCN (ArXiv'2019)](configs/fastfcn) +- [x] [Fast-SCNN (ArXiv'2019)](configs/fastscnn) +- [x] [ISANet (ArXiv'2019/IJCV'2021)](configs/isanet) +- [x] [OCRNet (ECCV'2020)](configs/ocrnet) +- [x] [DNLNet (ECCV'2020)](configs/dnlnet) +- [x] [PointRend (CVPR'2020)](configs/point_rend) +- [x] [CGNet (TIP'2020)](configs/cgnet) +- [x] [BiSeNetV2 (IJCV'2021)](configs/bisenetv2) +- [x] [STDC (CVPR'2021)](configs/stdc) +- [x] [SETR (CVPR'2021)](configs/setr) +- [x] [DPT (ArXiv'2021)](configs/dpt) +- [x] [Segmenter (ICCV'2021)](configs/segmenter) +- [x] [SegFormer (NeurIPS'2021)](configs/segformer) +- [x] [K-Net (NeurIPS'2021)](configs/knet) +- [x] [MaskFormer (NeurIPS'2021)](configs/maskformer) +- [x] [Mask2Former (CVPR'2022)](configs/mask2former) + +已支持的数据集: + +- [x] [Cityscapes](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#cityscapes) +- [x] [PASCAL VOC](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#pascal-voc) +- [x] [ADE20K](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#ade20k) +- [x] [Pascal Context](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#pascal-context) +- [x] [COCO-Stuff 10k](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#coco-stuff-10k) +- [x] [COCO-Stuff 164k](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#coco-stuff-164k) +- [x] [CHASE_DB1](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#chase-db1) +- [x] [DRIVE](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#drive) +- [x] [HRF](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#hrf) +- [x] [STARE](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#stare) +- [x] [Dark Zurich](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#dark-zurich) +- [x] [Nighttime Driving](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#nighttime-driving) +- [x] [LoveDA](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#loveda) +- [x] [Potsdam](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#isprs-potsdam) +- [x] [Vaihingen](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#isprs-vaihingen) +- [x] [iSAID](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/zh_cn/dataset_prepare.md#isaid) + +如果遇到问题,请参考 [常见问题解答](docs/zh_cn/notes/faq.md)。 + +## 贡献指南 + +我们感谢所有的贡献者为改进和提升 MMSegmentation 
所作出的努力。请参考[贡献指南](.github/CONTRIBUTING.md)来了解参与项目贡献的相关指引。 + +## 致谢 + +MMSegmentation 是一个由来自不同高校和企业的研发人员共同参与贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。 我们希望这个工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现已有算法并开发自己的新模型,从而不断为开源社区提供贡献。 + +## 引用 + +如果你觉得本项目对你的研究工作有所帮助,请参考如下 bibtex 引用 MMSegmentation。 + +```bibtex +@misc{mmseg2020, + title={{MMSegmentation}: OpenMMLab Semantic Segmentation Toolbox and Benchmark}, + author={MMSegmentation Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmsegmentation}}, + year={2020} +} +``` + +## 开源许可证 + +该项目采用 [Apache 2.0 开源许可证](LICENSE)。 + +## OpenMMLab 的其他项目 + +- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab 深度学习模型训练库 +- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab 计算机视觉基础库 +- [MIM](https://github.com/open-mmlab/mim): MIM 是 OpenMMlab 项目、算法、模型的统一入口 +- [MMEval](https://github.com/open-mmlab/mmeval): 统一开放的跨框架算法评测库 +- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab 图像分类工具箱 +- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 目标检测工具箱 +- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台 +- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准 +- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO 系列工具箱与测试基准 +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱 +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具包 +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱 +- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 人体参数化模型工具箱与测试基准 +- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab 自监督学习工具箱与测试基准 +- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab 模型压缩工具箱与测试基准 +- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab 少样本学习工具箱与测试基准 +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱 +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台 +- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab 光流估计工具箱与测试基准 +- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab 图像视频编辑工具箱 +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab 图片视频生成模型工具箱 +- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab 模型部署框架 + +## 欢迎加入 OpenMMLab 社区 + +扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),加入 [OpenMMLab 团队](https://jq.qq.com/?_wv=1027&k=aCvMxdr3) 以及 [MMSegmentation](https://jq.qq.com/?_wv=1027&k=ukevz6Ie) 的 QQ 群。 + +
+ +
+ +我们会在 OpenMMLab 社区为大家 + +- 📢 分享 AI 框架的前沿核心技术 +- 💻 解读 PyTorch 常用模块源码 +- 📰 发布 OpenMMLab 的相关新闻 +- 🚀 介绍 OpenMMLab 开发的前沿算法 +- 🏃 获取更高效的问题答疑和意见反馈 +- 🔥 提供与各行各业开发者充分交流的平台 + +干货满满 📘,等你来撩 💗,OpenMMLab 社区期待您的加入 👬 diff --git a/configs/_base_/datasets/ade20k.py b/configs/_base_/datasets/ade20k.py index a1d9baba7c..4303b094c5 100644 --- a/configs/_base_/datasets/ade20k.py +++ b/configs/_base_/datasets/ade20k.py @@ -1,54 +1,52 @@ # dataset settings dataset_type = 'ADE20KDataset' data_root = 'data/ade/ADEChallengeData2016' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (512, 512) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', reduce_zero_label=True), - dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict( + type='RandomResize', + scale=(2048, 512), + ratio_range=(0.5, 2.0), + keep_ratio=True), dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', flip_ratio=0.5), + dict(type='RandomFlip', prob=0.5), dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), + dict(type='PackSegInputs') ] test_pipeline = [ dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 512), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) + dict(type='Resize', scale=(2048, 512), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs') ] -data = dict( - samples_per_gpu=4, - workers_per_gpu=4, - train=dict( - type=dataset_type, - data_root=data_root, - img_dir='images/training', - ann_dir='annotations/training', - pipeline=train_pipeline), - val=dict( +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( type=dataset_type, data_root=data_root, - img_dir='images/validation', - ann_dir='annotations/validation', - pipeline=test_pipeline), - test=dict( + data_prefix=dict( + img_path='images/training', seg_map_path='annotations/training'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( type=dataset_type, data_root=data_root, - img_dir='images/validation', - ann_dir='annotations/validation', + data_prefix=dict( + img_path='images/validation', + seg_map_path='annotations/validation'), pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/ade20k_640x640.py b/configs/_base_/datasets/ade20k_640x640.py new file mode 100644 index 0000000000..8478585915 --- /dev/null +++ b/configs/_base_/datasets/ade20k_640x640.py @@ -0,0 +1,52 @@ +# dataset settings +dataset_type = 'ADE20KDataset' +data_root = 'data/ade/ADEChallengeData2016' +crop_size = (640, 640) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', 
reduce_zero_label=True), + dict( + type='RandomResize', + scale=(2560, 640), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2560, 640), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/training', seg_map_path='annotations/training'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/validation', + seg_map_path='annotations/validation'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/chase_db1.py b/configs/_base_/datasets/chase_db1.py new file mode 100644 index 0000000000..8cd4f3c284 --- /dev/null +++ b/configs/_base_/datasets/chase_db1.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'ChaseDB1Dataset' +data_root = 'data/CHASE_DB1' +img_scale = (960, 999) +crop_size = (128, 128) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=img_scale, + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=img_scale, keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] + +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/training', + seg_map_path='annotations/training'), + pipeline=train_pipeline))) + +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/validation', + seg_map_path='annotations/validation'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mDice']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/cityscapes.py b/configs/_base_/datasets/cityscapes.py index 21cf5c3958..c2fdee473b 100644 --- a/configs/_base_/datasets/cityscapes.py +++ b/configs/_base_/datasets/cityscapes.py @@ -1,54 +1,51 @@ # dataset settings dataset_type = 'CityscapesDataset' data_root = 'data/cityscapes/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 
103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (512, 1024) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomResize', + scale=(2048, 1024), + ratio_range=(0.5, 2.0), + keep_ratio=True), dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', flip_ratio=0.5), + dict(type='RandomFlip', prob=0.5), dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), + dict(type='PackSegInputs') ] test_pipeline = [ dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) + dict(type='Resize', scale=(2048, 1024), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') ] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - data_root=data_root, - img_dir='leftImg8bit/train', - ann_dir='gtFine/train', - pipeline=train_pipeline), - val=dict( +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( type=dataset_type, data_root=data_root, - img_dir='leftImg8bit/val', - ann_dir='gtFine/val', - pipeline=test_pipeline), - test=dict( + data_prefix=dict( + img_path='leftImg8bit/train', seg_map_path='gtFine/train'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( type=dataset_type, data_root=data_root, - img_dir='leftImg8bit/val', - ann_dir='gtFine/val', + data_prefix=dict( + img_path='leftImg8bit/val', seg_map_path='gtFine/val'), pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/cityscapes_1024x1024.py b/configs/_base_/datasets/cityscapes_1024x1024.py new file mode 100644 index 0000000000..72be307b40 --- /dev/null +++ b/configs/_base_/datasets/cityscapes_1024x1024.py @@ -0,0 +1,29 @@ +_base_ = './cityscapes.py' +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=(2048, 1024), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 1024), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = 
val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/cityscapes_768x768.py b/configs/_base_/datasets/cityscapes_768x768.py new file mode 100644 index 0000000000..fcee0143ac --- /dev/null +++ b/configs/_base_/datasets/cityscapes_768x768.py @@ -0,0 +1,29 @@ +_base_ = './cityscapes.py' +crop_size = (768, 768) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=(2049, 1025), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2049, 1025), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/cityscapes_769x769.py b/configs/_base_/datasets/cityscapes_769x769.py index a5bcff3710..ae40ac8c5f 100644 --- a/configs/_base_/datasets/cityscapes_769x769.py +++ b/configs/_base_/datasets/cityscapes_769x769.py @@ -1,35 +1,29 @@ _base_ = './cityscapes.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (769, 769) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), + dict( + type='RandomResize', + scale=(2049, 1025), + ratio_range=(0.5, 2.0), + keep_ratio=True), dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', flip_ratio=0.5), + dict(type='RandomFlip', prob=0.5), dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), + dict(type='PackSegInputs') ] test_pipeline = [ dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2049, 1025), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) + dict(type='Resize', scale=(2049, 1025), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') ] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/cityscapes_832x832.py b/configs/_base_/datasets/cityscapes_832x832.py new file mode 100644 index 0000000000..0254580357 --- /dev/null 
+++ b/configs/_base_/datasets/cityscapes_832x832.py @@ -0,0 +1,29 @@ +_base_ = './cityscapes.py' +crop_size = (832, 832) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=(2048, 1024), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 1024), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/coco-stuff10k.py b/configs/_base_/datasets/coco-stuff10k.py new file mode 100644 index 0000000000..b00db24691 --- /dev/null +++ b/configs/_base_/datasets/coco-stuff10k.py @@ -0,0 +1,53 @@ +# dataset settings +dataset_type = 'COCOStuffDataset' +data_root = 'data/coco_stuff10k' +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict( + type='RandomResize', + scale=(2048, 512), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 512), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + reduce_zero_label=True, + data_prefix=dict( + img_path='images/train2014', seg_map_path='annotations/train2014'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + reduce_zero_label=True, + data_prefix=dict( + img_path='images/test2014', seg_map_path='annotations/test2014'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/coco-stuff164k.py b/configs/_base_/datasets/coco-stuff164k.py new file mode 100644 index 0000000000..e879bdb2aa --- /dev/null +++ b/configs/_base_/datasets/coco-stuff164k.py @@ -0,0 +1,51 @@ +# dataset settings +dataset_type = 'COCOStuffDataset' +data_root = 'data/coco_stuff164k' +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=(2048, 512), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + 
dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 512), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/train2017', seg_map_path='annotations/train2017'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/val2017', seg_map_path='annotations/val2017'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/drive.py b/configs/_base_/datasets/drive.py new file mode 100644 index 0000000000..248dc8b102 --- /dev/null +++ b/configs/_base_/datasets/drive.py @@ -0,0 +1,57 @@ +# dataset settings +dataset_type = 'DRIVEDataset' +data_root = 'data/DRIVE' +img_scale = (584, 565) +crop_size = (64, 64) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=img_scale, + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=img_scale, keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/training', + seg_map_path='annotations/training'), + pipeline=train_pipeline))) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/validation', + seg_map_path='annotations/validation'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mDice']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/hrf.py b/configs/_base_/datasets/hrf.py new file mode 100644 index 0000000000..11b66e7d52 --- /dev/null +++ b/configs/_base_/datasets/hrf.py @@ -0,0 +1,57 @@ +# dataset settings +dataset_type = 'HRFDataset' +data_root = 'data/HRF' +img_scale = (2336, 3504) +crop_size = (256, 256) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=img_scale, + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline 
= [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=img_scale, keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/training', + seg_map_path='annotations/training'), + pipeline=train_pipeline))) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/validation', + seg_map_path='annotations/validation'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mDice']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/isaid.py b/configs/_base_/datasets/isaid.py new file mode 100644 index 0000000000..8dafae8fd4 --- /dev/null +++ b/configs/_base_/datasets/isaid.py @@ -0,0 +1,57 @@ +# dataset settings +dataset_type = 'iSAIDDataset' +data_root = 'data/iSAID' +""" +This crop_size setting is followed by the implementation of +`PointFlow: Flowing Semantics Through Points for Aerial Image +Segmentation `_. +""" + +crop_size = (896, 896) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=(896, 896), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(896, 896), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='img_dir/train', seg_map_path='ann_dir/train'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='img_dir/val', seg_map_path='ann_dir/val'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/loveda.py b/configs/_base_/datasets/loveda.py new file mode 100644 index 0000000000..fcdb05865e --- /dev/null +++ b/configs/_base_/datasets/loveda.py @@ -0,0 +1,50 @@ +# dataset settings +dataset_type = 'LoveDADataset' +data_root = 'data/loveDA' +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict( + type='RandomResize', + scale=(2048, 512), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + 
dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(1024, 1024), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='img_dir/train', seg_map_path='ann_dir/train'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='img_dir/val', seg_map_path='ann_dir/val'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/pascal_context.py b/configs/_base_/datasets/pascal_context.py new file mode 100644 index 0000000000..dfb1f858b3 --- /dev/null +++ b/configs/_base_/datasets/pascal_context.py @@ -0,0 +1,56 @@ +# dataset settings +dataset_type = 'PascalContextDataset' +data_root = 'data/VOCdevkit/VOC2010/' + +img_scale = (520, 520) +crop_size = (480, 480) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=img_scale, + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=img_scale, keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='JPEGImages', seg_map_path='SegmentationClassContext'), + ann_file='ImageSets/SegmentationContext/train.txt', + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='JPEGImages', seg_map_path='SegmentationClassContext'), + ann_file='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/pascal_context_59.py b/configs/_base_/datasets/pascal_context_59.py new file mode 100644 index 0000000000..9103fe7e3f --- /dev/null +++ b/configs/_base_/datasets/pascal_context_59.py @@ -0,0 +1,56 @@ +# dataset settings +dataset_type = 'PascalContextDataset59' +data_root = 'data/VOCdevkit/VOC2010/' + +img_scale = (520, 520) +crop_size = (480, 480) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict( + type='RandomResize', + scale=img_scale, + 
ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=img_scale, keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='JPEGImages', seg_map_path='SegmentationClassContext'), + ann_file='ImageSets/SegmentationContext/train.txt', + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='JPEGImages', seg_map_path='SegmentationClassContext'), + ann_file='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/pascal_voc12.py b/configs/_base_/datasets/pascal_voc12.py index 6a367c7f1d..aeb38d0613 100644 --- a/configs/_base_/datasets/pascal_voc12.py +++ b/configs/_base_/datasets/pascal_voc12.py @@ -1,57 +1,53 @@ # dataset settings dataset_type = 'PascalVOCDataset' data_root = 'data/VOCdevkit/VOC2012' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) crop_size = (512, 512) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict( + type='RandomResize', + scale=(2048, 512), + ratio_range=(0.5, 2.0), + keep_ratio=True), dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', flip_ratio=0.5), + dict(type='RandomFlip', prob=0.5), dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), + dict(type='PackSegInputs') ] test_pipeline = [ dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 512), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) + dict(type='Resize', scale=(2048, 512), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') ] -data = dict( - samples_per_gpu=4, - workers_per_gpu=4, - train=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClass', - split='ImageSets/Segmentation/train.txt', - pipeline=train_pipeline), - val=dict( +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( type=dataset_type, 
data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClass', - split='ImageSets/Segmentation/val.txt', - pipeline=test_pipeline), - test=dict( + data_prefix=dict( + img_path='JPEGImages', seg_map_path='SegmentationClass'), + ann_file='ImageSets/Segmentation/train.txt', + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( type=dataset_type, data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClass', - split='ImageSets/Segmentation/val.txt', + data_prefix=dict( + img_path='JPEGImages', seg_map_path='SegmentationClass'), + ann_file='ImageSets/Segmentation/val.txt', pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/pascal_voc12_aug.py b/configs/_base_/datasets/pascal_voc12_aug.py index 3f23b6717d..cd0d3e8682 100644 --- a/configs/_base_/datasets/pascal_voc12_aug.py +++ b/configs/_base_/datasets/pascal_voc12_aug.py @@ -1,9 +1,66 @@ -_base_ = './pascal_voc12.py' # dataset settings -data = dict( - train=dict( - ann_dir=['SegmentationClass', 'SegmentationClassAug'], - split=[ - 'ImageSets/Segmentation/train.txt', - 'ImageSets/Segmentation/aug.txt' - ])) +dataset_type = 'PascalVOCDataset' +data_root = 'data/VOCdevkit/VOC2012' +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=(2048, 512), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Pad', size=crop_size), + dict(type='PackSegInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 512), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] + +dataset_train = dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='JPEGImages', seg_map_path='SegmentationClass'), + ann_file='ImageSets/Segmentation/train.txt', + pipeline=train_pipeline) + +dataset_aug = dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='JPEGImages', seg_map_path='SegmentationClassAug'), + ann_file='ImageSets/Segmentation/aug.txt', + pipeline=train_pipeline) + +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict(type='ConcatDataset', datasets=[dataset_train, dataset_aug])) + +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='JPEGImages', seg_map_path='SegmentationClass'), + ann_file='ImageSets/Segmentation/val.txt', + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/potsdam.py b/configs/_base_/datasets/potsdam.py new file mode 100644 index 0000000000..ef9761c76e --- /dev/null +++ b/configs/_base_/datasets/potsdam.py @@ -0,0 +1,50 @@ +# dataset settings +dataset_type = 'PotsdamDataset' 
+data_root = 'data/potsdam' +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict( + type='RandomResize', + scale=(512, 512), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(512, 512), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='img_dir/train', seg_map_path='ann_dir/train'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='img_dir/val', seg_map_path='ann_dir/val'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/stare.py b/configs/_base_/datasets/stare.py new file mode 100644 index 0000000000..7fccd71a54 --- /dev/null +++ b/configs/_base_/datasets/stare.py @@ -0,0 +1,57 @@ +# dataset settings +dataset_type = 'STAREDataset' +data_root = 'data/STARE' +img_scale = (605, 700) +crop_size = (128, 128) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=img_scale, + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=img_scale, keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/training', + seg_map_path='annotations/training'), + pipeline=train_pipeline))) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/validation', + seg_map_path='annotations/validation'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mDice']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/synapse.py b/configs/_base_/datasets/synapse.py new file mode 100644 index 0000000000..86852918cd --- /dev/null +++ b/configs/_base_/datasets/synapse.py @@ -0,0 +1,41 @@ +dataset_type = 'SynapseDataset' +data_root = 'data/synapse/' +img_scale = 
(224, 224) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict(type='RandomRotFlip', rotate_prob=0.5, flip_prob=0.5, degree=20), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=6, + num_workers=2, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='img_dir/train', seg_map_path='ann_dir/train'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='img_dir/val', seg_map_path='ann_dir/val'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mDice']) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/vaihingen.py b/configs/_base_/datasets/vaihingen.py new file mode 100644 index 0000000000..2b52135567 --- /dev/null +++ b/configs/_base_/datasets/vaihingen.py @@ -0,0 +1,50 @@ +# dataset settings +dataset_type = 'ISPRSDataset' +data_root = 'data/vaihingen' +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict( + type='RandomResize', + scale=(512, 512), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(512, 512), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='img_dir/train', seg_map_path='ann_dir/train'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='img_dir/val', seg_map_path='ann_dir/val'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/configs/_base_/default_runtime.py b/configs/_base_/default_runtime.py index b564cc4e7e..e9fa6e1035 100644 --- a/configs/_base_/default_runtime.py +++ b/configs/_base_/default_runtime.py @@ -1,14 +1,13 @@ -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook', by_epoch=False), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -dist_params = dict(backend='nccl') +default_scope = 'mmseg' +env_cfg = dict( + cudnn_benchmark=True, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) +vis_backends = [dict(type='LocalVisBackend')] +visualizer 
= dict( + type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer') +log_processor = dict(by_epoch=False) log_level = 'INFO' load_from = None -resume_from = None -workflow = [('train', 1)] -cudnn_benchmark = True +resume = False diff --git a/configs/_base_/models/ann_r50-d8.py b/configs/_base_/models/ann_r50-d8.py index c2287b4790..a1ef956948 100644 --- a/configs/_base_/models/ann_r50-d8.py +++ b/configs/_base_/models/ann_r50-d8.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', @@ -22,7 +30,7 @@ project_channels=256, query_scales=(1, ), key_pool_scales=(1, 3, 6, 8), - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, @@ -35,12 +43,12 @@ channels=256, num_convs=1, concat_input=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/apcnet_r50-d8.py b/configs/_base_/models/apcnet_r50-d8.py new file mode 100644 index 0000000000..63269f9987 --- /dev/null +++ b/configs/_base_/models/apcnet_r50-d8.py @@ -0,0 +1,52 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='APCHead', + in_channels=2048, + in_index=3, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=dict(type='SyncBN', requires_grad=True), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/bisenetv1_r18-d32.py b/configs/_base_/models/bisenetv1_r18-d32.py new file mode 100644 index 0000000000..2aecb9e2ef --- /dev/null +++ b/configs/_base_/models/bisenetv1_r18-d32.py @@ -0,0 +1,76 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + 
type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='BiSeNetV1', + in_channels=3, + context_channels=(128, 256, 512), + spatial_channels=(64, 64, 64, 128), + out_indices=(0, 1, 2), + out_channels=256, + backbone_cfg=dict( + type='ResNet', + in_channels=3, + depth=18, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + norm_cfg=norm_cfg, + align_corners=False, + init_cfg=None), + decode_head=dict( + type='FCNHead', + in_channels=256, + in_index=0, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=128, + channels=64, + num_convs=1, + num_classes=19, + in_index=1, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='FCNHead', + in_channels=128, + channels=64, + num_convs=1, + num_classes=19, + in_index=2, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/bisenetv2.py b/configs/_base_/models/bisenetv2.py new file mode 100644 index 0000000000..ae845129db --- /dev/null +++ b/configs/_base_/models/bisenetv2.py @@ -0,0 +1,88 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='BiSeNetV2', + detail_channels=(64, 64, 128), + semantic_channels=(16, 32, 64, 128), + semantic_expansion_ratio=6, + bga_channels=128, + out_indices=(0, 1, 2, 3, 4), + init_cfg=None, + align_corners=False), + decode_head=dict( + type='FCNHead', + in_channels=128, + in_index=0, + channels=1024, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=16, + channels=16, + num_convs=2, + num_classes=19, + in_index=1, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='FCNHead', + in_channels=32, + channels=64, + num_convs=2, + num_classes=19, + in_index=2, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='FCNHead', + in_channels=64, + channels=256, + num_convs=2, + num_classes=19, + in_index=3, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='FCNHead', + in_channels=128, + channels=1024, + num_convs=2, + num_classes=19, + in_index=4, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', 
use_sigmoid=False, loss_weight=1.0)), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/ccnet_r50-d8.py b/configs/_base_/models/ccnet_r50-d8.py index 9f2794c33c..575d8eb459 100644 --- a/configs/_base_/models/ccnet_r50-d8.py +++ b/configs/_base_/models/ccnet_r50-d8.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', @@ -20,7 +28,7 @@ in_index=3, channels=512, recurrence=2, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, @@ -33,12 +41,12 @@ channels=256, num_convs=1, concat_input=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/cgnet.py b/configs/_base_/models/cgnet.py new file mode 100644 index 0000000000..93c6f5b6d1 --- /dev/null +++ b/configs/_base_/models/cgnet.py @@ -0,0 +1,43 @@ +# model settings +norm_cfg = dict(type='SyncBN', eps=1e-03, requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[72.39239876, 82.90891754, 73.15835921], + std=[1, 1, 1], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='CGNet', + norm_cfg=norm_cfg, + in_channels=3, + num_channels=(32, 64, 128), + num_blocks=(3, 21), + dilations=(2, 4), + reductions=(8, 16)), + decode_head=dict( + type='FCNHead', + in_channels=256, + in_index=2, + channels=256, + num_convs=0, + concat_input=False, + dropout_ratio=0, + num_classes=19, + norm_cfg=norm_cfg, + loss_decode=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0, + class_weight=[ + 2.5959933, 6.7415504, 3.5354059, 9.8663225, 9.690899, 9.369352, + 10.289121, 9.953208, 4.3097677, 9.490387, 7.674431, 9.396905, + 10.347791, 6.3927646, 10.226669, 10.241062, 10.280587, + 10.396974, 10.055647 + ])), + # model training and testing settings + train_cfg=dict(sampler=None), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/danet_r50-d8.py b/configs/_base_/models/danet_r50-d8.py index 76a27054ed..8163b3d691 100644 --- a/configs/_base_/models/danet_r50-d8.py +++ b/configs/_base_/models/danet_r50-d8.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', @@ -20,7 +28,7 @@ in_index=3, channels=512, pam_channels=64, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, @@ -33,12 +41,12 @@ channels=256, num_convs=1, 
concat_input=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/deeplabv3_r50-d8.py b/configs/_base_/models/deeplabv3_r50-d8.py index 00c1f8796d..22efe9a6ca 100644 --- a/configs/_base_/models/deeplabv3_r50-d8.py +++ b/configs/_base_/models/deeplabv3_r50-d8.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', @@ -20,7 +28,7 @@ in_index=3, channels=512, dilations=(1, 12, 24, 36), - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, @@ -33,12 +41,12 @@ channels=256, num_convs=1, concat_input=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/deeplabv3_unet_s5-d16.py b/configs/_base_/models/deeplabv3_unet_s5-d16.py new file mode 100644 index 0000000000..92df52c35d --- /dev/null +++ b/configs/_base_/models/deeplabv3_unet_s5-d16.py @@ -0,0 +1,58 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='UNet', + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + norm_eval=False), + decode_head=dict( + type='ASPPHead', + in_channels=64, + in_index=4, + channels=16, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=128, + in_index=3, + channels=64, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide', crop_size=256, stride=170)) diff --git a/configs/_base_/models/deeplabv3plus_r50-d8.py 
b/configs/_base_/models/deeplabv3plus_r50-d8.py index f930b154f5..74dbed5593 100644 --- a/configs/_base_/models/deeplabv3plus_r50-d8.py +++ b/configs/_base_/models/deeplabv3plus_r50-d8.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', @@ -22,7 +30,7 @@ dilations=(1, 12, 24, 36), c1_in_channels=256, c1_channels=48, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, @@ -35,12 +43,12 @@ channels=256, num_convs=1, concat_input=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/dmnet_r50-d8.py b/configs/_base_/models/dmnet_r50-d8.py new file mode 100644 index 0000000000..f66a042f1d --- /dev/null +++ b/configs/_base_/models/dmnet_r50-d8.py @@ -0,0 +1,52 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DMHead', + in_channels=2048, + in_index=3, + channels=512, + filter_sizes=(1, 3, 5, 7), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=dict(type='SyncBN', requires_grad=True), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/dnl_r50-d8.py b/configs/_base_/models/dnl_r50-d8.py new file mode 100644 index 0000000000..ee64056c0e --- /dev/null +++ b/configs/_base_/models/dnl_r50-d8.py @@ -0,0 +1,54 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + 
style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DNLHead', + in_channels=2048, + in_index=3, + channels=512, + dropout_ratio=0.1, + reduction=2, + use_scale=True, + mode='embedded_gaussian', + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/dpt_vit-b16.py b/configs/_base_/models/dpt_vit-b16.py new file mode 100644 index 0000000000..90845b37b5 --- /dev/null +++ b/configs/_base_/models/dpt_vit-b16.py @@ -0,0 +1,39 @@ +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='pretrain/vit-b16_p16_224-80ecf9dd.pth', # noqa + backbone=dict( + type='VisionTransformer', + img_size=224, + embed_dims=768, + num_layers=12, + num_heads=12, + out_indices=(2, 5, 8, 11), + final_norm=False, + with_cls_token=True, + output_cls_token=True), + decode_head=dict( + type='DPTHead', + in_channels=(768, 768, 768, 768), + channels=256, + embed_dims=768, + post_process_channels=[96, 192, 384, 768], + num_classes=150, + readout_type='project', + input_transform='multiple_select', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=None, + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) # yapf: disable diff --git a/configs/_base_/models/emanet_r50-d8.py b/configs/_base_/models/emanet_r50-d8.py new file mode 100644 index 0000000000..c55af4f11d --- /dev/null +++ b/configs/_base_/models/emanet_r50-d8.py @@ -0,0 +1,55 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='EMAHead', + in_channels=2048, + in_index=3, + channels=256, + ema_channels=512, + num_bases=64, + num_stages=3, + momentum=0.1, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) 
diff --git a/configs/_base_/models/encnet_r50-d8.py b/configs/_base_/models/encnet_r50-d8.py index 46fffa1f8c..63cec9e3cb 100644 --- a/configs/_base_/models/encnet_r50-d8.py +++ b/configs/_base_/models/encnet_r50-d8.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', @@ -22,7 +30,7 @@ num_codes=32, use_se_loss=True, add_lateral=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, @@ -37,12 +45,12 @@ channels=256, num_convs=1, concat_input=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/erfnet_fcn.py b/configs/_base_/models/erfnet_fcn.py new file mode 100644 index 0000000000..4d68a72296 --- /dev/null +++ b/configs/_base_/models/erfnet_fcn.py @@ -0,0 +1,40 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='ERFNet', + in_channels=3, + enc_downsample_channels=(16, 64, 128), + enc_stage_non_bottlenecks=(5, 8), + enc_non_bottleneck_dilations=(2, 4, 8, 16), + enc_non_bottleneck_channels=(64, 128), + dec_upsample_channels=(64, 16), + dec_stages_non_bottleneck=(2, 2), + dec_non_bottleneck_channels=(64, 16), + dropout_ratio=0.1, + init_cfg=None), + decode_head=dict( + type='FCNHead', + in_channels=16, + channels=128, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/fast_scnn.py b/configs/_base_/models/fast_scnn.py new file mode 100644 index 0000000000..11127b0115 --- /dev/null +++ b/configs/_base_/models/fast_scnn.py @@ -0,0 +1,65 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='FastSCNN', + downsample_dw_channels=(32, 48), + global_in_channels=64, + global_block_channels=(64, 96, 128), + global_block_strides=(2, 2, 1), + global_out_channels=128, + higher_in_channels=64, + lower_in_channels=128, + fusion_out_channels=128, + out_indices=(0, 1, 2), + norm_cfg=norm_cfg, + align_corners=False), + decode_head=dict( + type='DepthwiseSeparableFCNHead', + in_channels=128, + channels=128, + 
concat_input=False, + num_classes=19, + in_index=-1, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=128, + channels=32, + num_convs=1, + num_classes=19, + in_index=-2, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=64, + channels=32, + num_convs=1, + num_classes=19, + in_index=-3, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/fastfcn_r50-d32_jpu_psp.py b/configs/_base_/models/fastfcn_r50-d32_jpu_psp.py new file mode 100644 index 0000000000..a200b4bac6 --- /dev/null +++ b/configs/_base_/models/fastfcn_r50-d32_jpu_psp.py @@ -0,0 +1,61 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + dilations=(1, 1, 2, 4), + strides=(1, 2, 2, 2), + out_indices=(1, 2, 3), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + neck=dict( + type='JPU', + in_channels=(512, 1024, 2048), + mid_channels=512, + start_level=0, + end_level=-1, + dilations=(1, 2, 4, 8), + align_corners=False, + norm_cfg=norm_cfg), + decode_head=dict( + type='PSPHead', + in_channels=2048, + in_index=2, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=1, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/fcn_hr18.py b/configs/_base_/models/fcn_hr18.py index e2583a2ac8..01a447aabe 100644 --- a/configs/_base_/models/fcn_hr18.py +++ b/configs/_base_/models/fcn_hr18.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://msra/hrnetv2_w18', backbone=dict( type='HRNet', @@ -41,12 +49,12 @@ kernel_size=1, num_convs=1, concat_input=False, - drop_out_ratio=-1, + dropout_ratio=-1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + 
train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/fcn_r50-d8.py b/configs/_base_/models/fcn_r50-d8.py index 08546755c9..9a76a6c3fb 100644 --- a/configs/_base_/models/fcn_r50-d8.py +++ b/configs/_base_/models/fcn_r50-d8.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', @@ -21,7 +29,7 @@ channels=512, num_convs=2, concat_input=True, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, @@ -34,12 +42,12 @@ channels=256, num_convs=1, concat_input=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/fcn_unet_s5-d16.py b/configs/_base_/models/fcn_unet_s5-d16.py new file mode 100644 index 0000000000..9f880d21e2 --- /dev/null +++ b/configs/_base_/models/fcn_unet_s5-d16.py @@ -0,0 +1,59 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='UNet', + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + norm_eval=False), + decode_head=dict( + type='FCNHead', + in_channels=64, + in_index=4, + channels=64, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=128, + in_index=3, + channels=64, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide', crop_size=256, stride=170)) diff --git a/configs/_base_/models/fpn_poolformer_s12.py b/configs/_base_/models/fpn_poolformer_s12.py new file mode 100644 index 0000000000..483d823308 --- /dev/null +++ b/configs/_base_/models/fpn_poolformer_s12.py @@ -0,0 +1,50 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s12_3rdparty_32xb128_in1k_20220414-f8d83051.pth' # noqa +custom_imports = dict(imports='mmcls.models', allow_failed_imports=False) 
+data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='mmcls.PoolFormer', + arch='s12', + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, prefix='backbone.'), + in_patch_size=7, + in_stride=4, + in_pad=2, + down_patch_size=3, + down_stride=2, + down_pad=1, + drop_rate=0., + drop_path_rate=0., + out_indices=(0, 2, 4, 6), + frozen_stages=0, + ), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=4), + decode_head=dict( + type='FPNHead', + in_channels=[256, 256, 256, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/fpn_r50.py b/configs/_base_/models/fpn_r50.py new file mode 100644 index 0000000000..3baa0970fb --- /dev/null +++ b/configs/_base_/models/fpn_r50.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=4), + decode_head=dict( + type='FPNHead', + in_channels=[256, 256, 256, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/gcnet_r50-d8.py b/configs/_base_/models/gcnet_r50-d8.py index 9057687c06..8238d4b588 100644 --- a/configs/_base_/models/gcnet_r50-d8.py +++ b/configs/_base_/models/gcnet_r50-d8.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', @@ -22,7 +30,7 @@ ratio=1 / 4., pooling_type='att', fusion_types=('channel_add', ), - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, @@ -35,12 +43,12 @@ channels=256, num_convs=1, concat_input=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + 
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/icnet_r50-d8.py b/configs/_base_/models/icnet_r50-d8.py new file mode 100644 index 0000000000..4377053bda --- /dev/null +++ b/configs/_base_/models/icnet_r50-d8.py @@ -0,0 +1,82 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='ICNet', + backbone_cfg=dict( + type='ResNetV1c', + in_channels=3, + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + in_channels=3, + layer_channels=(512, 2048), + light_branch_middle_channels=32, + psp_out_channels=512, + out_channels=(64, 256, 256), + norm_cfg=norm_cfg, + align_corners=False, + ), + neck=dict( + type='ICNeck', + in_channels=(64, 256, 256), + out_channels=128, + norm_cfg=norm_cfg, + align_corners=False), + decode_head=dict( + type='FCNHead', + in_channels=128, + channels=128, + num_convs=1, + in_index=2, + dropout_ratio=0, + num_classes=19, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=128, + channels=128, + num_convs=1, + num_classes=19, + in_index=0, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=128, + channels=128, + num_convs=1, + num_classes=19, + in_index=1, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/isanet_r50-d8.py b/configs/_base_/models/isanet_r50-d8.py new file mode 100644 index 0000000000..e028ba85b4 --- /dev/null +++ b/configs/_base_/models/isanet_r50-d8.py @@ -0,0 +1,53 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='ISAHead', + in_channels=2048, + in_index=3, + channels=512, + isa_channels=256, + down_factor=(8, 8), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + 
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/lraspp_m-v3-d8.py b/configs/_base_/models/lraspp_m-v3-d8.py new file mode 100644 index 0000000000..acf70e7107 --- /dev/null +++ b/configs/_base_/models/lraspp_m-v3-d8.py @@ -0,0 +1,33 @@ +# model settings +norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='MobileNetV3', + arch='large', + out_indices=(1, 3, 16), + norm_cfg=norm_cfg), + decode_head=dict( + type='LRASPPHead', + in_channels=(16, 24, 960), + in_index=(0, 1, 2), + channels=128, + input_transform='multiple_select', + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/nonlocal_r50-d8.py b/configs/_base_/models/nonlocal_r50-d8.py index 7fa88f9a59..7d73a84860 100644 --- a/configs/_base_/models/nonlocal_r50-d8.py +++ b/configs/_base_/models/nonlocal_r50-d8.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', @@ -19,7 +27,7 @@ in_channels=2048, in_index=3, channels=512, - drop_out_ratio=0.1, + dropout_ratio=0.1, reduction=2, use_scale=True, mode='embedded_gaussian', @@ -35,12 +43,12 @@ channels=256, num_convs=1, concat_input=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/ocrnet_hr18.py b/configs/_base_/models/ocrnet_hr18.py index 4053daa0b0..6c7fcfe3d6 100644 --- a/configs/_base_/models/ocrnet_hr18.py +++ b/configs/_base_/models/ocrnet_hr18.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='CascadeEncoderDecoder', + data_preprocessor=data_preprocessor, num_stages=2, pretrained='open-mmlab://msra/hrnetv2_w18', backbone=dict( @@ -43,7 +51,7 @@ kernel_size=1, num_convs=1, concat_input=False, - drop_out_ratio=-1, + dropout_ratio=-1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, @@ -56,13 +64,13 @@ input_transform='resize_concat', channels=512, ocr_channels=256, - drop_out_ratio=-1, + dropout_ratio=-1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( type='CrossEntropyLoss', 
use_sigmoid=False, loss_weight=1.0)), - ]) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/ocrnet_r50-d8.py b/configs/_base_/models/ocrnet_r50-d8.py new file mode 100644 index 0000000000..0a2588f983 --- /dev/null +++ b/configs/_base_/models/ocrnet_r50-d8.py @@ -0,0 +1,55 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='CascadeEncoderDecoder', + data_preprocessor=data_preprocessor, + num_stages=2, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=[ + dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=2048, + in_index=3, + channels=512, + ocr_channels=256, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/pointrend_r50.py b/configs/_base_/models/pointrend_r50.py new file mode 100644 index 0000000000..8a27e856f5 --- /dev/null +++ b/configs/_base_/models/pointrend_r50.py @@ -0,0 +1,64 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='CascadeEncoderDecoder', + data_preprocessor=data_preprocessor, + num_stages=2, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=4), + decode_head=[ + dict( + type='FPNHead', + in_channels=[256, 256, 256, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=-1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='PointHead', + in_channels=[256], + in_index=[0], + channels=256, + num_fcs=3, + coarse_pred_each_layer=True, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ], + # model training and testing settings + train_cfg=dict( + num_points=2048, oversample_ratio=3, importance_sample_ratio=0.75), + test_cfg=dict( + mode='whole', + subdivision_steps=2, + subdivision_num_points=8196, + scale_factor=2)) diff --git a/configs/_base_/models/psanet_r50-d8.py 
b/configs/_base_/models/psanet_r50-d8.py index 170b48f457..40fd5a9137 100644 --- a/configs/_base_/models/psanet_r50-d8.py +++ b/configs/_base_/models/psanet_r50-d8.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', @@ -25,7 +33,7 @@ shrink_factor=2, normalization_factor=1.0, psa_softmax=True, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, @@ -38,12 +46,12 @@ channels=256, num_convs=1, concat_input=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/pspnet_r50-d8.py b/configs/_base_/models/pspnet_r50-d8.py index c5bb885c58..c257b8ba27 100644 --- a/configs/_base_/models/pspnet_r50-d8.py +++ b/configs/_base_/models/pspnet_r50-d8.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', @@ -20,7 +28,7 @@ in_index=3, channels=512, pool_scales=(1, 2, 3, 6), - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, @@ -33,12 +41,12 @@ channels=256, num_convs=1, concat_input=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/pspnet_unet_s5-d16.py b/configs/_base_/models/pspnet_unet_s5-d16.py new file mode 100644 index 0000000000..834a22ad00 --- /dev/null +++ b/configs/_base_/models/pspnet_unet_s5-d16.py @@ -0,0 +1,58 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='UNet', + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + norm_eval=False), + decode_head=dict( 
+ type='PSPHead', + in_channels=64, + in_index=4, + channels=16, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=128, + in_index=3, + channels=64, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide', crop_size=256, stride=170)) diff --git a/configs/_base_/models/segformer_mit-b0.py b/configs/_base_/models/segformer_mit-b0.py new file mode 100644 index 0000000000..46841adc07 --- /dev/null +++ b/configs/_base_/models/segformer_mit-b0.py @@ -0,0 +1,42 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='MixVisionTransformer', + in_channels=3, + embed_dims=32, + num_stages=4, + num_layers=[2, 2, 2, 2], + num_heads=[1, 2, 5, 8], + patch_sizes=[7, 3, 3, 3], + sr_ratios=[8, 4, 2, 1], + out_indices=(0, 1, 2, 3), + mlp_ratio=4, + qkv_bias=True, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.1), + decode_head=dict( + type='SegformerHead', + in_channels=[32, 64, 160, 256], + in_index=[0, 1, 2, 3], + channels=256, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/segmenter_vit-b16_mask.py b/configs/_base_/models/segmenter_vit-b16_mask.py new file mode 100644 index 0000000000..8f3dad1536 --- /dev/null +++ b/configs/_base_/models/segmenter_vit-b16_mask.py @@ -0,0 +1,44 @@ +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_base_p16_384_20220308-96dfe169.pth' # noqa +# model settings +backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=checkpoint, + backbone=dict( + type='VisionTransformer', + img_size=(512, 512), + patch_size=16, + in_channels=3, + embed_dims=768, + num_layers=12, + num_heads=12, + drop_path_rate=0.1, + attn_drop_rate=0.0, + drop_rate=0.0, + final_norm=True, + norm_cfg=backbone_norm_cfg, + with_cls_token=True, + interpolate_mode='bicubic', + ), + decode_head=dict( + type='SegmenterMaskTransformerHead', + in_channels=768, + channels=768, + num_classes=150, + num_layers=2, + num_heads=12, + embed_dims=768, + dropout_ratio=0.0, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + ), + test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(480, 480)), +) diff --git a/configs/_base_/models/setr_mla.py b/configs/_base_/models/setr_mla.py new file mode 100644 index 0000000000..dedf169cac --- /dev/null +++ b/configs/_base_/models/setr_mla.py @@ -0,0 +1,103 @@ 
+# model settings +backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='pretrain/jx_vit_large_p16_384-b3be5167.pth', + backbone=dict( + type='VisionTransformer', + img_size=(768, 768), + patch_size=16, + in_channels=3, + embed_dims=1024, + num_layers=24, + num_heads=16, + out_indices=(5, 11, 17, 23), + drop_rate=0.1, + norm_cfg=backbone_norm_cfg, + with_cls_token=False, + interpolate_mode='bilinear', + ), + neck=dict( + type='MLANeck', + in_channels=[1024, 1024, 1024, 1024], + out_channels=256, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + ), + decode_head=dict( + type='SETRMLAHead', + in_channels=(256, 256, 256, 256), + channels=512, + in_index=(0, 1, 2, 3), + dropout_ratio=0, + mla_channels=128, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=0, + dropout_ratio=0, + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=1, + dropout_ratio=0, + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=2, + dropout_ratio=0, + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=3, + dropout_ratio=0, + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + ], + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/setr_naive.py b/configs/_base_/models/setr_naive.py new file mode 100644 index 0000000000..ccf5b3398b --- /dev/null +++ b/configs/_base_/models/setr_naive.py @@ -0,0 +1,88 @@ +# model settings +backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='pretrain/jx_vit_large_p16_384-b3be5167.pth', + backbone=dict( + type='VisionTransformer', + img_size=(768, 768), + patch_size=16, + in_channels=3, + embed_dims=1024, + num_layers=24, + num_heads=16, + out_indices=(9, 14, 19, 23), + drop_rate=0.1, + norm_cfg=backbone_norm_cfg, + with_cls_token=True, + interpolate_mode='bilinear', + ), + decode_head=dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=3, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=1, + align_corners=False, + loss_decode=dict( + 
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=0, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=1, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=1, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=1, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=2, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=1, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)) + ], + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/setr_pup.py b/configs/_base_/models/setr_pup.py new file mode 100644 index 0000000000..df1bc1890d --- /dev/null +++ b/configs/_base_/models/setr_pup.py @@ -0,0 +1,88 @@ +# model settings +backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='pretrain/jx_vit_large_p16_384-b3be5167.pth', + backbone=dict( + type='VisionTransformer', + img_size=(768, 768), + patch_size=16, + in_channels=3, + embed_dims=1024, + num_layers=24, + num_heads=16, + out_indices=(9, 14, 19, 23), + drop_rate=0.1, + norm_cfg=backbone_norm_cfg, + with_cls_token=True, + interpolate_mode='bilinear', + ), + decode_head=dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=3, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=4, + up_scale=2, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=0, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=1, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=2, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + ], + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/stdc.py b/configs/_base_/models/stdc.py new file mode 100644 index 0000000000..01bf2b925e --- /dev/null +++ b/configs/_base_/models/stdc.py @@ -0,0 +1,91 @@ +norm_cfg = dict(type='BN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 
57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='STDCContextPathNet', + backbone_cfg=dict( + type='STDCNet', + stdc_type='STDCNet1', + in_channels=3, + channels=(32, 64, 256, 512, 1024), + bottleneck_type='cat', + num_convs=4, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + with_final_conv=False), + last_in_channels=(1024, 512), + out_channels=128, + ffm_cfg=dict(in_channels=384, out_channels=256, scale_factor=4)), + decode_head=dict( + type='FCNHead', + in_channels=256, + channels=256, + num_convs=1, + num_classes=19, + in_index=3, + concat_input=False, + dropout_ratio=0.1, + norm_cfg=norm_cfg, + align_corners=True, + sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000), + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=128, + channels=64, + num_convs=1, + num_classes=19, + in_index=2, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000), + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='FCNHead', + in_channels=128, + channels=64, + num_convs=1, + num_classes=19, + in_index=1, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000), + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='STDCHead', + in_channels=256, + channels=64, + num_convs=1, + num_classes=2, + boundary_threshold=0.1, + in_index=0, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=True, + loss_decode=[ + dict( + type='CrossEntropyLoss', + loss_name='loss_ce', + use_sigmoid=True, + loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=1.0) + ]), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/twins_pcpvt-s_fpn.py b/configs/_base_/models/twins_pcpvt-s_fpn.py new file mode 100644 index 0000000000..059210b5e1 --- /dev/null +++ b/configs/_base_/models/twins_pcpvt-s_fpn.py @@ -0,0 +1,53 @@ +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_small_20220308-e638c41c.pth' # noqa + +# model settings +backbone_norm_cfg = dict(type='LN') +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='PCPVT', + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + in_channels=3, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + patch_sizes=[4, 2, 2, 2], + strides=[4, 2, 2, 2], + mlp_ratios=[8, 8, 4, 4], + out_indices=(0, 1, 2, 3), + qkv_bias=True, + norm_cfg=backbone_norm_cfg, + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + norm_after_stage=False, + drop_rate=0.0, + attn_drop_rate=0., + drop_path_rate=0.2), + neck=dict( + type='FPN', + in_channels=[64, 128, 320, 512], + out_channels=256, + num_outs=4), + decode_head=dict( + type='FPNHead', + in_channels=[256, 256, 256, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=0.1, + num_classes=150, + 
norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/twins_pcpvt-s_upernet.py b/configs/_base_/models/twins_pcpvt-s_upernet.py new file mode 100644 index 0000000000..585a76f858 --- /dev/null +++ b/configs/_base_/models/twins_pcpvt-s_upernet.py @@ -0,0 +1,61 @@ +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_small_20220308-e638c41c.pth' # noqa + +# model settings +backbone_norm_cfg = dict(type='LN') +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='PCPVT', + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + in_channels=3, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + patch_sizes=[4, 2, 2, 2], + strides=[4, 2, 2, 2], + mlp_ratios=[8, 8, 4, 4], + out_indices=(0, 1, 2, 3), + qkv_bias=True, + norm_cfg=backbone_norm_cfg, + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + norm_after_stage=False, + drop_rate=0.0, + attn_drop_rate=0., + drop_path_rate=0.2), + decode_head=dict( + type='UPerHead', + in_channels=[64, 128, 320, 512], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=320, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/upernet_beit.py b/configs/_base_/models/upernet_beit.py new file mode 100644 index 0000000000..691e288dbf --- /dev/null +++ b/configs/_base_/models/upernet_beit.py @@ -0,0 +1,58 @@ +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='BEiT', + img_size=(640, 640), + patch_size=16, + in_channels=3, + embed_dims=768, + num_layers=12, + num_heads=12, + mlp_ratio=4, + out_indices=(3, 5, 7, 11), + qv_bias=True, + attn_drop_rate=0.0, + drop_path_rate=0.1, + norm_cfg=dict(type='LN', eps=1e-6), + act_cfg=dict(type='GELU'), + norm_eval=False, + init_values=0.1), + neck=dict(type='Feature2Pyramid', embed_dim=768, rescales=[4, 2, 1, 0.5]), + decode_head=dict( + type='UPerHead', + in_channels=[768, 768, 768, 768], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=768, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=768, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + 
norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/upernet_convnext.py b/configs/_base_/models/upernet_convnext.py new file mode 100644 index 0000000000..7595295871 --- /dev/null +++ b/configs/_base_/models/upernet_convnext.py @@ -0,0 +1,52 @@ +norm_cfg = dict(type='SyncBN', requires_grad=True) +custom_imports = dict(imports='mmcls.models', allow_failed_imports=False) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-base_3rdparty_32xb128-noema_in1k_20220301-2a0ee547.pth' # noqa +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='mmcls.ConvNeXt', + arch='base', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + decode_head=dict( + type='UPerHead', + in_channels=[128, 256, 512, 1024], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=384, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/upernet_mae.py b/configs/_base_/models/upernet_mae.py new file mode 100644 index 0000000000..b833b67645 --- /dev/null +++ b/configs/_base_/models/upernet_mae.py @@ -0,0 +1,57 @@ +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='MAE', + img_size=(640, 640), + patch_size=16, + in_channels=3, + embed_dims=768, + num_layers=12, + num_heads=12, + mlp_ratio=4, + out_indices=(3, 5, 7, 11), + attn_drop_rate=0.0, + drop_path_rate=0.1, + norm_cfg=dict(type='LN', eps=1e-6), + act_cfg=dict(type='GELU'), + norm_eval=False, + init_values=0.1), + neck=dict(type='Feature2Pyramid', embed_dim=768, rescales=[4, 2, 1, 0.5]), + decode_head=dict( + type='UPerHead', + in_channels=[384, 384, 384, 384], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=384, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing 
settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/upernet_r50.py b/configs/_base_/models/upernet_r50.py index 7d736f6bcf..97f2eb8c48 100644 --- a/configs/_base_/models/upernet_r50.py +++ b/configs/_base_/models/upernet_r50.py @@ -1,7 +1,15 @@ # model settings norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) model = dict( type='EncoderDecoder', + data_preprocessor=data_preprocessor, pretrained='open-mmlab://resnet50_v1c', backbone=dict( type='ResNetV1c', @@ -20,7 +28,7 @@ in_index=[0, 1, 2, 3], pool_scales=(1, 2, 3, 6), channels=512, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, @@ -33,12 +41,12 @@ channels=256, num_convs=1, concat_input=False, - drop_out_ratio=0.1, + dropout_ratio=0.1, num_classes=19, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))) -# model training and testing settings -train_cfg = dict() -test_cfg = dict(mode='whole') + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/upernet_swin.py b/configs/_base_/models/upernet_swin.py new file mode 100644 index 0000000000..61cfce035e --- /dev/null +++ b/configs/_base_/models/upernet_swin.py @@ -0,0 +1,62 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +backbone_norm_cfg = dict(type='LN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='SwinTransformer', + pretrain_img_size=224, + embed_dims=96, + patch_size=4, + window_size=7, + mlp_ratio=4, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + strides=(4, 2, 2, 2), + out_indices=(0, 1, 2, 3), + qkv_bias=True, + qk_scale=None, + patch_norm=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + use_abs_pos_embed=False, + act_cfg=dict(type='GELU'), + norm_cfg=backbone_norm_cfg), + decode_head=dict( + type='UPerHead', + in_channels=[96, 192, 384, 768], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=384, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/_base_/models/upernet_vit-b16_ln_mln.py b/configs/_base_/models/upernet_vit-b16_ln_mln.py new file mode 100644 index 0000000000..776525ad98 --- /dev/null +++ b/configs/_base_/models/upernet_vit-b16_ln_mln.py @@ -0,0 +1,65 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + 
bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='pretrain/jx_vit_base_p16_224-80ecf9dd.pth', + backbone=dict( + type='VisionTransformer', + img_size=(512, 512), + patch_size=16, + in_channels=3, + embed_dims=768, + num_layers=12, + num_heads=12, + mlp_ratio=4, + out_indices=(2, 5, 8, 11), + qkv_bias=True, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.0, + with_cls_token=True, + norm_cfg=dict(type='LN', eps=1e-6), + act_cfg=dict(type='GELU'), + norm_eval=False, + interpolate_mode='bicubic'), + neck=dict( + type='MultiLevelNeck', + in_channels=[768, 768, 768, 768], + out_channels=768, + scales=[4, 2, 1, 0.5]), + decode_head=dict( + type='UPerHead', + in_channels=[768, 768, 768, 768], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=768, + in_index=3, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) # yapf: disable diff --git a/configs/_base_/schedules/schedule_160k.py b/configs/_base_/schedules/schedule_160k.py index 8fe4b04d22..60d7bec762 100644 --- a/configs/_base_/schedules/schedule_160k.py +++ b/configs/_base_/schedules/schedule_160k.py @@ -1,9 +1,25 @@ # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) # learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -total_iters = 160000 -checkpoint_config = dict(by_epoch=False, interval=16000) -evaluation = dict(interval=16000, metric='mIoU') +param_scheduler = [ + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=0, + end=160000, + by_epoch=False) +] +# training schedule for 160k +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=160000, val_interval=16000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=16000), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='SegVisualizationHook')) diff --git a/configs/_base_/schedules/schedule_20k.py b/configs/_base_/schedules/schedule_20k.py index d3903d6452..e809e3e880 100644 --- a/configs/_base_/schedules/schedule_20k.py +++ b/configs/_base_/schedules/schedule_20k.py @@ -1,9 +1,24 @@ # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) # learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -total_iters = 20000 -checkpoint_config = dict(by_epoch=False, interval=2000) -evaluation = dict(interval=2000, metric='mIoU') +param_scheduler = [ + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=0, + end=20000, + 
by_epoch=False) +] +# training schedule for 20k +train_cfg = dict(type='IterBasedTrainLoop', max_iters=20000, val_interval=2000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=2000), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='SegVisualizationHook')) diff --git a/configs/_base_/schedules/schedule_320k.py b/configs/_base_/schedules/schedule_320k.py new file mode 100644 index 0000000000..70b063afc9 --- /dev/null +++ b/configs/_base_/schedules/schedule_320k.py @@ -0,0 +1,25 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) +# learning policy +param_scheduler = [ + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=0, + end=320000, + by_epoch=False) +] +# training schedule for 320k +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=320000, val_interval=32000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=32000), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='SegVisualizationHook')) diff --git a/configs/_base_/schedules/schedule_40k.py b/configs/_base_/schedules/schedule_40k.py index b1449219cb..4b823339a2 100644 --- a/configs/_base_/schedules/schedule_40k.py +++ b/configs/_base_/schedules/schedule_40k.py @@ -1,9 +1,24 @@ # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) # learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -total_iters = 40000 -checkpoint_config = dict(by_epoch=False, interval=4000) -evaluation = dict(interval=4000, metric='mIoU') +param_scheduler = [ + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=0, + end=40000, + by_epoch=False) +] +# training schedule for 40k +train_cfg = dict(type='IterBasedTrainLoop', max_iters=40000, val_interval=4000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=4000), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='SegVisualizationHook')) diff --git a/configs/_base_/schedules/schedule_80k.py b/configs/_base_/schedules/schedule_80k.py index 3a77b41d45..0dcd6c4d1b 100644 --- a/configs/_base_/schedules/schedule_80k.py +++ b/configs/_base_/schedules/schedule_80k.py @@ -1,9 +1,24 @@ # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) # learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -total_iters = 80000 -checkpoint_config = 
dict(by_epoch=False, interval=8000) -evaluation = dict(interval=8000, metric='mIoU') +param_scheduler = [ + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=0, + end=80000, + by_epoch=False) +] +# training schedule for 80k +train_cfg = dict(type='IterBasedTrainLoop', max_iters=80000, val_interval=8000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=8000), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='SegVisualizationHook')) diff --git a/configs/ann/README.md b/configs/ann/README.md index e3e217c4bd..6cb8bc70b0 100644 --- a/configs/ann/README.md +++ b/configs/ann/README.md @@ -1,46 +1,68 @@ -# Asymmetric Non-local Neural Networks for Semantic Segmentation +# ANN + +[Asymmetric Non-local Neural Networks for Semantic Segmentation](https://arxiv.org/abs/1908.07678) ## Introduction -``` -@inproceedings{annn, - author = {Zhen Zhu and - Mengde Xu and - Song Bai and - Tengteng Huang and - Xiang Bai}, - title = {Asymmetric Non-local Neural Networks for Semantic Segmentation}, - booktitle={International Conference on Computer Vision}, - year = {2019}, - url = {http://arxiv.org/abs/1908.07678}, + + + +Official Repo + +Code Snippet + +## Abstract + + + +The non-local module works as a particularly useful technique for semantic segmentation while criticized for its prohibitive computation and GPU memory occupation. In this paper, we present Asymmetric Non-local Neural Network to semantic segmentation, which has two prominent components: Asymmetric Pyramid Non-local Block (APNB) and Asymmetric Fusion Non-local Block (AFNB). APNB leverages a pyramid sampling module into the non-local block to largely reduce the computation and memory consumption without sacrificing the performance. AFNB is adapted from APNB to fuse the features of different levels under a sufficient consideration of long range dependencies and thus considerably improves the performance. Extensive experiments on semantic segmentation benchmarks demonstrate the effectiveness and efficiency of our work. In particular, we report the state-of-the-art performance of 81.3 mIoU on the Cityscapes test set. For a 256x128 input, APNB is around 6 times faster than a non-local block on GPU while 28 times smaller in GPU running memory occupation. Code is available at: [this https URL](https://github.com/MendelXu/ANN). + + + +
+ +
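The abstract above is the only place this patch describes what APNB actually does, so here is a minimal, illustrative PyTorch sketch of the idea: the keys and values of a non-local block are pyramid-pooled down to a handful of positions, so attention is computed against roughly 110 samples instead of all H×W pixels. This sketch is not part of the diff and is not the repository's implementation (that code is referenced later in this patch at `mmseg/models/decode_heads/ann_head.py`); the module and variable names are invented for illustration, and plain scaled dot-product attention stands in for the paper's embedded-Gaussian formulation. Only the pool scales (1, 3, 6, 8) follow the paper.

```python
# Illustrative sketch of an APNB-style pyramid-sampled non-local block.
# Assumption: scaled dot-product attention replaces the paper's exact
# formulation; the point demonstrated is the pyramid sampling of keys/values.
import torch
import torch.nn as nn
import torch.nn.functional as F


class PyramidNonLocal2d(nn.Module):

    def __init__(self, in_channels, key_channels=64, pool_scales=(1, 3, 6, 8)):
        super().__init__()
        self.pool_scales = pool_scales
        self.query = nn.Conv2d(in_channels, key_channels, 1)
        self.key = nn.Conv2d(in_channels, key_channels, 1)
        self.value = nn.Conv2d(in_channels, key_channels, 1)
        self.out = nn.Conv2d(key_channels, in_channels, 1)

    def _pyramid_sample(self, feat):
        # Pool the map to each scale and flatten, giving S = 1 + 9 + 36 + 64
        # = 110 key/value positions instead of H*W.
        pooled = [
            F.adaptive_avg_pool2d(feat, scale).flatten(2)
            for scale in self.pool_scales
        ]
        return torch.cat(pooled, dim=2)  # (B, Ck, S)

    def forward(self, x):
        b, _, h, w = x.shape
        q = self.query(x).flatten(2).transpose(1, 2)   # (B, HW, Ck)
        k = self._pyramid_sample(self.key(x))          # (B, Ck, S)
        v = self._pyramid_sample(self.value(x))        # (B, Ck, S)
        attn = torch.softmax(q @ k / k.shape[1] ** 0.5, dim=-1)  # (B, HW, S)
        ctx = (attn @ v.transpose(1, 2)).transpose(1, 2).reshape(b, -1, h, w)
        return x + self.out(ctx)  # residual connection, back to in_channels


feat = torch.randn(2, 256, 64, 128)        # e.g. a backbone feature map
print(PyramidNonLocal2d(256)(feat).shape)  # torch.Size([2, 256, 64, 128])
```

The saving is exactly the one the abstract claims: the attention matrix has shape `(HW, 110)` rather than `(HW, HW)`, which is why the paper reports APNB being around 6 times faster and 28 times smaller in GPU memory than a full non-local block.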
+ +## Citation + +```bibtex +@inproceedings{zhu2019asymmetric, + title={Asymmetric non-local neural networks for semantic segmentation}, + author={Zhu, Zhen and Xu, Mengde and Bai, Song and Huang, Tengteng and Bai, Xiang}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={593--602}, + year={2019} } ``` ## Results and models ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ANN | R-50-D8 | 512x1024 | 40000 | 6 | 3.71 | 77.40 | 78.57 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211-049fc292.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211.log.json) | -| ANN | R-101-D8 | 512x1024 | 40000 | 9.5 | 2.55 | 76.55 | 78.85 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243-adf6eece.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243.log.json) | -| ANN | R-50-D8 | 769x769 | 40000 | 6.8 | 1.70 | 78.89 | 80.46 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712-2b46b04d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712.log.json) | -| ANN | R-101-D8 | 769x769 | 40000 | 10.7 | 1.15 | 79.32 | 80.94 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720-059bff28.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720.log.json) | -| ANN | R-50-D8 | 512x1024 | 80000 | - | - | 77.34 | 78.65 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911-5a9ad545.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911.log.json) | -| ANN | R-101-D8 | 512x1024 | 80000 | - | - | 77.14 | 78.81 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728-aceccc6e.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728.log.json) | -| ANN | R-50-D8 | 769x769 | 80000 | - | - | 78.88 | 80.57 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426-cc7ff323.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426.log.json) | -| ANN | R-101-D8 | 769x769 | 80000 | - | - | 78.80 | 80.34 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713-a9d4be8d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ANN | R-50-D8 | 512x1024 | 40000 | 6 | 3.71 | 77.40 | 78.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211-049fc292.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211.log.json) | +| ANN | R-101-D8 | 512x1024 | 40000 | 9.5 | 2.55 | 76.55 | 78.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243-adf6eece.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243.log.json) | +| ANN | R-50-D8 | 769x769 | 40000 | 6.8 | 1.70 | 78.89 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712-2b46b04d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712.log.json) | +| ANN | R-101-D8 | 769x769 | 40000 | 10.7 | 1.15 | 79.32 | 80.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720-059bff28.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720.log.json) | +| ANN | R-50-D8 | 512x1024 | 80000 | - | - | 77.34 
| 78.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911-5a9ad545.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911.log.json) | +| ANN | R-101-D8 | 512x1024 | 80000 | - | - | 77.14 | 78.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728-aceccc6e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728.log.json) | +| ANN | R-50-D8 | 769x769 | 80000 | - | - | 78.88 | 80.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426-cc7ff323.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426.log.json) | +| ANN | R-101-D8 | 769x769 | 80000 | - | - | 78.80 | 80.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713-a9d4be8d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ANN | R-50-D8 | 512x512 | 80000 | 9.1 | 21.01 | 41.01 | 42.30 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818-26f75e11.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818.log.json) | -| ANN | R-101-D8 | 512x512 | 80000 | 12.5 | 14.12 | 42.94 | 44.18 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818-c0153543.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818.log.json) | -| ANN | R-50-D8 | 512x512 | 160000 | - | - | 41.74 | 42.62 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733-892247bc.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733.log.json) | -| ANN | R-101-D8 | 512x512 | 160000 | - | - | 42.94 | 44.06 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733-955eb1ec.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ANN | R-50-D8 | 512x512 | 80000 | 9.1 | 21.01 | 41.01 | 42.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818-26f75e11.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818.log.json) | +| ANN | R-101-D8 | 512x512 | 80000 | 12.5 | 14.12 | 42.94 | 44.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818-c0153543.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818.log.json) | +| ANN | R-50-D8 | 512x512 | 160000 | - | - | 41.74 | 42.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733-892247bc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733.log.json) | +| ANN | R-101-D8 | 512x512 | 160000 | - | - | 42.94 | 44.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733-955eb1ec.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733.log.json) | ### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | 
-|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ANN | R-50-D8 | 512x512 | 20000 | 6 | 20.92 | 74.86 | 76.13 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246-dfcb1c62.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246.log.json) | -| ANN | R-101-D8 | 512x512 | 20000 | 9.5 | 13.94 | 77.47 | 78.70 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246-2fad0042.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246.log.json) | -| ANN | R-50-D8 | 512x512 | 40000 | - | - | 76.56 | 77.51 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314-b5dac322.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314.log.json) | -| ANN | R-101-D8 | 512x512 | 40000 | - | - | 76.70 | 78.06 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314-bd205bbe.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ANN | R-50-D8 | 512x512 | 20000 | 6 | 20.92 | 74.86 | 76.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r50-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246-dfcb1c62.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246.log.json) | +| ANN | R-101-D8 | 512x512 | 20000 | 9.5 | 13.94 | 77.47 | 78.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r101-d8_4xb4-20k_voc12aug-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246-2fad0042.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246.log.json) | +| ANN | R-50-D8 | 512x512 | 40000 | - | - | 76.56 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r50-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314-b5dac322.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314.log.json) | +| ANN | R-101-D8 | 512x512 | 40000 | - | - | 76.70 | 78.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ann/ann_r101-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314-bd205bbe.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314.log.json) | diff --git a/configs/ann/ann.yml b/configs/ann/ann.yml new file mode 100644 index 0000000000..36721992f9 --- /dev/null +++ b/configs/ann/ann.yml @@ -0,0 +1,305 @@ +Collections: +- Name: ANN + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1908.07678 + Title: Asymmetric Non-local Neural Networks for Semantic Segmentation + README: configs/ann/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ann_head.py#L185 + Version: v0.17.0 + Converted From: + Code: https://github.com/MendelXu/ANN +Models: +- Name: ann_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 269.54 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.4 + mIoU(ms+flip): 78.57 + Config: configs/ann/ann_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211-049fc292.pth +- Name: ann_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 392.16 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.55 + mIoU(ms+flip): 78.85 + Config: configs/ann/ann_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243-adf6eece.pth +- Name: ann_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 588.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.8 + Results: 
+ - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.89 + mIoU(ms+flip): 80.46 + Config: configs/ann/ann_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712-2b46b04d.pth +- Name: ann_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 869.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.32 + mIoU(ms+flip): 80.94 + Config: configs/ann/ann_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720-059bff28.pth +- Name: ann_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.34 + mIoU(ms+flip): 78.65 + Config: configs/ann/ann_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911-5a9ad545.pth +- Name: ann_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.14 + mIoU(ms+flip): 78.81 + Config: configs/ann/ann_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728-aceccc6e.pth +- Name: ann_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.88 + mIoU(ms+flip): 80.57 + Config: configs/ann/ann_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426-cc7ff323.pth +- Name: ann_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.8 + mIoU(ms+flip): 80.34 + Config: configs/ann/ann_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713-a9d4be8d.pth +- Name: ann_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.6 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.01 + mIoU(ms+flip): 42.3 + Config: configs/ann/ann_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818-26f75e11.pth +- Name: 
ann_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 70.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.94 + mIoU(ms+flip): 44.18 + Config: configs/ann/ann_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818-c0153543.pth +- Name: ann_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.74 + mIoU(ms+flip): 42.62 + Config: configs/ann/ann_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733-892247bc.pth +- Name: ann_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.94 + mIoU(ms+flip): 44.06 + Config: configs/ann/ann_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733-955eb1ec.pth +- Name: ann_r50-d8_4xb4-20k_voc12aug-512x512 + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 47.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.0 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 74.86 + mIoU(ms+flip): 76.13 + Config: configs/ann/ann_r50-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246-dfcb1c62.pth +- Name: ann_r101-d8_4xb4-20k_voc12aug-512x512 + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 71.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.5 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.47 + mIoU(ms+flip): 78.7 + Config: configs/ann/ann_r101-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246-2fad0042.pth +- Name: ann_r50-d8_4xb4-40k_voc12aug-512x512 + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.56 + mIoU(ms+flip): 77.51 + Config: configs/ann/ann_r50-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314-b5dac322.pth +- Name: ann_r101-d8_4xb4-40k_voc12aug-512x512 + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + 
Metrics: + mIoU: 76.7 + mIoU(ms+flip): 78.06 + Config: configs/ann/ann_r101-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314-bd205bbe.pth diff --git a/configs/ann/ann_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/ann/ann_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..0da7e0b702 --- /dev/null +++ b/configs/ann/ann_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/ann/ann_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..08459c0a50 --- /dev/null +++ b/configs/ann/ann_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/ann/ann_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..46781fa9f2 --- /dev/null +++ b/configs/ann/ann_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/ann/ann_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..c951d8704c --- /dev/null +++ b/configs/ann/ann_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/ann/ann_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..9f14327542 --- /dev/null +++ b/configs/ann/ann_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_4xb4-20k_voc12aug-512x512.py b/configs/ann/ann_r101-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..c3c1a3f706 --- /dev/null +++ b/configs/ann/ann_r101-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_4xb4-40k_voc12aug-512x512.py b/configs/ann/ann_r101-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..c3c1a3f706 --- /dev/null +++ b/configs/ann/ann_r101-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/ann/ann_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..3cc5b8e300 --- /dev/null +++ b/configs/ann/ann_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py 
b/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index d494e07333..0000000000 --- a/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ann_r50-d8_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py b/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 1eeff0b030..0000000000 --- a/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ann_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_512x512_160k_ade20k.py b/configs/ann/ann_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index 9e43af541f..0000000000 --- a/configs/ann/ann_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ann_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_512x512_20k_voc12aug.py b/configs/ann/ann_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index d854f2e422..0000000000 --- a/configs/ann/ann_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ann_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py b/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 893c53b1ca..0000000000 --- a/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ann_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_512x512_80k_ade20k.py b/configs/ann/ann_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index a64dac670e..0000000000 --- a/configs/ann/ann_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ann_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py b/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 5950824849..0000000000 --- a/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ann_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r101-d8_769x769_80k_cityscapes.py b/configs/ann/ann_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index a9c712d1cc..0000000000 --- a/configs/ann/ann_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ann_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ann/ann_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/ann/ann_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..119eb76447 --- /dev/null +++ b/configs/ann/ann_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git 
a/configs/ann/ann_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/ann/ann_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..3152b929a6 --- /dev/null +++ b/configs/ann/ann_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/ann/ann_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/ann/ann_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..793437f7a8 --- /dev/null +++ b/configs/ann/ann_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/ann/ann_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/ann/ann_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..4e392ca166 --- /dev/null +++ b/configs/ann/ann_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/ann/ann_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/ann/ann_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..900381dd1f --- /dev/null +++ b/configs/ann/ann_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/ann/ann_r50-d8_4xb4-20k_voc12aug-512x512.py b/configs/ann/ann_r50-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..6921218c31 --- /dev/null +++ b/configs/ann/ann_r50-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/ann/ann_r50-d8_4xb4-40k_voc12aug-512x512.py b/configs/ann/ann_r50-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..e1c236049c --- /dev/null +++ b/configs/ann/ann_r50-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', + 
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/ann/ann_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/ann/ann_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..9fb26efcf4 --- /dev/null +++ b/configs/ann/ann_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py b/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 00b2594ba8..0000000000 --- a/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py b/configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index ef7b369dd9..0000000000 --- a/configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/ann/ann_r50-d8_512x512_160k_ade20k.py b/configs/ann/ann_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index ca6bb248ac..0000000000 --- a/configs/ann/ann_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py b/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 071f190261..0000000000 --- a/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py b/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 82a1c9386c..0000000000 --- a/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/ann/ann_r50-d8_512x512_80k_ade20k.py b/configs/ann/ann_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 5e04aa7c6a..0000000000 --- a/configs/ann/ann_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', 
'../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py b/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 393a400beb..0000000000 --- a/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/ann/ann_r50-d8_769x769_80k_cityscapes.py b/configs/ann/ann_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 7861a372e9..0000000000 --- a/configs/ann/ann_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/apcnet/README.md b/configs/apcnet/README.md new file mode 100644 index 0000000000..11fb1a1454 --- /dev/null +++ b/configs/apcnet/README.md @@ -0,0 +1,59 @@ +# APCNet + +[Adaptive Pyramid Context Network for Semantic Segmentation](https://openaccess.thecvf.com/content_CVPR_2019/html/He_Adaptive_Pyramid_Context_Network_for_Semantic_Segmentation_CVPR_2019_paper.html) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Recent studies witnessed that context features can significantly improve the performance of deep semantic segmentation networks. Current context based segmentation methods differ with each other in how to construct context features and perform differently in practice. This paper firstly introduces three desirable properties of context features in segmentation task. Specially, we find that Global-guided Local Affinity (GLA) can play a vital role in constructing effective context features, while this property has been largely ignored in previous works. Based on this analysis, this paper proposes Adaptive Pyramid Context Network (APCNet) for semantic segmentation. APCNet adaptively constructs multi-scale contextual representations with multiple well-designed Adaptive Context Modules (ACMs). Specifically, each ACM leverages a global image representation as a guidance to estimate the local affinity coefficients for each sub-region, and then calculates a context vector with these affinities. We empirically evaluate our APCNet on three semantic segmentation and scene parsing datasets, including PASCAL VOC 2012, Pascal-Context, and ADE20K dataset. Experimental results show that APCNet achieves state-of-the-art performance on all three benchmarks, and obtains a new record 84.2% on PASCAL VOC 2012 test set without MS COCO pre-trained and any post-processing. + + +
+ +
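The abstract above only sketches how an Adaptive Context Module (ACM) works. The snippet below is a rough, illustrative rendering of that description (a global feature used as guidance, per-sub-region affinities, and an affinity-weighted context vector); it is not the implementation in `mmseg/models/decode_heads/apc_head.py`, and the use of plain pooling in place of learned projections is an assumption made purely for illustration.

```python
import torch
import torch.nn.functional as F


def adaptive_context_module(x: torch.Tensor, pool_scale: int = 2) -> torch.Tensor:
    """Rough schematic of one ACM as described in the abstract (illustrative only)."""
    b, c, h, w = x.shape
    # global image representation used as guidance
    global_feat = F.adaptive_avg_pool2d(x, 1)                       # (B, C, 1, 1)
    # sub-region summaries at this pyramid scale
    regions = F.adaptive_avg_pool2d(x, pool_scale).flatten(2)       # (B, C, s*s)
    # affinity of every pixel (guided by the global feature) to each sub-region
    query = (x + global_feat).flatten(2).transpose(1, 2)            # (B, H*W, C)
    affinity = torch.softmax(query @ regions, dim=-1)               # (B, H*W, s*s)
    # per-pixel context vector from the affinity-weighted sub-regions
    context = (affinity @ regions.transpose(1, 2)).transpose(1, 2)  # (B, C, H*W)
    return context.reshape(b, c, h, w)
```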
+ +## Citation + +```bibtex +@InProceedings{He_2019_CVPR, +author = {He, Junjun and Deng, Zhongying and Zhou, Lei and Wang, Yali and Qiao, Yu}, +title = {Adaptive Pyramid Context Network for Semantic Segmentation}, +booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, +month = {June}, +year = {2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| APCNet | R-50-D8 | 512x1024 | 40000 | 7.7 | 3.57 | 78.02 | 79.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/apcnet/apcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes/apcnet_r50-d8_512x1024_40k_cityscapes_20201214_115717-5e88fa33.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes/apcnet_r50-d8_512x1024_40k_cityscapes-20201214_115717.log.json) | +| APCNet | R-101-D8 | 512x1024 | 40000 | 11.2 | 2.15 | 79.08 | 80.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/apcnet/apcnet_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes/apcnet_r101-d8_512x1024_40k_cityscapes_20201214_115716-abc9d111.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes/apcnet_r101-d8_512x1024_40k_cityscapes-20201214_115716.log.json) | +| APCNet | R-50-D8 | 769x769 | 40000 | 8.7 | 1.52 | 77.89 | 79.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/apcnet/apcnet_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_40k_cityscapes/apcnet_r50-d8_769x769_40k_cityscapes_20201214_115717-2a2628d7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_40k_cityscapes/apcnet_r50-d8_769x769_40k_cityscapes-20201214_115717.log.json) | +| APCNet | R-101-D8 | 769x769 | 40000 | 12.7 | 1.03 | 77.96 | 79.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/apcnet/apcnet_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_40k_cityscapes/apcnet_r101-d8_769x769_40k_cityscapes_20201214_115718-b650de90.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_40k_cityscapes/apcnet_r101-d8_769x769_40k_cityscapes-20201214_115718.log.json) | +| APCNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.96 | 79.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/apcnet/apcnet_r50-d8_4xb2-80k_cityscapes-512x1024.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes/apcnet_r50-d8_512x1024_80k_cityscapes_20201214_115716-987f51e3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes/apcnet_r50-d8_512x1024_80k_cityscapes-20201214_115716.log.json) | +| APCNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.64 | 80.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/apcnet/apcnet_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes/apcnet_r101-d8_512x1024_80k_cityscapes_20201214_115705-b1ff208a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes/apcnet_r101-d8_512x1024_80k_cityscapes-20201214_115705.log.json) | +| APCNet | R-50-D8 | 769x769 | 80000 | - | - | 78.79 | 80.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/apcnet/apcnet_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_80k_cityscapes/apcnet_r50-d8_769x769_80k_cityscapes_20201214_115718-7ea9fa12.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_80k_cityscapes/apcnet_r50-d8_769x769_80k_cityscapes-20201214_115718.log.json) | +| APCNet | R-101-D8 | 769x769 | 80000 | - | - | 78.45 | 79.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/apcnet/apcnet_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_80k_cityscapes/apcnet_r101-d8_769x769_80k_cityscapes_20201214_115716-a7fbc2ab.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_80k_cityscapes/apcnet_r101-d8_769x769_80k_cityscapes-20201214_115716.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| APCNet | R-50-D8 | 512x512 | 80000 | 10.1 | 19.61 | 42.20 | 43.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/apcnet/apcnet_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_80k_ade20k/apcnet_r50-d8_512x512_80k_ade20k_20201214_115705-a8626293.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_80k_ade20k/apcnet_r50-d8_512x512_80k_ade20k-20201214_115705.log.json) | +| APCNet | R-101-D8 | 512x512 | 80000 | 13.6 | 13.10 | 45.54 | 46.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/apcnet/apcnet_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_80k_ade20k/apcnet_r101-d8_512x512_80k_ade20k_20201214_115704-c656c3fb.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_80k_ade20k/apcnet_r101-d8_512x512_80k_ade20k-20201214_115704.log.json) | +| APCNet | R-50-D8 | 512x512 | 160000 | - | - | 43.40 | 43.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/apcnet/apcnet_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_160k_ade20k/apcnet_r50-d8_512x512_160k_ade20k_20201214_115706-25fb92c2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_160k_ade20k/apcnet_r50-d8_512x512_160k_ade20k-20201214_115706.log.json) | +| APCNet | R-101-D8 | 512x512 | 160000 | - | - | 45.41 | 46.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/apcnet/apcnet_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_160k_ade20k/apcnet_r101-d8_512x512_160k_ade20k_20201214_115705-73f9a8d7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_160k_ade20k/apcnet_r101-d8_512x512_160k_ade20k-20201214_115705.log.json) | diff --git a/configs/apcnet/apcnet.yml b/configs/apcnet/apcnet.yml new file mode 100644 index 0000000000..737da973d4 --- /dev/null +++ b/configs/apcnet/apcnet.yml @@ -0,0 +1,232 @@ +Collections: +- Name: APCNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://openaccess.thecvf.com/content_CVPR_2019/html/He_Adaptive_Pyramid_Context_Network_for_Semantic_Segmentation_CVPR_2019_paper.html + Title: Adaptive Pyramid Context Network for Semantic Segmentation + README: configs/apcnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/apc_head.py#L111 + Version: v0.17.0 + Converted From: + Code: https://github.com/Junjun2016/APCNet +Models: +- Name: apcnet_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: APCNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 280.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.02 + mIoU(ms+flip): 79.26 + Config: configs/apcnet/apcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes/apcnet_r50-d8_512x1024_40k_cityscapes_20201214_115717-5e88fa33.pth +- Name: apcnet_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: APCNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 465.12 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 11.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.08 + mIoU(ms+flip): 80.34 + Config: configs/apcnet/apcnet_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes/apcnet_r101-d8_512x1024_40k_cityscapes_20201214_115716-abc9d111.pth +- Name: apcnet_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: APCNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 657.89 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: 
(769,769) + Training Memory (GB): 8.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.89 + mIoU(ms+flip): 79.75 + Config: configs/apcnet/apcnet_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_40k_cityscapes/apcnet_r50-d8_769x769_40k_cityscapes_20201214_115717-2a2628d7.pth +- Name: apcnet_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: APCNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 970.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.96 + mIoU(ms+flip): 79.24 + Config: configs/apcnet/apcnet_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_40k_cityscapes/apcnet_r101-d8_769x769_40k_cityscapes_20201214_115718-b650de90.pth +- Name: apcnet_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: APCNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.96 + mIoU(ms+flip): 79.94 + Config: configs/apcnet/apcnet_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes/apcnet_r50-d8_512x1024_80k_cityscapes_20201214_115716-987f51e3.pth +- Name: apcnet_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: APCNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.64 + mIoU(ms+flip): 80.61 + Config: configs/apcnet/apcnet_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes/apcnet_r101-d8_512x1024_80k_cityscapes_20201214_115705-b1ff208a.pth +- Name: apcnet_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: APCNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.79 + mIoU(ms+flip): 80.35 + Config: configs/apcnet/apcnet_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_80k_cityscapes/apcnet_r50-d8_769x769_80k_cityscapes_20201214_115718-7ea9fa12.pth +- Name: apcnet_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: APCNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.45 + mIoU(ms+flip): 79.91 + Config: configs/apcnet/apcnet_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_80k_cityscapes/apcnet_r101-d8_769x769_80k_cityscapes_20201214_115716-a7fbc2ab.pth +- Name: apcnet_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: APCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 50.99 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.2 + mIoU(ms+flip): 43.3 + Config: 
configs/apcnet/apcnet_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_80k_ade20k/apcnet_r50-d8_512x512_80k_ade20k_20201214_115705-a8626293.pth +- Name: apcnet_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: APCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 76.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.54 + mIoU(ms+flip): 46.65 + Config: configs/apcnet/apcnet_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_80k_ade20k/apcnet_r101-d8_512x512_80k_ade20k_20201214_115704-c656c3fb.pth +- Name: apcnet_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: APCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.4 + mIoU(ms+flip): 43.94 + Config: configs/apcnet/apcnet_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_160k_ade20k/apcnet_r50-d8_512x512_160k_ade20k_20201214_115706-25fb92c2.pth +- Name: apcnet_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: APCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.41 + mIoU(ms+flip): 46.63 + Config: configs/apcnet/apcnet_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_160k_ade20k/apcnet_r101-d8_512x512_160k_ade20k_20201214_115705-73f9a8d7.pth diff --git a/configs/apcnet/apcnet_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/apcnet/apcnet_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..754b2d1a08 --- /dev/null +++ b/configs/apcnet/apcnet_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './apcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/apcnet/apcnet_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/apcnet/apcnet_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..d2b5fe1360 --- /dev/null +++ b/configs/apcnet/apcnet_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './apcnet_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/apcnet/apcnet_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/apcnet/apcnet_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..03b018d2ff --- /dev/null +++ b/configs/apcnet/apcnet_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './apcnet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/apcnet/apcnet_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/apcnet/apcnet_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..0cbbfadbdd --- /dev/null +++ b/configs/apcnet/apcnet_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './apcnet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff 
--git a/configs/apcnet/apcnet_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/apcnet/apcnet_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..f0aacc06e0 --- /dev/null +++ b/configs/apcnet/apcnet_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './apcnet_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/apcnet/apcnet_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/apcnet/apcnet_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..219d07ae55 --- /dev/null +++ b/configs/apcnet/apcnet_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './apcnet_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/apcnet/apcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/apcnet/apcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..b440771d01 --- /dev/null +++ b/configs/apcnet/apcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/apcnet/apcnet_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/apcnet/apcnet_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..9ff897c977 --- /dev/null +++ b/configs/apcnet/apcnet_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/apcnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/apcnet/apcnet_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/apcnet/apcnet_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..6de10330b3 --- /dev/null +++ b/configs/apcnet/apcnet_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/apcnet/apcnet_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/apcnet/apcnet_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..d6ec8985cc --- /dev/null +++ b/configs/apcnet/apcnet_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/apcnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/apcnet/apcnet_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/apcnet/apcnet_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 
index 0000000000..37b23d1c53 --- /dev/null +++ b/configs/apcnet/apcnet_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/apcnet/apcnet_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/apcnet/apcnet_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..b0fbe275b6 --- /dev/null +++ b/configs/apcnet/apcnet_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/beit/README.md b/configs/beit/README.md new file mode 100644 index 0000000000..380d788741 --- /dev/null +++ b/configs/beit/README.md @@ -0,0 +1,85 @@ +# BEiT + +[BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation from Image Transformers. Following BERT developed in the natural language processing area, we propose a masked image modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e, image patches (such as 16x16 pixels), and visual tokens (i.e., discrete tokens). We first "tokenize" the original image into visual tokens. Then we randomly mask some image patches and fed them into the backbone Transformer. The pre-training objective is to recover the original visual tokens based on the corrupted image patches. After pre-training BEiT, we directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder. Experimental results on image classification and semantic segmentation show that our model achieves competitive results with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K, significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains 86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%). The code and pretrained models are available at [this https URL](https://github.com/microsoft/unilm/tree/master/beit). + + + +
+ +
+ +## Citation + +```bibtex +@inproceedings{beit, + title={{BEiT}: {BERT} Pre-Training of Image Transformers}, + author={Hangbo Bao and Li Dong and Songhao Piao and Furu Wei}, + booktitle={International Conference on Learning Representations}, + year={2022}, + url={https://openreview.net/forum?id=p-BhZSz59o4} +} +``` + +## Usage + +To use pre-trained models from other repositories, their checkpoint keys need to be converted first. + +We provide a script [`beit2mmseg.py`](../../tools/model_converters/beit2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/microsoft/unilm/tree/master/beit/semantic_segmentation) to MMSegmentation style. + +```shell +python tools/model_converters/beit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +E.g. + +```shell +python tools/model_converters/beit2mmseg.py https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22k.pth pretrain/beit_base_patch16_224_pt22k_ft22k.pth +``` + +This script converts the checkpoint from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +In our default setting, the pretrained models and their corresponding original models are listed below: + +| pretrained models | original models | +| ----------------- | --------------------------------------------------------------------------------------------------------------------------- | +| BEiT_base.pth | ['BEiT_base'](https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22k.pth) | +| BEiT_large.pth | ['BEiT_large'](https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth) | + +Verify the single-scale results of the model: + +```shell +sh tools/dist_test.sh \ +configs/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py \ +upernet_beit-large_fp16_8x1_640x640_160k_ade20k-8fc0dd5d.pth $GPUS --eval mIoU +``` + +Since the relative position embedding requires the input height and width to be equal, a sliding window is adopted for multi-scale inference. We set `min_size=640`, that is, the shortest edge is 640, so multi-scale inference is run with a separate config rather than the `--aug-test` option.
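Concretely, the sliding-window evaluation described above corresponds to the `test_cfg` already set by the BEiT configs added in this change; the excerpt-style sketch below merely restates those values and is not an additional config file.

```python
# Sliding-window test settings, as used in
# configs/beit/beit-large_upernet_8xb1-amp-160k_ade20k-640x640.py
model = dict(
    test_cfg=dict(
        mode='slide',           # sliding-window inference
        crop_size=(640, 640),   # window size matches the 640x640 training crop
        stride=(426, 426)))     # step between adjacent windows
```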
For multi-scale inference: + +```shell +sh tools/dist_test.sh \ +configs/beit/upernet_beit-large_fp16_640x640_160k_ade20k_ms.py \ +upernet_beit-large_fp16_8x1_640x640_160k_ade20k-8fc0dd5d.pth $GPUS --eval mIoU +``` + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | pretrain | pretrain img size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | -------- | --------- | ------------ | ----------------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | ----------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UPerNet | BEiT-B | 640x640 | ImageNet-22K | 224x224 | 16 | 160000 | 15.88 | 2.00 | 53.08 | 53.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/beit/beit-base_upernet_8xb2-160k_ade20k-640x640.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-base_8x2_640x640_160k_ade20k/upernet_beit-base_8x2_640x640_160k_ade20k-eead221d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-base_8x2_640x640_160k_ade20k/upernet_beit-base_8x2_640x640_160k_ade20k.log.json) | +| UPerNet | BEiT-L | 640x640 | ImageNet-22K | 224x224 | 8 | 320000 | 22.64 | 0.96 | 56.33 | 56.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/beit/beit-large_upernet_8xb1-amp-160k_ade20k-640x640.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k/upernet_beit-large_fp16_8x1_640x640_160k_ade20k-8fc0dd5d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.log.json) | diff --git a/configs/beit/beit-base_upernet_8xb2-160k_ade20k-640x640.py b/configs/beit/beit-base_upernet_8xb2-160k_ade20k-640x640.py new file mode 100644 index 0000000000..1cd7d0e8a8 --- /dev/null +++ b/configs/beit/beit-base_upernet_8xb2-160k_ade20k-640x640.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/upernet_beit.py', '../_base_/datasets/ade20k_640x640.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (640, 640) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='pretrain/beit_base_patch16_224_pt22k_ft22k.pth', + test_cfg=dict(mode='slide', crop_size=(640, 640), stride=(426, 426))) + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=3e-5, betas=(0.9, 0.999), weight_decay=0.05), + constructor='LayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9)) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + power=1.0, + begin=1500, + end=160000, + eta_min=0.0, + by_epoch=False, + ) +] + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git 
a/configs/beit/beit-base_upernet_8xb2-160k_ade20k-640x640_ms.py b/configs/beit/beit-base_upernet_8xb2-160k_ade20k-640x640_ms.py new file mode 100644 index 0000000000..02480222c4 --- /dev/null +++ b/configs/beit/beit-base_upernet_8xb2-160k_ade20k-640x640_ms.py @@ -0,0 +1,16 @@ +_base_ = './beit-base_upernet_8xb2-160k_ade20k-640x640.py' + +test_pipeline = [ + dict(type='LoadImageFromFile'), + # TODO: Refactor 'MultiScaleFlipAug' which supports + # `min_size` feature in `Resize` class + # img_ratios is [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] + # original image scale is (2560, 640) + dict(type='Resize', scale=(2560, 640), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs'), +] +val_dataloader = dict(batch_size=1, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/configs/beit/beit-large_upernet_8xb1-amp-160k_ade20k-640x640.py b/configs/beit/beit-large_upernet_8xb1-amp-160k_ade20k-640x640.py new file mode 100644 index 0000000000..4fd5cd20ad --- /dev/null +++ b/configs/beit/beit-large_upernet_8xb1-amp-160k_ade20k-640x640.py @@ -0,0 +1,50 @@ +_base_ = [ + '../_base_/models/upernet_beit.py', '../_base_/datasets/ade20k_640x640.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_320k.py' +] +crop_size = (640, 640) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='pretrain/beit_large_patch16_224_pt22k_ft22k.pth', + backbone=dict( + type='BEiT', + embed_dims=1024, + num_layers=24, + num_heads=16, + mlp_ratio=4, + qv_bias=True, + init_values=1e-6, + drop_path_rate=0.2, + out_indices=[7, 11, 15, 23]), + neck=dict(embed_dim=1024, rescales=[4, 2, 1, 0.5]), + decode_head=dict( + in_channels=[1024, 1024, 1024, 1024], num_classes=150, channels=1024), + auxiliary_head=dict(in_channels=1024, num_classes=150), + test_cfg=dict(mode='slide', crop_size=(640, 640), stride=(426, 426))) + +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=2e-5, betas=(0.9, 0.999), weight_decay=0.05), + constructor='LayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.95), + accumulative_counts=2) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=3000), + dict( + type='PolyLR', + power=1.0, + begin=3000, + end=160000, + eta_min=0.0, + by_epoch=False, + ) +] + +train_dataloader = dict(batch_size=1) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/beit/beit-large_upernet_8xb1-amp-160k_ade20k-640x640_ms.py b/configs/beit/beit-large_upernet_8xb1-amp-160k_ade20k-640x640_ms.py new file mode 100644 index 0000000000..fc6f049d11 --- /dev/null +++ b/configs/beit/beit-large_upernet_8xb1-amp-160k_ade20k-640x640_ms.py @@ -0,0 +1,16 @@ +_base_ = './beit-large_upernet_8xb1-amp-160k_ade20k-640x640.py' + +test_pipeline = [ + dict(type='LoadImageFromFile'), + # TODO: Refactor 'MultiScaleFlipAug' which supports + # `min_size` feature in `Resize` class + # img_ratios is [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] + # original image scale is (2560, 640) + dict(type='Resize', scale=(2560, 640), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs'), +] +val_dataloader = 
dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/configs/beit/beit.yml b/configs/beit/beit.yml new file mode 100644 index 0000000000..f6cc0160bf --- /dev/null +++ b/configs/beit/beit.yml @@ -0,0 +1,45 @@ +Models: +- Name: beit-base_upernet_8xb2-160k_ade20k-640x640 + In Collection: UPerNet + Metadata: + backbone: BEiT-B + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 500.0 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640,640) + Training Memory (GB): 15.88 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 53.08 + mIoU(ms+flip): 53.84 + Config: configs/beit/beit-base_upernet_8xb2-160k_ade20k-640x640.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-base_8x2_640x640_160k_ade20k/upernet_beit-base_8x2_640x640_160k_ade20k-eead221d.pth +- Name: beit-large_upernet_8xb1-amp-160k_ade20k-640x640 + In Collection: UPerNet + Metadata: + backbone: BEiT-L + crop size: (640,640) + lr schd: 320000 + inference time (ms/im): + - value: 1041.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (640,640) + Training Memory (GB): 22.64 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 56.33 + mIoU(ms+flip): 56.84 + Config: configs/beit/beit-large_upernet_8xb1-amp-160k_ade20k-640x640.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k/upernet_beit-large_fp16_8x1_640x640_160k_ade20k-8fc0dd5d.pth diff --git a/configs/bisenetv1/README.md b/configs/bisenetv1/README.md new file mode 100644 index 0000000000..72fdd474cb --- /dev/null +++ b/configs/bisenetv1/README.md @@ -0,0 +1,64 @@ +# BiSeNetV1 + +[BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation](https://arxiv.org/abs/1808.00897) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Semantic segmentation requires both rich spatial information and sizeable receptive field. However, modern approaches usually compromise spatial resolution to achieve real-time inference speed, which leads to poor performance. In this paper, we address this dilemma with a novel Bilateral Segmentation Network (BiSeNet). We first design a Spatial Path with a small stride to preserve the spatial information and generate high-resolution features. Meanwhile, a Context Path with a fast downsampling strategy is employed to obtain sufficient receptive field. On top of the two paths, we introduce a new Feature Fusion Module to combine features efficiently. The proposed architecture makes a right balance between the speed and segmentation performance on Cityscapes, CamVid, and COCO-Stuff datasets. Specifically, for a 2048x1024 input, we achieve 68.4% Mean IOU on the Cityscapes test dataset with speed of 105 FPS on one NVIDIA Titan XP card, which is significantly faster than the existing methods with comparable performance. + + + +
+ +
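The abstract above describes the architecture only in words; the snippet below is a structural sketch of that bilateral design (a shallow, high-resolution Spatial Path, a fast-downsampling Context Path, and a Feature Fusion Module that combines the two). It is illustrative only, not the code in `mmseg/models/backbones/bisenetv1.py`, and the constructor arguments are placeholders.

```python
import torch.nn as nn


class BiSeNetV1Sketch(nn.Module):
    """Structural sketch of the bilateral design described above (illustrative only)."""

    def __init__(self, spatial_path: nn.Module, context_path: nn.Module,
                 ffm: nn.Module, head: nn.Module):
        super().__init__()
        self.spatial_path = spatial_path  # small stride, keeps spatial detail
        self.context_path = context_path  # fast downsampling, large receptive field
        self.ffm = ffm                    # Feature Fusion Module combines both paths
        self.head = head                  # segmentation head on the fused features

    def forward(self, img):
        spatial_feat = self.spatial_path(img)
        context_feat = self.context_path(img)
        return self.head(self.ffm(spatial_feat, context_feat))
```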
+ +## Citation + +```bibtex +@inproceedings{yu2018bisenet, + title={Bisenet: Bilateral segmentation network for real-time semantic segmentation}, + author={Yu, Changqian and Wang, Jingbo and Peng, Chao and Gao, Changxin and Yu, Gang and Sang, Nong}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={325--341}, + year={2018} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ----------------------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| BiSeNetV1 (No Pretrain) | R-18-D32 | 1024x1024 | 160000 | 5.69 | 31.77 | 74.44 | 77.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv1/bisenetv1_r18-d32_4xb4-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes_20210922_172239-c55e78e2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes_20210922_172239.log.json) | +| BiSeNetV1 | R-18-D32 | 1024x1024 | 160000 | 5.69 | 31.77 | 74.37 | 76.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210905_220251-8ba80eff.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210905_220251.log.json) | +| BiSeNetV1 (4x8) | R-18-D32 | 1024x1024 | 160000 | 11.17 | 31.77 | 75.16 | 77.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb8-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes_20210905_220322-bb8db75f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes_20210905_220322.log.json) | +| BiSeNetV1 (No Pretrain) | R-50-D32 | 1024x1024 | 160000 | 15.39 | 7.71 | 76.92 | 78.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv1/bisenetv1_r50-d32_4xb4-160k_cityscapes-1024x1024.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes_20210923_222639-7b28a2a6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes_20210923_222639.log.json) | +| BiSeNetV1 | R-50-D32 | 1024x1024 | 160000 | 15.39 | 7.71 | 77.68 | 79.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv1/bisenetv1_r50-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210917_234628-8b304447.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210917_234628.log.json) | + +### COCO-Stuff 164k + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ----------------------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| BiSeNetV1 (No Pretrain) | R-18-D32 | 512x512 | 160000 | - | - | 25.45 | 26.15 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv1/bisenetv1_r18-d32_4xb4-160k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211022_054328-046aa2f2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211022_054328.log.json) | +| BiSeNetV1 | R-18-D32 | 512x512 | 160000 | 6.33 | 74.24 | 28.55 | 29.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211023_013100-f700dbf7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211023_013100.log.json) | +| BiSeNetV1 (No Pretrain) | R-50-D32 | 512x512 | 160000 | - | - | 29.82 | 30.33 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv1/bisenetv1_r50-d32_4xb4-160k_coco-stuff164k-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_040616-d2bb0df4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_040616.log.json) |
+| BiSeNetV1 | R-50-D32 | 512x512 | 160000 | 9.28 | 32.60 | 34.88 | 35.37 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv1/bisenetv1_r50-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_181932-66747911.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_181932.log.json) |
+| BiSeNetV1 (No Pretrain) | R-101-D32 | 512x512 | 160000 | - | - | 31.14 | 31.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv1/bisenetv1_r101-d32_4xb4-160k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211102_164147-c6b32c3b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211102_164147.log.json) |
+| BiSeNetV1 | R-101-D32 | 512x512 | 160000 | 10.36 | 25.25 | 37.38 | 37.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv1/bisenetv1_r101-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_225220-28c8f092.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_225220.log.json) |
+
+Note:
+
+- `4x8`: Using 4 GPUs with 8 samples per GPU in training.
+- For BiSeNetV1 on the Cityscapes dataset, the default setting is 4 GPUs with 4 samples per GPU in training.
+- `No Pretrain` means the model is trained from scratch.
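For reference, the `No Pretrain` rows and their ImageNet-pretrained counterparts differ only in how the ResNet wrapped inside the BiSeNetV1 backbone is initialised. A minimal sketch of that override, mirroring the `*-in1k-pre` configs this patch adds (the `open-mmlab://resnet18_v1c` string is resolved through OpenMMLab's model zoo):

```python
# Sketch: deriving the ImageNet-pretrained variant from the from-scratch
# BiSeNetV1 R-18 Cityscapes config. `backbone_cfg` is the ResNet-18 that
# BiSeNetV1 wraps; only its weight initialisation changes.
_base_ = './bisenetv1_r18-d32_4xb4-160k_cityscapes-1024x1024.py'
model = dict(
    backbone=dict(
        backbone_cfg=dict(
            init_cfg=dict(
                type='Pretrained', checkpoint='open-mmlab://resnet18_v1c'))))
```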
diff --git a/configs/bisenetv1/bisenetv1.yml b/configs/bisenetv1/bisenetv1.yml new file mode 100644 index 0000000000..f5aade4de4 --- /dev/null +++ b/configs/bisenetv1/bisenetv1.yml @@ -0,0 +1,234 @@ +Collections: +- Name: BiSeNetV1 + Metadata: + Training Data: + - Cityscapes + - COCO-Stuff 164k + Paper: + URL: https://arxiv.org/abs/1808.00897 + Title: 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation' + README: configs/bisenetv1/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/backbones/bisenetv1.py#L266 + Version: v0.18.0 + Converted From: + Code: https://github.com/ycszen/TorchSeg/tree/master/model/bisenet +Models: +- Name: bisenetv1_r18-d32_4xb4-160k_cityscapes-1024x1024 + In Collection: BiSeNetV1 + Metadata: + backbone: R-18-D32 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 31.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 5.69 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.44 + mIoU(ms+flip): 77.05 + Config: configs/bisenetv1/bisenetv1_r18-d32_4xb4-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes_20210922_172239-c55e78e2.pth +- Name: bisenetv1_r18-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024 + In Collection: BiSeNetV1 + Metadata: + backbone: R-18-D32 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 31.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 5.69 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.37 + mIoU(ms+flip): 76.91 + Config: configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210905_220251-8ba80eff.pth +- Name: bisenetv1_r18-d32-in1k-pre_4xb8-160k_cityscapes-1024x1024 + In Collection: BiSeNetV1 + Metadata: + backbone: R-18-D32 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 31.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 11.17 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.16 + mIoU(ms+flip): 77.24 + Config: configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb8-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes_20210905_220322-bb8db75f.pth +- Name: bisenetv1_r50-d32_4xb4-160k_cityscapes-1024x1024 + In Collection: BiSeNetV1 + Metadata: + backbone: R-50-D32 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 129.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 15.39 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.92 + mIoU(ms+flip): 78.87 + Config: configs/bisenetv1/bisenetv1_r50-d32_4xb4-160k_cityscapes-1024x1024.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes_20210923_222639-7b28a2a6.pth +- Name: bisenetv1_r50-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024 + In Collection: BiSeNetV1 + Metadata: + backbone: R-50-D32 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 129.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 15.39 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.68 + mIoU(ms+flip): 79.57 + Config: configs/bisenetv1/bisenetv1_r50-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210917_234628-8b304447.pth +- Name: bisenetv1_r18-d32_4xb4-160k_coco-stuff164k-512x512 + In Collection: BiSeNetV1 + Metadata: + backbone: R-18-D32 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 25.45 + mIoU(ms+flip): 26.15 + Config: configs/bisenetv1/bisenetv1_r18-d32_4xb4-160k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211022_054328-046aa2f2.pth +- Name: bisenetv1_r18-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512 + In Collection: BiSeNetV1 + Metadata: + backbone: R-18-D32 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 13.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.33 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 28.55 + mIoU(ms+flip): 29.26 + Config: configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211023_013100-f700dbf7.pth +- Name: bisenetv1_r50-d32_4xb4-160k_coco-stuff164k-512x512 + In Collection: BiSeNetV1 + Metadata: + backbone: R-50-D32 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 29.82 + mIoU(ms+flip): 30.33 + Config: configs/bisenetv1/bisenetv1_r50-d32_4xb4-160k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_040616-d2bb0df4.pth +- Name: bisenetv1_r50-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512 + In Collection: BiSeNetV1 + Metadata: + backbone: R-50-D32 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 30.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.28 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 34.88 + mIoU(ms+flip): 35.37 + Config: configs/bisenetv1/bisenetv1_r50-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_181932-66747911.pth
+- Name: bisenetv1_r101-d32_4xb4-160k_coco-stuff164k-512x512
+  In Collection: BiSeNetV1
+  Metadata:
+    backbone: R-101-D32
+    crop size: (512,512)
+    lr schd: 160000
+  Results:
+  - Task: Semantic Segmentation
+    Dataset: COCO-Stuff 164k
+    Metrics:
+      mIoU: 31.14
+      mIoU(ms+flip): 31.76
+  Config: configs/bisenetv1/bisenetv1_r101-d32_4xb4-160k_coco-stuff164k-512x512.py
+  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211102_164147-c6b32c3b.pth
+- Name: bisenetv1_r101-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512
+  In Collection: BiSeNetV1
+  Metadata:
+    backbone: R-101-D32
+    crop size: (512,512)
+    lr schd: 160000
+    inference time (ms/im):
+    - value: 39.6
+      hardware: V100
+      backend: PyTorch
+      batch size: 1
+      mode: FP32
+      resolution: (512,512)
+    Training Memory (GB): 10.36
+  Results:
+  - Task: Semantic Segmentation
+    Dataset: COCO-Stuff 164k
+    Metrics:
+      mIoU: 37.38
+      mIoU(ms+flip): 37.99
+  Config: configs/bisenetv1/bisenetv1_r101-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py
+  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_225220-28c8f092.pth
diff --git a/configs/bisenetv1/bisenetv1_r101-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py b/configs/bisenetv1/bisenetv1_r101-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py
new file mode 100644
index 0000000000..ac63447d47
--- /dev/null
+++ b/configs/bisenetv1/bisenetv1_r101-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py
@@ -0,0 +1,6 @@
+_base_ = './bisenetv1_r101-d32_4xb4-160k_coco-stuff164k-512x512.py'
+model = dict(
+    backbone=dict(
+        backbone_cfg=dict(
+            init_cfg=dict(
+                type='Pretrained', checkpoint='open-mmlab://resnet101_v1c'))))
diff --git a/configs/bisenetv1/bisenetv1_r101-d32_4xb4-160k_coco-stuff164k-512x512.py b/configs/bisenetv1/bisenetv1_r101-d32_4xb4-160k_coco-stuff164k-512x512.py
new file mode 100644
index 0000000000..02e4e9be05
--- /dev/null
+++ b/configs/bisenetv1/bisenetv1_r101-d32_4xb4-160k_coco-stuff164k-512x512.py
@@ -0,0 +1,58 @@
+_base_ = [
+    '../_base_/models/bisenetv1_r18-d32.py',
+    '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py',
+    '../_base_/schedules/schedule_160k.py'
+]
+crop_size = (512, 512)
+data_preprocessor = dict(size=crop_size)
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    data_preprocessor=data_preprocessor,
+    backbone=dict(
+        context_channels=(512, 1024, 2048),
+        spatial_channels=(256, 256, 256, 512),
+        out_channels=1024,
+        backbone_cfg=dict(type='ResNet', depth=101)),
+    decode_head=dict(in_channels=1024, channels=1024, num_classes=171),
+    auxiliary_head=[
+        dict(
+            type='FCNHead',
+            in_channels=512,
+            channels=256,
+            num_convs=1,
+            num_classes=171,
+            in_index=1,
+            norm_cfg=norm_cfg,
+            concat_input=False,
+            align_corners=False,
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+        dict(
+            type='FCNHead',
+            in_channels=512,
+            channels=256,
+            num_convs=1,
+            num_classes=171,
+            in_index=2,
+            norm_cfg=norm_cfg,
+            concat_input=False,
+            align_corners=False,
+            loss_decode=dict(
+                type='CrossEntropyLoss',
use_sigmoid=False, loss_weight=1.0)), + ]) +param_scheduler = [ + dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000), + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=1000, + end=160000, + by_epoch=False, + ) +] +optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024.py b/configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024.py new file mode 100644 index 0000000000..da3e598127 --- /dev/null +++ b/configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/models/bisenetv1_r18-d32.py', + '../_base_/datasets/cityscapes_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (1024, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')))) +param_scheduler = [ + dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000), + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=1000, + end=160000, + by_epoch=False, + ) +] +optimizer = dict(type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py b/configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..9de889f001 --- /dev/null +++ b/configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py @@ -0,0 +1,10 @@ +_base_ = './bisenetv1_r18-d32_4xb4-160k_coco-stuff164k-512x512.py' +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet18_v1c'))), +) diff --git a/configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb8-160k_cityscapes-1024x1024.py b/configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb8-160k_cityscapes-1024x1024.py new file mode 100644 index 0000000000..0580ce11e6 --- /dev/null +++ b/configs/bisenetv1/bisenetv1_r18-d32-in1k-pre_4xb8-160k_cityscapes-1024x1024.py @@ -0,0 +1,4 @@ +_base_ = './bisenetv1_r18-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024.py' +train_dataloader = dict(batch_size=8, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/bisenetv1/bisenetv1_r18-d32_4xb4-160k_cityscapes-1024x1024.py b/configs/bisenetv1/bisenetv1_r18-d32_4xb4-160k_cityscapes-1024x1024.py new file mode 100644 index 0000000000..6c3e12b24f --- /dev/null +++ b/configs/bisenetv1/bisenetv1_r18-d32_4xb4-160k_cityscapes-1024x1024.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/bisenetv1_r18-d32.py', + '../_base_/datasets/cityscapes_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (1024, 1024) +data_preprocessor = dict(size=crop_size) +model = 
dict(data_preprocessor=data_preprocessor) +param_scheduler = [ + dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000), + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=1000, + end=160000, + by_epoch=False, + ) +] +optimizer = dict(type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/bisenetv1/bisenetv1_r18-d32_4xb4-160k_coco-stuff164k-512x512.py b/configs/bisenetv1/bisenetv1_r18-d32_4xb4-160k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..2109d689d0 --- /dev/null +++ b/configs/bisenetv1/bisenetv1_r18-d32_4xb4-160k_coco-stuff164k-512x512.py @@ -0,0 +1,53 @@ +_base_ = [ + '../_base_/models/bisenetv1_r18-d32.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=171), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=128, + channels=64, + num_convs=1, + num_classes=171, + in_index=1, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='FCNHead', + in_channels=128, + channels=64, + num_convs=1, + num_classes=171, + in_index=2, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + ]) +param_scheduler = [ + dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000), + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=1000, + end=160000, + by_epoch=False, + ) +] +optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/bisenetv1/bisenetv1_r50-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024.py b/configs/bisenetv1/bisenetv1_r50-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024.py new file mode 100644 index 0000000000..013c4ff162 --- /dev/null +++ b/configs/bisenetv1/bisenetv1_r50-d32-in1k-pre_4xb4-160k_cityscapes-1024x1024.py @@ -0,0 +1,7 @@ +_base_ = './bisenetv1_r50-d32_4xb4-160k_cityscapes-1024x1024.py' +model = dict( + type='EncoderDecoder', + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet50_v1c')))) diff --git a/configs/bisenetv1/bisenetv1_r50-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py b/configs/bisenetv1/bisenetv1_r50-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..b35259c725 --- /dev/null +++ b/configs/bisenetv1/bisenetv1_r50-d32-in1k-pre_4xb4-160k_coco-stuff164k-512x512.py @@ -0,0 +1,7 @@ +_base_ = './bisenetv1_r50-d32_4xb4-160k_coco-stuff164k-512x512.py' + +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet50_v1c')))) diff --git a/configs/bisenetv1/bisenetv1_r50-d32_4xb4-160k_cityscapes-1024x1024.py b/configs/bisenetv1/bisenetv1_r50-d32_4xb4-160k_cityscapes-1024x1024.py new file mode 100644 index 
0000000000..9753c10231 --- /dev/null +++ b/configs/bisenetv1/bisenetv1_r50-d32_4xb4-160k_cityscapes-1024x1024.py @@ -0,0 +1,55 @@ +_base_ = [ + '../_base_/models/bisenetv1_r18-d32.py', + '../_base_/datasets/cityscapes_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +crop_size = (1024, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='BiSeNetV1', + context_channels=(512, 1024, 2048), + spatial_channels=(256, 256, 256, 512), + out_channels=1024, + backbone_cfg=dict(type='ResNet', depth=50)), + decode_head=dict( + type='FCNHead', in_channels=1024, in_index=0, channels=1024), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=512, + channels=256, + num_convs=1, + num_classes=19, + in_index=1, + norm_cfg=norm_cfg, + concat_input=False), + dict( + type='FCNHead', + in_channels=512, + channels=256, + num_convs=1, + num_classes=19, + in_index=2, + norm_cfg=norm_cfg, + concat_input=False), + ]) +param_scheduler = [ + dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000), + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=1000, + end=160000, + by_epoch=False, + ) +] +optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/bisenetv1/bisenetv1_r50-d32_4xb4-160k_coco-stuff164k-512x512.py b/configs/bisenetv1/bisenetv1_r50-d32_4xb4-160k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..8b6ef74c1a --- /dev/null +++ b/configs/bisenetv1/bisenetv1_r50-d32_4xb4-160k_coco-stuff164k-512x512.py @@ -0,0 +1,58 @@ +_base_ = [ + '../_base_/models/bisenetv1_r18-d32.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + context_channels=(512, 1024, 2048), + spatial_channels=(256, 256, 256, 512), + out_channels=1024, + backbone_cfg=dict(type='ResNet', depth=50)), + decode_head=dict(in_channels=1024, channels=1024, num_classes=171), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=512, + channels=256, + num_convs=1, + num_classes=171, + in_index=1, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='FCNHead', + in_channels=512, + channels=256, + num_convs=1, + num_classes=171, + in_index=2, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + ]) +param_scheduler = [ + dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000), + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=1000, + end=160000, + by_epoch=False, + ) +] +optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/bisenetv2/README.md 
b/configs/bisenetv2/README.md new file mode 100644 index 0000000000..7cde5c02c1 --- /dev/null +++ b/configs/bisenetv2/README.md @@ -0,0 +1,53 @@ +# BiSeNetV2 + +[Bisenet v2: Bilateral Network with Guided Aggregation for Real-time Semantic Segmentation](https://arxiv.org/abs/2004.02147) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The low-level details and high-level semantics are both essential to the semantic segmentation task. However, to speed up the model inference, current approaches almost always sacrifice the low-level details, which leads to a considerable accuracy decrease. We propose to treat these spatial details and categorical semantics separately to achieve high accuracy and high efficiency for realtime semantic segmentation. To this end, we propose an efficient and effective architecture with a good trade-off between speed and accuracy, termed Bilateral Segmentation Network (BiSeNet V2). This architecture involves: (i) a Detail Branch, with wide channels and shallow layers to capture low-level details and generate high-resolution feature representation; (ii) a Semantic Branch, with narrow channels and deep layers to obtain high-level semantic context. The Semantic Branch is lightweight due to reducing the channel capacity and a fast-downsampling strategy. Furthermore, we design a Guided Aggregation Layer to enhance mutual connections and fuse both types of feature representation. Besides, a booster training strategy is designed to improve the segmentation performance without any extra inference cost. Extensive quantitative and qualitative evaluations demonstrate that the proposed architecture performs favourably against a few state-of-the-art real-time semantic segmentation approaches. Specifically, for a 2,048x1,024 input, we achieve 72.6% Mean IoU on the Cityscapes test set with a speed of 156 FPS on one NVIDIA GeForce GTX 1080 Ti card, which is significantly faster than existing methods, yet we achieve better segmentation accuracy. + + + +
+ +
+ +## Citation + +```bibtex +@article{yu2021bisenet, + title={Bisenet v2: Bilateral network with guided aggregation for real-time semantic segmentation}, + author={Yu, Changqian and Gao, Changxin and Wang, Jingbo and Yu, Gang and Shen, Chunhua and Sang, Nong}, + journal={International Journal of Computer Vision}, + pages={1--18}, + year={2021}, + publisher={Springer} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| BiSeNetV2 | BiSeNetV2 | 1024x1024 | 160000 | 7.64 | 31.77 | 73.21 | 75.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv2/bisenetv2_fcn_4xb4-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes_20210902_015551-bcf10f09.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes_20210902_015551.log.json) | +| BiSeNetV2 (OHEM) | BiSeNetV2 | 1024x1024 | 160000 | 7.64 | - | 73.57 | 75.80 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv2/bisenetv2_fcn_4xb4-ohem-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes_20210902_112947-5f8103b4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes_20210902_112947.log.json) | +| BiSeNetV2 (4x8) | BiSeNetV2 | 1024x1024 | 160000 | 15.05 | - | 75.76 | 77.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv2/bisenetv2_fcn_4xb8-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes_20210903_000032-e1a2eed6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes_20210903_000032.log.json) | +| BiSeNetV2 (FP16) | BiSeNetV2 | 1024x1024 | 160000 | 5.77 | 36.65 | 73.07 | 75.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/bisenetv2/bisenetv2_fcn_4xb4-amp-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes_20210902_045942-b979777b.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes_20210902_045942.log.json) | + +Note: + +- `OHEM` means Online Hard Example Mining (OHEM) is adopted in training. +- `FP16` means Mixed Precision (FP16) is adopted in training. +- `4x8` means 4 GPUs with 8 samples per GPU in training. diff --git a/configs/bisenetv2/bisenetv2.yml b/configs/bisenetv2/bisenetv2.yml new file mode 100644 index 0000000000..70c4326a55 --- /dev/null +++ b/configs/bisenetv2/bisenetv2.yml @@ -0,0 +1,88 @@ +Collections: +- Name: BiSeNetV2 + Metadata: + Training Data: + - Cityscapes + Paper: + URL: https://arxiv.org/abs/2004.02147 + Title: 'Bisenet v2: Bilateral Network with Guided Aggregation for Real-time Semantic + Segmentation' + README: configs/bisenetv2/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/backbones/bisenetv2.py#L545 + Version: v0.18.0 +Models: +- Name: bisenetv2_fcn_4xb4-160k_cityscapes-1024x1024 + In Collection: BiSeNetV2 + Metadata: + backbone: BiSeNetV2 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 31.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 7.64 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.21 + mIoU(ms+flip): 75.74 + Config: configs/bisenetv2/bisenetv2_fcn_4xb4-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes_20210902_015551-bcf10f09.pth +- Name: bisenetv2_fcn_4xb4-ohem-160k_cityscapes-1024x1024 + In Collection: BiSeNetV2 + Metadata: + backbone: BiSeNetV2 + crop size: (1024,1024) + lr schd: 160000 + Training Memory (GB): 7.64 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.57 + mIoU(ms+flip): 75.8 + Config: configs/bisenetv2/bisenetv2_fcn_4xb4-ohem-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes_20210902_112947-5f8103b4.pth +- Name: bisenetv2_fcn_4xb8-160k_cityscapes-1024x1024 + In Collection: BiSeNetV2 + Metadata: + backbone: BiSeNetV2 + crop size: (1024,1024) + lr schd: 160000 + Training Memory (GB): 15.05 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.76 + mIoU(ms+flip): 77.79 + Config: configs/bisenetv2/bisenetv2_fcn_4xb8-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes_20210903_000032-e1a2eed6.pth +- Name: bisenetv2_fcn_4xb4-amp-160k_cityscapes-1024x1024 + In Collection: BiSeNetV2 + Metadata: + backbone: BiSeNetV2 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 27.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (1024,1024) + Training Memory (GB): 5.77 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.07 + mIoU(ms+flip): 75.13 + Config: configs/bisenetv2/bisenetv2_fcn_4xb4-amp-160k_cityscapes-1024x1024.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes_20210902_045942-b979777b.pth diff --git a/configs/bisenetv2/bisenetv2_fcn_4xb4-160k_cityscapes-1024x1024.py b/configs/bisenetv2/bisenetv2_fcn_4xb4-160k_cityscapes-1024x1024.py new file mode 100644 index 0000000000..6462ce7624 --- /dev/null +++ b/configs/bisenetv2/bisenetv2_fcn_4xb4-160k_cityscapes-1024x1024.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/bisenetv2.py', + '../_base_/datasets/cityscapes_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (1024, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) +param_scheduler = [ + dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000), + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=1000, + end=160000, + by_epoch=False, + ) +] +optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/bisenetv2/bisenetv2_fcn_4xb4-amp-160k_cityscapes-1024x1024.py b/configs/bisenetv2/bisenetv2_fcn_4xb4-amp-160k_cityscapes-1024x1024.py new file mode 100644 index 0000000000..8ed338c00b --- /dev/null +++ b/configs/bisenetv2/bisenetv2_fcn_4xb4-amp-160k_cityscapes-1024x1024.py @@ -0,0 +1,6 @@ +_base_ = './bisenetv2_fcn_4xb4-160k_cityscapes-1024x1024.py' +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0005), + loss_scale=512.) 
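The BiSeNetV2 variants above (baseline, OHEM, 4x8, FP16) are all trained the same way; only the config file changes. A minimal sketch of launching one of them programmatically, assuming an mmsegmentation dev-1.x installation with MMEngine and the Cityscapes data prepared as in the dataset docs (`tools/train.py` and `tools/dist_train.sh` wrap the same steps, and the `work_dir` value here is only illustrative):

```python
# Sketch: training the FP16 (AMP) BiSeNetV2 config with MMEngine's Runner.
from mmengine.config import Config
from mmengine.runner import Runner

from mmseg.utils import register_all_modules

register_all_modules()  # register mmseg models, datasets and transforms
cfg = Config.fromfile(
    'configs/bisenetv2/bisenetv2_fcn_4xb4-amp-160k_cityscapes-1024x1024.py')
cfg.work_dir = './work_dirs/bisenetv2_fcn_amp'  # any writable directory
runner = Runner.from_cfg(cfg)
runner.train()
```

Relative to the baseline config, the AMP variant above changes only the optimizer wrapper (`AmpOptimWrapper` with `loss_scale=512.`), and the OHEM variant that follows changes only the `OHEMPixelSampler` attached to the decode and auxiliary heads.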
diff --git a/configs/bisenetv2/bisenetv2_fcn_4xb4-ohem-160k_cityscapes-1024x1024.py b/configs/bisenetv2/bisenetv2_fcn_4xb4-ohem-160k_cityscapes-1024x1024.py
new file mode 100644
index 0000000000..8d5cbcb4e5
--- /dev/null
+++ b/configs/bisenetv2/bisenetv2_fcn_4xb4-ohem-160k_cityscapes-1024x1024.py
@@ -0,0 +1,83 @@
+_base_ = [
+    '../_base_/models/bisenetv2.py',
+    '../_base_/datasets/cityscapes_1024x1024.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
+]
+crop_size = (1024, 1024)
+data_preprocessor = dict(size=crop_size)
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+    data_preprocessor=data_preprocessor,
+    decode_head=dict(
+        sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000)),
+    auxiliary_head=[
+        dict(
+            type='FCNHead',
+            in_channels=16,
+            channels=16,
+            num_convs=2,
+            num_classes=19,
+            in_index=1,
+            norm_cfg=norm_cfg,
+            concat_input=False,
+            align_corners=False,
+            sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000),
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+        dict(
+            type='FCNHead',
+            in_channels=32,
+            channels=64,
+            num_convs=2,
+            num_classes=19,
+            in_index=2,
+            norm_cfg=norm_cfg,
+            concat_input=False,
+            align_corners=False,
+            sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000),
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+        dict(
+            type='FCNHead',
+            in_channels=64,
+            channels=256,
+            num_convs=2,
+            num_classes=19,
+            in_index=3,
+            norm_cfg=norm_cfg,
+            concat_input=False,
+            align_corners=False,
+            sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000),
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+        dict(
+            type='FCNHead',
+            in_channels=128,
+            channels=1024,
+            num_convs=2,
+            num_classes=19,
+            in_index=4,
+            norm_cfg=norm_cfg,
+            concat_input=False,
+            align_corners=False,
+            sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000),
+            loss_decode=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+    ],
+)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=1000,
+        end=160000,
+        by_epoch=False,
+    )
+]
+optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0005)
+optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
+train_dataloader = dict(batch_size=4, num_workers=4)
+val_dataloader = dict(batch_size=1, num_workers=4)
+test_dataloader = val_dataloader
diff --git a/configs/bisenetv2/bisenetv2_fcn_4xb8-160k_cityscapes-1024x1024.py b/configs/bisenetv2/bisenetv2_fcn_4xb8-160k_cityscapes-1024x1024.py
new file mode 100644
index 0000000000..8fcba64713
--- /dev/null
+++ b/configs/bisenetv2/bisenetv2_fcn_4xb8-160k_cityscapes-1024x1024.py
@@ -0,0 +1,24 @@
+_base_ = [
+    '../_base_/models/bisenetv2.py',
+    '../_base_/datasets/cityscapes_1024x1024.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
+]
+crop_size = (1024, 1024)
+data_preprocessor = dict(size=crop_size)
+model = dict(data_preprocessor=data_preprocessor)
+param_scheduler = [
+    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
+    dict(
+        type='PolyLR',
+        eta_min=1e-4,
+        power=0.9,
+        begin=1000,
+        end=160000,
+        by_epoch=False,
+    )
+]
+optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0005)
+optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
+train_dataloader = dict(batch_size=8,
num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/ccnet/README.md b/configs/ccnet/README.md index 436a962340..9d110f4df9 100644 --- a/configs/ccnet/README.md +++ b/configs/ccnet/README.md @@ -1,7 +1,30 @@ -# CCNet: Criss-Cross Attention for Semantic Segmentation +# CCNet + +[CCNet: Criss-Cross Attention for Semantic Segmentation](https://arxiv.org/abs/1811.11721) ## Introduction -``` + + + +Official Repo + +Code Snippet + +## Abstract + + + +Contextual information is vital in visual understanding problems, such as semantic segmentation and object detection. We propose a Criss-Cross Network (CCNet) for obtaining full-image contextual information in a very effective and efficient way. Concretely, for each pixel, a novel criss-cross attention module harvests the contextual information of all the pixels on its criss-cross path. By taking a further recurrent operation, each pixel can finally capture the full-image dependencies. Besides, a category consistent loss is proposed to enforce the criss-cross attention module to produce more discriminative features. Overall, CCNet is with the following merits: 1) GPU memory friendly. Compared with the non-local block, the proposed recurrent criss-cross attention module requires 11x less GPU memory usage. 2) High computational efficiency. The recurrent criss-cross attention significantly reduces FLOPs by about 85% of the non-local block. 3) The state-of-the-art performance. We conduct extensive experiments on semantic segmentation benchmarks including Cityscapes, ADE20K, human parsing benchmark LIP, instance segmentation benchmark COCO, video segmentation benchmark CamVid. In particular, our CCNet achieves the mIoU scores of 81.9%, 45.76% and 55.47% on the Cityscapes test set, the ADE20K validation set and the LIP validation set respectively, which are the new state-of-the-art results. The source codes are available at [this https URL](https://github.com/speedinghzl/CCNet). + + + +
+ +
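The criss-cross attention described above is also exposed as a standalone op, so the recurrence can be exercised in isolation; a minimal sketch, assuming an mmcv installation that provides the `CrissCrossAttention` op used by mmseg's `CCHead` (tensor shapes are illustrative):

```python
# Sketch: two recurrent criss-cross attention passes. Each pass lets a pixel
# attend to its own row and column; after the second pass every pixel has
# aggregated context from the whole feature map.
import torch
from mmcv.ops import CrissCrossAttention

feats = torch.randn(1, 512, 97, 97)   # (N, C, H, W) backbone feature map
cca = CrissCrossAttention(in_channels=512)
out = feats
for _ in range(2):                     # recurrence used by the CCNet head
    out = cca(out)
print(out.shape)                       # torch.Size([1, 512, 97, 97])
```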
+ +## Citation + +```bibtex @article{huang2018ccnet, title={CCNet: Criss-Cross Attention for Semantic Segmentation}, author={Huang, Zilong and Wang, Xinggang and Huang, Lichao and Huang, Chang and Wei, Yunchao and Liu, Wenyu}, @@ -13,29 +36,32 @@ ## Results and models ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| CCNet | R-50-D8 | 512x1024 | 40000 | 6 | 3.32 | 77.76 | 78.87 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517-4123f401.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517.log.json) | -| CCNet | R-101-D8 | 512x1024 | 40000 | 9.5 | 2.31 | 76.35 | 78.19 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540-a3b84ba6.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540.log.json) | -| CCNet | R-50-D8 | 769x769 | 40000 | 6.8 | 1.43 | 78.46 | 79.93 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125-76d11884.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125.log.json) | -| CCNet | R-101-D8 | 769x769 | 40000 | 10.7 | 1.01 | 76.94 | 78.62 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428-4f57c8d0.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428.log.json) | -| CCNet | R-50-D8 | 512x1024 | 80000 | - | - | 79.03 | 80.16 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421-869a3423.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421.log.json) | -| CCNet | R-101-D8 | 512x1024 | 80000 | - | - | 78.87 | 79.90 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935-ffae8917.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935.log.json) | -| CCNet | R-50-D8 | 769x769 | 80000 | - | - | 79.29 | 81.08 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421-73eed8ca.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421.log.json) | -| CCNet | R-101-D8 | 769x769 | 80000 | - | - | 79.45 | 80.66 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502-ad3cd481.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| CCNet | R-50-D8 | 512x1024 | 40000 | 6 | 3.32 | 77.76 | 78.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517-4123f401.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517.log.json) | +| CCNet | R-101-D8 | 512x1024 | 40000 | 9.5 | 2.31 | 76.35 | 78.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540-a3b84ba6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540.log.json) | +| CCNet | R-50-D8 | 769x769 | 40000 | 6.8 | 1.43 | 78.46 | 79.93 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125-76d11884.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125.log.json) | +| CCNet | R-101-D8 | 769x769 | 40000 | 10.7 | 1.01 | 76.94 | 78.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428-4f57c8d0.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428.log.json) | +| CCNet | R-50-D8 | 512x1024 | 80000 | - | - | 79.03 | 80.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421-869a3423.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421.log.json) | +| CCNet | R-101-D8 | 512x1024 | 80000 | - | - | 78.87 | 79.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935-ffae8917.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935.log.json) | +| CCNet | R-50-D8 | 769x769 | 80000 | - | - | 79.29 | 81.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421-73eed8ca.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421.log.json) | +| CCNet | R-101-D8 | 769x769 | 80000 | - | - | 79.45 | 80.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502-ad3cd481.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| CCNet | R-50-D8 | 512x512 | 80000 | 8.8 | 20.89 | 41.78 | 42.98 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848-aa37f61e.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848.log.json) | -| CCNet | R-101-D8 | 512x512 | 80000 | 12.2 | 14.11 | 43.97 | 45.13 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848-1f4929a3.pth) | 
[log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848.log.json) | -| CCNet | R-50-D8 | 512x512 | 160000 | - | - | 42.08 | 43.13 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435-7c97193b.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435.log.json) | -| CCNet | R-101-D8 | 512x512 | 160000 | - | - | 43.71 | 45.04 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644-e849e007.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| CCNet | R-50-D8 | 512x512 | 80000 | 8.8 | 20.89 | 41.78 | 42.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848-aa37f61e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848.log.json) | +| CCNet | R-101-D8 | 512x512 | 80000 | 12.2 | 14.11 | 43.97 | 45.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848-1f4929a3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848.log.json) | +| CCNet | R-50-D8 | 512x512 | 160000 | - | - | 42.08 | 43.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435-7c97193b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435.log.json) | +| CCNet | R-101-D8 | 512x512 | 160000 | - | - | 43.71 | 45.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r101-d8_4xb4-160k_ade20k-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644-e849e007.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644.log.json) | ### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| CCNet | R-50-D8 | 512x512 | 20000 | 6 | 20.45 | 76.17 | 77.51 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212-fad81784.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212.log.json) | -| CCNet | R-101-D8 | 512x512 | 20000 | 9.5 | 13.64 | 77.27 | 79.02 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212-0007b61d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212.log.json) | -| CCNet | R-50-D8 | 512x512 | 40000 | - | - | 75.96 | 77.04 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127-c2a15f02.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127.log.json) | -| CCNet | R-101-D8 | 512x512 | 40000 | - | - | 77.87 | 78.90 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127-c30da577.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| CCNet | R-50-D8 | 512x512 | 20000 | 6 | 20.45 | 76.17 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r50-d8_4xb4-20k_voc12aug-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212-fad81784.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212.log.json) | +| CCNet | R-101-D8 | 512x512 | 20000 | 9.5 | 13.64 | 77.27 | 79.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r101-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212-0007b61d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212.log.json) | +| CCNet | R-50-D8 | 512x512 | 40000 | - | - | 75.96 | 77.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r50-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127-c2a15f02.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127.log.json) | +| CCNet | R-101-D8 | 512x512 | 40000 | - | - | 77.87 | 78.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ccnet/ccnet_r101-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127-c30da577.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127.log.json) | diff --git a/configs/ccnet/ccnet.yml b/configs/ccnet/ccnet.yml new file mode 100644 index 0000000000..b05863dacb --- /dev/null +++ b/configs/ccnet/ccnet.yml @@ -0,0 +1,305 @@ +Collections: +- Name: CCNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1811.11721 + Title: 'CCNet: Criss-Cross Attention for Semantic Segmentation' + README: configs/ccnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/apc_head.py#L111 + Version: v0.17.0 + Converted From: + Code: https://github.com/speedinghzl/CCNet +Models: +- Name: ccnet_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 301.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.76 + mIoU(ms+flip): 78.87 + Config: configs/ccnet/ccnet_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517-4123f401.pth +- Name: ccnet_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 432.9 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.5 + Results: + - Task: Semantic Segmentation + Dataset: 
Cityscapes + Metrics: + mIoU: 76.35 + mIoU(ms+flip): 78.19 + Config: configs/ccnet/ccnet_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540-a3b84ba6.pth +- Name: ccnet_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 699.3 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.46 + mIoU(ms+flip): 79.93 + Config: configs/ccnet/ccnet_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125-76d11884.pth +- Name: ccnet_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 990.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.94 + mIoU(ms+flip): 78.62 + Config: configs/ccnet/ccnet_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428-4f57c8d0.pth +- Name: ccnet_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.03 + mIoU(ms+flip): 80.16 + Config: configs/ccnet/ccnet_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421-869a3423.pth +- Name: ccnet_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.87 + mIoU(ms+flip): 79.9 + Config: configs/ccnet/ccnet_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935-ffae8917.pth +- Name: ccnet_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.29 + mIoU(ms+flip): 81.08 + Config: configs/ccnet/ccnet_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421-73eed8ca.pth +- Name: ccnet_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.45 + mIoU(ms+flip): 80.66 + Config: configs/ccnet/ccnet_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502-ad3cd481.pth +- Name: ccnet_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.78 + mIoU(ms+flip): 42.98 + Config: configs/ccnet/ccnet_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848-aa37f61e.pth +- Name: ccnet_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 70.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.97 + mIoU(ms+flip): 45.13 + Config: configs/ccnet/ccnet_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848-1f4929a3.pth +- Name: ccnet_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.08 + mIoU(ms+flip): 43.13 + Config: configs/ccnet/ccnet_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435-7c97193b.pth +- Name: ccnet_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.71 + mIoU(ms+flip): 45.04 + Config: configs/ccnet/ccnet_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644-e849e007.pth +- Name: ccnet_r50-d8_4xb4-20k_voc12aug-512x512 + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 48.9 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.0 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.17 + mIoU(ms+flip): 77.51 + Config: configs/ccnet/ccnet_r50-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212-fad81784.pth +- Name: ccnet_r101-d8_4xb4-20k_voc12aug-512x512 + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 73.31 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.5 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.27 + mIoU(ms+flip): 79.02 + Config: configs/ccnet/ccnet_r101-d8_4xb4-20k_voc12aug-512x512.py 
+ Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212-0007b61d.pth +- Name: ccnet_r50-d8_4xb4-40k_voc12aug-512x512 + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 75.96 + mIoU(ms+flip): 77.04 + Config: configs/ccnet/ccnet_r50-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127-c2a15f02.pth +- Name: ccnet_r101-d8_4xb4-40k_voc12aug-512x512 + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.87 + mIoU(ms+flip): 78.9 + Config: configs/ccnet/ccnet_r101-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127-c30da577.pth diff --git a/configs/ccnet/ccnet_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/ccnet/ccnet_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..0c49e1edc2 --- /dev/null +++ b/configs/ccnet/ccnet_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/ccnet/ccnet_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..f24f5a70ed --- /dev/null +++ b/configs/ccnet/ccnet_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/ccnet/ccnet_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..b358e12c4e --- /dev/null +++ b/configs/ccnet/ccnet_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/ccnet/ccnet_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..75750768b2 --- /dev/null +++ b/configs/ccnet/ccnet_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/ccnet/ccnet_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..a29d118f41 --- /dev/null +++ b/configs/ccnet/ccnet_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_4xb4-20k_voc12aug-512x512.py b/configs/ccnet/ccnet_r101-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..fd421a2ed5 --- /dev/null +++ b/configs/ccnet/ccnet_r101-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = 
'./ccnet_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_4xb4-40k_voc12aug-512x512.py b/configs/ccnet/ccnet_r101-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..425dfcf339 --- /dev/null +++ b/configs/ccnet/ccnet_r101-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_4xb4-40k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/ccnet/ccnet_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..f6dcb9cf50 --- /dev/null +++ b/configs/ccnet/ccnet_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py b/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index d2bac38ca6..0000000000 --- a/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ccnet_r50-d8_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py b/configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 989928ab7f..0000000000 --- a/configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ccnet_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py b/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index c32bf48751..0000000000 --- a/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ccnet_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_512x512_20k_voc12aug.py b/configs/ccnet/ccnet_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 53eb77c0cd..0000000000 --- a/configs/ccnet/ccnet_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ccnet_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_512x512_40k_voc12aug.py b/configs/ccnet/ccnet_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index d7eb668f39..0000000000 --- a/configs/ccnet/ccnet_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ccnet_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_512x512_80k_ade20k.py b/configs/ccnet/ccnet_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index 029c1d525b..0000000000 --- a/configs/ccnet/ccnet_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ccnet_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py b/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 43f05fab05..0000000000 --- a/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py +++ 
/dev/null @@ -1,2 +0,0 @@ -_base_ = './ccnet_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py b/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 654f377b6f..0000000000 --- a/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ccnet_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/ccnet/ccnet_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/ccnet/ccnet_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..84fc51a6b3 --- /dev/null +++ b/configs/ccnet/ccnet_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/ccnet/ccnet_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/ccnet/ccnet_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..a930794065 --- /dev/null +++ b/configs/ccnet/ccnet_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/ccnet/ccnet_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/ccnet/ccnet_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..89bfe81825 --- /dev/null +++ b/configs/ccnet/ccnet_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/ccnet/ccnet_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/ccnet/ccnet_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..5f7c954aec --- /dev/null +++ b/configs/ccnet/ccnet_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/ccnet/ccnet_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/ccnet/ccnet_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..cee810cd85 --- /dev/null +++ b/configs/ccnet/ccnet_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size 
= (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/ccnet/ccnet_r50-d8_4xb4-20k_voc12aug-512x512.py b/configs/ccnet/ccnet_r50-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..76a90d98a8 --- /dev/null +++ b/configs/ccnet/ccnet_r50-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/ccnet/ccnet_r50-d8_4xb4-40k_voc12aug-512x512.py b/configs/ccnet/ccnet_r50-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..a8aeb85dc6 --- /dev/null +++ b/configs/ccnet/ccnet_r50-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/ccnet/ccnet_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/ccnet/ccnet_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..f7fced0a7b --- /dev/null +++ b/configs/ccnet/ccnet_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py b/configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 6a4316dde5..0000000000 --- a/configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py b/configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 16e34356e9..0000000000 --- a/configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py b/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index 1ad94d8988..0000000000 --- a/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/configs/ccnet/ccnet_r50-d8_512x512_20k_voc12aug.py b/configs/ccnet/ccnet_r50-d8_512x512_20k_voc12aug.py 
deleted file mode 100644 index bbcd29ccea..0000000000 --- a/configs/ccnet/ccnet_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/ccnet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/ccnet/ccnet_r50-d8_512x512_40k_voc12aug.py b/configs/ccnet/ccnet_r50-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 947b8ac8ce..0000000000 --- a/configs/ccnet/ccnet_r50-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/ccnet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py b/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 1a1f49cf6b..0000000000 --- a/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py b/configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index d7fd8ccc59..0000000000 --- a/configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/ccnet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py b/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 6d3b3498bf..0000000000 --- a/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/ccnet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/cgnet/README.md b/configs/cgnet/README.md new file mode 100644 index 0000000000..709d0c0b8f --- /dev/null +++ b/configs/cgnet/README.md @@ -0,0 +1,46 @@ +# CGNet + +[CGNet: A Light-weight Context Guided Network for Semantic Segmentation](https://arxiv.org/abs/1811.08201) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The demand of applying semantic segmentation model on mobile devices has been increasing rapidly. Current state-of-the-art networks have enormous amount of parameters hence unsuitable for mobile devices, while other small memory footprint models follow the spirit of classification network and ignore the inherent characteristic of semantic segmentation. To tackle this problem, we propose a novel Context Guided Network (CGNet), which is a light-weight and efficient network for semantic segmentation. 
We first propose the Context Guided (CG) block, which learns the joint feature of both local feature and surrounding context, and further improves the joint feature with the global context. Based on the CG block, we develop CGNet which captures contextual information in all stages of the network and is specially tailored for increasing segmentation accuracy. CGNet is also elaborately designed to reduce the number of parameters and save memory footprint. Under an equivalent number of parameters, the proposed CGNet significantly outperforms existing segmentation networks. Extensive experiments on Cityscapes and CamVid datasets verify the effectiveness of the proposed approach. Specifically, without any post-processing and multi-scale testing, the proposed CGNet achieves 64.8% mean IoU on Cityscapes with less than 0.5 M parameters. The source code for the complete system can be found at [this https URL](https://github.com/wutianyiRosun/CGNet). + + + +
+ +
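The abstract above describes the Context Guided (CG) block in prose: a local feature, a surrounding-context feature with a larger receptive field, a joint feature built from the two, and a global-context refinement. As a rough illustration of that idea only (hypothetical layer sizes and simplifications, not the implementation referenced below at `mmseg/models/backbones/cgnet.py`), a minimal PyTorch sketch could look like:

```python
# Minimal sketch of the Context Guided (CG) block idea from the abstract.
# Simplified and with hypothetical layer choices; the actual backbone lives
# in mmseg/models/backbones/cgnet.py and differs in detail.
import torch
import torch.nn as nn


class CGBlockSketch(nn.Module):

    def __init__(self, channels: int, dilation: int = 2):
        super().__init__()
        half = channels // 2
        # reduce channels before the two context branches
        self.reduce = nn.Conv2d(channels, half, kernel_size=1, bias=False)
        # f_loc: local feature (plain 3x3 conv)
        self.f_loc = nn.Conv2d(half, half, 3, padding=1, bias=False)
        # f_sur: surrounding context (dilated 3x3 conv, larger receptive field)
        self.f_sur = nn.Conv2d(
            half, half, 3, padding=dilation, dilation=dilation, bias=False)
        # joint feature refinement
        self.bn_act = nn.Sequential(nn.BatchNorm2d(2 * half), nn.PReLU(2 * half))
        # f_glo: global context used as a channel-wise gate on the joint feature
        self.f_glo = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(2 * half, 2 * half, kernel_size=1),
            nn.Sigmoid())

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        reduced = self.reduce(x)
        joint = torch.cat([self.f_loc(reduced), self.f_sur(reduced)], dim=1)
        joint = self.bn_act(joint)
        # improve the joint feature with the global context (channel gating)
        return joint * self.f_glo(joint)


if __name__ == '__main__':
    block = CGBlockSketch(channels=64)
    print(block(torch.randn(2, 64, 68, 68)).shape)  # torch.Size([2, 64, 68, 68])
```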
+ +## Citation + +```bibtext +@article{wu2020cgnet, + title={Cgnet: A light-weight context guided network for semantic segmentation}, + author={Wu, Tianyi and Tang, Sheng and Zhang, Rui and Cao, Juan and Zhang, Yongdong}, + journal={IEEE Transactions on Image Processing}, + volume={30}, + pages={1169--1179}, + year={2020}, + publisher={IEEE} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| CGNet | M3N21 | 680x680 | 60000 | 7.5 | 30.51 | 65.63 | 68.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/cgnet/cgnet_fcn_4xb4-60k_cityscapes-680x680.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_680x680_60k_cityscapes/cgnet_680x680_60k_cityscapes_20201101_110253-4c0b2f2d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_680x680_60k_cityscapes/cgnet_680x680_60k_cityscapes-20201101_110253.log.json) | +| CGNet | M3N21 | 512x1024 | 60000 | 8.3 | 31.14 | 68.27 | 70.33 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/cgnet/cgnet_fcn_4xb8-60k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_512x1024_60k_cityscapes/cgnet_512x1024_60k_cityscapes_20201101_110254-124ea03b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_512x1024_60k_cityscapes/cgnet_512x1024_60k_cityscapes-20201101_110254.log.json) | diff --git a/configs/cgnet/cgnet.yml b/configs/cgnet/cgnet.yml new file mode 100644 index 0000000000..be79b89355 --- /dev/null +++ b/configs/cgnet/cgnet.yml @@ -0,0 +1,59 @@ +Collections: +- Name: CGNet + Metadata: + Training Data: + - Cityscapes + Paper: + URL: https://arxiv.org/abs/1811.08201 + Title: 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation' + README: configs/cgnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/cgnet.py#L187 + Version: v0.17.0 + Converted From: + Code: https://github.com/wutianyiRosun/CGNet +Models: +- Name: cgnet_fcn_4xb4-60k_cityscapes-680x680 + In Collection: CGNet + Metadata: + backbone: M3N21 + crop size: (680,680) + lr schd: 60000 + inference time (ms/im): + - value: 32.78 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (680,680) + Training Memory (GB): 7.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 65.63 + mIoU(ms+flip): 68.04 + Config: configs/cgnet/cgnet_fcn_4xb4-60k_cityscapes-680x680.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_680x680_60k_cityscapes/cgnet_680x680_60k_cityscapes_20201101_110253-4c0b2f2d.pth +- Name: cgnet_fcn_4xb8-60k_cityscapes-512x1024 + In Collection: CGNet + Metadata: + backbone: M3N21 + crop size: (512,1024) + lr schd: 60000 + inference time (ms/im): + - value: 32.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + 
resolution: (512,1024) + Training Memory (GB): 8.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 68.27 + mIoU(ms+flip): 70.33 + Config: configs/cgnet/cgnet_fcn_4xb8-60k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_512x1024_60k_cityscapes/cgnet_512x1024_60k_cityscapes_20201101_110254-124ea03b.pth diff --git a/configs/cgnet/cgnet_fcn_4xb4-60k_cityscapes-680x680.py b/configs/cgnet/cgnet_fcn_4xb4-60k_cityscapes-680x680.py new file mode 100644 index 0000000000..6a2c0ed125 --- /dev/null +++ b/configs/cgnet/cgnet_fcn_4xb4-60k_cityscapes-680x680.py @@ -0,0 +1,59 @@ +_base_ = [ + '../_base_/models/cgnet.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py' +] + +# optimizer +optimizer = dict(type='Adam', lr=0.001, eps=1e-08, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +# learning policy +param_scheduler = [ + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + by_epoch=False, + begin=0, + end=60000) +] +# runtime settings +total_iters = 60000 +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=total_iters, val_interval=4000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=4000), + sampler_seed=dict(type='DistSamplerSeedHook')) + +crop_size = (680, 680) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=(2048, 1024), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size), + dict(type='RandomFlip', prob=0.5), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 1024), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, num_workers=4, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/configs/cgnet/cgnet_fcn_4xb8-60k_cityscapes-512x1024.py b/configs/cgnet/cgnet_fcn_4xb8-60k_cityscapes-512x1024.py new file mode 100644 index 0000000000..8be29de479 --- /dev/null +++ b/configs/cgnet/cgnet_fcn_4xb8-60k_cityscapes-512x1024.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/cgnet.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py' +] + +# optimizer +optimizer = dict(type='Adam', lr=0.001, eps=1e-08, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +# learning policy +param_scheduler = [ + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + by_epoch=False, + begin=0, + end=60000) +] +# runtime settings +total_iters = 60000 +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=total_iters, val_interval=4000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', by_epoch=False, 
interval=4000), + sampler_seed=dict(type='DistSamplerSeedHook')) + +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) + +train_dataloader = dict(batch_size=8) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/convnext/README.md b/configs/convnext/README.md new file mode 100644 index 0000000000..6a826b9d6a --- /dev/null +++ b/configs/convnext/README.md @@ -0,0 +1,72 @@ +# ConvNeXt + +[A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets. + + + +
+ +
+ +```bibtex +@article{liu2022convnet, + title={A ConvNet for the 2020s}, + author={Liu, Zhuang and Mao, Hanzi and Wu, Chao-Yuan and Feichtenhofer, Christoph and Darrell, Trevor and Xie, Saining}, + journal={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2022} +} +``` + +### Usage + +- ConvNeXt backbone needs to install [MMClassification](https://github.com/open-mmlab/mmclassification) first, which has abundant backbones for downstream tasks. + +```shell +pip install mmcls>=0.20.1 +``` + +### Pre-trained Models + +The pre-trained models on ImageNet-1k or ImageNet-21k are used to fine-tune on the downstream tasks. + +| Model | Training Data | Params(M) | Flops(G) | Download | +| :-----------: | :-----------: | :-------: | :------: | :----------------------------------------------------------------------------------------------------------------------------------------------: | +| ConvNeXt-T\* | ImageNet-1k | 28.59 | 4.46 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth) | +| ConvNeXt-S\* | ImageNet-1k | 50.22 | 8.69 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth) | +| ConvNeXt-B\* | ImageNet-1k | 88.59 | 15.36 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-base_3rdparty_32xb128-noema_in1k_20220301-2a0ee547.pth) | +| ConvNeXt-B\* | ImageNet-21k | 88.59 | 15.36 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-base_3rdparty_in21k_20220301-262fd037.pth) | +| ConvNeXt-L\* | ImageNet-21k | 197.77 | 34.37 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-large_3rdparty_in21k_20220301-e6e0ea0a.pth) | +| ConvNeXt-XL\* | ImageNet-21k | 350.20 | 60.93 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-xlarge_3rdparty_in21k_20220301-08aa5ddc.pth) | + +*Models with* are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt/tree/main/semantic_segmentation#results-and-fine-tuned-models).\* + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | ----------- | --------- | ------- | -------- | -------------- | ----- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UPerNet | ConvNeXt-T | 512x512 | 160000 | 4.23 | 19.90 | 46.11 | 46.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/convnext/convnext-tiny_upernet_8xb2-amp-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553.log.json) | +| UPerNet | ConvNeXt-S | 512x512 | 160000 | 5.16 | 15.18 | 48.56 | 49.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/convnext/convnext-small_upernet_8xb2-amp-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208.log.json) | +| UPerNet | ConvNeXt-B | 512x512 | 160000 | 6.33 | 14.41 | 48.71 | 49.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/convnext/convnext-base_upernet_8xb2-amp-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227.log.json) | +| UPerNet | ConvNeXt-B | 640x640 | 160000 | 8.53 | 10.88 | 52.13 | 52.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/convnext/convnext-base_upernet_8xb2-amp-160k_ade20k-640x640.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_640x640_160k_ade20k/upernet_convnext_base_fp16_640x640_160k_ade20k_20220227_182859-9280e39b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_640x640_160k_ade20k/upernet_convnext_base_fp16_640x640_160k_ade20k_20220227_182859.log.json) | +| UPerNet | ConvNeXt-L | 640x640 | 160000 | 12.08 | 7.69 | 53.16 | 53.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/convnext/convnext-large_upernet_8xb2-amp-160k_ade20k-640x640.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532.log.json) | +| UPerNet | ConvNeXt-XL | 640x640 | 160000 | 26.16\* | 6.33 | 53.58 | 54.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/convnext/convnext-xlarge_upernet_8xb2-amp-160k_ade20k-640x640.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344.log.json) | + +Note: + +- `Mem (GB)` with * is collected when `cudnn_benchmark=True`, and hardware is V100. 
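As a usage note for the ConvNeXt configs added in this diff: once `mmcls` is installed as described above, each config can be run through the usual MMSegmentation entry points. The sketch below assumes the dev-1.x inference helpers `init_model` and `inference_model` from `mmseg.apis`, a locally downloaded checkpoint, and a placeholder test image path; adjust all three for your environment.

```python
# Sketch: inference with one of the ConvNeXt UPerNet configs from this diff.
# Assumes MMSegmentation dev-1.x (mmseg.apis.init_model / inference_model);
# checkpoint and image paths are placeholders to be adapted locally.
from mmseg.apis import inference_model, init_model

config = 'configs/convnext/convnext-tiny_upernet_8xb2-amp-160k_ade20k-512x512.py'
checkpoint = 'checkpoints/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth'

model = init_model(config, checkpoint, device='cuda:0')
result = inference_model(model, 'demo/demo.png')
# result is expected to be a SegDataSample; the per-pixel prediction map
# should be available as result.pred_sem_seg (shape roughly (1, H, W)).
print(result.pred_sem_seg.data.shape)

# Training would go through the standard launcher with the same config, e.g.
#   python tools/train.py configs/convnext/convnext-tiny_upernet_8xb2-amp-160k_ade20k-512x512.py
```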
diff --git a/configs/convnext/convnext-base_upernet_8xb2-amp-160k_ade20k-512x512.py b/configs/convnext/convnext-base_upernet_8xb2-amp-160k_ade20k-512x512.py new file mode 100644 index 0000000000..09c2aa6961 --- /dev/null +++ b/configs/convnext/convnext-base_upernet_8xb2-amp-160k_ade20k-512x512.py @@ -0,0 +1,43 @@ +_base_ = [ + '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(in_channels=[128, 256, 512, 1024], num_classes=150), + auxiliary_head=dict(in_channels=512, num_classes=150), + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(341, 341)), +) + +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05), + paramwise_cfg={ + 'decay_rate': 0.9, + 'decay_type': 'stage_wise', + 'num_layers': 12 + }, + constructor='LearningRateDecayOptimizerConstructor', + loss_scale='dynamic') + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + power=1.0, + begin=1500, + end=160000, + eta_min=0.0, + by_epoch=False, + ) +] + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/convnext/convnext-base_upernet_8xb2-amp-160k_ade20k-640x640.py b/configs/convnext/convnext-base_upernet_8xb2-amp-160k_ade20k-640x640.py new file mode 100644 index 0000000000..a743e9322a --- /dev/null +++ b/configs/convnext/convnext-base_upernet_8xb2-amp-160k_ade20k-640x640.py @@ -0,0 +1,58 @@ +_base_ = [ + '../_base_/models/upernet_convnext.py', + '../_base_/datasets/ade20k_640x640.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (640, 640) +data_preprocessor = dict(size=crop_size) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-base_3rdparty_in21k_20220301-262fd037.pth' # noqa +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + type='mmcls.ConvNeXt', + arch='base', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + decode_head=dict( + in_channels=[128, 256, 512, 1024], + num_classes=150, + ), + auxiliary_head=dict(in_channels=512, num_classes=150), + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(426, 426)), +) + +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05), + paramwise_cfg={ + 'decay_rate': 0.9, + 'decay_type': 'stage_wise', + 'num_layers': 12 + }, + constructor='LearningRateDecayOptimizerConstructor', + loss_scale='dynamic') + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + power=1.0, + begin=1500, + end=160000, + eta_min=0.0, + by_epoch=False, + ) +] + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/convnext/convnext-large_upernet_8xb2-amp-160k_ade20k-640x640.py 
b/configs/convnext/convnext-large_upernet_8xb2-amp-160k_ade20k-640x640.py new file mode 100644 index 0000000000..6d94989ee1 --- /dev/null +++ b/configs/convnext/convnext-large_upernet_8xb2-amp-160k_ade20k-640x640.py @@ -0,0 +1,58 @@ +_base_ = [ + '../_base_/models/upernet_convnext.py', + '../_base_/datasets/ade20k_640x640.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (640, 640) +data_preprocessor = dict(size=crop_size) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-large_3rdparty_in21k_20220301-e6e0ea0a.pth' # noqa +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + type='mmcls.ConvNeXt', + arch='large', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + decode_head=dict( + in_channels=[192, 384, 768, 1536], + num_classes=150, + ), + auxiliary_head=dict(in_channels=768, num_classes=150), + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(426, 426)), +) + +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05), + paramwise_cfg={ + 'decay_rate': 0.9, + 'decay_type': 'stage_wise', + 'num_layers': 12 + }, + constructor='LearningRateDecayOptimizerConstructor', + loss_scale='dynamic') + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + power=1.0, + begin=1500, + end=160000, + eta_min=0.0, + by_epoch=False, + ) +] + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/convnext/convnext-small_upernet_8xb2-amp-160k_ade20k-512x512.py b/configs/convnext/convnext-small_upernet_8xb2-amp-160k_ade20k-512x512.py new file mode 100644 index 0000000000..3cbf09902d --- /dev/null +++ b/configs/convnext/convnext-small_upernet_8xb2-amp-160k_ade20k-512x512.py @@ -0,0 +1,57 @@ +_base_ = [ + '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + type='mmcls.ConvNeXt', + arch='small', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.3, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + decode_head=dict( + in_channels=[96, 192, 384, 768], + num_classes=150, + ), + auxiliary_head=dict(in_channels=384, num_classes=150), + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(341, 341)), +) + +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05), + paramwise_cfg={ + 'decay_rate': 0.9, + 'decay_type': 'stage_wise', + 'num_layers': 12 + }, + constructor='LearningRateDecayOptimizerConstructor', + loss_scale='dynamic') + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + 
power=1.0, + begin=1500, + end=160000, + eta_min=0.0, + by_epoch=False, + ) +] + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/convnext/convnext-tiny_upernet_8xb2-amp-160k_ade20k-512x512.py b/configs/convnext/convnext-tiny_upernet_8xb2-amp-160k_ade20k-512x512.py new file mode 100644 index 0000000000..9d4968df60 --- /dev/null +++ b/configs/convnext/convnext-tiny_upernet_8xb2-amp-160k_ade20k-512x512.py @@ -0,0 +1,57 @@ +_base_ = [ + '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + type='mmcls.ConvNeXt', + arch='tiny', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + decode_head=dict( + in_channels=[96, 192, 384, 768], + num_classes=150, + ), + auxiliary_head=dict(in_channels=384, num_classes=150), + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(341, 341)), +) + +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05), + paramwise_cfg={ + 'decay_rate': 0.9, + 'decay_type': 'stage_wise', + 'num_layers': 6 + }, + constructor='LearningRateDecayOptimizerConstructor', + loss_scale='dynamic') + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + power=1.0, + begin=1500, + end=160000, + eta_min=0.0, + by_epoch=False, + ) +] + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/convnext/convnext-xlarge_upernet_8xb2-amp-160k_ade20k-640x640.py b/configs/convnext/convnext-xlarge_upernet_8xb2-amp-160k_ade20k-640x640.py new file mode 100644 index 0000000000..749391cac1 --- /dev/null +++ b/configs/convnext/convnext-xlarge_upernet_8xb2-amp-160k_ade20k-640x640.py @@ -0,0 +1,58 @@ +_base_ = [ + '../_base_/models/upernet_convnext.py', + '../_base_/datasets/ade20k_640x640.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (640, 640) +data_preprocessor = dict(size=crop_size) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-xlarge_3rdparty_in21k_20220301-08aa5ddc.pth' # noqa +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + type='mmcls.ConvNeXt', + arch='xlarge', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + decode_head=dict( + in_channels=[256, 512, 1024, 2048], + num_classes=150, + ), + auxiliary_head=dict(in_channels=1024, num_classes=150), + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(426, 426)), +) + +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=0.00008, 
betas=(0.9, 0.999), weight_decay=0.05), + paramwise_cfg={ + 'decay_rate': 0.9, + 'decay_type': 'stage_wise', + 'num_layers': 12 + }, + constructor='LearningRateDecayOptimizerConstructor', + loss_scale='dynamic') + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + power=1.0, + begin=1500, + end=160000, + eta_min=0.0, + by_epoch=False, + ) +] + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/convnext/convnext.yml b/configs/convnext/convnext.yml new file mode 100644 index 0000000000..2162e0c50c --- /dev/null +++ b/configs/convnext/convnext.yml @@ -0,0 +1,133 @@ +Models: +- Name: convnext-tiny_upernet_8xb2-amp-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: ConvNeXt-T + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 50.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (512,512) + Training Memory (GB): 4.23 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.11 + mIoU(ms+flip): 46.62 + Config: configs/convnext/convnext-tiny_upernet_8xb2-amp-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth +- Name: convnext-small_upernet_8xb2-amp-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: ConvNeXt-S + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 65.88 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (512,512) + Training Memory (GB): 5.16 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.56 + mIoU(ms+flip): 49.02 + Config: configs/convnext/convnext-small_upernet_8xb2-amp-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth +- Name: convnext-base_upernet_8xb2-amp-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: ConvNeXt-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 69.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (512,512) + Training Memory (GB): 6.33 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.71 + mIoU(ms+flip): 49.54 + Config: configs/convnext/convnext-base_upernet_8xb2-amp-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth +- Name: convnext-base_upernet_8xb2-amp-160k_ade20k-640x640 + In Collection: UPerNet + Metadata: + backbone: ConvNeXt-B + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 91.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (640,640) + Training Memory (GB): 8.53 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 52.13 + mIoU(ms+flip): 52.66 + Config: configs/convnext/convnext-base_upernet_8xb2-amp-160k_ade20k-640x640.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_640x640_160k_ade20k/upernet_convnext_base_fp16_640x640_160k_ade20k_20220227_182859-9280e39b.pth +- Name: convnext-large_upernet_8xb2-amp-160k_ade20k-640x640 + In Collection: UPerNet + Metadata: + backbone: ConvNeXt-L + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 130.04 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (640,640) + Training Memory (GB): 12.08 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 53.16 + mIoU(ms+flip): 53.38 + Config: configs/convnext/convnext-large_upernet_8xb2-amp-160k_ade20k-640x640.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth +- Name: convnext-xlarge_upernet_8xb2-amp-160k_ade20k-640x640 + In Collection: UPerNet + Metadata: + backbone: ConvNeXt-XL + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 157.98 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (640,640) + Training Memory (GB): 26.16 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 53.58 + mIoU(ms+flip): 54.11 + Config: configs/convnext/convnext-xlarge_upernet_8xb2-amp-160k_ade20k-640x640.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth diff --git a/configs/danet/README.md b/configs/danet/README.md index 5550890de4..52059e93a5 100644 --- a/configs/danet/README.md +++ b/configs/danet/README.md @@ -1,7 +1,30 @@ -# Dual Attention Network for Scene Segmentation +# DANet + +[Dual Attention Network for Scene Segmentation](https://arxiv.org/abs/1809.02983) ## Introduction -``` + + + +Official Repo + +Code Snippet + +## Abstract + + + +In this paper, we address the scene segmentation task by capturing rich contextual dependencies based on the self-attention mechanism. Unlike previous works that capture contexts by multi-scale features fusion, we propose a Dual Attention Networks (DANet) to adaptively integrate local features with their global dependencies. Specifically, we append two types of attention modules on top of traditional dilated FCN, which model the semantic interdependencies in spatial and channel dimensions respectively. The position attention module selectively aggregates the features at each position by a weighted sum of the features at all positions. Similar features would be related to each other regardless of their distances. Meanwhile, the channel attention module selectively emphasizes interdependent channel maps by integrating associated features among all channel maps. We sum the outputs of the two attention modules to further improve feature representation which contributes to more precise segmentation results. We achieve new state-of-the-art segmentation performance on three challenging scene segmentation datasets, i.e., Cityscapes, PASCAL Context and COCO Stuff dataset. In particular, a Mean IoU score of 81.5% on Cityscapes test set is achieved without using coarse data. We make the code and trained model publicly available at [this https URL](https://github.com/junfu1115/DANet). + + + +
+ +
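The abstract above describes the two attention modules only in prose. The snippet below is a minimal, self-contained PyTorch sketch of the position-attention idea, where every spatial position is updated with a softmax-weighted sum of the features at all positions; it is an illustration rather than the implementation in `mmseg/models/decode_heads/da_head.py` linked above, and the class name, the channel reduction factor and the zero-initialised `gamma` scale are assumptions made for the example.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class PositionAttentionSketch(nn.Module):
    """Illustrative DANet-style position attention (not the mmseg module).

    Each position is re-weighted by its similarity to every other position,
    so similar features relate to each other regardless of distance, as
    described in the abstract above.
    """

    def __init__(self, channels: int, reduction: int = 8):
        super().__init__()
        self.query = nn.Conv2d(channels, channels // reduction, kernel_size=1)
        self.key = nn.Conv2d(channels, channels // reduction, kernel_size=1)
        self.value = nn.Conv2d(channels, channels, kernel_size=1)
        # learnable scale, initialised to zero so the block starts as identity
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        n, c, h, w = x.shape
        q = self.query(x).flatten(2).transpose(1, 2)   # (N, HW, C')
        k = self.key(x).flatten(2)                     # (N, C', HW)
        attn = F.softmax(torch.bmm(q, k), dim=-1)      # (N, HW, HW) affinities
        v = self.value(x).flatten(2)                   # (N, C, HW)
        out = torch.bmm(v, attn.transpose(1, 2)).view(n, c, h, w)
        return self.gamma * out + x                    # residual connection


if __name__ == '__main__':
    feats = torch.randn(2, 64, 32, 32)
    print(PositionAttentionSketch(64)(feats).shape)    # torch.Size([2, 64, 32, 32])
```

The channel attention branch follows the same pattern with the affinity computed between channel maps instead of spatial positions, and the outputs of the two branches are summed, as the abstract notes.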
+ +## Citation + +```bibtex @article{fu2018dual, title={Dual Attention Network for Scene Segmentation}, author={Jun Fu, Jing Liu, Haijie Tian, Yong Li, Yongjun Bao, Zhiwei Fang,and Hanqing Lu}, @@ -13,29 +36,32 @@ ## Results and models ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| DANet | R-50-D8 | 512x1024 | 40000 | 7.4 | 2.66 | 78.74 | - | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324-c0dbfa5f.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324.log.json) | -| DANet | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.99 | 80.52 | - | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831-c57a7157.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831.log.json) | -| DANet | R-50-D8 | 769x769 | 40000 | 8.8 | 1.56 | 78.88 | 80.62 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703-76681c60.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703.log.json) | -| DANet | R-101-D8 | 769x769 | 40000 | 12.8 | 1.07 | 79.88 | 81.47 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717-dcb7fd4e.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717.log.json) | -| DANet | R-50-D8 | 512x1024 | 80000 | - | - | 79.34 | - | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029-2bfa2293.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029.log.json) | -| DANet | R-101-D8 | 512x1024 | 80000 | - | - | 80.41 | - | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918-955e6350.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918.log.json) | -| DANet | R-50-D8 | 769x769 | 80000 | - | - | 79.27 | 80.96 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954-495689b4.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954.log.json) | -| DANet | R-101-D8 | 769x769 | 80000 | - | - | 80.47 | 82.02 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918-f3a929e7.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DANet | R-50-D8 | 512x1024 | 40000 | 7.4 | 2.66 | 78.74 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324-c0dbfa5f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324.log.json) | +| DANet | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.99 | 80.52 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831-c57a7157.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831.log.json) | +| DANet | R-50-D8 | 769x769 | 40000 | 8.8 | 1.56 | 78.88 | 80.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703-76681c60.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703.log.json) | +| DANet | R-101-D8 | 769x769 | 40000 | 12.8 | 1.07 | 79.88 | 81.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717-dcb7fd4e.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717.log.json) | +| DANet | R-50-D8 | 512x1024 | 80000 | - | - | 79.34 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029-2bfa2293.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029.log.json) | +| DANet | R-101-D8 | 512x1024 | 80000 | - | - | 80.41 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918-955e6350.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918.log.json) | +| DANet | R-50-D8 | 769x769 | 80000 | - | - | 79.27 | 80.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954-495689b4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954.log.json) | +| DANet | R-101-D8 | 769x769 | 80000 | - | - | 80.47 | 82.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918-f3a929e7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| DANet | R-50-D8 | 512x512 | 80000 | 11.5 | 21.20 | 41.66 | 42.90 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125-edb18e08.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125.log.json) | -| DANet | R-101-D8 | 512x512 | 80000 | 15 | 14.18 | 43.64 | 45.19 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126-d0357c73.pth) | 
[log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126.log.json) | -| DANet | R-50-D8 | 512x512 | 160000 | - | - | 42.45 | 43.25 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340-9cb35dcd.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340.log.json) | -| DANet | R-101-D8 | 512x512 | 160000 | - | - | 44.17 | 45.02 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348-23bf12f9.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DANet | R-50-D8 | 512x512 | 80000 | 11.5 | 21.20 | 41.66 | 42.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125-edb18e08.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125.log.json) | +| DANet | R-101-D8 | 512x512 | 80000 | 15 | 14.18 | 43.64 | 45.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126-d0357c73.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126.log.json) | +| DANet | R-50-D8 | 512x512 | 160000 | - | - | 42.45 | 43.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340-9cb35dcd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340.log.json) | +| DANet | R-101-D8 | 512x512 | 160000 | - | - | 44.17 | 45.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r101-d8_4xb4-160k_ade20k-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348-23bf12f9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348.log.json) | ### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| DANet | R-50-D8 | 512x512 | 20000 | 6.5 | 20.94 | 74.45 | 75.69 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026-9e9e3ab3.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026.log.json) | -| DANet | R-101-D8 | 512x512 | 20000 | 9.9 | 13.76 | 76.02 | 77.23 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026-d48d23b2.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026.log.json) | -| DANet | R-50-D8 | 512x512 | 40000 | - | - | 76.37 | 77.29 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526-426e3a64.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526.log.json) | -| DANet | R-101-D8 | 512x512 | 40000 | - | - | 76.51 | 77.32 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031-788e232a.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DANet | R-50-D8 | 512x512 | 20000 | 6.5 | 20.94 | 74.45 | 75.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r50-d8_4xb4-20k_voc12aug-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026-9e9e3ab3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026.log.json) | +| DANet | R-101-D8 | 512x512 | 20000 | 9.9 | 13.76 | 76.02 | 77.23 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r101-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026-d48d23b2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026.log.json) | +| DANet | R-50-D8 | 512x512 | 40000 | - | - | 76.37 | 77.29 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r50-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526-426e3a64.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526.log.json) | +| DANet | R-101-D8 | 512x512 | 40000 | - | - | 76.51 | 77.32 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/danet/danet_r101-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031-788e232a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031.log.json) | diff --git a/configs/danet/danet.yml b/configs/danet/danet.yml new file mode 100644 index 0000000000..2a6658e428 --- /dev/null +++ b/configs/danet/danet.yml @@ -0,0 +1,301 @@ +Collections: +- Name: DANet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1809.02983 + Title: Dual Attention Network for Scene Segmentation + README: configs/danet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/da_head.py#L76 + Version: v0.17.0 + Converted From: + Code: https://github.com/junfu1115/DANet/ +Models: +- Name: danet_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 375.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.74 + Config: configs/danet/danet_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324-c0dbfa5f.pth +- Name: danet_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 502.51 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 10.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.52 + 
Config: configs/danet/danet_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831-c57a7157.pth +- Name: danet_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 641.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.88 + mIoU(ms+flip): 80.62 + Config: configs/danet/danet_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703-76681c60.pth +- Name: danet_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 934.58 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.88 + mIoU(ms+flip): 81.47 + Config: configs/danet/danet_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717-dcb7fd4e.pth +- Name: danet_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.34 + Config: configs/danet/danet_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029-2bfa2293.pth +- Name: danet_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.41 + Config: configs/danet/danet_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918-955e6350.pth +- Name: danet_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.27 + mIoU(ms+flip): 80.96 + Config: configs/danet/danet_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954-495689b4.pth +- Name: danet_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.47 + mIoU(ms+flip): 82.02 + Config: configs/danet/danet_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918-f3a929e7.pth +- Name: 
danet_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.17 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 11.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.66 + mIoU(ms+flip): 42.9 + Config: configs/danet/danet_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125-edb18e08.pth +- Name: danet_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 70.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 15.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.64 + mIoU(ms+flip): 45.19 + Config: configs/danet/danet_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126-d0357c73.pth +- Name: danet_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.45 + mIoU(ms+flip): 43.25 + Config: configs/danet/danet_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340-9cb35dcd.pth +- Name: danet_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.17 + mIoU(ms+flip): 45.02 + Config: configs/danet/danet_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348-23bf12f9.pth +- Name: danet_r50-d8_4xb4-20k_voc12aug-512x512 + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 47.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.5 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 74.45 + mIoU(ms+flip): 75.69 + Config: configs/danet/danet_r50-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026-9e9e3ab3.pth +- Name: danet_r101-d8_4xb4-20k_voc12aug-512x512 + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 72.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.9 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.02 + mIoU(ms+flip): 77.23 + Config: configs/danet/danet_r101-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026-d48d23b2.pth 
+- Name: danet_r50-d8_4xb4-40k_voc12aug-512x512 + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.37 + mIoU(ms+flip): 77.29 + Config: configs/danet/danet_r50-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526-426e3a64.pth +- Name: danet_r101-d8_4xb4-40k_voc12aug-512x512 + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.51 + mIoU(ms+flip): 77.32 + Config: configs/danet/danet_r101-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031-788e232a.pth diff --git a/configs/danet/danet_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/danet/danet_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..4602f3318f --- /dev/null +++ b/configs/danet/danet_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/danet/danet_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..a08c18ee46 --- /dev/null +++ b/configs/danet/danet_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/danet/danet_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..98b1c6490b --- /dev/null +++ b/configs/danet/danet_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/danet/danet_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..9affe306cb --- /dev/null +++ b/configs/danet/danet_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/danet/danet_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..0079ad65e8 --- /dev/null +++ b/configs/danet/danet_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_4xb4-20k_voc12aug-512x512.py b/configs/danet/danet_r101-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..48444514b7 --- /dev/null +++ b/configs/danet/danet_r101-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git 
a/configs/danet/danet_r101-d8_4xb4-40k_voc12aug-512x512.py b/configs/danet/danet_r101-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..2f2df7a595 --- /dev/null +++ b/configs/danet/danet_r101-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_4xb4-40k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/danet/danet_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..dd75bc16b8 --- /dev/null +++ b/configs/danet/danet_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_512x1024_40k_cityscapes.py b/configs/danet/danet_r101-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 3bfb9bdb30..0000000000 --- a/configs/danet/danet_r101-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './danet_r50-d8_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_512x1024_80k_cityscapes.py b/configs/danet/danet_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index d80b2ec160..0000000000 --- a/configs/danet/danet_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './danet_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_512x512_160k_ade20k.py b/configs/danet/danet_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index 0f22d0fb63..0000000000 --- a/configs/danet/danet_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './danet_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_512x512_20k_voc12aug.py b/configs/danet/danet_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 709f93cba3..0000000000 --- a/configs/danet/danet_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './danet_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_512x512_40k_voc12aug.py b/configs/danet/danet_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 5c623eb568..0000000000 --- a/configs/danet/danet_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './danet_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_512x512_80k_ade20k.py b/configs/danet/danet_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index bd31bc8f28..0000000000 --- a/configs/danet/danet_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './danet_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_769x769_40k_cityscapes.py b/configs/danet/danet_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 597d76de79..0000000000 --- a/configs/danet/danet_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './danet_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', 
backbone=dict(depth=101)) diff --git a/configs/danet/danet_r101-d8_769x769_80k_cityscapes.py b/configs/danet/danet_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 70f9b31966..0000000000 --- a/configs/danet/danet_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './danet_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/danet/danet_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/danet/danet_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..3bc2a7792d --- /dev/null +++ b/configs/danet/danet_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/danet/danet_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/danet/danet_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..3a01fb9eb5 --- /dev/null +++ b/configs/danet/danet_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/danet/danet_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/danet/danet_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..95d5df09cb --- /dev/null +++ b/configs/danet/danet_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/danet/danet_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/danet/danet_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..42557164da --- /dev/null +++ b/configs/danet/danet_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/danet/danet_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/danet/danet_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..a8f082d1ea --- /dev/null +++ b/configs/danet/danet_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + 
decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/danet/danet_r50-d8_4xb4-20k_voc12aug-512x512.py b/configs/danet/danet_r50-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..fab574fa5b --- /dev/null +++ b/configs/danet/danet_r50-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/danet/danet_r50-d8_4xb4-40k_voc12aug-512x512.py b/configs/danet/danet_r50-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..148fa39d72 --- /dev/null +++ b/configs/danet/danet_r50-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/danet/danet_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/danet/danet_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..efbd908a92 --- /dev/null +++ b/configs/danet/danet_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py b/configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 1b70c5b8d4..0000000000 --- a/configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/danet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py b/configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 03734310d7..0000000000 --- a/configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/danet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/danet/danet_r50-d8_512x512_160k_ade20k.py b/configs/danet/danet_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index 22aaf857c3..0000000000 --- a/configs/danet/danet_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/danet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/configs/danet/danet_r50-d8_512x512_20k_voc12aug.py b/configs/danet/danet_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 010f86f1aa..0000000000 --- a/configs/danet/danet_r50-d8_512x512_20k_voc12aug.py +++ 
/dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/danet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py b/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 0cef0f09bf..0000000000 --- a/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/danet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/danet/danet_r50-d8_512x512_80k_ade20k.py b/configs/danet/danet_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 154e84890e..0000000000 --- a/configs/danet/danet_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/danet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py b/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index b8fba930a8..0000000000 --- a/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/danet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/danet/danet_r50-d8_769x769_80k_cityscapes.py b/configs/danet/danet_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 8b8915d856..0000000000 --- a/configs/danet/danet_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/danet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/deeplabv3/README.md b/configs/deeplabv3/README.md index 37e2ee6baa..4c80ffaf91 100644 --- a/configs/deeplabv3/README.md +++ b/configs/deeplabv3/README.md @@ -1,7 +1,30 @@ -# Rethinking atrous convolution for semantic image segmentation +# DeepLabV3 + +[Rethinking atrous convolution for semantic image segmentation](https://arxiv.org/abs/1706.05587) ## Introduction -``` + + + +Official Repo + +Code Snippet + +## Abstract + + + +In this work, we revisit atrous convolution, a powerful tool to explicitly adjust filter's field-of-view as well as control the resolution of feature responses computed by Deep Convolutional Neural Networks, in the application of semantic image segmentation. To handle the problem of segmenting objects at multiple scales, we design modules which employ atrous convolution in cascade or in parallel to capture multi-scale context by adopting multiple atrous rates. 
Furthermore, we propose to augment our previously proposed Atrous Spatial Pyramid Pooling module, which probes convolutional features at multiple scales, with image-level features encoding global context and further boost performance. We also elaborate on implementation details and share our experience on training our system. The proposed \`DeepLabv3' system significantly improves over our previous DeepLab versions without DenseCRF post-processing and attains comparable performance with other state-of-art models on the PASCAL VOC 2012 semantic image segmentation benchmark. + + + +
+ +
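The abstract names the two ingredients of DeepLabV3 without showing them: atrous (dilated) convolution, which enlarges the field-of-view without reducing feature resolution, and the augmented ASPP module, which runs several atrous rates in parallel and adds image-level (globally pooled) features. The sketch below is a simplified stand-in for illustration only, not mmseg's `ASPPHead`; the dilation rates and channel widths are assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class ASPPSketch(nn.Module):
    """Illustrative ASPP-style block: parallel atrous convs + image pooling.

    Each 3x3 branch uses a different dilation rate to capture multi-scale
    context, and a global-average-pooled branch contributes image-level
    features, as described in the abstract above. Not mmseg's ASPPHead.
    """

    def __init__(self, in_channels=2048, channels=256, rates=(1, 6, 12, 18)):
        super().__init__()
        self.branches = nn.ModuleList([
            nn.Conv2d(in_channels, channels,
                      kernel_size=1 if rate == 1 else 3,
                      padding=0 if rate == 1 else rate,
                      dilation=rate)
            for rate in rates
        ])
        self.image_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, channels, kernel_size=1))
        self.project = nn.Conv2d(channels * (len(rates) + 1), channels, 1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h, w = x.shape[2:]
        feats = [branch(x) for branch in self.branches]
        # broadcast the image-level feature back to the spatial grid
        pooled = F.interpolate(self.image_pool(x), size=(h, w),
                               mode='bilinear', align_corners=False)
        return self.project(torch.cat(feats + [pooled], dim=1))


if __name__ == '__main__':
    x = torch.randn(1, 2048, 64, 128)   # e.g. a backbone feature map
    print(ASPPSketch()(x).shape)        # torch.Size([1, 256, 64, 128])
```

With the `-D8` backbones used in the configs in this PR the backbone output stride is 8, so the head sees a feature map at 1/8 of the input resolution; the rates above are only example values.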
+ +## Citation + +```bibtex @article{chen2017rethinking, title={Rethinking atrous convolution for semantic image segmentation}, author={Chen, Liang-Chieh and Papandreou, George and Schroff, Florian and Adam, Hartwig}, @@ -12,32 +35,83 @@ ## Results and models -Note: `D-8` here corresponding to the output stride 8 setting for DeepLab series. - ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|-----------|----------|-----------|--------:|----------|----------------|------:|--------------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| DeepLabV3 | R-50-D8 | 512x1024 | 40000 | 6.1 | 2.57 | 79.09 | 80.45 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449-acadc2f8.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449.log.json) | -| DeepLabV3 | R-101-D8 | 512x1024 | 40000 | 9.6 | 1.92 | 77.12 | 79.61 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241-7fd3f799.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241.log.json) | -| DeepLabV3 | R-50-D8 | 769x769 | 40000 | 6.9 | 1.11 | 78.58 | 79.89 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723-7eda553c.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723.log.json) | -| DeepLabV3 | R-101-D8 | 769x769 | 40000 | 10.9 | 0.83 | 79.27 | 80.11 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809-c64f889f.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809.log.json) | -| DeepLabV3 | R-50-D8 | 512x1024 | 80000 | - | - | 79.32 | 80.57 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404-b92cfdd4.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404.log.json) | -| DeepLabV3 | R-101-D8 | 512x1024 | 80000 | - | - | 80.20 | 81.21 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503-9e428899.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503.log.json) | -| DeepLabV3 | R-50-D8 | 769x769 | 80000 | - | - | 79.89 | 81.06 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338-788d6228.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338.log.json) | -| DeepLabV3 | R-101-D8 | 769x769 | 80000 | - | - | 79.67 | 80.81 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353-60e95418.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------------- | --------------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3 | R-50-D8 | 512x1024 | 40000 | 6.1 | 2.57 | 79.09 | 80.45 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449-acadc2f8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449.log.json) | +| DeepLabV3 | R-101-D8 | 512x1024 | 40000 | 9.6 | 1.92 | 77.12 | 79.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241-7fd3f799.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241.log.json) | +| DeepLabV3 | R-50-D8 | 769x769 | 40000 | 6.9 | 1.11 | 78.58 | 79.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-769x769.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723-7eda553c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723.log.json) | +| DeepLabV3 | R-101-D8 | 769x769 | 40000 | 10.9 | 0.83 | 79.27 | 80.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809-c64f889f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809.log.json) | +| DeepLabV3 | R-18-D8 | 512x1024 | 80000 | 1.7 | 13.78 | 76.70 | 78.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r18-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes/deeplabv3_r18-d8_512x1024_80k_cityscapes_20201225_021506-23dffbe2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes/deeplabv3_r18-d8_512x1024_80k_cityscapes-20201225_021506.log.json) | +| DeepLabV3 | R-50-D8 | 512x1024 | 80000 | - | - | 79.32 | 80.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404-b92cfdd4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404.log.json) | +| DeepLabV3 | R-101-D8 | 512x1024 | 80000 | - | - | 80.20 | 81.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503-9e428899.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503.log.json) | +| DeepLabV3 (FP16) | R-101-D8 | 512x1024 | 80000 | 5.75 | 3.86 | 80.48 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920-774d9cec.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920.log.json) | +| DeepLabV3 | R-18-D8 | 769x769 | 80000 | 1.9 | 5.55 | 76.60 | 78.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r18-d8_4xb2-80k_cityscapes-769x769.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes/deeplabv3_r18-d8_769x769_80k_cityscapes_20201225_021506-6452126a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes/deeplabv3_r18-d8_769x769_80k_cityscapes-20201225_021506.log.json) | +| DeepLabV3 | R-50-D8 | 769x769 | 80000 | - | - | 79.89 | 81.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338-788d6228.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338.log.json) | +| DeepLabV3 | R-101-D8 | 769x769 | 80000 | - | - | 79.67 | 80.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353-60e95418.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353.log.json) | +| DeepLabV3 | R-101-D16-MG124 | 512x1024 | 40000 | 4.7 | - 6.96 | 76.71 | 78.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d16-mg124_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes_20200908_005644-67b0c992.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes-20200908_005644.log.json) | +| DeepLabV3 | R-101-D16-MG124 | 512x1024 | 80000 | - | - | 78.36 | 79.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d16-mg124_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes_20200908_005644-57bb8425.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes-20200908_005644.log.json) | +| DeepLabV3 | R-18b-D8 | 512x1024 | 80000 | 1.6 | 13.93 | 76.26 | 77.88 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r18b-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes/deeplabv3_r18b-d8_512x1024_80k_cityscapes_20201225_094144-46040cef.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes/deeplabv3_r18b-d8_512x1024_80k_cityscapes-20201225_094144.log.json) | +| DeepLabV3 | R-50b-D8 | 512x1024 | 80000 | 6.0 | 2.74 | 79.63 | 80.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50b-d8_4xb2-80k_cityscapes-512x1024.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes/deeplabv3_r50b-d8_512x1024_80k_cityscapes_20201225_155148-ec368954.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes/deeplabv3_r50b-d8_512x1024_80k_cityscapes-20201225_155148.log.json) | +| DeepLabV3 | R-101b-D8 | 512x1024 | 80000 | 9.5 | 1.81 | 80.01 | 81.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101b-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes/deeplabv3_r101b-d8_512x1024_80k_cityscapes_20201226_171821-8fd49503.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes/deeplabv3_r101b-d8_512x1024_80k_cityscapes-20201226_171821.log.json) | +| DeepLabV3 | R-18b-D8 | 769x769 | 80000 | 1.8 | 5.79 | 75.63 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r18b-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes/deeplabv3_r18b-d8_769x769_80k_cityscapes_20201225_094144-fdc985d9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes/deeplabv3_r18b-d8_769x769_80k_cityscapes-20201225_094144.log.json) | +| DeepLabV3 | R-50b-D8 | 769x769 | 80000 | 6.8 | 1.16 | 78.80 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50b-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes/deeplabv3_r50b-d8_769x769_80k_cityscapes_20201225_155404-87fb0cf4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes/deeplabv3_r50b-d8_769x769_80k_cityscapes-20201225_155404.log.json) | +| DeepLabV3 | R-101b-D8 | 769x769 | 80000 | 10.7 | 0.82 | 79.41 | 80.73 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101b-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes/deeplabv3_r101b-d8_769x769_80k_cityscapes_20201226_190843-9142ee57.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes/deeplabv3_r101b-d8_769x769_80k_cityscapes-20201226_190843.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|-----------|----------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| DeepLabV3 | R-50-D8 | 512x512 | 80000 | 8.9 | 14.76 | 42.42 | 43.28 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028-0bb3f844.pth) | 
[log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028.log.json) | -| DeepLabV3 | R-101-D8 | 512x512 | 80000 | 12.4 | 10.14 | 44.08 | 45.19 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256-d89c7fa4.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256.log.json) | -| DeepLabV3 | R-50-D8 | 512x512 | 160000 | - | - | 42.66 | 44.09 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227-5d0ee427.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227.log.json) | -| DeepLabV3 | R-101-D8 | 512x512 | 160000 | - | - | 45.00 | 46.66 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816-b1f72b3b.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3 | R-50-D8 | 512x512 | 80000 | 8.9 | 14.76 | 42.42 | 43.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028-0bb3f844.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 80000 | 12.4 | 10.14 | 44.08 | 45.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256-d89c7fa4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256.log.json) | +| DeepLabV3 | R-50-D8 | 512x512 | 160000 | - | - | 42.66 | 44.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb4-160k_ade20k-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227-5d0ee427.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 160000 | - | - | 45.00 | 46.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816-b1f72b3b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816.log.json) | ### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|-----------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| DeepLabV3 | R-50-D8 | 512x512 | 20000 | 6.1 | 13.88 | 76.17 | 77.42 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906-596905ef.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906.log.json) | -| DeepLabV3 | R-101-D8 | 512x512 | 20000 | 9.6 | 9.81 | 78.70 | 79.95 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932-8d13832f.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932.log.json) | -| DeepLabV3 | R-50-D8 | 512x512 | 40000 | - | - | 77.68 | 78.78 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546-2ae96e7e.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546.log.json) | -| DeepLabV3 | R-101-D8 | 512x512 | 40000 | - | - | 77.92 | 79.18 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432-0017d784.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | 
------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3 | R-50-D8 | 512x512 | 20000 | 6.1 | 13.88 | 76.17 | 77.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906-596905ef.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 20000 | 9.6 | 9.81 | 78.70 | 79.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932-8d13832f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932.log.json) | +| DeepLabV3 | R-50-D8 | 512x512 | 40000 | - | - | 77.68 | 78.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546-2ae96e7e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 40000 | - | - | 77.92 | 79.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432-0017d784.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432.log.json) | + +### Pascal Context + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3 | R-101-D8 | 480x480 | 40000 | 9.2 | 7.09 | 46.55 | 47.81 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_pascal-context-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context/deeplabv3_r101-d8_480x480_40k_pascal_context_20200911_204118-1aa27336.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context/deeplabv3_r101-d8_480x480_40k_pascal_context-20200911_204118.log.json) | +| DeepLabV3 | R-101-D8 | 480x480 | 80000 | - | - | 46.42 | 47.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_pascal-context-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context/deeplabv3_r101-d8_480x480_80k_pascal_context_20200911_170155-2a21fff3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context/deeplabv3_r101-d8_480x480_80k_pascal_context-20200911_170155.log.json) | + +### Pascal Context 59 + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3 | R-101-D8 | 480x480 | 40000 | - | - | 52.61 | 54.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_pascal-context-59-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59/deeplabv3_r101-d8_480x480_40k_pascal_context_59_20210416_110332-cb08ea46.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59/deeplabv3_r101-d8_480x480_40k_pascal_context_59-20210416_110332.log.json) | +| DeepLabV3 | R-101-D8 | 480x480 | 80000 | - | - | 52.46 | 54.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_pascal-context-59-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59/deeplabv3_r101-d8_480x480_80k_pascal_context_59_20210416_113002-26303993.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59/deeplabv3_r101-d8_480x480_80k_pascal_context_59-20210416_113002.log.json) | + +### COCO-Stuff 10k + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3 | R-50-D8 | 512x512 | 20000 | 9.6 | 10.8 | 34.66 | 36.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb4-20k_coco-stuff10k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025-b35f789d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 20000 | 13.2 | 8.7 | 37.30 | 38.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-20k_coco-stuff10k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025-c49752cb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025.log.json) | +| DeepLabV3 | R-50-D8 | 512x512 | 40000 | - | - | 35.73 | 37.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_coco-stuff10k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305-dc76f3ff.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 40000 | - | - | 37.81 | 38.80 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_coco-stuff10k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305-636cb433.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305.log.json) | + +### COCO-Stuff 164k + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
+| DeepLabV3 | R-50-D8 | 512x512 | 80000 | 9.6 | 10.8 | 39.38 | 40.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k_20210709_163016-88675c24.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k_20210709_163016.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 80000 | 13.2 | 8.7 | 40.87 | 41.50 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k_20210709_201252-13600dc2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k_20210709_201252.log.json) | +| DeepLabV3 | R-50-D8 | 512x512 | 160000 | - | - | 41.09 | 41.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb4-160k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k_20210709_163016-49f2812b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k_20210709_163016.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 160000 | - | - | 41.82 | 42.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-160k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k_20210709_155402-f035acfd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k_20210709_155402.log.json) | +| DeepLabV3 | R-50-D8 | 512x512 | 320000 | - | - | 41.37 | 42.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r50-d8_4xb4-320k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k_20210709_155403-51b21115.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k_20210709_155403.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 320000 | - | - | 42.61 | 43.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3/deeplabv3_r101-d8_4xb4-320k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k_20210709_155402-3cbca14d.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k_20210709_155402.log.json) | + +Note: + +- `D-8` here corresponding to the output stride 8 setting for DeepLab series. +- `FP16` means Mixed Precision (FP16) is adopted in training. diff --git a/configs/deeplabv3/deeplabv3.yml b/configs/deeplabv3/deeplabv3.yml new file mode 100644 index 0000000000..6196212992 --- /dev/null +++ b/configs/deeplabv3/deeplabv3.yml @@ -0,0 +1,756 @@ +Collections: +- Name: DeepLabV3 + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + - Pascal Context + - Pascal Context 59 + - COCO-Stuff 10k + - COCO-Stuff 164k + Paper: + URL: https://arxiv.org/abs/1706.05587 + Title: Rethinking atrous convolution for semantic image segmentation + README: configs/deeplabv3/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/aspp_head.py#L54 + Version: v0.17.0 + Converted From: + Code: https://github.com/tensorflow/models/tree/master/research/deeplab +Models: +- Name: deeplabv3_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 389.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.09 + mIoU(ms+flip): 80.45 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449-acadc2f8.pth +- Name: deeplabv3_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 520.83 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.12 + mIoU(ms+flip): 79.61 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241-7fd3f799.pth +- Name: deeplabv3_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 900.9 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.58 + mIoU(ms+flip): 79.89 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723-7eda553c.pth +- Name: deeplabv3_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 1204.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.9 + Results: + - Task: Semantic 
Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.27 + mIoU(ms+flip): 80.11 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809-c64f889f.pth +- Name: deeplabv3_r18-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3 + Metadata: + backbone: R-18-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 72.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.7 + mIoU(ms+flip): 78.27 + Config: configs/deeplabv3/deeplabv3_r18-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes/deeplabv3_r18-d8_512x1024_80k_cityscapes_20201225_021506-23dffbe2.pth +- Name: deeplabv3_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.32 + mIoU(ms+flip): 80.57 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404-b92cfdd4.pth +- Name: deeplabv3_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.2 + mIoU(ms+flip): 81.21 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503-9e428899.pth +- Name: deeplabv3_r101-d8_4xb2-amp-80k_cityscapes-512x1024 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 259.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (512,1024) + Training Memory (GB): 5.75 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.48 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920-774d9cec.pth +- Name: deeplabv3_r18-d8_4xb2-80k_cityscapes-769x769 + In Collection: DeepLabV3 + Metadata: + backbone: R-18-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 180.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 1.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.6 + mIoU(ms+flip): 78.26 + Config: configs/deeplabv3/deeplabv3_r18-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes/deeplabv3_r18-d8_769x769_80k_cityscapes_20201225_021506-6452126a.pth +- Name: deeplabv3_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: DeepLabV3 + 
Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.89 + mIoU(ms+flip): 81.06 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338-788d6228.pth +- Name: deeplabv3_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.67 + mIoU(ms+flip): 80.81 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353-60e95418.pth +- Name: deeplabv3_r101-d16-mg124_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D16-MG124 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.36 + mIoU(ms+flip): 79.84 + Config: configs/deeplabv3/deeplabv3_r101-d16-mg124_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes_20200908_005644-57bb8425.pth +- Name: deeplabv3_r18b-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3 + Metadata: + backbone: R-18b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 71.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.26 + mIoU(ms+flip): 77.88 + Config: configs/deeplabv3/deeplabv3_r18b-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes/deeplabv3_r18b-d8_512x1024_80k_cityscapes_20201225_094144-46040cef.pth +- Name: deeplabv3_r50b-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3 + Metadata: + backbone: R-50b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 364.96 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.63 + mIoU(ms+flip): 80.98 + Config: configs/deeplabv3/deeplabv3_r50b-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes/deeplabv3_r50b-d8_512x1024_80k_cityscapes_20201225_155148-ec368954.pth +- Name: deeplabv3_r101b-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3 + Metadata: + backbone: R-101b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 552.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.01 + mIoU(ms+flip): 81.21 + Config: configs/deeplabv3/deeplabv3_r101b-d8_4xb2-80k_cityscapes-512x1024.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes/deeplabv3_r101b-d8_512x1024_80k_cityscapes_20201226_171821-8fd49503.pth +- Name: deeplabv3_r18b-d8_4xb2-80k_cityscapes-769x769 + In Collection: DeepLabV3 + Metadata: + backbone: R-18b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 172.71 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 1.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.63 + mIoU(ms+flip): 77.51 + Config: configs/deeplabv3/deeplabv3_r18b-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes/deeplabv3_r18b-d8_769x769_80k_cityscapes_20201225_094144-fdc985d9.pth +- Name: deeplabv3_r50b-d8_4xb2-80k_cityscapes-769x769 + In Collection: DeepLabV3 + Metadata: + backbone: R-50b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 862.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.8 + mIoU(ms+flip): 80.27 + Config: configs/deeplabv3/deeplabv3_r50b-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes/deeplabv3_r50b-d8_769x769_80k_cityscapes_20201225_155404-87fb0cf4.pth +- Name: deeplabv3_r101b-d8_4xb2-80k_cityscapes-769x769 + In Collection: DeepLabV3 + Metadata: + backbone: R-101b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 1219.51 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.41 + mIoU(ms+flip): 80.73 + Config: configs/deeplabv3/deeplabv3_r101b-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes/deeplabv3_r101b-d8_769x769_80k_cityscapes_20201226_190843-9142ee57.pth +- Name: deeplabv3_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 67.75 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.9 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.42 + mIoU(ms+flip): 43.28 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028-0bb3f844.pth +- Name: deeplabv3_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 98.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.4 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.08 + mIoU(ms+flip): 45.19 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256-d89c7fa4.pth +- Name: deeplabv3_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.66 + mIoU(ms+flip): 44.09 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227-5d0ee427.pth +- Name: deeplabv3_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.0 + mIoU(ms+flip): 46.66 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816-b1f72b3b.pth +- Name: deeplabv3_r50-d8_4xb4-20k_voc12aug-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 72.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.17 + mIoU(ms+flip): 77.42 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906-596905ef.pth +- Name: deeplabv3_r101-d8_4xb4-20k_voc12aug-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 101.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.7 + mIoU(ms+flip): 79.95 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932-8d13832f.pth +- Name: deeplabv3_r50-d8_4xb4-40k_voc12aug-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.68 + mIoU(ms+flip): 78.78 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546-2ae96e7e.pth +- Name: deeplabv3_r101-d8_4xb4-40k_voc12aug-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.92 + mIoU(ms+flip): 79.18 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_voc12aug-512x512.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432-0017d784.pth +- Name: deeplabv3_r101-d8_4xb4-40k_pascal-context-480x480 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + inference time (ms/im): + - value: 141.04 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (480,480) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 46.55 + mIoU(ms+flip): 47.81 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_pascal-context-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context/deeplabv3_r101-d8_480x480_40k_pascal_context_20200911_204118-1aa27336.pth +- Name: deeplabv3_r101-d8_4xb4-80k_pascal-context-480x480 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 46.42 + mIoU(ms+flip): 47.53 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_pascal-context-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context/deeplabv3_r101-d8_480x480_80k_pascal_context_20200911_170155-2a21fff3.pth +- Name: deeplabv3_r101-d8_4xb4-40k_pascal-context-59-480x480 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 52.61 + mIoU(ms+flip): 54.28 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_pascal-context-59-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59/deeplabv3_r101-d8_480x480_40k_pascal_context_59_20210416_110332-cb08ea46.pth +- Name: deeplabv3_r101-d8_4xb4-80k_pascal-context-59-480x480 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 52.46 + mIoU(ms+flip): 54.09 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_pascal-context-59-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59/deeplabv3_r101-d8_480x480_80k_pascal_context_59_20210416_113002-26303993.pth +- Name: deeplabv3_r50-d8_4xb4-20k_coco-stuff10k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 92.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 34.66 + mIoU(ms+flip): 36.08 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb4-20k_coco-stuff10k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025-b35f789d.pth +- Name: deeplabv3_r101-d8_4xb4-20k_coco-stuff10k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 114.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + 
Training Memory (GB): 13.2 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 37.3 + mIoU(ms+flip): 38.42 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-20k_coco-stuff10k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025-c49752cb.pth +- Name: deeplabv3_r50-d8_4xb4-40k_coco-stuff10k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 35.73 + mIoU(ms+flip): 37.09 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_coco-stuff10k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305-dc76f3ff.pth +- Name: deeplabv3_r101-d8_4xb4-40k_coco-stuff10k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 37.81 + mIoU(ms+flip): 38.8 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_coco-stuff10k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305-636cb433.pth +- Name: deeplabv3_r50-d8_4xb4-80k_coco-stuff164k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 92.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 39.38 + mIoU(ms+flip): 40.03 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k_20210709_163016-88675c24.pth +- Name: deeplabv3_r101-d8_4xb4-80k_coco-stuff164k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 114.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.2 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 40.87 + mIoU(ms+flip): 41.5 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k_20210709_201252-13600dc2.pth +- Name: deeplabv3_r50-d8_4xb4-160k_coco-stuff164k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 41.09 + mIoU(ms+flip): 41.69 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb4-160k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k_20210709_163016-49f2812b.pth +- Name: 
deeplabv3_r101-d8_4xb4-160k_coco-stuff164k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 41.82 + mIoU(ms+flip): 42.49 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-160k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k_20210709_155402-f035acfd.pth +- Name: deeplabv3_r50-d8_4xb4-320k_coco-stuff164k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 320000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 41.37 + mIoU(ms+flip): 42.22 + Config: configs/deeplabv3/deeplabv3_r50-d8_4xb4-320k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k_20210709_155403-51b21115.pth +- Name: deeplabv3_r101-d8_4xb4-320k_coco-stuff164k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 320000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 42.61 + mIoU(ms+flip): 43.42 + Config: configs/deeplabv3/deeplabv3_r101-d8_4xb4-320k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k_20210709_155402-3cbca14d.pth diff --git a/configs/deeplabv3/deeplabv3_r101-d16-mg124_4xb2-40k_cityscapes-512x1024.py b/configs/deeplabv3/deeplabv3_r101-d16-mg124_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..b9f3c178df --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d16-mg124_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://resnet101_v1c', + backbone=dict( + depth=101, + dilations=(1, 1, 1, 2), + strides=(1, 2, 2, 1), + multi_grid=(1, 2, 4)), + decode_head=dict( + dilations=(1, 6, 12, 18), + sampler=dict(type='OHEMPixelSampler', min_kept=100000))) diff --git a/configs/deeplabv3/deeplabv3_r101-d16-mg124_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3/deeplabv3_r101-d16-mg124_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..da3a88f998 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d16-mg124_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://resnet101_v1c', + backbone=dict( + depth=101, + dilations=(1, 1, 1, 2), + strides=(1, 2, 2, 1), + multi_grid=(1, 2, 4)), + decode_head=dict( + dilations=(1, 6, 12, 18), + sampler=dict(type='OHEMPixelSampler', min_kept=100000))) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..d01803ce1f --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb2-40k_cityscapes-769x769.py 
b/configs/deeplabv3/deeplabv3_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..7964b51446 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..1d1a6201a0 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..78205468d7 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..84174166ce --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = './deeplabv3_r101-d8_4xb2-80k_cityscapes-512x1024.py' +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=optimizer, + loss_scale=512.)
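The `AmpOptimWrapper` override in `deeplabv3_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py` above is what enables the mixed-precision (FP16) training referenced in the README note, while the backbone and schedule are pulled in through the `_base_` chain. As a minimal sketch of how that inheritance resolves (assuming `mmengine`/`mmsegmentation` are installed and the command is run from a checkout of this repository so the relative config path exists; nothing here is part of the diff itself), the merged configuration can be inspected before launching a run:

```python
# Minimal sketch: load the AMP config and confirm that the inherited
# DeepLabV3 R-101 settings and the AmpOptimWrapper override are merged.
# Assumes the repository root is the working directory so the relative
# config path below resolves.
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/deeplabv3/deeplabv3_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py')

# The backbone comes from the _base_ chain (ResNet-101, output stride 8)...
print(cfg.model.backbone.depth)      # expected: 101
# ...while the optimizer wrapper is replaced by the AMP variant shown above.
print(cfg.optim_wrapper.type)        # expected: 'AmpOptimWrapper'
print(cfg.optim_wrapper.loss_scale)  # expected: 512.0
```

Training itself would then typically go through the repository's standard entry point, e.g. `python tools/train.py configs/deeplabv3/deeplabv3_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py`, with launcher flags depending on the environment.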
diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..0ed6eee833 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-160k_coco-stuff164k-512x512.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-160k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..add008345f --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-160k_coco-stuff164k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb4-160k_coco-stuff164k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-20k_coco-stuff10k-512x512.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-20k_coco-stuff10k-512x512.py new file mode 100644 index 0000000000..349cc88f0a --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-20k_coco-stuff10k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb4-20k_coco-stuff10k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-20k_voc12aug-512x512.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..1c527e0c53 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-320k_coco-stuff164k-512x512.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-320k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..ea27bedc04 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-320k_coco-stuff164k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb4-320k_coco-stuff164k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_coco-stuff10k-512x512.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_coco-stuff10k-512x512.py new file mode 100644 index 0000000000..a43a786e0e --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_coco-stuff10k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb4-40k_coco-stuff10k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_pascal-context-480x480.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_pascal-context-480x480.py new file mode 100644 index 0000000000..8879d5394f --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_pascal-context-480x480.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb4-40k_pascal-context-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_pascal-context-59-480x480.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..54671d4dc6 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_pascal-context-59-480x480.py @@ -0,0 +1,2 @@ +_base_ = 
'./deeplabv3_r50-d8_4xb4-40k_pascal-context-59-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_voc12aug-512x512.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..1b2635d1c2 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb4-40k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..b7bb0b6448 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_coco-stuff164k-512x512.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..2d4f6f747b --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_coco-stuff164k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb4-80k_coco-stuff164k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_pascal-context-480x480.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_pascal-context-480x480.py new file mode 100644 index 0000000000..9d64ca29fe --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_pascal-context-480x480.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb4-80k_pascal-context-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_pascal-context-59-480x480.py b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..54671d4dc6 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101-d8_4xb4-80k_pascal-context-59-480x480.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb4-40k_pascal-context-59-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes.py b/configs/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 8c707c79d6..0000000000 --- a/configs/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py b/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 6804a57813..0000000000 --- a/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py b/configs/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index df6f36ef7c..0000000000 --- a/configs/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ 
-_base_ = './deeplabv3_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py b/configs/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 40f5f62373..0000000000 --- a/configs/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py b/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index fb2be22f8b..0000000000 --- a/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py b/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index 796ba3fb14..0000000000 --- a/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py b/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index e6d58a67b3..0000000000 --- a/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes.py b/configs/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 13094a98ee..0000000000 --- a/configs/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101b-d8_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3/deeplabv3_r101b-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..708932da85 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101b-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,4 @@ +_base_ = './deeplabv3_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r101b-d8_4xb2-80k_cityscapes-769x769.py b/configs/deeplabv3/deeplabv3_r101b-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..a0f634d081 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r101b-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,4 @@ +_base_ = './deeplabv3_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/configs/deeplabv3/deeplabv3_r18-d8_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3/deeplabv3_r18-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..bc353bb564 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r18-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = './deeplabv3_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model 
= dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/deeplabv3/deeplabv3_r18-d8_4xb2-80k_cityscapes-769x769.py b/configs/deeplabv3/deeplabv3_r18-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..021c98c376 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r18-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,9 @@ +_base_ = './deeplabv3_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/deeplabv3/deeplabv3_r18b-d8_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3/deeplabv3_r18b-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..c747cd74a2 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r18b-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = './deeplabv3_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/deeplabv3/deeplabv3_r18b-d8_4xb2-80k_cityscapes-769x769.py b/configs/deeplabv3/deeplabv3_r18b-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..6506abf696 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r18b-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,9 @@ +_base_ = './deeplabv3_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..4a2a971eb9 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..a52f29e4ce --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..1bd29b96e1 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + 
'../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..27f0fc4cae --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..04e15f0f0f --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-160k_coco-stuff164k-512x512.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-160k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..ba76a59419 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-160k_coco-stuff164k-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=171), + auxiliary_head=dict(num_classes=171)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-20k_coco-stuff10k-512x512.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-20k_coco-stuff10k-512x512.py new file mode 100644 index 0000000000..d0559c8bfc --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-20k_coco-stuff10k-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/coco-stuff10k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=171), + auxiliary_head=dict(num_classes=171)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-20k_voc12aug-512x512.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..c5458d908b --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = 
dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-320k_coco-stuff164k-512x512.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-320k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..c3b4f94f8c --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-320k_coco-stuff164k-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_320k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=171), + auxiliary_head=dict(num_classes=171)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_coco-stuff10k-512x512.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_coco-stuff10k-512x512.py new file mode 100644 index 0000000000..40dbffad45 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_coco-stuff10k-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/coco-stuff10k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=171), + auxiliary_head=dict(num_classes=171)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_pascal-context-480x480.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_pascal-context-480x480.py new file mode 100644 index 0000000000..3c4e753a2d --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_pascal-context-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_pascal-context-59-480x480.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..e3b6c36909 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_pascal-context-59-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_voc12aug-512x512.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..8333cc6701 --- /dev/null +++ 
b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..0bcdab5149 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_coco-stuff164k-512x512.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..519df5a23b --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_coco-stuff164k-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=171), + auxiliary_head=dict(num_classes=171)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_pascal-context-480x480.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_pascal-context-480x480.py new file mode 100644 index 0000000000..ba8c7ded96 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_pascal-context-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_pascal-context-59-480x480.py b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..d34bd89339 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50-d8_4xb4-80k_pascal-context-59-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', 
optimizer=optimizer) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py b/configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 8e7420d24a..0000000000 --- a/configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py b/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 132787db98..0000000000 --- a/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k.py b/configs/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index 742e17d749..0000000000 --- a/configs/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) -test_cfg = dict(mode='whole') diff --git a/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py b/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index f62da1a809..0000000000 --- a/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug.py b/configs/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 492bd3dfdc..0000000000 --- a/configs/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k.py b/configs/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 5ddef212f7..0000000000 --- a/configs/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) -test_cfg = dict(mode='whole') diff --git a/configs/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes.py b/configs/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index fb067d2117..0000000000 --- a/configs/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', 
'../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py b/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 8b8692140b..0000000000 --- a/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/deeplabv3/deeplabv3_r50b-d8_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3/deeplabv3_r50b-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..818519f263 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50b-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/configs/deeplabv3/deeplabv3_r50b-d8_4xb2-80k_cityscapes-769x769.py b/configs/deeplabv3/deeplabv3_r50b-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..07a234be75 --- /dev/null +++ b/configs/deeplabv3/deeplabv3_r50b-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/configs/deeplabv3plus/README.md b/configs/deeplabv3plus/README.md index 591554daea..b3d3ce7678 100644 --- a/configs/deeplabv3plus/README.md +++ b/configs/deeplabv3plus/README.md @@ -1,7 +1,30 @@ -# Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation +# DeepLabV3+ + +[Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1802.02611) ## Introduction -``` + + + +Official Repo + +Code Snippet + +## Abstract + + + +Spatial pyramid pooling module or encode-decoder structure are used in deep neural networks for semantic segmentation task. The former networks are able to encode multi-scale contextual information by probing the incoming features with filters or pooling operations at multiple rates and multiple effective fields-of-view, while the latter networks can capture sharper object boundaries by gradually recovering the spatial information. In this work, we propose to combine the advantages from both methods. Specifically, our proposed model, DeepLabv3+, extends DeepLabv3 by adding a simple yet effective decoder module to refine the segmentation results especially along object boundaries. We further explore the Xception model and apply the depthwise separable convolution to both Atrous Spatial Pyramid Pooling and decoder modules, resulting in a faster and stronger encoder-decoder network. We demonstrate the effectiveness of the proposed model on PASCAL VOC 2012 and Cityscapes datasets, achieving the test set performance of 89.0% and 82.1% without any post-processing. 
Our paper is accompanied with a publicly available reference implementation of the proposed models in Tensorflow at [this https URL](https://github.com/tensorflow/models/tree/master/research/deeplab). + + + +
+ +
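Editor's note: the abstract above attributes much of DeepLabv3+'s efficiency to replacing standard convolutions in the ASPP and decoder modules with depthwise separable convolutions. A minimal PyTorch sketch of that building block follows; the channel sizes and dilation rate are illustrative, and this is not the repository's own implementation:

```python
import torch
import torch.nn as nn


class DepthwiseSeparableConv(nn.Module):
    """3x3 depthwise conv (one filter per channel) followed by a 1x1
    pointwise conv, the building block the abstract refers to."""

    def __init__(self, in_channels, out_channels, dilation=1):
        super().__init__()
        self.depthwise = nn.Conv2d(
            in_channels, in_channels, kernel_size=3, padding=dilation,
            dilation=dilation, groups=in_channels, bias=False)
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=False)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))


# e.g. one atrous ASPP branch at rate 12 on a ResNet stage-4 feature map
branch = DepthwiseSeparableConv(2048, 256, dilation=12)
out = branch(torch.randn(1, 2048, 64, 64))  # -> torch.Size([1, 256, 64, 64])
```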
+ +## Citation + +```bibtex @inproceedings{deeplabv3plus2018, title={Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation}, author={Liang-Chieh Chen and Yukun Zhu and George Papandreou and Florian Schroff and Hartwig Adam}, @@ -12,32 +35,98 @@ ## Results and models -Note: `D-8` here corresponding to the output stride 8 setting for DeepLab series. - ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|------------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| DeepLabV3+ | R-50-D8 | 512x1024 | 40000 | 7.5 | 3.94 | 79.61 | 81.01 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610-d222ffcd.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610.log.json) | -| DeepLabV3+ | R-101-D8 | 512x1024 | 40000 | 11 | 2.60 | 80.21 | 81.82 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614-3769eecf.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614.log.json) | -| DeepLabV3+ | R-50-D8 | 769x769 | 40000 | 8.5 | 1.72 | 78.97 | 80.46 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143-1dcb0e3c.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143.log.json) | -| DeepLabV3+ | R-101-D8 | 769x769 | 40000 | 12.5 | 1.15 | 79.46 | 80.50 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes/deeplabv3plus_r101-d8_769x769_40k_cityscapes_20200606_114304-ff414b9e.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes/deeplabv3plus_r101-d8_769x769_40k_cityscapes_20200606_114304.log.json) | -| DeepLabV3+ | R-50-D8 | 512x1024 | 80000 | - | - | 80.09 | 81.13 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049.log.json) | -| DeepLabV3+ | R-101-D8 | 512x1024 | 80000 | - | - | 80.97 | 82.03 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143.log.json) | -| DeepLabV3+ | R-50-D8 | 769x769 | 80000 | - | - | 79.83 | 81.48 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233.log.json) | -| DeepLabV3+ | R-101-D8 | 769x769 | 80000 | - | - | 80.98 | 82.18 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ----------------- | --------------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3+ | R-50-D8 | 512x1024 | 40000 | 7.5 | 3.94 | 79.61 | 81.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610-d222ffcd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610.log.json) | +| DeepLabV3+ | R-101-D8 | 512x1024 | 40000 | 11 | 2.60 | 80.21 | 81.82 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614-3769eecf.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614.log.json) | +| DeepLabV3+ | R-50-D8 | 769x769 | 40000 | 8.5 | 1.72 | 78.97 | 80.46 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143-1dcb0e3c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143.log.json) | +| DeepLabV3+ | R-101-D8 | 769x769 | 40000 | 12.5 | 1.15 | 79.46 | 80.50 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes/deeplabv3plus_r101-d8_769x769_40k_cityscapes_20200606_114304-ff414b9e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes/deeplabv3plus_r101-d8_769x769_40k_cityscapes_20200606_114304.log.json) | +| DeepLabV3+ | R-18-D8 | 512x1024 | 80000 | 2.2 | 14.27 | 76.89 | 78.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes/deeplabv3plus_r18-d8_512x1024_80k_cityscapes_20201226_080942-cff257fe.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes/deeplabv3plus_r18-d8_512x1024_80k_cityscapes-20201226_080942.log.json) | +| DeepLabV3+ | R-50-D8 | 512x1024 | 80000 | - | - | 80.09 | 81.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049.log.json) | +| DeepLabV3+ | R-101-D8 | 512x1024 | 80000 | - | - | 80.97 | 82.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143.log.json) | +| DeepLabV3+ (FP16) | R-101-D8 | 512x1024 | 80000 | 6.35 | 7.87 | 80.46 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920-f1104f4b.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920.log.json) | +| DeepLabV3+ | R-18-D8 | 769x769 | 80000 | 2.5 | 5.74 | 76.26 | 77.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes/deeplabv3plus_r18-d8_769x769_80k_cityscapes_20201226_083346-f326e06a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes/deeplabv3plus_r18-d8_769x769_80k_cityscapes-20201226_083346.log.json) | +| DeepLabV3+ | R-50-D8 | 769x769 | 80000 | - | - | 79.83 | 81.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233.log.json) | +| DeepLabV3+ | R-101-D8 | 769x769 | 80000 | - | - | 80.65 | 81.47 | [config\[1\]](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20220406_154720-dfcc0b68.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20220406_154720.log.json) | +| DeepLabV3+ | R-101-D16-MG124 | 512x1024 | 40000 | 5.8 | 7.48 | 79.09 | 80.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/ddeeplabv3plus_r101-d16-mg124_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes_20200908_005644-cf9ce186.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes-20200908_005644.log.json) | +| DeepLabV3+ | R-101-D16-MG124 | 512x1024 | 80000 | 9.9 | - | 79.90 | 81.33 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes_20200908_005644-ee6158e0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes-20200908_005644.log.json) | +| DeepLabV3+ | R-18b-D8 | 512x1024 | 80000 | 2.1 | 14.95 | 75.87 | 77.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r18b-d8_4xb2-80k_cityscapes-512x1024.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes_20201226_090828-e451abd9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes-20201226_090828.log.json) | +| DeepLabV3+ | R-50b-D8 | 512x1024 | 80000 | 7.4 | 3.94 | 80.28 | 81.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50b-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes_20201225_213645-a97e4e43.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes-20201225_213645.log.json) | +| DeepLabV3+ | R-101b-D8 | 512x1024 | 80000 | 10.9 | 2.60 | 80.16 | 81.41 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101b-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes_20201226_190843-9c3c93a4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes-20201226_190843.log.json) | +| DeepLabV3+ | R-18b-D8 | 769x769 | 80000 | 2.4 | 5.96 | 76.36 | 78.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r18b-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes/deeplabv3plus_r18b-d8_769x769_80k_cityscapes_20201226_151312-2c868aff.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes/deeplabv3plus_r18b-d8_769x769_80k_cityscapes-20201226_151312.log.json) | +| DeepLabV3+ | R-50b-D8 | 769x769 | 80000 | 8.4 | 1.72 | 79.41 | 80.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50b-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes/deeplabv3plus_r50b-d8_769x769_80k_cityscapes_20201225_224655-8b596d1c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes/deeplabv3plus_r50b-d8_769x769_80k_cityscapes-20201225_224655.log.json) | +| DeepLabV3+ | R-101b-D8 | 769x769 | 80000 | 12.3 | 1.10 | 79.88 | 81.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101b-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes/deeplabv3plus_r101b-d8_769x769_80k_cityscapes_20201226_205041-227cdf7c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes/deeplabv3plus_r101b-d8_769x769_80k_cityscapes-20201226_205041.log.json) | + +\[1\] The training of the model is sensitive to random seed, and the seed to train it is 1111. 
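Editor's note: footnote \[1\] above flags the R-101-D8 769x769 result as seed-sensitive (seed 1111). When reproducing such a run, the seed can usually be pinned from a derived config via MMEngine's `randomness` field; treat the exact keys below, and the config fragment itself, as an assumption to verify against the installed MMEngine/MMSegmentation versions:

```python
# Sketch only: a hypothetical config fragment for reproducing the
# seed-sensitive run; the `randomness` key is an assumed MMEngine field.
_base_ = './deeplabv3plus_r101-d8_4xb2-80k_cityscapes-769x769.py'

# Fix the global random seed used by the runner.
randomness = dict(seed=1111)
```

The same effect is typically available from the command line via `--cfg-options randomness.seed=1111` on `tools/train.py`, though that flag should likewise be checked against the version in use.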
### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|------------|----------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| DeepLabV3+ | R-50-D8 | 512x512 | 80000 | 10.6 | 21.01 | 42.72 | 43.75 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k/deeplabv3plus_r50-d8_512x512_80k_ade20k_20200614_185028-bf1400d8.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k/deeplabv3plus_r50-d8_512x512_80k_ade20k_20200614_185028.log.json) | -| DeepLabV3+ | R-101-D8 | 512x512 | 80000 | 14.1 | 14.16 | 44.60 | 46.06 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139-d5730af7.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139.log.json) | -| DeepLabV3+ | R-50-D8 | 512x512 | 160000 | - | - | 43.95 | 44.93 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k/deeplabv3plus_r50-d8_512x512_160k_ade20k_20200615_124504-6135c7e0.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k/deeplabv3plus_r50-d8_512x512_160k_ade20k_20200615_124504.log.json) | -| DeepLabV3+ | R-101-D8 | 512x512 | 160000 | - | - | 45.47 | 46.35 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k/deeplabv3plus_r101-d8_512x512_160k_ade20k_20200615_123232-38ed86bb.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k/deeplabv3plus_r101-d8_512x512_160k_ade20k_20200615_123232.log.json) | - -#### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|------------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| DeepLabV3+ | R-50-D8 | 512x512 | 20000 | 7.6 | 21 | 75.93 | 77.50 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug/deeplabv3plus_r50-d8_512x512_20k_voc12aug_20200617_102323-aad58ef1.pth) | 
[log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug/deeplabv3plus_r50-d8_512x512_20k_voc12aug_20200617_102323.log.json) | -| DeepLabV3+ | R-101-D8 | 512x512 | 20000 | 11 | 13.88 | 77.22 | 78.59 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug/deeplabv3plus_r101-d8_512x512_20k_voc12aug_20200617_102345-c7ff3d56.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug/deeplabv3plus_r101-d8_512x512_20k_voc12aug_20200617_102345.log.json) | -| DeepLabV3+ | R-50-D8 | 512x512 | 40000 | - | - | 76.81 | 77.57 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug/deeplabv3plus_r50-d8_512x512_40k_voc12aug_20200613_161759-e1b43aa9.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug/deeplabv3plus_r50-d8_512x512_40k_voc12aug_20200613_161759.log.json) | -| DeepLabV3+ | R-101-D8 | 512x512 | 40000 | - | - | 78.62 | 79.53 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug/deeplabv3plus_r101-d8_512x512_40k_voc12aug_20200613_205333-faf03387.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug/deeplabv3plus_r101-d8_512x512_40k_voc12aug_20200613_205333.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3+ | R-50-D8 | 512x512 | 80000 | 10.6 | 21.01 | 42.72 | 43.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k/deeplabv3plus_r50-d8_512x512_80k_ade20k_20200614_185028-bf1400d8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k/deeplabv3plus_r50-d8_512x512_80k_ade20k_20200614_185028.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 80000 | 14.1 | 14.16 | 44.60 | 46.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139-d5730af7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139.log.json) | +| DeepLabV3+ | R-50-D8 | 
512x512 | 160000 | - | - | 43.95 | 44.93 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k/deeplabv3plus_r50-d8_512x512_160k_ade20k_20200615_124504-6135c7e0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k/deeplabv3plus_r50-d8_512x512_160k_ade20k_20200615_124504.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 160000 | - | - | 45.47 | 46.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k/deeplabv3plus_r101-d8_512x512_160k_ade20k_20200615_123232-38ed86bb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k/deeplabv3plus_r101-d8_512x512_160k_ade20k_20200615_123232.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3+ | R-50-D8 | 512x512 | 20000 | 7.6 | 21 | 75.93 | 77.50 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug/deeplabv3plus_r50-d8_512x512_20k_voc12aug_20200617_102323-aad58ef1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug/deeplabv3plus_r50-d8_512x512_20k_voc12aug_20200617_102323.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 20000 | 11 | 13.88 | 77.22 | 78.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug/deeplabv3plus_r101-d8_512x512_20k_voc12aug_20200617_102345-c7ff3d56.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug/deeplabv3plus_r101-d8_512x512_20k_voc12aug_20200617_102345.log.json) | +| DeepLabV3+ | R-50-D8 | 512x512 | 40000 | - | - | 76.81 | 77.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug/deeplabv3plus_r50-d8_512x512_40k_voc12aug_20200613_161759-e1b43aa9.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug/deeplabv3plus_r50-d8_512x512_40k_voc12aug_20200613_161759.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 40000 | - | - | 78.62 | 79.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug/deeplabv3plus_r101-d8_512x512_40k_voc12aug_20200613_205333-faf03387.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug/deeplabv3plus_r101-d8_512x512_40k_voc12aug_20200613_205333.log.json) | + +### Pascal Context + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3+ | R-101-D8 | 480x480 | 40000 | - | 9.09 | 47.30 | 48.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_pascal-context-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context/deeplabv3plus_r101-d8_480x480_40k_pascal_context_20200911_165459-d3c8a29e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context/deeplabv3plus_r101-d8_480x480_40k_pascal_context-20200911_165459.log.json) | +| DeepLabV3+ | R-101-D8 | 480x480 | 80000 | - | - | 47.23 | 48.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_pascal-context-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context/deeplabv3plus_r101-d8_480x480_80k_pascal_context_20200911_155322-145d3ee8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context/deeplabv3plus_r101-d8_480x480_80k_pascal_context-20200911_155322.log.json) | + +### Pascal Context 59 + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3+ | R-101-D8 | 480x480 | 40000 | - | - | 52.86 | 54.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_pascal-context-59-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59_20210416_111233-ed937f15.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59-20210416_111233.log.json) | +| DeepLabV3+ | R-101-D8 | 480x480 | 80000 | - | - | 53.2 | 54.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_pascal-context-59-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59_20210416_111127-7ca0331d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59-20210416_111127.log.json) | + +### LoveDA + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3+ | R-18-D8 | 512x512 | 80000 | 1.93 | 25.57 | 50.28 | 50.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_loveda-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_loveda/deeplabv3plus_r18-d8_512x512_80k_loveda_20211104_132800-ce0fa0ca.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_loveda/deeplabv3plus_r18-d8_512x512_80k_loveda_20211104_132800.log.json) | +| DeepLabV3+ | R-50-D8 | 512x512 | 80000 | 7.37 | 6.00 | 50.99 | 50.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_loveda-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_loveda/deeplabv3plus_r50-d8_512x512_80k_loveda_20211105_080442-f0720392.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_loveda/deeplabv3plus_r50-d8_512x512_80k_loveda_20211105_080442.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 80000 | 10.84 | 4.33 | 51.47 | 51.32 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_loveda-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_loveda/deeplabv3plus_r101-d8_512x512_80k_loveda_20211105_110759-4c1f297e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_loveda/deeplabv3plus_r101-d8_512x512_80k_loveda_20211105_110759.log.json) | + +### Potsdam + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3+ | R-18-D8 | 512x512 | 80000 | 1.91 | 81.68 | 77.09 | 78.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_potsdam-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_potsdam/deeplabv3plus_r18-d8_512x512_80k_potsdam_20211219_020601-75fd5bc3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_potsdam/deeplabv3plus_r18-d8_512x512_80k_potsdam_20211219_020601.log.json) | +| DeepLabV3+ | R-50-D8 | 512x512 | 80000 | 7.36 | 26.44 | 78.33 | 79.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_potsdam-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_potsdam/deeplabv3plus_r50-d8_512x512_80k_potsdam_20211219_031508-7e7a2b24.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_potsdam/deeplabv3plus_r50-d8_512x512_80k_potsdam_20211219_031508.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 80000 | 10.83 | 17.56 | 78.7 | 79.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_potsdam-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_potsdam/deeplabv3plus_r101-d8_512x512_80k_potsdam_20211219_031508-8b112708.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_potsdam/deeplabv3plus_r101-d8_512x512_80k_potsdam_20211219_031508.log.json) | + +### Vaihingen + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | 
--------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3+ | R-18-D8 | 512x512 | 80000 | 1.91 | 72.79 | 72.50 | 74.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_vaihingen-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen_20211231_230805-7626a263.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen_20211231_230805.log.json) | +| DeepLabV3+ | R-50-D8 | 512x512 | 80000 | 7.36 | 26.91 | 73.97 | 75.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_vaihingen-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen_20211231_230816-5040938d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen_20211231_230816.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 80000 | 10.83 | 18.59 | 73.06 | 74.14 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_vaihingen-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen_20211231_230816-8a095afa.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen_20211231_230816.log.json) | + +### iSAID + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3+ | R-18-D8 | 896x896 | 80000 | 6.19 | 24.81 | 61.35 | 62.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_isaid-896x896.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid_20220110_180526-7059991d.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid_20220110_180526.log.json) | +| DeepLabV3+ | R-50-D8 | 896x896 | 80000 | 21.45 | 8.42 | 67.06 | 68.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_isaid-896x896.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid_20220110_180526-598be439.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid_20220110_180526.log.json) | + +Note: + +- `D-8`/`D-16` here correspond to the output stride 8/16 setting for the DeepLab series. +- `MG-124` stands for multi-grid dilation in the last stage of ResNet. +- `FP16` means Mixed Precision (FP16) is adopted in training. +- `896x896` is the Crop Size of the iSAID dataset, following the implementation of [PointFlow: Flowing Semantics Through Points for Aerial Image Segmentation](https://arxiv.org/pdf/2103.06564.pdf). diff --git a/configs/deeplabv3plus/deeplabv3plus.yml b/configs/deeplabv3plus/deeplabv3plus.yml new file mode 100644 index 0000000000..755c1fd4be --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus.yml @@ -0,0 +1,850 @@ +Collections: +- Name: DeepLabV3+ + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + - Pascal Context + - Pascal Context 59 + - LoveDA + - Potsdam + - Vaihingen + - iSAID + Paper: + URL: https://arxiv.org/abs/1802.02611 + Title: Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation + README: configs/deeplabv3plus/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/sep_aspp_head.py#L30 + Version: v0.17.0 + Converted From: + Code: https://github.com/tensorflow/models/tree/master/research/deeplab +Models: +- Name: deeplabv3plus_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 253.81 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.61 + mIoU(ms+flip): 81.01 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610-d222ffcd.pth +- Name: deeplabv3plus_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 384.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 11.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.21 + mIoU(ms+flip): 81.82 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614-3769eecf.pth +- Name: 
deeplabv3plus_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 581.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.97 + mIoU(ms+flip): 80.46 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143-1dcb0e3c.pth +- Name: deeplabv3plus_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 869.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.46 + mIoU(ms+flip): 80.5 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes/deeplabv3plus_r101-d8_769x769_40k_cityscapes_20200606_114304-ff414b9e.pth +- Name: deeplabv3plus_r18-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: R-18-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 70.08 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 2.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.89 + mIoU(ms+flip): 78.76 + Config: configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes/deeplabv3plus_r18-d8_512x1024_80k_cityscapes_20201226_080942-cff257fe.pth +- Name: deeplabv3plus_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.09 + mIoU(ms+flip): 81.13 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth +- Name: deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.97 + mIoU(ms+flip): 82.03 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth +- Name: deeplabv3plus_r101-d8_4xb2-amp-80k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 127.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (512,1024) + Training Memory 
(GB): 6.35 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.46 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920-f1104f4b.pth +- Name: deeplabv3plus_r18-d8_4xb2-80k_cityscapes-769x769 + In Collection: DeepLabV3+ + Metadata: + backbone: R-18-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 174.22 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 2.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.26 + mIoU(ms+flip): 77.91 + Config: configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes/deeplabv3plus_r18-d8_769x769_80k_cityscapes_20201226_083346-f326e06a.pth +- Name: deeplabv3plus_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.83 + mIoU(ms+flip): 81.48 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth +- Name: deeplabv3plus_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.65 + mIoU(ms+flip): 81.47 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20220406_154720-dfcc0b68.pth +- Name: deeplabv3plus_r101-d16-mg124_4xb2-40k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D16-MG124 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 133.69 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.09 + mIoU(ms+flip): 80.36 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes_20200908_005644-cf9ce186.pth +- Name: deeplabv3plus_r101-d16-mg124_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D16-MG124 + crop size: (512,1024) + lr schd: 80000 + Training Memory (GB): 9.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.9 + mIoU(ms+flip): 81.33 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_4xb2-80k_cityscapes-512x1024.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes_20200908_005644-ee6158e0.pth +- Name: deeplabv3plus_r18b-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: R-18b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 66.89 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 2.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.87 + mIoU(ms+flip): 77.52 + Config: configs/deeplabv3plus/deeplabv3plus_r18b-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes_20201226_090828-e451abd9.pth +- Name: deeplabv3plus_r50b-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 253.81 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.28 + mIoU(ms+flip): 81.44 + Config: configs/deeplabv3plus/deeplabv3plus_r50b-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes_20201225_213645-a97e4e43.pth +- Name: deeplabv3plus_r101b-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 384.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 10.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.16 + mIoU(ms+flip): 81.41 + Config: configs/deeplabv3plus/deeplabv3plus_r101b-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes_20201226_190843-9c3c93a4.pth +- Name: deeplabv3plus_r18b-d8_4xb2-80k_cityscapes-769x769 + In Collection: DeepLabV3+ + Metadata: + backbone: R-18b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 167.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 2.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.36 + mIoU(ms+flip): 78.24 + Config: configs/deeplabv3plus/deeplabv3plus_r18b-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes/deeplabv3plus_r18b-d8_769x769_80k_cityscapes_20201226_151312-2c868aff.pth +- Name: deeplabv3plus_r50b-d8_4xb2-80k_cityscapes-769x769 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 581.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.41 + mIoU(ms+flip): 80.56 + Config: 
configs/deeplabv3plus/deeplabv3plus_r50b-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes/deeplabv3plus_r50b-d8_769x769_80k_cityscapes_20201225_224655-8b596d1c.pth +- Name: deeplabv3plus_r101b-d8_4xb2-80k_cityscapes-769x769 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 909.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.88 + mIoU(ms+flip): 81.46 + Config: configs/deeplabv3plus/deeplabv3plus_r101b-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes/deeplabv3plus_r101b-d8_769x769_80k_cityscapes_20201226_205041-227cdf7c.pth +- Name: deeplabv3plus_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.6 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.72 + mIoU(ms+flip): 43.75 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k/deeplabv3plus_r50-d8_512x512_80k_ade20k_20200614_185028-bf1400d8.pth +- Name: deeplabv3plus_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 70.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 14.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.6 + mIoU(ms+flip): 46.06 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139-d5730af7.pth +- Name: deeplabv3plus_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.95 + mIoU(ms+flip): 44.93 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k/deeplabv3plus_r50-d8_512x512_160k_ade20k_20200615_124504-6135c7e0.pth +- Name: deeplabv3plus_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.47 + mIoU(ms+flip): 46.35 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k/deeplabv3plus_r101-d8_512x512_160k_ade20k_20200615_123232-38ed86bb.pth +- Name: deeplabv3plus_r50-d8_4xb4-20k_voc12aug-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: 
R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 47.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.6 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 75.93 + mIoU(ms+flip): 77.5 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug/deeplabv3plus_r50-d8_512x512_20k_voc12aug_20200617_102323-aad58ef1.pth +- Name: deeplabv3plus_r101-d8_4xb4-20k_voc12aug-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 72.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 11.0 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.22 + mIoU(ms+flip): 78.59 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug/deeplabv3plus_r101-d8_512x512_20k_voc12aug_20200617_102345-c7ff3d56.pth +- Name: deeplabv3plus_r50-d8_4xb4-40k_voc12aug-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.81 + mIoU(ms+flip): 77.57 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug/deeplabv3plus_r50-d8_512x512_40k_voc12aug_20200613_161759-e1b43aa9.pth +- Name: deeplabv3plus_r101-d8_4xb4-40k_voc12aug-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.62 + mIoU(ms+flip): 79.53 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug/deeplabv3plus_r101-d8_512x512_40k_voc12aug_20200613_205333-faf03387.pth +- Name: deeplabv3plus_r50-d8_4xb4-40k_pascal-context-480x480 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + inference time (ms/im): + - value: 110.01 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (480,480) + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 47.3 + mIoU(ms+flip): 48.47 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_pascal-context-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context/deeplabv3plus_r101-d8_480x480_40k_pascal_context_20200911_165459-d3c8a29e.pth +- Name: deeplabv3plus_r50-d8_4xb4-80k_pascal-context-480x480 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 47.23 + mIoU(ms+flip): 48.26 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_pascal-context-480x480.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context/deeplabv3plus_r101-d8_480x480_80k_pascal_context_20200911_155322-145d3ee8.pth +- Name: deeplabv3plus_r101-d8_4xb4-40k_pascal-context-59-480x480 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 52.86 + mIoU(ms+flip): 54.54 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_pascal-context-59-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59_20210416_111233-ed937f15.pth +- Name: deeplabv3plus_r101-d8_4xb4-80k_pascal-context-59-480x480 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 53.2 + mIoU(ms+flip): 54.67 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_pascal-context-59-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59_20210416_111127-7ca0331d.pth +- Name: deeplabv3plus_r18-d8_4xb4-80k_loveda-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-18-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 39.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.93 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 50.28 + mIoU(ms+flip): 50.47 + Config: configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_loveda-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_loveda/deeplabv3plus_r18-d8_512x512_80k_loveda_20211104_132800-ce0fa0ca.pth +- Name: deeplabv3plus_r50-d8_4xb4-80k_loveda-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 166.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.37 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 50.99 + mIoU(ms+flip): 50.65 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_loveda-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_loveda/deeplabv3plus_r50-d8_512x512_80k_loveda_20211105_080442-f0720392.pth +- Name: deeplabv3plus_r101-d8_4xb4-80k_loveda-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 230.95 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.84 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 51.47 + mIoU(ms+flip): 51.32 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_loveda-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_loveda/deeplabv3plus_r101-d8_512x512_80k_loveda_20211105_110759-4c1f297e.pth +- Name: deeplabv3plus_r18-d8_4xb4-80k_potsdam-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: 
R-18-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 12.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.91 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 77.09 + mIoU(ms+flip): 78.44 + Config: configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_potsdam-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_potsdam/deeplabv3plus_r18-d8_512x512_80k_potsdam_20211219_020601-75fd5bc3.pth +- Name: deeplabv3plus_r50-d8_4xb4-80k_potsdam-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 37.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.36 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 78.33 + mIoU(ms+flip): 79.27 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_potsdam-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_potsdam/deeplabv3plus_r50-d8_512x512_80k_potsdam_20211219_031508-7e7a2b24.pth +- Name: deeplabv3plus_r101-d8_4xb4-80k_potsdam-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 56.95 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.83 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 78.7 + mIoU(ms+flip): 79.47 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_potsdam-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_potsdam/deeplabv3plus_r101-d8_512x512_80k_potsdam_20211219_031508-8b112708.pth +- Name: deeplabv3plus_r18-d8_4xb4-80k_vaihingen-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-18-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 13.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.91 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 72.5 + mIoU(ms+flip): 74.13 + Config: configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_vaihingen-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen_20211231_230805-7626a263.pth +- Name: deeplabv3plus_r50-d8_4xb4-80k_vaihingen-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 37.16 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.36 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 73.97 + mIoU(ms+flip): 75.05 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_vaihingen-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen_20211231_230816-5040938d.pth +- Name: deeplabv3plus_r101-d8_4xb4-80k_vaihingen-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr 
schd: 80000 + inference time (ms/im): + - value: 53.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.83 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 73.06 + mIoU(ms+flip): 74.14 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_vaihingen-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen_20211231_230816-8a095afa.pth +- Name: deeplabv3plus_r18-d8_4xb4-80k_isaid-896x896 + In Collection: DeepLabV3+ + Metadata: + backbone: R-18-D8 + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 40.31 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 6.19 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 61.35 + mIoU(ms+flip): 62.61 + Config: configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_isaid-896x896.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid_20220110_180526-7059991d.pth +- Name: deeplabv3plus_r50-d8_4xb4-80k_isaid-896x896 + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 118.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 21.45 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 67.06 + mIoU(ms+flip): 68.02 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_isaid-896x896.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid_20220110_180526-598be439.pth diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_4xb2-40k_cityscapes-512x1024.py b/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..71c9118e1d --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://resnet101_v1c', + backbone=dict( + depth=101, + dilations=(1, 1, 1, 2), + strides=(1, 2, 2, 1), + multi_grid=(1, 2, 4)), + decode_head=dict( + dilations=(1, 6, 12, 18), + sampler=dict(type='OHEMPixelSampler', min_kept=100000))) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..7d1ccf0b30 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://resnet101_v1c', + backbone=dict( + depth=101, + dilations=(1, 1, 1, 2), + strides=(1, 2, 2, 1), + multi_grid=(1, 2, 4)), + decode_head=dict( + dilations=(1, 6, 12, 18), + sampler=dict(type='OHEMPixelSampler', min_kept=100000))) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..884b526d48 --- 
/dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..debb0255fc --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..bc9334e67d --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..4af9aa2682 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..9c9883dc4f --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = './deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024.py' +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=optimizer, + loss_scale=512.) 
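The AMP config above keeps the R-101 80k Cityscapes config as its base and only replaces the optimizer wrapper; that swap is what enables mixed-precision (FP16) training. Below is a minimal sketch of the objects this `optim_wrapper` dict corresponds to, assuming MMEngine's `AmpOptimWrapper` and a CUDA device (AMP requires one), with a toy module standing in for the real segmentor; MMEngine itself builds these through its registry rather than by direct construction.

# Minimal sketch only; mmengine and a CUDA-capable GPU are assumed.
import torch
from mmengine.optim import AmpOptimWrapper

model = torch.nn.Conv2d(3, 19, kernel_size=1).cuda()  # toy stand-in for the DeepLabV3+ segmentor
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)
optim_wrapper = AmpOptimWrapper(optimizer=optimizer, loss_scale=512.)  # mirrors loss_scale=512. in the config above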
diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..c38a802e10 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-20k_voc12aug-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..97bb827722 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_pascal-context-480x480.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_pascal-context-480x480.py new file mode 100644 index 0000000000..e4b401162d --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_pascal-context-480x480.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-40k_pascal-context-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_pascal-context-59-480x480.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..eeefae4927 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_pascal-context-59-480x480.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-40k_pascal-context-59-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_voc12aug-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..0755c53aae --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-40k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..844ac9613b --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_loveda-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_loveda-512x512.py new file mode 100644 index 0000000000..87c6da9d6a --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_loveda-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-80k_loveda-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_pascal-context-480x480.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_pascal-context-480x480.py new file mode 100644 index 0000000000..115b1c9058 --- /dev/null +++ 
b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_pascal-context-480x480.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-80k_pascal-context-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_pascal-context-59-480x480.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..9aaa653822 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_pascal-context-59-480x480.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-80k_pascal-context-59-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_potsdam-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_potsdam-512x512.py new file mode 100644 index 0000000000..5063b1332c --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_potsdam-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-80k_potsdam-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_vaihingen-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_vaihingen-512x512.py new file mode 100644 index 0000000000..b99c2c7ee0 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101-d8_4xb4-80k_vaihingen-512x512.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-80k_vaihingen-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index d6ce85aea5..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 0ebbd3c70e..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index a75c9d3019..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index ebb1a8eaee..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git 
a/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 3caa6cf8ae..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index 53fd3a9095..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index c3c92eb26f..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py b/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 5ea9cdb5b6..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101b-d8_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3plus/deeplabv3plus_r101b-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..d1bcb09144 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101b-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,4 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101b-d8_4xb2-80k_cityscapes-769x769.py b/configs/deeplabv3plus/deeplabv3plus_r101b-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..c78fc1e209 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101b-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,4 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..5f54913e94 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb2-80k_cityscapes-769x769.py 
b/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..1b361d6d7a --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_isaid-896x896.py b/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_isaid-896x896.py new file mode 100644 index 0000000000..3a1a753b26 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_isaid-896x896.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-80k_isaid-896x896.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_loveda-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_loveda-512x512.py new file mode 100644 index 0000000000..01bbf9bca9 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_loveda-512x512.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-80k_loveda-512x512.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_potsdam-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_potsdam-512x512.py new file mode 100644 index 0000000000..134f2cfc2a --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_potsdam-512x512.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-80k_potsdam-512x512.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_vaihingen-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_vaihingen-512x512.py new file mode 100644 index 0000000000..2194838510 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r18-d8_4xb4-80k_vaihingen-512x512.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_4xb4-80k_vaihingen-512x512.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r18b-d8_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3plus/deeplabv3plus_r18b-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..ea86219692 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r18b-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + 
in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r18b-d8_4xb2-80k_cityscapes-769x769.py b/configs/deeplabv3plus/deeplabv3plus_r18b-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..34ee7ed3df --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r18b-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..32f994d9b3 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..8cdf534ef4 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..0d249b065a --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..863a46e1b3 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git 
a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..9a899fb830 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-20k_voc12aug-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..1876d0ccf4 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_pascal-context-480x480.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_pascal-context-480x480.py new file mode 100644 index 0000000000..95b56d03ee --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_pascal-context-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_pascal-context-59-480x480.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..459c62dc50 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_pascal-context-59-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_voc12aug-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..0d61b509dc --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + 
'../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..6f872cacf7 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_isaid-896x896.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_isaid-896x896.py new file mode 100644 index 0000000000..7edec14bf8 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_isaid-896x896.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/isaid.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (896, 896) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=16), + auxiliary_head=dict(num_classes=16)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_loveda-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_loveda-512x512.py new file mode 100644 index 0000000000..64e262cf88 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_loveda-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/loveda.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=7), + auxiliary_head=dict(num_classes=7)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_pascal-context-480x480.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_pascal-context-480x480.py new file mode 100644 index 0000000000..5ff7fcb41e --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_pascal-context-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_pascal-context-59-480x480.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..84aaf25b52 --- /dev/null +++ 
b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_pascal-context-59-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_potsdam-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_potsdam-512x512.py new file mode 100644 index 0000000000..5810d6bece --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_potsdam-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/potsdam.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=6), + auxiliary_head=dict(num_classes=6)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_vaihingen-512x512.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_vaihingen-512x512.py new file mode 100644 index 0000000000..a7f4b2d27a --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb4-80k_vaihingen-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/vaihingen.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=6), + auxiliary_head=dict(num_classes=6)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 7243d0390f..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3plus_r50-d8.py', - '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 3304d3677f..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3plus_r50-d8.py', - '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index e734880956..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), 
auxiliary_head=dict(num_classes=150)) -test_cfg = dict(mode='whole') diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 1056ad4d1e..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3plus_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug.py deleted file mode 100644 index e36c83ba60..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3plus_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 8705972631..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) -test_cfg = dict(mode='whole') diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 4fcc062ca8..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3plus_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py b/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index e0bfa94576..0000000000 --- a/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3plus_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50b-d8_4xb2-80k_cityscapes-512x1024.py b/configs/deeplabv3plus/deeplabv3plus_r50b-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..3e2813534d --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50b-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = 
dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/configs/deeplabv3plus/deeplabv3plus_r50b-d8_4xb2-80k_cityscapes-769x769.py b/configs/deeplabv3plus/deeplabv3plus_r50b-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..6366bd4e3a --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r50b-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/configs/dmnet/README.md b/configs/dmnet/README.md new file mode 100644 index 0000000000..535740ddd3 --- /dev/null +++ b/configs/dmnet/README.md @@ -0,0 +1,59 @@ +# DMNet + +[Dynamic Multi-scale Filters for Semantic Segmentation](https://openaccess.thecvf.com/content_ICCV_2019/papers/He_Dynamic_Multi-Scale_Filters_for_Semantic_Segmentation_ICCV_2019_paper.pdf) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Multi-scale representation provides an effective way to address scale variation of objects and stuff in semantic segmentation. Previous works construct multi-scale representation by utilizing different filter sizes, expanding filter sizes with dilated filters or pooling grids, and the parameters of these filters are fixed after training. These methods often suffer from heavy computational cost or have more parameters, and are not adaptive to the input image during inference. To address these problems, this paper proposes a Dynamic Multi-scale Network (DMNet) to adaptively capture multi-scale contents for predicting pixel-level semantic labels. DMNet is composed of multiple Dynamic Convolutional Modules (DCMs) arranged in parallel, each of which exploits context-aware filters to estimate semantic representation for a specific scale. The outputs of multiple DCMs are further integrated for final segmentation. We conduct extensive experiments to evaluate our DMNet on three challenging semantic segmentation and scene parsing datasets, PASCAL VOC 2012, Pascal-Context, and ADE20K. DMNet achieves a new record 84.4% mIoU on the PASCAL VOC 2012 test set without MS COCO pre-training and post-processing, and also obtains state-of-the-art performance on Pascal-Context and ADE20K. + + + +
+ +
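The abstract describes each Dynamic Convolutional Module (DCM) as pooling the input to a given scale, generating a context-aware filter from that pooled context, and applying the filter back to the feature map. As a rough, hedged sketch of that idea only — the class name, normalization, and filter application below are illustrative assumptions, not the maintained code, which lives in `mmseg/models/decode_heads/dm_head.py` — it can be written in plain PyTorch like this:

```python
# Minimal sketch of one Dynamic Convolutional Module (DCM), assuming the
# behaviour described in the abstract; details differ from dm_head.py.
import torch
import torch.nn as nn
import torch.nn.functional as F


class DCMSketch(nn.Module):
    """Generate a k x k depth-wise filter from the input and apply it."""

    def __init__(self, channels: int, filter_size: int):
        super().__init__()
        # Odd filter_size assumed so that padding k // 2 keeps spatial size.
        self.filter_size = filter_size
        # 1x1 conv turning the pooled k x k context into per-channel filters.
        self.filter_gen = nn.Conv2d(channels, channels, kernel_size=1)
        self.norm = nn.GroupNorm(1, channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        n, c, h, w = x.shape
        k = self.filter_size
        # Context-aware filter: pool the input to k x k, then project it.
        context = F.adaptive_avg_pool2d(x, k)        # (N, C, k, k)
        dyn_filter = self.filter_gen(context)        # (N, C, k, k)
        # Apply the filter depth-wise; fold the batch into the group dimension
        # so every sample is convolved with its own generated filter.
        out = F.conv2d(
            x.reshape(1, n * c, h, w),
            dyn_filter.reshape(n * c, 1, k, k),
            padding=k // 2,
            groups=n * c)
        return F.relu(self.norm(out.reshape(n, c, h, w)))


feats = torch.randn(2, 512, 65, 65)
print(DCMSketch(512, filter_size=3)(feats).shape)  # torch.Size([2, 512, 65, 65])
```

In DMNet several such modules with different filter sizes run in parallel and their outputs are fused before the final classifier, which is what the configs below instantiate through the shared `dmnet_r50-d8.py` base model.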
+ +## Citation + +```bibtex +@InProceedings{He_2019_ICCV, +author = {He, Junjun and Deng, Zhongying and Qiao, Yu}, +title = {Dynamic Multi-Scale Filters for Semantic Segmentation}, +booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, +month = {October}, +year = {2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DMNet | R-50-D8 | 512x1024 | 40000 | 7.0 | 3.66 | 77.78 | 79.14 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dmnet/dmnet_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes/dmnet_r50-d8_512x1024_40k_cityscapes_20201215_042326-615373cf.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes/dmnet_r50-d8_512x1024_40k_cityscapes-20201215_042326.log.json) | +| DMNet | R-101-D8 | 512x1024 | 40000 | 10.6 | 2.54 | 78.37 | 79.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dmnet/dmnet_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes/dmnet_r101-d8_512x1024_40k_cityscapes_20201215_043100-8291e976.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes/dmnet_r101-d8_512x1024_40k_cityscapes-20201215_043100.log.json) | +| DMNet | R-50-D8 | 769x769 | 40000 | 7.9 | 1.57 | 78.49 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dmnet/dmnet_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_40k_cityscapes/dmnet_r50-d8_769x769_40k_cityscapes_20201215_093706-e7f0e23e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_40k_cityscapes/dmnet_r50-d8_769x769_40k_cityscapes-20201215_093706.log.json) | +| DMNet | R-101-D8 | 769x769 | 40000 | 12.0 | 1.01 | 77.62 | 78.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dmnet/dmnet_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_40k_cityscapes/dmnet_r101-d8_769x769_40k_cityscapes_20201215_081348-a74261f6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_40k_cityscapes/dmnet_r101-d8_769x769_40k_cityscapes-20201215_081348.log.json) | +| DMNet | R-50-D8 | 512x1024 | 80000 | - | - | 79.07 | 80.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dmnet/dmnet_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes/dmnet_r50-d8_512x1024_80k_cityscapes_20201215_053728-3c8893b9.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes/dmnet_r50-d8_512x1024_80k_cityscapes-20201215_053728.log.json) | +| DMNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.64 | 80.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dmnet/dmnet_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes/dmnet_r101-d8_512x1024_80k_cityscapes_20201215_031718-fa081cb8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes/dmnet_r101-d8_512x1024_80k_cityscapes-20201215_031718.log.json) | +| DMNet | R-50-D8 | 769x769 | 80000 | - | - | 79.22 | 80.55 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dmnet/dmnet_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_80k_cityscapes/dmnet_r50-d8_769x769_80k_cityscapes_20201215_034006-6060840e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_80k_cityscapes/dmnet_r50-d8_769x769_80k_cityscapes-20201215_034006.log.json) | +| DMNet | R-101-D8 | 769x769 | 80000 | - | - | 79.19 | 80.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dmnet/dmnet_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_80k_cityscapes/dmnet_r101-d8_769x769_80k_cityscapes_20201215_082810-7f0de59a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_80k_cityscapes/dmnet_r101-d8_769x769_80k_cityscapes-20201215_082810.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DMNet | R-50-D8 | 512x512 | 80000 | 9.4 | 20.95 | 42.37 | 43.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dmnet/dmnet_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_80k_ade20k/dmnet_r50-d8_512x512_80k_ade20k_20201215_144744-f89092a6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_80k_ade20k/dmnet_r50-d8_512x512_80k_ade20k-20201215_144744.log.json) | +| DMNet | R-101-D8 | 512x512 | 80000 | 13.0 | 13.88 | 45.34 | 46.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dmnet/dmnet_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_80k_ade20k/dmnet_r101-d8_512x512_80k_ade20k_20201215_104812-bfa45311.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_80k_ade20k/dmnet_r101-d8_512x512_80k_ade20k-20201215_104812.log.json) | +| DMNet | R-50-D8 | 512x512 | 160000 | - | - | 43.15 | 44.17 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dmnet/dmnet_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_160k_ade20k/dmnet_r50-d8_512x512_160k_ade20k_20201215_115313-025ab3f9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_160k_ade20k/dmnet_r50-d8_512x512_160k_ade20k-20201215_115313.log.json) | +| DMNet | R-101-D8 | 512x512 | 160000 | - | - | 45.42 | 46.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dmnet/dmnet_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_160k_ade20k/dmnet_r101-d8_512x512_160k_ade20k_20201215_111145-a0bc02ef.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_160k_ade20k/dmnet_r101-d8_512x512_160k_ade20k-20201215_111145.log.json) | diff --git a/configs/dmnet/dmnet.yml b/configs/dmnet/dmnet.yml new file mode 100644 index 0000000000..dfb80ba7e1 --- /dev/null +++ b/configs/dmnet/dmnet.yml @@ -0,0 +1,232 @@ +Collections: +- Name: DMNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://openaccess.thecvf.com/content_ICCV_2019/papers/He_Dynamic_Multi-Scale_Filters_for_Semantic_Segmentation_ICCV_2019_paper.pdf + Title: Dynamic Multi-scale Filters for Semantic Segmentation + README: configs/dmnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dm_head.py#L93 + Version: v0.17.0 + Converted From: + Code: https://github.com/Junjun2016/DMNet +Models: +- Name: dmnet_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: DMNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 273.22 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.78 + mIoU(ms+flip): 79.14 + Config: configs/dmnet/dmnet_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes/dmnet_r50-d8_512x1024_40k_cityscapes_20201215_042326-615373cf.pth +- Name: dmnet_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: DMNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 393.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 10.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.37 + mIoU(ms+flip): 79.72 + Config: configs/dmnet/dmnet_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes/dmnet_r101-d8_512x1024_40k_cityscapes_20201215_043100-8291e976.pth +- Name: dmnet_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: DMNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 636.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 7.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.49 + mIoU(ms+flip): 80.27 + Config: configs/dmnet/dmnet_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_40k_cityscapes/dmnet_r50-d8_769x769_40k_cityscapes_20201215_093706-e7f0e23e.pth +- Name: dmnet_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: DMNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 990.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.62 + mIoU(ms+flip): 78.94 + Config: configs/dmnet/dmnet_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_40k_cityscapes/dmnet_r101-d8_769x769_40k_cityscapes_20201215_081348-a74261f6.pth +- Name: dmnet_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DMNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.07 + mIoU(ms+flip): 80.22 + Config: configs/dmnet/dmnet_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes/dmnet_r50-d8_512x1024_80k_cityscapes_20201215_053728-3c8893b9.pth +- Name: dmnet_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DMNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.64 + mIoU(ms+flip): 80.67 + Config: configs/dmnet/dmnet_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes/dmnet_r101-d8_512x1024_80k_cityscapes_20201215_031718-fa081cb8.pth +- Name: dmnet_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: DMNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.22 + mIoU(ms+flip): 80.55 + Config: configs/dmnet/dmnet_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_80k_cityscapes/dmnet_r50-d8_769x769_80k_cityscapes_20201215_034006-6060840e.pth +- Name: dmnet_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: DMNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.19 + mIoU(ms+flip): 80.65 + Config: configs/dmnet/dmnet_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_80k_cityscapes/dmnet_r101-d8_769x769_80k_cityscapes_20201215_082810-7f0de59a.pth +- Name: dmnet_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: DMNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.73 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.4 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.37 + mIoU(ms+flip): 43.62 + Config: configs/dmnet/dmnet_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_80k_ade20k/dmnet_r50-d8_512x512_80k_ade20k_20201215_144744-f89092a6.pth +- Name: dmnet_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: DMNet + Metadata: + backbone: 
R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 72.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.34 + mIoU(ms+flip): 46.13 + Config: configs/dmnet/dmnet_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_80k_ade20k/dmnet_r101-d8_512x512_80k_ade20k_20201215_104812-bfa45311.pth +- Name: dmnet_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: DMNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.15 + mIoU(ms+flip): 44.17 + Config: configs/dmnet/dmnet_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_160k_ade20k/dmnet_r50-d8_512x512_160k_ade20k_20201215_115313-025ab3f9.pth +- Name: dmnet_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: DMNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.42 + mIoU(ms+flip): 46.76 + Config: configs/dmnet/dmnet_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_160k_ade20k/dmnet_r101-d8_512x512_160k_ade20k_20201215_111145-a0bc02ef.pth diff --git a/configs/dmnet/dmnet_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/dmnet/dmnet_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..9832b62a29 --- /dev/null +++ b/configs/dmnet/dmnet_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './dmnet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/dmnet/dmnet_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/dmnet/dmnet_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..03346c5d9b --- /dev/null +++ b/configs/dmnet/dmnet_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './dmnet_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/dmnet/dmnet_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/dmnet/dmnet_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..fd7e9acd1c --- /dev/null +++ b/configs/dmnet/dmnet_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './dmnet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/dmnet/dmnet_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/dmnet/dmnet_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..2205e601ce --- /dev/null +++ b/configs/dmnet/dmnet_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './dmnet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/dmnet/dmnet_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/dmnet/dmnet_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..23e215bf2f --- /dev/null +++ b/configs/dmnet/dmnet_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './dmnet_r50-d8_4xb4-160k_ade20k-512x512.py' +model = 
dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/dmnet/dmnet_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/dmnet/dmnet_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..5c25587e64 --- /dev/null +++ b/configs/dmnet/dmnet_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './dmnet_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/dmnet/dmnet_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/dmnet/dmnet_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..aa86b01398 --- /dev/null +++ b/configs/dmnet/dmnet_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/dmnet/dmnet_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/dmnet/dmnet_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..8c2dbf31bd --- /dev/null +++ b/configs/dmnet/dmnet_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/dmnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/dmnet/dmnet_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/dmnet/dmnet_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..bc2160634b --- /dev/null +++ b/configs/dmnet/dmnet_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/dmnet/dmnet_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/dmnet/dmnet_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..e32ae71765 --- /dev/null +++ b/configs/dmnet/dmnet_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/dmnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/dmnet/dmnet_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/dmnet/dmnet_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..71d0a046ba --- /dev/null +++ b/configs/dmnet/dmnet_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + 
data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/dmnet/dmnet_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/dmnet/dmnet_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..727bed0ea1 --- /dev/null +++ b/configs/dmnet/dmnet_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/dnlnet/README.md b/configs/dnlnet/README.md new file mode 100644 index 0000000000..ab24549ed6 --- /dev/null +++ b/configs/dnlnet/README.md @@ -0,0 +1,62 @@ +# DNLNet + +[Disentangled Non-Local Neural Networks](https://arxiv.org/abs/2006.06668) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The non-local block is a popular module for strengthening the context modeling ability of a regular convolutional neural network. This paper first studies the non-local block in depth, where we find that its attention computation can be split into two terms, a whitened pairwise term accounting for the relationship between two pixels and a unary term representing the saliency of every pixel. We also observe that the two terms trained alone tend to model different visual clues, e.g. the whitened pairwise term learns within-region relationships while the unary term learns salient boundaries. However, the two terms are tightly coupled in the non-local block, which hinders the learning of each. Based on these findings, we present the disentangled non-local block, where the two terms are decoupled to facilitate learning for both terms. We demonstrate the effectiveness of the decoupled design on various tasks, such as semantic segmentation on Cityscapes, ADE20K and PASCAL Context, object detection on COCO, and action recognition on Kinetics. + + + +
+ +
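The abstract above decomposes non-local attention into a whitened pairwise term (relations between positions) and a unary term (per-pixel saliency), and the disentangled block learns the two separately. The snippet below is only a hedged sketch of that decomposition on flattened feature maps; the function name and shapes are illustrative assumptions, while the head actually used by these configs is `mmseg/models/decode_heads/dnl_head.py` (see `dnlnet.yml` below).

```python
# Sketch of the disentangled attention map described in the abstract:
# whitened pairwise term + unary saliency term, each normalized on its own.
import torch
import torch.nn.functional as F


def disentangled_attention(query, key, unary_logits):
    """query/key: (N, HW, C); unary_logits: (N, HW, 1) per-position saliency."""
    # Whitened pairwise term: subtract the mean embedding before the dot
    # product so this term only encodes relationships between positions.
    q = query - query.mean(dim=1, keepdim=True)
    k = key - key.mean(dim=1, keepdim=True)
    pairwise = F.softmax(q @ k.transpose(1, 2), dim=-1)     # (N, HW, HW)
    # Unary term: one saliency weight per position, shared by all queries.
    unary = F.softmax(unary_logits, dim=1).transpose(1, 2)  # (N, 1, HW)
    # Decoupled combination: the two normalized terms are simply added.
    return pairwise + unary


attn = disentangled_attention(
    torch.randn(2, 16, 8), torch.randn(2, 16, 8), torch.randn(2, 16, 1))
print(attn.shape)  # torch.Size([2, 16, 16])
```

The resulting map would then weight a value projection exactly as in a standard non-local block; the disentangling only changes how the attention map itself is formed.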
+ +## Citation + +This example is to reproduce ["Disentangled Non-Local Neural Networks"](https://arxiv.org/abs/2006.06668) for semantic segmentation. It is still in progress. + +## Citation + +```bibtex +@misc{yin2020disentangled, + title={Disentangled Non-Local Neural Networks}, + author={Minghao Yin and Zhuliang Yao and Yue Cao and Xiu Li and Zheng Zhang and Stephen Lin and Han Hu}, + year={2020}, + booktitle={ECCV} +} +``` + +## Results and models (in progress) + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DNLNet | R-50-D8 | 512x1024 | 40000 | 7.3 | 2.56 | 78.61 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dnlnet/dnl_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes_20200904_233629-53d4ea93.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes-20200904_233629.log.json) | +| DNLNet | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.96 | 78.31 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dnlnet/dnl_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes_20200904_233629-9928ffef.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes-20200904_233629.log.json) | +| DNLNet | R-50-D8 | 769x769 | 40000 | 9.2 | 1.50 | 78.44 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dnlnet/dnl_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes_20200820_232206-0f283785.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes-20200820_232206.log.json) | +| DNLNet | R-101-D8 | 769x769 | 40000 | 12.6 | 1.02 | 76.39 | 77.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dnlnet/dnl_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes_20200820_171256-76c596df.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes-20200820_171256.log.json) | +| DNLNet | R-50-D8 | 512x1024 | 80000 | - | - | 79.33 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dnlnet/dnl_r50-d8_4xb2-80k_cityscapes-512x1024.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes_20200904_233629-58b2f778.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes-20200904_233629.log.json) | +| DNLNet | R-101-D8 | 512x1024 | 80000 | - | - | 80.41 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dnlnet/dnl_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes_20200904_233629-758e2dd4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes-20200904_233629.log.json) | +| DNLNet | R-50-D8 | 769x769 | 80000 | - | - | 79.36 | 80.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dnlnet/dnl_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes_20200820_011925-366bc4c7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes-20200820_011925.log.json) | +| DNLNet | R-101-D8 | 769x769 | 80000 | - | - | 79.41 | 80.68 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dnlnet/dnl_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes_20200821_051111-95ff84ab.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes-20200821_051111.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | -------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DNLNet | R-50-D8 | 512x512 | 80000 | 8.8 | 20.66 | 41.76 | 42.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dnlnet/dnl_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k_20200826_183354-1cf6e0c1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k-20200826_183354.log.json) | +| DNLNet | R-101-D8 | 512x512 | 80000 | 12.8 | 12.54 | 43.76 | 44.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dnlnet/dnl_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k_20200826_183354-d820d6ea.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k-20200826_183354.log.json) | +| DNLNet | R-50-D8 | 512x512 | 160000 | - | - | 41.87 | 43.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dnlnet/dnl_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k_20200826_183350-37837798.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k-20200826_183350.log.json) | +| DNLNet | R-101-D8 | 512x512 | 160000 | - | - | 44.25 | 45.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dnlnet/dnl_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k_20200826_183350-ed522c61.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k-20200826_183350.log.json) | diff --git a/configs/dnlnet/dnl_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/dnlnet/dnl_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..310d84e574 --- /dev/null +++ b/configs/dnlnet/dnl_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './dnl_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/dnlnet/dnl_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/dnlnet/dnl_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..a94dbb89b3 --- /dev/null +++ b/configs/dnlnet/dnl_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './dnl_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/dnlnet/dnl_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/dnlnet/dnl_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..f9b6d5ee3d --- /dev/null +++ b/configs/dnlnet/dnl_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './dnl_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/dnlnet/dnl_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/dnlnet/dnl_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..9c7d557d02 --- /dev/null +++ b/configs/dnlnet/dnl_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './dnl_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/dnlnet/dnl_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/dnlnet/dnl_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..1edc26fd8c --- /dev/null +++ b/configs/dnlnet/dnl_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './dnl_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/dnlnet/dnl_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/dnlnet/dnl_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..d29c17ef5b --- /dev/null +++ b/configs/dnlnet/dnl_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './dnl_r50-d8_4xb4-80k_ade20k-512x512.py' +model = 
dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/dnlnet/dnl_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/dnlnet/dnl_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..be389927ce --- /dev/null +++ b/configs/dnlnet/dnl_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/dnlnet/dnl_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/dnlnet/dnl_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..9eaaa63ef2 --- /dev/null +++ b/configs/dnlnet/dnl_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/dnl_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/dnlnet/dnl_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/dnlnet/dnl_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..2e431783ad --- /dev/null +++ b/configs/dnlnet/dnl_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/dnlnet/dnl_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/dnlnet/dnl_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..cb379c1e08 --- /dev/null +++ b/configs/dnlnet/dnl_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,16 @@ +_base_ = [ + '../_base_/models/dnl_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) + +optim_wrapper = dict( + paramwise_cfg=dict( + custom_keys=dict(theta=dict(wd_mult=0.), phi=dict(wd_mult=0.)))) diff --git a/configs/dnlnet/dnl_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/dnlnet/dnl_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..b2ae2a85da --- /dev/null +++ b/configs/dnlnet/dnl_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/dnlnet/dnl_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/dnlnet/dnl_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..f310a4ebab --- /dev/null +++ 
b/configs/dnlnet/dnl_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/dnlnet/dnlnet.yml b/configs/dnlnet/dnlnet.yml new file mode 100644 index 0000000000..ae65dbbaca --- /dev/null +++ b/configs/dnlnet/dnlnet.yml @@ -0,0 +1,228 @@ +Collections: +- Name: DNLNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://arxiv.org/abs/2006.06668 + Title: Disentangled Non-Local Neural Networks + README: configs/dnlnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dnl_head.py#L88 + Version: v0.17.0 + Converted From: + Code: https://github.com/yinmh17/DNL-Semantic-Segmentation +Models: +- Name: dnl_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: DNLNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 390.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.61 + Config: configs/dnlnet/dnl_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes_20200904_233629-53d4ea93.pth +- Name: dnl_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: DNLNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 510.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 10.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.31 + Config: configs/dnlnet/dnl_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes_20200904_233629-9928ffef.pth +- Name: dnl_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: DNLNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 666.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.44 + mIoU(ms+flip): 80.27 + Config: configs/dnlnet/dnl_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes_20200820_232206-0f283785.pth +- Name: dnl_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: DNLNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 980.39 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.39 + mIoU(ms+flip): 77.77 + Config: configs/dnlnet/dnl_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes_20200820_171256-76c596df.pth +- Name: dnl_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DNLNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.33 + Config: configs/dnlnet/dnl_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes_20200904_233629-58b2f778.pth +- Name: dnl_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: DNLNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.41 + Config: configs/dnlnet/dnl_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes_20200904_233629-758e2dd4.pth +- Name: dnl_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: DNLNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.36 + mIoU(ms+flip): 80.7 + Config: configs/dnlnet/dnl_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes_20200820_011925-366bc4c7.pth +- Name: dnl_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: DNLNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.41 + mIoU(ms+flip): 80.68 + Config: configs/dnlnet/dnl_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes_20200821_051111-95ff84ab.pth +- Name: dnl_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: DNLNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 48.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.76 + mIoU(ms+flip): 42.99 + Config: configs/dnlnet/dnl_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k_20200826_183354-1cf6e0c1.pth +- Name: dnl_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: DNLNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 79.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.76 + mIoU(ms+flip): 44.91 + Config: configs/dnlnet/dnl_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k_20200826_183354-d820d6ea.pth +- Name: dnl_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: DNLNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: 
ADE20K + Metrics: + mIoU: 41.87 + mIoU(ms+flip): 43.01 + Config: configs/dnlnet/dnl_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k_20200826_183350-37837798.pth +- Name: dnl_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: DNLNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.25 + mIoU(ms+flip): 45.78 + Config: configs/dnlnet/dnl_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k_20200826_183350-ed522c61.pth diff --git a/configs/dpt/README.md b/configs/dpt/README.md new file mode 100644 index 0000000000..41d73ea57a --- /dev/null +++ b/configs/dpt/README.md @@ -0,0 +1,67 @@ +# DPT + +[Vision Transformer for Dense Prediction](https://arxiv.org/abs/2103.13413) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +We introduce dense vision transformers, an architecture that leverages vision transformers in place of convolutional networks as a backbone for dense prediction tasks. We assemble tokens from various stages of the vision transformer into image-like representations at various resolutions and progressively combine them into full-resolution predictions using a convolutional decoder. The transformer backbone processes representations at a constant and relatively high resolution and has a global receptive field at every stage. These properties allow the dense vision transformer to provide finer-grained and more globally coherent predictions when compared to fully-convolutional networks. Our experiments show that this architecture yields substantial improvements on dense prediction tasks, especially when a large amount of training data is available. For monocular depth estimation, we observe an improvement of up to 28% in relative performance when compared to a state-of-the-art fully-convolutional network. When applied to semantic segmentation, dense vision transformers set a new state of the art on ADE20K with 49.02% mIoU. We further show that the architecture can be fine-tuned on smaller datasets such as NYUv2, KITTI, and Pascal Context where it also sets the new state of the art. Our models are available at [this https URL](https://github.com/isl-org/DPT). + + + +
+ +
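The step the abstract highlights, regrouping ViT tokens into image-like feature maps that a convolutional decoder can then fuse into a full-resolution prediction, can be illustrated with a short PyTorch sketch. This is a minimal illustration under assumed shapes, not the implementation in `mmseg/models/decode_heads/dpt_head.py`; the `reassemble` helper, the channel counts, and the single 1x1 projection are hypothetical, and the real head additionally resamples the maps to several resolutions before fusing them.

```python
import torch
import torch.nn as nn


def reassemble(tokens: torch.Tensor, grid_hw: tuple, proj: nn.Conv2d) -> torch.Tensor:
    """Turn ViT tokens (B, 1 + H*W, C) into an image-like map (B, C_out, H, W).

    Sketch of the "assemble tokens into image-like representations" idea:
    drop the class token, restore the 2D token grid, then project channels.
    """
    b, _, c = tokens.shape
    h, w = grid_hw
    x = tokens[:, 1:, :]                       # drop the [CLS] token
    x = x.transpose(1, 2).reshape(b, c, h, w)  # back to a 2D feature map
    return proj(x)


# A 512x512 crop with 16x16 patches gives a 32x32 token grid (ViT-B: C=768).
proj = nn.Conv2d(768, 256, kernel_size=1)
tokens = torch.randn(2, 1 + 32 * 32, 768)
feature_map = reassemble(tokens, (32, 32), proj)
print(feature_map.shape)  # torch.Size([2, 256, 32, 32])
```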
+ +## Citation + +```bibtex +@article{dosoViTskiy2020, + title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, + author={DosoViTskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil}, + journal={arXiv preprint arXiv:2010.11929}, + year={2020} +} + +@article{Ranftl2021, + author = {Ren\'{e} Ranftl and Alexey Bochkovskiy and Vladlen Koltun}, + title = {Vision Transformers for Dense Prediction}, + journal = {ArXiv preprint}, + year = {2021}, +} +``` + +## Usage + +To use other repositories' pre-trained models, it is necessary to convert keys. + +We provide a script [`vit2mmseg.py`](../../tools/model_converters/vit2mmseg.py) in the tools directory to convert the keys of models from [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py) to MMSegmentation style. + +```shell +python tools/model_converters/vit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +E.g. + +```shell +python tools/model_converters/vit2mmseg.py https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth pretrain/jx_vit_base_p16_224-80ecf9dd.pth +``` + +This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DPT | ViT-B | 512x512 | 160000 | 8.09 | 10.41 | 46.97 | 48.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/dpt/dpt_vit-b16_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-db31cf52.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-20210809_172025.log.json) | diff --git a/configs/dpt/dpt.yml new file mode 100644 index 0000000000..32324d3459 --- /dev/null +++ b/configs/dpt/dpt.yml @@ -0,0 +1,37 @@ +Collections: +- Name: DPT + Metadata: + Training Data: + - ADE20K + Paper: + URL: https://arxiv.org/abs/2103.13413 + Title: Vision Transformer for Dense Prediction + README: configs/dpt/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dpt_head.py#L215 + Version: v0.17.0 + Converted From: + Code: https://github.com/isl-org/DPT +Models: +- Name: dpt_vit-b16_8xb2-160k_ade20k-512x512 + In Collection: DPT + Metadata: + backbone: ViT-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 96.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.09 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.97 +
mIoU(ms+flip): 48.34 + Config: configs/dpt/dpt_vit-b16_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-db31cf52.pth diff --git a/configs/dpt/dpt_vit-b16_8xb2-160k_ade20k-512x512.py b/configs/dpt/dpt_vit-b16_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..56b33d96b9 --- /dev/null +++ b/configs/dpt/dpt_vit-b16_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/dpt_vit-b16.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) +# AdamW optimizer, no weight decay for position embedding & layer norm +# in backbone + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01), + paramwise_cfg=dict( + custom_keys={ + 'pos_embed': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + eta_min=0.0, + power=1.0, + begin=1500, + end=160000, + by_epoch=False, + ) +] + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/emanet/README.md b/configs/emanet/README.md new file mode 100644 index 0000000000..5a9bfc326a --- /dev/null +++ b/configs/emanet/README.md @@ -0,0 +1,46 @@ +# EMANet + +[Expectation-Maximization Attention Networks for Semantic Segmentation](https://arxiv.org/abs/1907.13426) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Self-attention mechanism has been widely used for various tasks. It is designed to compute the representation of each position by a weighted sum of the features at all positions. Thus, it can capture long-range relations for computer vision tasks. However, it is computationally consuming. Since the attention maps are computed w.r.t all other positions. In this paper, we formulate the attention mechanism into an expectation-maximization manner and iteratively estimate a much more compact set of bases upon which the attention maps are computed. By a weighted summation upon these bases, the resulting representation is low-rank and deprecates noisy information from the input. The proposed Expectation-Maximization Attention (EMA) module is robust to the variance of input and is also friendly in memory and computation. Moreover, we set up the bases maintenance and normalization methods to stabilize its training procedure. We conduct extensive experiments on popular semantic segmentation benchmarks including PASCAL VOC, PASCAL Context and COCO Stuff, on which we set new records. + + + +
+ +
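The expectation-maximization formulation described above alternates between an E-step, which computes how responsible each of a small set of bases is for every pixel feature, and an M-step, which re-estimates the bases as responsibility-weighted means of those features; the output is then reconstructed from the few bases, so it is low-rank and cheaper than full self-attention. Below is a minimal PyTorch sketch of that loop; it is a simplified reading of the paper rather than the `ema_head.py` implementation, and the shapes, number of bases, iteration count, and normalization are assumptions.

```python
import torch
import torch.nn.functional as F


def em_attention(x: torch.Tensor, mu: torch.Tensor, n_iter: int = 3) -> torch.Tensor:
    """Minimal EM attention sketch.

    x:  (B, C, N) pixel features, N = H * W
    mu: (B, C, K) initial bases with K << N
    Returns a low-rank reconstruction of x with shape (B, C, N).
    """
    for _ in range(n_iter):
        # E-step: responsibility of every pixel for every basis.
        z = torch.einsum('bcn,bck->bnk', x, mu)    # (B, N, K) similarities
        z = F.softmax(z, dim=-1)
        # M-step: bases become responsibility-weighted means of the features.
        z_sum = z.sum(dim=1, keepdim=True)         # (B, 1, K)
        mu = torch.einsum('bcn,bnk->bck', x, z) / (z_sum + 1e-6)
        mu = F.normalize(mu, dim=1)                # keep the bases well conditioned
    # Reconstruct every pixel from the compact set of bases.
    return torch.einsum('bck,bnk->bcn', mu, z)


x = torch.randn(2, 512, 64 * 64)    # e.g. a 64x64 feature map with 512 channels
mu = torch.randn(2, 512, 64)        # 64 bases
print(em_attention(x, mu).shape)    # torch.Size([2, 512, 4096])
```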
+ +## Citation + +```bibtex +@inproceedings{li2019expectation, + title={Expectation-maximization attention networks for semantic segmentation}, + author={Li, Xia and Zhong, Zhisheng and Wu, Jianlong and Yang, Yibo and Lin, Zhouchen and Liu, Hong}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={9167--9176}, + year={2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| EMANet | R-50-D8 | 512x1024 | 80000 | 5.4 | 4.58 | 77.59 | 79.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/emanet/emanet_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_512x1024_80k_cityscapes/emanet_r50-d8_512x1024_80k_cityscapes_20200901_100301-c43fcef1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_512x1024_80k_cityscapes/emanet_r50-d8_512x1024_80k_cityscapes-20200901_100301.log.json) | +| EMANet | R-101-D8 | 512x1024 | 80000 | 6.2 | 2.87 | 79.10 | 81.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/emanet/emanet_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_512x1024_80k_cityscapes/emanet_r101-d8_512x1024_80k_cityscapes_20200901_100301-2d970745.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_512x1024_80k_cityscapes/emanet_r101-d8_512x1024_80k_cityscapes-20200901_100301.log.json) | +| EMANet | R-50-D8 | 769x769 | 80000 | 8.9 | 1.97 | 79.33 | 80.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/emanet/emanet_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_769x769_80k_cityscapes/emanet_r50-d8_769x769_80k_cityscapes_20200901_100301-16f8de52.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_769x769_80k_cityscapes/emanet_r50-d8_769x769_80k_cityscapes-20200901_100301.log.json) | +| EMANet | R-101-D8 | 769x769 | 80000 | 10.1 | 1.22 | 79.62 | 81.00 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/emanet/emanet_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_769x769_80k_cityscapes/emanet_r101-d8_769x769_80k_cityscapes_20200901_100301-47a324ce.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_769x769_80k_cityscapes/emanet_r101-d8_769x769_80k_cityscapes-20200901_100301.log.json) | diff --git a/configs/emanet/emanet.yml new file mode 100644 index 0000000000..ac194f2a0f --- /dev/null +++ b/configs/emanet/emanet.yml @@ -0,0 +1,103 @@ +Collections: +- Name: EMANet + Metadata: + Training Data: + - Cityscapes + Paper: +
URL: https://arxiv.org/abs/1907.13426 + Title: Expectation-Maximization Attention Networks for Semantic Segmentation + README: configs/emanet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ema_head.py#L80 + Version: v0.17.0 + Converted From: + Code: https://xialipku.github.io/EMANet +Models: +- Name: emanet_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: EMANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 218.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.59 + mIoU(ms+flip): 79.44 + Config: configs/emanet/emanet_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_512x1024_80k_cityscapes/emanet_r50-d8_512x1024_80k_cityscapes_20200901_100301-c43fcef1.pth +- Name: emanet_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: EMANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 348.43 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.1 + mIoU(ms+flip): 81.21 + Config: configs/emanet/emanet_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_512x1024_80k_cityscapes/emanet_r101-d8_512x1024_80k_cityscapes_20200901_100301-2d970745.pth +- Name: emanet_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: EMANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 507.61 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.33 + mIoU(ms+flip): 80.49 + Config: configs/emanet/emanet_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_769x769_80k_cityscapes/emanet_r50-d8_769x769_80k_cityscapes_20200901_100301-16f8de52.pth +- Name: emanet_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: EMANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 819.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.62 + mIoU(ms+flip): 81.0 + Config: configs/emanet/emanet_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_769x769_80k_cityscapes/emanet_r101-d8_769x769_80k_cityscapes_20200901_100301-47a324ce.pth diff --git a/configs/emanet/emanet_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/emanet/emanet_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..ee3a3b5167 --- /dev/null +++ b/configs/emanet/emanet_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './emanet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/emanet/emanet_r101-d8_4xb2-80k_cityscapes-769x769.py 
b/configs/emanet/emanet_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..7319a3e4b6 --- /dev/null +++ b/configs/emanet/emanet_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './emanet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/emanet/emanet_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/emanet/emanet_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..6198e1f9a2 --- /dev/null +++ b/configs/emanet/emanet_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/emanet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/emanet/emanet_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/emanet/emanet_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..a8e4521b07 --- /dev/null +++ b/configs/emanet/emanet_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/emanet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/encnet/README.md b/configs/encnet/README.md index 9f1edde82a..7be0c6d926 100644 --- a/configs/encnet/README.md +++ b/configs/encnet/README.md @@ -1,7 +1,30 @@ -# Context Encoding for Semantic Segmentation +# EncNet + +[Context Encoding for Semantic Segmentation](https://arxiv.org/abs/1803.08904) ## Introduction -``` + + + +Official Repo + +Code Snippet + +## Abstract + + + +Recent work has made significant progress in improving spatial resolution for pixelwise labeling with Fully Convolutional Network (FCN) framework by employing Dilated/Atrous convolution, utilizing multi-scale features and refining boundaries. In this paper, we explore the impact of global contextual information in semantic segmentation by introducing the Context Encoding Module, which captures the semantic context of scenes and selectively highlights class-dependent featuremaps. The proposed Context Encoding Module significantly improves semantic segmentation results with only marginal extra computation cost over FCN. Our approach has achieved new state-of-the-art results 51.7% mIoU on PASCAL-Context, 85.9% mIoU on PASCAL VOC 2012. Our single model achieves a final score of 0.5567 on ADE20K test set, which surpass the winning entry of COCO-Place Challenge in 2017. In addition, we also explore how the Context Encoding Module can improve the feature representation of relatively shallow networks for the image classification on CIFAR-10 dataset. Our 14 layer network has achieved an error rate of 3.45%, which is comparable with state-of-the-art approaches with over 10 times more layers. The source code for the complete system are publicly available. + + + +
+ +
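The Context Encoding Module summarized above aggregates residuals between pixel features and a small learned codebook into one global context vector, and that vector predicts per-channel scaling factors which selectively highlight class-dependent feature maps. The PyTorch sketch below illustrates that data flow; it is a simplified, hypothetical version under assumed shapes (no BatchNorm over the aggregated residuals and no auxiliary SE-loss branch), not the `enc_head.py` implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class ContextEncoding(nn.Module):
    """Minimal sketch of a context encoding block with channel re-weighting."""

    def __init__(self, channels: int, num_codes: int = 32):
        super().__init__()
        self.codewords = nn.Parameter(torch.randn(num_codes, channels))
        self.scale = nn.Parameter(torch.ones(num_codes))
        self.fc = nn.Linear(channels, channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        b, c, h, w = x.shape
        feats = x.view(b, c, -1).transpose(1, 2)              # (B, N, C), N = H*W
        # Residuals between every pixel feature and every codeword.
        resid = feats.unsqueeze(2) - self.codewords           # (B, N, K, C)
        # Soft assignment of pixels to codewords from scaled residual norms.
        assign = F.softmax(-self.scale * resid.pow(2).sum(-1), dim=2)  # (B, N, K)
        encoded = (assign.unsqueeze(-1) * resid).sum(dim=1)   # (B, K, C)
        context = F.relu(encoded).mean(dim=1)                 # (B, C) global context
        # Channel-wise attention that highlights class-dependent feature maps.
        gamma = torch.sigmoid(self.fc(context))               # (B, C)
        return x * gamma.view(b, c, 1, 1)


enc = ContextEncoding(channels=256, num_codes=16)
out = enc(torch.randn(2, 256, 32, 32))
print(out.shape)  # torch.Size([2, 256, 32, 32])
```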
+ +## Citation + +```bibtex @InProceedings{Zhang_2018_CVPR, author = {Zhang, Hang and Dana, Kristin and Shi, Jianping and Zhang, Zhongyue and Wang, Xiaogang and Tyagi, Ambrish and Agrawal, Amit}, title = {Context Encoding for Semantic Segmentation}, @@ -14,21 +37,23 @@ year = {2018} ## Results and models ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| encnet | R-50-D8 | 512x1024 | 40000 | 8.6 | 4.58 | 75.67 | 77.08 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes_20200621_220958-68638a47.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes-20200621_220958.log.json) | -| encnet | R-101-D8 | 512x1024 | 40000 | 12.1 | 2.66 | 75.81 | 77.21 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes_20200621_220933-35e0a3e8.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes-20200621_220933.log.json) | -| encnet | R-50-D8 | 769x769 | 40000 | 9.8 | 1.82 | 76.24 | 77.85 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes_20200621_220958-3bcd2884.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes-20200621_220958.log.json) | -| encnet | R-101-D8 | 769x769 | 40000 | 13.7 | 1.26 | 74.25 | 76.25 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes_20200621_220933-2fafed55.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes-20200621_220933.log.json) | -| encnet | R-50-D8 | 512x1024 | 80000 | - | - | 77.94 | 79.13 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes_20200622_003554-fc5c5624.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes-20200622_003554.log.json) | -| encnet | R-101-D8 | 512x1024 | 80000 | - | - | 78.55 | 79.47 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes_20200622_003555-1de64bec.pth) | 
[log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes-20200622_003555.log.json) | -| encnet | R-50-D8 | 769x769 | 80000 | - | - | 77.44 | 78.72 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes_20200622_003554-55096dcb.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes-20200622_003554.log.json) | -| encnet | R-101-D8 | 769x769 | 80000 | - | - | 76.10 | 76.97 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes_20200622_003555-470ef79d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes-20200622_003555.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| EncNet | R-50-D8 | 512x1024 | 40000 | 8.6 | 4.58 | 75.67 | 77.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet/encnet_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes_20200621_220958-68638a47.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes-20200621_220958.log.json) | +| EncNet | R-101-D8 | 512x1024 | 40000 | 12.1 | 2.66 | 75.81 | 77.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet/encnet_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes_20200621_220933-35e0a3e8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes-20200621_220933.log.json) | +| EncNet | R-50-D8 | 769x769 | 40000 | 9.8 | 1.82 | 76.24 | 77.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet/encnet_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes_20200621_220958-3bcd2884.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes-20200621_220958.log.json) | +| EncNet | R-101-D8 | 769x769 | 40000 | 13.7 | 1.26 | 74.25 | 76.25 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet/encnet_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes_20200621_220933-2fafed55.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes-20200621_220933.log.json) | +| EncNet | R-50-D8 | 512x1024 | 80000 | - | - | 77.94 | 79.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet/encnet_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes_20200622_003554-fc5c5624.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes-20200622_003554.log.json) | +| EncNet | R-101-D8 | 512x1024 | 80000 | - | - | 78.55 | 79.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet/encnet_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes_20200622_003555-1de64bec.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes-20200622_003555.log.json) | +| EncNet | R-50-D8 | 769x769 | 80000 | - | - | 77.44 | 78.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet/encnet_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes_20200622_003554-55096dcb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes-20200622_003554.log.json) | +| EncNet | R-101-D8 | 769x769 | 80000 | - | - | 76.10 | 76.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet/encnet_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes_20200622_003555-470ef79d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes-20200622_003555.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| encnet | R-50-D8 | 512x512 | 80000 | 10.1 | 22.81 | 39.53 | 41.17 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k_20200622_042412-44b46b04.pth) | 
[log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k-20200622_042412.log.json) | -| encnet | R-101-D8 | 512x512 | 80000 | 13.6 | 14.87 | 42.11 | 43.61 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k_20200622_101128-dd35e237.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k-20200622_101128.log.json) | -| encnet | R-50-D8 | 512x512 | 160000 | - | - | 40.10 | 41.71 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k_20200622_101059-b2db95e0.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k-20200622_101059.log.json) | -| encnet | R-101-D8 | 512x512 | 160000 | - | - | 42.61 | 44.01 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k_20200622_073348-7989641f.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k-20200622_073348.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| EncNet | R-50-D8 | 512x512 | 80000 | 10.1 | 22.81 | 39.53 | 41.17 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet/encnet_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k_20200622_042412-44b46b04.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k-20200622_042412.log.json) | +| EncNet | R-101-D8 | 512x512 | 80000 | 13.6 | 14.87 | 42.11 | 43.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet/encnet_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k_20200622_101128-dd35e237.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k-20200622_101128.log.json) | +| EncNet | R-50-D8 | 512x512 | 160000 | - | - | 40.10 | 41.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet/encnet_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k_20200622_101059-b2db95e0.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k-20200622_101059.log.json) | +| EncNet | R-101-D8 | 512x512 | 160000 | - | - | 42.61 | 44.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet/encnet_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k_20200622_073348-7989641f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k-20200622_073348.log.json) | diff --git a/configs/encnet/encnet.yml b/configs/encnet/encnet.yml new file mode 100644 index 0000000000..bea147b286 --- /dev/null +++ b/configs/encnet/encnet.yml @@ -0,0 +1,232 @@ +Collections: +- Name: EncNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://arxiv.org/abs/1803.08904 + Title: Context Encoding for Semantic Segmentation + README: configs/encnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/enc_head.py#L63 + Version: v0.17.0 + Converted From: + Code: https://github.com/zhanghang1989/PyTorch-Encoding +Models: +- Name: encnet_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: EncNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 218.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.67 + mIoU(ms+flip): 77.08 + Config: configs/encnet/encnet_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes_20200621_220958-68638a47.pth +- Name: encnet_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: EncNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 375.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 12.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.81 + mIoU(ms+flip): 77.21 + Config: configs/encnet/encnet_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes_20200621_220933-35e0a3e8.pth +- Name: encnet_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: EncNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 549.45 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 9.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.24 + mIoU(ms+flip): 77.85 + Config: configs/encnet/encnet_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes_20200621_220958-3bcd2884.pth +- Name: encnet_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: EncNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 793.65 + hardware: V100 + backend: PyTorch + batch 
size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 13.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.25 + mIoU(ms+flip): 76.25 + Config: configs/encnet/encnet_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes_20200621_220933-2fafed55.pth +- Name: encnet_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: EncNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.94 + mIoU(ms+flip): 79.13 + Config: configs/encnet/encnet_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes_20200622_003554-fc5c5624.pth +- Name: encnet_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: EncNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.55 + mIoU(ms+flip): 79.47 + Config: configs/encnet/encnet_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes_20200622_003555-1de64bec.pth +- Name: encnet_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: EncNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.44 + mIoU(ms+flip): 78.72 + Config: configs/encnet/encnet_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes_20200622_003554-55096dcb.pth +- Name: encnet_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: EncNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.1 + mIoU(ms+flip): 76.97 + Config: configs/encnet/encnet_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes_20200622_003555-470ef79d.pth +- Name: encnet_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: EncNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 43.84 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 39.53 + mIoU(ms+flip): 41.17 + Config: configs/encnet/encnet_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k_20200622_042412-44b46b04.pth +- Name: encnet_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: EncNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 67.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.11 + mIoU(ms+flip): 43.61 + Config: 
configs/encnet/encnet_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k_20200622_101128-dd35e237.pth +- Name: encnet_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: EncNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 40.1 + mIoU(ms+flip): 41.71 + Config: configs/encnet/encnet_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k_20200622_101059-b2db95e0.pth +- Name: encnet_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: EncNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.61 + mIoU(ms+flip): 44.01 + Config: configs/encnet/encnet_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k_20200622_073348-7989641f.pth diff --git a/configs/encnet/encnet_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/encnet/encnet_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..13ab367be5 --- /dev/null +++ b/configs/encnet/encnet_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/encnet/encnet_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..7810ac440d --- /dev/null +++ b/configs/encnet/encnet_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/encnet/encnet_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..bec6bd907d --- /dev/null +++ b/configs/encnet/encnet_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/encnet/encnet_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..e1f6409e63 --- /dev/null +++ b/configs/encnet/encnet_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/encnet/encnet_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..9599f9c0d3 --- /dev/null +++ b/configs/encnet/encnet_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_4xb4-20k_voc12aug-512x512.py b/configs/encnet/encnet_r101-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..a9edfc28a2 --- /dev/null +++ 
b/configs/encnet/encnet_r101-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_4xb4-40k_voc12aug-512x512.py b/configs/encnet/encnet_r101-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..d2fbab59e3 --- /dev/null +++ b/configs/encnet/encnet_r101-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_4xb4-40k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/encnet/encnet_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..debe8c8331 --- /dev/null +++ b/configs/encnet/encnet_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_512x1024_40k_cityscapes.py b/configs/encnet/encnet_r101-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index f34373d9eb..0000000000 --- a/configs/encnet/encnet_r101-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './encnet_r50-d8_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py b/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 0b0207b314..0000000000 --- a/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './encnet_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py b/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index 8fec6ba255..0000000000 --- a/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './encnet_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_512x512_20k_voc12aug.py b/configs/encnet/encnet_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index c264af998b..0000000000 --- a/configs/encnet/encnet_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './encnet_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py b/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 8a6968ea58..0000000000 --- a/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './encnet_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py b/configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index 94151004ea..0000000000 --- a/configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './encnet_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py 
b/configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index d6ade67b76..0000000000 --- a/configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './encnet_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r101-d8_769x769_80k_cityscapes.py b/configs/encnet/encnet_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 55648c08b2..0000000000 --- a/configs/encnet/encnet_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './encnet_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/encnet/encnet_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/encnet/encnet_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..d5c3027a89 --- /dev/null +++ b/configs/encnet/encnet_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/encnet/encnet_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/encnet/encnet_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..045d0feb0c --- /dev/null +++ b/configs/encnet/encnet_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/encnet/encnet_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/encnet/encnet_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..4dafcd5b7d --- /dev/null +++ b/configs/encnet/encnet_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/encnet/encnet_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/encnet/encnet_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..e4d0b8045e --- /dev/null +++ b/configs/encnet/encnet_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/encnet/encnet_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/encnet/encnet_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..b916798062 --- /dev/null +++ 
b/configs/encnet/encnet_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/encnet/encnet_r50-d8_4xb4-20k_voc12aug-512x512.py b/configs/encnet/encnet_r50-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..e5c917158d --- /dev/null +++ b/configs/encnet/encnet_r50-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/encnet/encnet_r50-d8_4xb4-40k_voc12aug-512x512.py b/configs/encnet/encnet_r50-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..8ca126ab02 --- /dev/null +++ b/configs/encnet/encnet_r50-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/encnet/encnet_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/encnet/encnet_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..931d6c019b --- /dev/null +++ b/configs/encnet/encnet_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/encnet/encnet_r50-d8_512x1024_40k_cityscapes.py b/configs/encnet/encnet_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 4ea6ed0e84..0000000000 --- a/configs/encnet/encnet_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py b/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index d2feeef7e9..0000000000 --- a/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/encnet/encnet_r50-d8_512x512_160k_ade20k.py b/configs/encnet/encnet_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index 2a5dc203cc..0000000000 --- a/configs/encnet/encnet_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py', - 
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/configs/encnet/encnet_r50-d8_512x512_20k_voc12aug.py b/configs/encnet/encnet_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 9cb7952ced..0000000000 --- a/configs/encnet/encnet_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/encnet/encnet_r50-d8_512x512_40k_voc12aug.py b/configs/encnet/encnet_r50-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 81f3cbfbf5..0000000000 --- a/configs/encnet/encnet_r50-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py b/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 835375cb04..0000000000 --- a/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py b/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 9f44b425d4..0000000000 --- a/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py b/configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index aac7f2d443..0000000000 --- a/configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/encnet/encnet_r50s-d8_4xb4-80k_ade20k-512x512.py b/configs/encnet/encnet_r50s-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..e98104dbaf --- /dev/null +++ b/configs/encnet/encnet_r50s-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict(stem_channels=128), + 
decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/encnet/encnet_r50s-d8_512x512_80k_ade20k.py b/configs/encnet/encnet_r50s-d8_512x512_80k_ade20k.py deleted file mode 100644 index 600b701a71..0000000000 --- a/configs/encnet/encnet_r50s-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - backbone=dict(stem_channels=128), - decode_head=dict(num_classes=150), - auxiliary_head=dict(num_classes=150)) diff --git a/configs/erfnet/README.md b/configs/erfnet/README.md new file mode 100644 index 0000000000..44e4f51c91 --- /dev/null +++ b/configs/erfnet/README.md @@ -0,0 +1,52 @@ +# ERFNet + +[ERFNet: Efficient Residual Factorized ConvNet for Real-time Semantic Segmentation](http://www.robesafe.uah.es/personal/eduardo.romera/pdfs/Romera17tits.pdf) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Semantic segmentation is a challenging task that addresses most of the perception needs of intelligent vehicles (IVs) in an unified way. Deep neural networks excel at this task, as they can be trained end-to-end to accurately classify multiple object categories in an image at pixel level. However, a good tradeoff between high quality and computational resources is yet not present in the state-of-the-art semantic segmentation approaches, limiting their application in real vehicles. In this paper, we propose a deep architecture that is able to run in real time while providing accurate semantic segmentation. The core of our architecture is a novel layer that uses residual connections and factorized convolutions in order to remain efficient while retaining remarkable accuracy. Our approach is able to run at over 83 FPS in a single Titan X, and 7 FPS in a Jetson TX1 (embedded device). A comprehensive set of experiments on the publicly available Cityscapes data set demonstrates that our system achieves an accuracy that is similar to the state of the art, while being orders of magnitude faster to compute than other architectures that achieve top precision. The resulting tradeoff makes our model an ideal approach for scene understanding in IV applications. The code is publicly available at: https://github.com/Eromera/erfnet. + + + +
+ +## Citation + +```bibtex +@article{romera2017erfnet, + title={Erfnet: Efficient residual factorized convnet for real-time semantic segmentation}, + author={Romera, Eduardo and Alvarez, Jos{\'e} M and Bergasa, Luis M and Arroyo, Roberto}, + journal={IEEE Transactions on Intelligent Transportation Systems}, + volume={19}, + number={1}, + pages={263--272}, + year={2017}, + publisher={IEEE} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| ERFNet | ERFNet | 512x1024 | 160000 | 6.04 | 15.26 | 71.08 | 72.6 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/erfnet/erfnet_fcn_4xb4-160k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes/erfnet_fcn_4x4_512x1024_160k_cityscapes_20211126_082056-03d333ed.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes/erfnet_fcn_4x4_512x1024_160k_cityscapes_20211126_082056.log.json) | + +Note: + +- The model is trained from scratch. + +- Last deconvolution layer in the [original paper](https://github.com/Eromera/erfnet_pytorch/blob/master/train/erfnet.py#L123) is replaced by a naive `FCNHead` decoder head and a bilinear upsampling layer, found more effective and efficient. 
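For readers who want to try the released ERFNet checkpoint directly, a minimal inference sketch is shown below. It assumes the mmsegmentation 1.x Python API (`mmseg.apis.init_model` / `inference_model`), which may differ in other versions; the config path and weights URL are the ones listed in the table above, and `demo/demo.png` stands in for any test image you have on hand.

```python
# Minimal inference sketch, assuming mmsegmentation 1.x exposes
# mmseg.apis.init_model / inference_model; adjust names for other versions.
from mmseg.apis import inference_model, init_model

config_file = 'configs/erfnet/erfnet_fcn_4xb4-160k_cityscapes-512x1024.py'
checkpoint = (
    'https://download.openmmlab.com/mmsegmentation/v0.5/erfnet/'
    'erfnet_fcn_4x4_512x1024_160k_cityscapes/'
    'erfnet_fcn_4x4_512x1024_160k_cityscapes_20211126_082056-03d333ed.pth')

# Build the segmentor from the config and load the Cityscapes-trained weights.
model = init_model(config_file, checkpoint, device='cuda:0')

# Single-image inference; the returned data sample carries the predicted
# semantic mask in `result.pred_sem_seg`.
result = inference_model(model, 'demo/demo.png')
```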
diff --git a/configs/erfnet/erfnet.yml b/configs/erfnet/erfnet.yml new file mode 100644 index 0000000000..aeb454cb50 --- /dev/null +++ b/configs/erfnet/erfnet.yml @@ -0,0 +1,37 @@ +Collections: +- Name: ERFNet + Metadata: + Training Data: + - Cityscapes + Paper: + URL: http://www.robesafe.uah.es/personal/eduardo.romera/pdfs/Romera17tits.pdf + Title: 'ERFNet: Efficient Residual Factorized ConvNet for Real-time Semantic Segmentation' + README: configs/erfnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.20.0/mmseg/models/backbones/erfnet.py#L321 + Version: v0.20.0 + Converted From: + Code: https://github.com/Eromera/erfnet_pytorch +Models: +- Name: erfnet_fcn_4xb4-160k_cityscapes-512x1024 + In Collection: ERFNet + Metadata: + backbone: ERFNet + crop size: (512,1024) + lr schd: 160000 + inference time (ms/im): + - value: 65.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.04 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 71.08 + mIoU(ms+flip): 72.6 + Config: configs/erfnet/erfnet_fcn_4xb4-160k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes/erfnet_fcn_4x4_512x1024_160k_cityscapes_20211126_082056-03d333ed.pth diff --git a/configs/erfnet/erfnet_fcn_4xb4-160k_cityscapes-512x1024.py b/configs/erfnet/erfnet_fcn_4xb4-160k_cityscapes-512x1024.py new file mode 100644 index 0000000000..7d65582798 --- /dev/null +++ b/configs/erfnet/erfnet_fcn_4xb4-160k_cityscapes-512x1024.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/erfnet_fcn.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/fastfcn/README.md b/configs/fastfcn/README.md new file mode 100644 index 0000000000..feedfa33a8 --- /dev/null +++ b/configs/fastfcn/README.md @@ -0,0 +1,63 @@ +# FastFCN + +[FastFCN: Rethinking Dilated Convolution in the Backbone for Semantic Segmentation](https://arxiv.org/abs/1903.11816) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Modern approaches for semantic segmentation usually employ dilated convolutions in the backbone to extract high-resolution feature maps, which brings heavy computation complexity and memory footprint. To replace the time and memory consuming dilated convolutions, we propose a novel joint upsampling module named Joint Pyramid Upsampling (JPU) by formulating the task of extracting high-resolution feature maps into a joint upsampling problem. With the proposed JPU, our method reduces the computation complexity by more than three times without performance loss. Experiments show that JPU is superior to other upsampling modules, which can be plugged into many existing approaches to reduce computation complexity and improve performance. By replacing dilated convolutions with the proposed JPU module, our method achieves the state-of-the-art performance in Pascal Context dataset (mIoU of 53.13%) and ADE20K dataset (final score of 0.5584) while running 3 times faster. + + + +
+ +## Citation + +```bibtex +@article{wu2019fastfcn, +title={Fastfcn: Rethinking dilated convolution in the backbone for semantic segmentation}, +author={Wu, Huikai and Zhang, Junge and Huang, Kaiqi and Liang, Kongming and Yu, Yizhou}, +journal={arXiv preprint arXiv:1903.11816}, +year={2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------------------------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FastFCN + DeepLabV3 | R-50-D32 | 512x1024 | 80000 | 5.67 | 2.64 | 79.12 | 80.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes_20210928_053722-5d1a2648.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes_20210928_053722.log.json) | +| FastFCN + DeepLabV3 (4x4) | R-50-D32 | 512x1024 | 80000 | 9.79 | - | 79.52 | 80.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes_20210924_214357-72220849.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes_20210924_214357.log.json) | +| FastFCN + PSPNet | R-50-D32 | 512x1024 | 80000 | 5.67 | 4.40 | 79.26 | 80.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes_20210928_053722-57749bed.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes_20210928_053722.log.json) | +| FastFCN + PSPNet (4x4) | R-50-D32 | 512x1024 | 80000 | 9.94 | - | 78.76 | 80.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes_20210925_061841-77e87b0a.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes_20210925_061841.log.json) | +| FastFCN + EncNet | R-50-D32 | 512x1024 | 80000 | 8.15 | 4.77 | 77.97 | 79.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes_20210928_030036-78da5046.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes_20210928_030036.log.json) | +| FastFCN + EncNet (4x4) | R-50-D32 | 512x1024 | 80000 | 15.45 | - | 78.6 | 80.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes_20210926_093217-e1eb6dbb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes_20210926_093217.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------------------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FastFCN + DeepLabV3 | R-50-D32 | 512x1024 | 80000 | 8.46 | 12.06 | 41.88 | 42.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k_20211013_190619-3aa40f2d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k_20211013_190619.log.json) | +| FastFCN + DeepLabV3 | R-50-D32 | 512x1024 | 160000 | - | - | 43.58 | 44.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k_20211008_152246-27036aee.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k_20211008_152246.log.json) | +| FastFCN + PSPNet | R-50-D32 | 512x1024 | 80000 | 8.02 | 19.21 | 41.40 | 42.12 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k_20210930_225137-993d07c8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k_20210930_225137.log.json) | +| FastFCN + PSPNet | R-50-D32 | 512x1024 | 160000 | - | - | 42.63 | 43.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k_20211008_105455-e8f5a2fd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k_20211008_105455.log.json) | +| FastFCN + EncNet | R-50-D32 | 512x1024 | 80000 | 9.67 | 17.23 | 40.88 | 42.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k_20210930_225214-65aef6dd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k_20210930_225214.log.json) | +| FastFCN + EncNet | R-50-D32 | 512x1024 | 160000 | - | - | 42.50 | 44.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k_20211008_105456-d875ce3c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k_20211008_105456.log.json) | + +Note: + +- `4x4` means 4 GPUs with 4 samples per GPU in training, default setting is 4 GPUs with 2 samples per GPU in training. +- Results of [DeepLabV3 (mIoU: 79.32)](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/deeplabv3), [PSPNet (mIoU: 78.55)](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet) and [ENCNet (mIoU: 77.94)](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/encnet) can be found in each original repository. 
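The FastFCN ASPP and EncNet variants introduced below are derived from the PSPNet-based config purely through config inheritance: each child config sets `_delete_=True` inside `decode_head`, so the base head is replaced wholesale instead of being merged key by key. The snippet below is a simplified stand-in for that merge rule (not the actual MMEngine implementation), included only to illustrate why the flag is needed.

```python
# Simplified sketch of the `_delete_=True` override semantics used by the
# FastFCN configs below; this mimics, but is not, MMEngine's config merge.
def merge_cfg(base: dict, override: dict) -> dict:
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict):
            child = {k: v for k, v in value.items() if k != '_delete_'}
            if value.get('_delete_', False) or not isinstance(merged.get(key), dict):
                merged[key] = child  # replace the base entry wholesale
            else:
                merged[key] = merge_cfg(merged[key], child)  # ordinary deep merge
        else:
            merged[key] = value
    return merged


base = dict(decode_head=dict(type='PSPHead', pool_scales=(1, 2, 3, 6), channels=512))
override = dict(decode_head=dict(_delete_=True, type='ASPPHead', channels=512))
print(merge_cfg(base, override))
# -> {'decode_head': {'type': 'ASPPHead', 'channels': 512}}
# Without `_delete_`, the PSPHead-only key `pool_scales` would survive the
# merge and be passed to ASPPHead, which does not expect it.
```

With this rule in place, each variant only needs to restate the head-specific fields (`dilations` for `ASPPHead`, `num_codes` and the SE loss for `EncHead`), which is exactly what the configs that follow do.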
diff --git a/configs/fastfcn/fastfcn.yml b/configs/fastfcn/fastfcn.yml new file mode 100644 index 0000000000..6b4d3bc121 --- /dev/null +++ b/configs/fastfcn/fastfcn.yml @@ -0,0 +1,235 @@ +Collections: +- Name: FastFCN + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://arxiv.org/abs/1903.11816 + Title: 'FastFCN: Rethinking Dilated Convolution in the Backbone for Semantic Segmentation' + README: configs/fastfcn/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/necks/jpu.py#L12 + Version: v0.18.0 + Converted From: + Code: https://github.com/wuhuikai/FastFCN +Models: +- Name: fastfcn_r50-d32_jpu_aspp_4xb2-80k_cityscapes-512x1024 + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 378.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.67 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.12 + mIoU(ms+flip): 80.58 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes_20210928_053722-5d1a2648.pth +- Name: fastfcn_r50-d32_jpu_aspp_4xb2-80k_cityscapes-512x1024 + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + Training Memory (GB): 9.79 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.52 + mIoU(ms+flip): 80.91 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes_20210924_214357-72220849.pth +- Name: fastfcn_r50-d32_jpu_psp_4xb2-80k_cityscapes-512x1024 + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 227.27 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.67 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.26 + mIoU(ms+flip): 80.86 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes_20210928_053722-57749bed.pth +- Name: fastfcn_r50-d32_jpu_psp_4xb2-80k_cityscapes-512x1024 + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + Training Memory (GB): 9.94 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.76 + mIoU(ms+flip): 80.03 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes_20210925_061841-77e87b0a.pth +- Name: fastfcn_r50-d32_jpu_enc_4xb2-80k_cityscapes-512x1024 + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 209.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + 
Training Memory (GB): 8.15 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.97 + mIoU(ms+flip): 79.92 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes_20210928_030036-78da5046.pth +- Name: fastfcn_r50-d32_jpu_enc_4xb2-80k_cityscapes-512x1024 + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + Training Memory (GB): 15.45 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.6 + mIoU(ms+flip): 80.25 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes_20210926_093217-e1eb6dbb.pth +- Name: fastfcn_r50-d32_jpu_aspp_4xb4-80k_ade20k-512x512 + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 82.92 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.46 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.88 + mIoU(ms+flip): 42.91 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k_20211013_190619-3aa40f2d.pth +- Name: fastfcn_r50-d32_jpu_aspp_4xb4-160k_ade20k-512x512 + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.58 + mIoU(ms+flip): 44.92 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k_20211008_152246-27036aee.pth +- Name: fastfcn_r50-d32_jpu_psp_4xb4-80k_ade20k-512x512 + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 52.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.02 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.4 + mIoU(ms+flip): 42.12 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k_20210930_225137-993d07c8.pth +- Name: fastfcn_r50-d32_jpu_psp_4xb4-160k_ade20k-512x512 + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.63 + mIoU(ms+flip): 43.71 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k_20211008_105455-e8f5a2fd.pth +- Name: fastfcn_r50-d32_jpu_enc_4xb4-80k_ade20k-512x512 + In Collection: FastFCN + Metadata: + 
backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 58.04 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.67 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 40.88 + mIoU(ms+flip): 42.36 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k_20210930_225214-65aef6dd.pth +- Name: fastfcn_r50-d32_jpu_enc_4xb4-160k_ade20k-512x512 + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.5 + mIoU(ms+flip): 44.21 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k_20211008_105456-d875ce3c.pth diff --git a/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb2-80k_cityscapes-512x1024.py b/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..39e6e236b7 --- /dev/null +++ b/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,20 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_psp_4xb2-80k_cityscapes-512x1024.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + decode_head=dict( + _delete_=True, + type='ASPPHead', + in_channels=2048, + in_index=2, + channels=512, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-160k_ade20k-512x512.py b/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..1913544cfb --- /dev/null +++ b/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,20 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_psp_4xb4-160k_ade20k-512x512.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + decode_head=dict( + _delete_=True, + type='ASPPHead', + in_channels=2048, + in_index=2, + channels=512, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-80k_ade20k-512x512.py b/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..751689599d --- /dev/null +++ b/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,20 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_psp_4xb4-80k_ade20k-512x512.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + decode_head=dict( + _delete_=True, + type='ASPPHead', + in_channels=2048, + in_index=2, + channels=512, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + 
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-80k_cityscapes-512x1024.py b/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..a8c5dc3232 --- /dev/null +++ b/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb4-80k_cityscapes-512x1024.py @@ -0,0 +1,5 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_aspp_4xb2-80k_cityscapes-512x1024.py' +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb2-80k_cityscapes-512x1024.py b/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..4840dd0287 --- /dev/null +++ b/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,24 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_psp_4xb2-80k_cityscapes-512x1024.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + decode_head=dict( + _delete_=True, + type='EncHead', + in_channels=[512, 1024, 2048], + in_index=(0, 1, 2), + channels=512, + num_codes=32, + use_se_loss=True, + add_lateral=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_se_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-160k_ade20k-512x512.py b/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..619d0862f1 --- /dev/null +++ b/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,24 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_psp_4xb4-160k_ade20k-512x512.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + decode_head=dict( + _delete_=True, + type='EncHead', + in_channels=[512, 1024, 2048], + in_index=(0, 1, 2), + channels=512, + num_codes=32, + use_se_loss=True, + add_lateral=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_se_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-80k_ade20k-512x512.py b/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..a76b026b6a --- /dev/null +++ b/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,24 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_psp_4xb4-80k_ade20k-512x512.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + decode_head=dict( + _delete_=True, + type='EncHead', + in_channels=[512, 1024, 2048], + in_index=(0, 1, 2), + channels=512, + num_codes=32, + use_se_loss=True, + add_lateral=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_se_decode=dict( + type='CrossEntropyLoss', 
use_sigmoid=True, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-80k_cityscapes-512x1024.py b/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..6df1527272 --- /dev/null +++ b/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4xb4-80k_cityscapes-512x1024.py @@ -0,0 +1,5 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_enc_4xb2-80k_cityscapes-512x1024.py' +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb2-80k_cityscapes-512x1024.py b/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..dc5c54d553 --- /dev/null +++ b/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fastfcn_r50-d32_jpu_psp.py', + '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-160k_ade20k-512x512.py b/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..887ace1d87 --- /dev/null +++ b/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/fastfcn_r50-d32_jpu_psp.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-80k_ade20k-512x512.py b/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..3981e20a47 --- /dev/null +++ b/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/fastfcn_r50-d32_jpu_psp.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-80k_cityscapes-512x1024.py b/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..2c7d504160 --- /dev/null +++ b/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4xb4-80k_cityscapes-512x1024.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/fastfcn_r50-d32_jpu_psp.py', + '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/fastscnn/README.md b/configs/fastscnn/README.md new file mode 100644 index 0000000000..3e06903ae5 --- /dev/null +++ b/configs/fastscnn/README.md @@ -0,0 
+1,42 @@ +# Fast-SCNN + +[Fast-SCNN for Semantic Segmentation](https://arxiv.org/abs/1902.04502) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The encoder-decoder framework is state-of-the-art for offline semantic image segmentation. Since the rise in autonomous systems, real-time computation is increasingly desirable. In this paper, we introduce fast segmentation convolutional neural network (Fast-SCNN), an above real-time semantic segmentation model on high resolution image data (1024x2048px) suited to efficient computation on embedded devices with low memory. Building on existing two-branch methods for fast segmentation, we introduce our \`learning to downsample' module which computes low-level features for multiple resolution branches simultaneously. Our network combines spatial detail at high resolution with deep features extracted at lower resolution, yielding an accuracy of 68.0% mean intersection over union at 123.5 frames per second on Cityscapes. We also show that large scale pre-training is unnecessary. We thoroughly validate our metric in experiments with ImageNet pre-training and the coarse labeled data of Cityscapes. Finally, we show even faster computation with competitive results on subsampled inputs, without any network modifications. + + + +
+ +## Citation + +```bibtex +@article{poudel2019fast, + title={Fast-scnn: Fast semantic segmentation network}, + author={Poudel, Rudra PK and Liwicki, Stephan and Cipolla, Roberto}, + journal={arXiv preprint arXiv:1902.04502}, + year={2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| -------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FastSCNN | FastSCNN | 512x1024 | 160000 | 3.3 | 56.45 | 70.96 | 72.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fastscnn/fast_scnn_8xb4-160k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_lr0.12_8x4_160k_cityscapes/fast_scnn_lr0.12_8x4_160k_cityscapes_20210630_164853-0cec9937.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_lr0.12_8x4_160k_cityscapes/fast_scnn_lr0.12_8x4_160k_cityscapes_20210630_164853.log.json) | diff --git a/configs/fastscnn/fast_scnn_8xb4-160k_cityscapes-512x1024.py b/configs/fastscnn/fast_scnn_8xb4-160k_cityscapes-512x1024.py new file mode 100644 index 0000000000..e7f68bfe73 --- /dev/null +++ b/configs/fastscnn/fast_scnn_8xb4-160k_cityscapes-512x1024.py @@ -0,0 +1,15 @@ +_base_ = [ + '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) +# Re-config the data sampler. +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader + +# Re-config the optimizer. 
+optimizer = dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=4e-5) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/fastscnn/fastscnn.yml b/configs/fastscnn/fastscnn.yml new file mode 100644 index 0000000000..13215c2fb1 --- /dev/null +++ b/configs/fastscnn/fastscnn.yml @@ -0,0 +1,35 @@ +Collections: +- Name: FastSCNN + Metadata: + Training Data: + - Cityscapes + Paper: + URL: https://arxiv.org/abs/1902.04502 + Title: Fast-SCNN for Semantic Segmentation + README: configs/fastscnn/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/fast_scnn.py#L272 + Version: v0.17.0 +Models: +- Name: fast_scnn_8xb4-160k_cityscapes-512x1024 + In Collection: FastSCNN + Metadata: + backbone: FastSCNN + crop size: (512,1024) + lr schd: 160000 + inference time (ms/im): + - value: 17.71 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 70.96 + mIoU(ms+flip): 72.65 + Config: configs/fastscnn/fast_scnn_8xb4-160k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_lr0.12_8x4_160k_cityscapes/fast_scnn_lr0.12_8x4_160k_cityscapes_20210630_164853-0cec9937.pth diff --git a/configs/fcn/README.md b/configs/fcn/README.md index 6ec2080123..4b887f19e4 100644 --- a/configs/fcn/README.md +++ b/configs/fcn/README.md @@ -1,7 +1,30 @@ -# Fully Convolutional Networks for Semantic Segmentation +# FCN + +[Fully Convolutional Networks for Semantic Segmentation](https://arxiv.org/abs/1411.4038) ## Introduction -``` + + + +Official Repo + +Code Snippet + +## Abstract + + + +Convolutional networks are powerful visual models that yield hierarchies of features. We show that convolutional networks by themselves, trained end-to-end, pixels-to-pixels, exceed the state-of-the-art in semantic segmentation. Our key insight is to build "fully convolutional" networks that take input of arbitrary size and produce correspondingly-sized output with efficient inference and learning. We define and detail the space of fully convolutional networks, explain their application to spatially dense prediction tasks, and draw connections to prior models. We adapt contemporary classification networks (AlexNet, the VGG net, and GoogLeNet) into fully convolutional networks and transfer their learned representations by fine-tuning to the segmentation task. We then define a novel architecture that combines semantic information from a deep, coarse layer with appearance information from a shallow, fine layer to produce accurate and detailed segmentations. Our fully convolutional network achieves state-of-the-art segmentation of PASCAL VOC (20% relative improvement to 62.2% mean IU on 2012), NYUDv2, and SIFT Flow, while inference takes one third of a second for a typical image. + + + +
+ +## Citation + +```bibtex @article{shelhamer2017fully, title={Fully convolutional networks for semantic segmentation}, author={Shelhamer, Evan and Long, Jonathan and Darrell, Trevor}, @@ -17,29 +40,72 @@ ## Results and models ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| FCN | R-50-D8 | 512x1024 | 40000 | 5.7 | 4.17 | 72.25 | 73.36 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608.log.json) | -| FCN | R-101-D8 | 512x1024 | 40000 | 9.2 | 2.66 | 75.45 | 76.58 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_40k_cityscapes/fcn_r101-d8_512x1024_40k_cityscapes_20200604_181852-a883d3a1.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_40k_cityscapes/fcn_r101-d8_512x1024_40k_cityscapes_20200604_181852.log.json) | -| FCN | R-50-D8 | 769x769 | 40000 | 6.5 | 1.80 | 71.47 | 72.54 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_40k_cityscapes/fcn_r50-d8_769x769_40k_cityscapes_20200606_113104-977b5d02.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_40k_cityscapes/fcn_r50-d8_769x769_40k_cityscapes_20200606_113104.log.json) | -| FCN | R-101-D8 | 769x769 | 40000 | 10.4 | 1.19 | 73.93 | 75.14 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_40k_cityscapes/fcn_r101-d8_769x769_40k_cityscapes_20200606_113208-7d4ab69c.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_40k_cityscapes/fcn_r101-d8_769x769_40k_cityscapes_20200606_113208.log.json) | -| FCN | R-50-D8 | 512x1024 | 80000 | - | - | 73.61 | 74.24 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_80k_cityscapes/fcn_r50-d8_512x1024_80k_cityscapes_20200606_113019-03aa804d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_80k_cityscapes/fcn_r50-d8_512x1024_80k_cityscapes_20200606_113019.log.json) | -| FCN | R-101-D8 | 512x1024 | 80000 | - | - | 75.13 | 75.94 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_80k_cityscapes/fcn_r101-d8_512x1024_80k_cityscapes_20200606_113038-3fb937eb.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_80k_cityscapes/fcn_r101-d8_512x1024_80k_cityscapes_20200606_113038.log.json) | -| FCN | R-50-D8 | 769x769 | 80000 | - | - | 72.64 | 73.32 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_80k_cityscapes/fcn_r50-d8_769x769_80k_cityscapes_20200606_195749-f5caeabc.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_80k_cityscapes/fcn_r50-d8_769x769_80k_cityscapes_20200606_195749.log.json) | -| FCN | R-101-D8 | 769x769 | 80000 | - | - | 75.52 | 76.61 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_80k_cityscapes/fcn_r101-d8_769x769_80k_cityscapes_20200606_214354-45cbac68.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_80k_cityscapes/fcn_r101-d8_769x769_80k_cityscapes_20200606_214354.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | ---------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | R-50-D8 | 512x1024 | 40000 | 5.7 | 4.17 | 72.25 | 73.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608.log.json) | +| FCN | R-101-D8 | 512x1024 | 40000 | 9.2 | 2.66 | 75.45 | 76.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_40k_cityscapes/fcn_r101-d8_512x1024_40k_cityscapes_20200604_181852-a883d3a1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_40k_cityscapes/fcn_r101-d8_512x1024_40k_cityscapes_20200604_181852.log.json) | +| FCN | R-50-D8 | 769x769 | 40000 | 6.5 | 1.80 | 71.47 | 72.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_40k_cityscapes/fcn_r50-d8_769x769_40k_cityscapes_20200606_113104-977b5d02.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_40k_cityscapes/fcn_r50-d8_769x769_40k_cityscapes_20200606_113104.log.json) | +| FCN | R-101-D8 | 769x769 | 40000 | 10.4 | 1.19 | 73.93 | 75.14 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_40k_cityscapes/fcn_r101-d8_769x769_40k_cityscapes_20200606_113208-7d4ab69c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_40k_cityscapes/fcn_r101-d8_769x769_40k_cityscapes_20200606_113208.log.json) | +| FCN | R-18-D8 | 
512x1024 | 80000 | 1.7 | 14.65 | 71.11 | 72.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r18-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_512x1024_80k_cityscapes/fcn_r18-d8_512x1024_80k_cityscapes_20201225_021327-6c50f8b4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_512x1024_80k_cityscapes/fcn_r18-d8_512x1024_80k_cityscapes-20201225_021327.log.json) | +| FCN | R-50-D8 | 512x1024 | 80000 | - | - | 73.61 | 74.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_80k_cityscapes/fcn_r50-d8_512x1024_80k_cityscapes_20200606_113019-03aa804d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_80k_cityscapes/fcn_r50-d8_512x1024_80k_cityscapes_20200606_113019.log.json) | +| FCN | R-101-D8 | 512x1024 | 80000 | - | - | 75.13 | 75.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_80k_cityscapes/fcn_r101-d8_512x1024_80k_cityscapes_20200606_113038-3fb937eb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_80k_cityscapes/fcn_r101-d8_512x1024_80k_cityscapes_20200606_113038.log.json) | +| FCN (FP16) | R-101-D8 | 512x1024 | 80000 | 5.37 | 8.64 | 76.80 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_fp16_512x1024_80k_cityscapes/fcn_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230921-fb13e883.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_fp16_512x1024_80k_cityscapes/fcn_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230921.log.json) | +| FCN | R-18-D8 | 769x769 | 80000 | 1.9 | 6.40 | 70.80 | 73.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r18-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_769x769_80k_cityscapes/fcn_r18-d8_769x769_80k_cityscapes_20201225_021451-9739d1b8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_769x769_80k_cityscapes/fcn_r18-d8_769x769_80k_cityscapes-20201225_021451.log.json) | +| FCN | R-50-D8 | 769x769 | 80000 | - | - | 72.64 | 73.32 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_80k_cityscapes/fcn_r50-d8_769x769_80k_cityscapes_20200606_195749-f5caeabc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_80k_cityscapes/fcn_r50-d8_769x769_80k_cityscapes_20200606_195749.log.json) | +| FCN | R-101-D8 | 769x769 | 80000 | - | - | 75.52 | 76.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_80k_cityscapes/fcn_r101-d8_769x769_80k_cityscapes_20200606_214354-45cbac68.pth) \|
[log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_80k_cityscapes/fcn_r101-d8_769x769_80k_cityscapes_20200606_214354.log.json) | +| FCN | R-18b-D8 | 512x1024 | 80000 | 1.6 | 16.74 | 70.24 | 72.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r18b-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_512x1024_80k_cityscapes/fcn_r18b-d8_512x1024_80k_cityscapes_20201225_230143-92c0f445.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_512x1024_80k_cityscapes/fcn_r18b-d8_512x1024_80k_cityscapes-20201225_230143.log.json) | +| FCN | R-50b-D8 | 512x1024 | 80000 | 5.6 | 4.20 | 75.65 | 77.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r50b-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_512x1024_80k_cityscapes/fcn_r50b-d8_512x1024_80k_cityscapes_20201225_094221-82957416.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_512x1024_80k_cityscapes/fcn_r50b-d8_512x1024_80k_cityscapes-20201225_094221.log.json) | +| FCN | R-101b-D8 | 512x1024 | 80000 | 9.1 | 2.73 | 77.37 | 78.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101b-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_512x1024_80k_cityscapes/fcn_r101b-d8_512x1024_80k_cityscapes_20201226_160213-4543858f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_512x1024_80k_cityscapes/fcn_r101b-d8_512x1024_80k_cityscapes-20201226_160213.log.json) | +| FCN | R-18b-D8 | 769x769 | 80000 | 1.7 | 6.70 | 69.66 | 72.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r18b-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_769x769_80k_cityscapes/fcn_r18b-d8_769x769_80k_cityscapes_20201226_004430-32d504e5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_769x769_80k_cityscapes/fcn_r18b-d8_769x769_80k_cityscapes-20201226_004430.log.json) | +| FCN | R-50b-D8 | 769x769 | 80000 | 6.3 | 1.82 | 73.83 | 76.60 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r50b-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_769x769_80k_cityscapes/fcn_r50b-d8_769x769_80k_cityscapes_20201225_094223-94552d38.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_769x769_80k_cityscapes/fcn_r50b-d8_769x769_80k_cityscapes-20201225_094223.log.json) | +| FCN | R-101b-D8 | 769x769 | 80000 | 10.3 | 1.15 | 77.02 | 78.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101b-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_769x769_80k_cityscapes/fcn_r101b-d8_769x769_80k_cityscapes_20201226_170012-82be37e2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_769x769_80k_cityscapes/fcn_r101b-d8_769x769_80k_cityscapes-20201226_170012.log.json) | +| FCN (D6) | R-50-D16 | 512x1024 | 40000 | 3.4 | 10.22 | 77.06 | 78.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn-d6_r50-d16_4xb2-40k_cityscapes-512x1024.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes/fcn_d6_r50-d16_512x1024_40k_cityscapes_20210305_130133-98d5d1bc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes/fcn_d6_r50-d16_512x1024_40k_cityscapes-20210305_130133.log.json) | +| FCN (D6) | R-50-D16 | 512x1024 | 80000 | - | 10.35 | 77.27 | 78.88 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn-d6_r50-d16_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes/fcn_d6_r50-d16_512x1024_80k_cityscapes_20210306_115604-133c292f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes/fcn_d6_r50-d16_512x1024_80k_cityscapes-20210306_115604.log.json) | +| FCN (D6) | R-50-D16 | 769x769 | 40000 | 3.7 | 4.17 | 76.82 | 78.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn-d6_r50-d16_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes/fcn_d6_r50-d16_769x769_40k_cityscapes_20210305_185744-1aab18ed.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes/fcn_d6_r50-d16_769x769_40k_cityscapes-20210305_185744.log.json) | +| FCN (D6) | R-50-D16 | 769x769 | 80000 | - | 4.15 | 77.04 | 78.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn-d6_r50-d16_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes/fcn_d6_r50-d16_769x769_80k_cityscapes_20210305_200413-109d88eb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes/fcn_d6_r50-d16_769x769_80k_cityscapes-20210305_200413.log.json) | +| FCN (D6) | R-101-D16 | 512x1024 | 40000 | 4.5 | 8.04 | 77.36 | 79.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn-d6_r101-d16_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes/fcn_d6_r101-d16_512x1024_40k_cityscapes_20210305_130337-9cf2b450.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes/fcn_d6_r101-d16_512x1024_40k_cityscapes-20210305_130337.log.json) | +| FCN (D6) | R-101-D16 | 512x1024 | 80000 | - | 8.26 | 78.46 | 80.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn-d6_r101-d16_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes/fcn_d6_r101-d16_512x1024_80k_cityscapes_20210308_102747-cb336445.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes/fcn_d6_r101-d16_512x1024_80k_cityscapes-20210308_102747.log.json) | +| FCN (D6) | R-101-D16 | 769x769 | 40000 | 5.0 | 3.12 | 77.28 | 78.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn-d6_r101-d16_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes/fcn_d6_r101-d16_769x769_40k_cityscapes_20210308_102453-60b114e9.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes/fcn_d6_r101-d16_769x769_40k_cityscapes-20210308_102453.log.json) | +| FCN (D6) | R-101-D16 | 769x769 | 80000 | - | 3.21 | 78.06 | 79.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn-d6_r101-d16_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes/fcn_d6_r101-d16_769x769_80k_cityscapes_20210306_120016-e33adc4f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes/fcn_d6_r101-d16_769x769_80k_cityscapes-20210306_120016.log.json) | +| FCN (D6) | R-50b-D16 | 512x1024 | 80000 | 3.2 | 10.16 | 76.99 | 79.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn-d6_r50b-d16_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b-d16_512x1024_80k_cityscapes/fcn_d6_r50b-d16_512x1024_80k_cityscapes_20210311_125550-6a0b62e9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b_d16_512x1024_80k_cityscapes/fcn_d6_r50b_d16_512x1024_80k_cityscapes-20210311_125550.log.json) | +| FCN (D6) | R-50b-D16 | 769x769 | 80000 | 3.6 | 4.17 | 76.86 | 78.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn-d6_r50b-d16_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes/fcn_d6_r50b-d16_769x769_80k_cityscapes_20210311_131012-d665f231.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b_d16_769x769_80k_cityscapes/fcn_d6_r50b_d16_769x769_80k_cityscapes-20210311_131012.log.json) | +| FCN (D6) | R-101b-D16 | 512x1024 | 80000 | 4.3 | 8.46 | 77.72 | 79.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn-d6_r101b-d16_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b-d16_512x1024_80k_cityscapes/fcn_d6_r101b-d16_512x1024_80k_cityscapes_20210311_144305-3f2eb5b4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b_d16_512x1024_80k_cityscapes/fcn_d6_r101b_d16_512x1024_80k_cityscapes-20210311_144305.log.json) | +| FCN (D6) | R-101b-D16 | 769x769 | 80000 | 4.8 | 3.32 | 77.34 | 78.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn-d6_r101b-d16_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes/fcn_d6_r101b-d16_769x769_80k_cityscapes_20210311_154527-c4d8bfbc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b_d16_769x769_80k_cityscapes/fcn_d6_r101b_d16_769x769_80k_cityscapes-20210311_154527.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| FCN | R-50-D8 | 512x512 | 80000 | 8.5 | 23.49 | 35.94 | 37.94 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_80k_ade20k/fcn_r50-d8_512x512_80k_ade20k_20200614_144016-f8ac5082.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_80k_ade20k/fcn_r50-d8_512x512_80k_ade20k_20200614_144016.log.json) | -| FCN | R-101-D8 | 512x512 | 80000 | 12 | 14.78 | 39.61 | 40.83 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_80k_ade20k/fcn_r101-d8_512x512_80k_ade20k_20200615_014143-bc1809f7.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_80k_ade20k/fcn_r101-d8_512x512_80k_ade20k_20200615_014143.log.json) | -| FCN | R-50-D8 | 512x512 | 160000 | - | - | 36.10 | 38.08 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_160k_ade20k/fcn_r50-d8_512x512_160k_ade20k_20200615_100713-4edbc3b4.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_160k_ade20k/fcn_r50-d8_512x512_160k_ade20k_20200615_100713.log.json) | -| FCN | R-101-D8 | 512x512 | 160000 | - | - | 39.91 | 41.40 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_160k_ade20k/fcn_r101-d8_512x512_160k_ade20k_20200615_105816-fd192bd5.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_160k_ade20k/fcn_r101-d8_512x512_160k_ade20k_20200615_105816.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | R-50-D8 | 512x512 | 80000 | 8.5 | 23.49 | 35.94 | 37.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_80k_ade20k/fcn_r50-d8_512x512_80k_ade20k_20200614_144016-f8ac5082.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_80k_ade20k/fcn_r50-d8_512x512_80k_ade20k_20200614_144016.log.json) | +| FCN | R-101-D8 | 512x512 | 80000 | 12 | 14.78 | 39.61 | 40.83 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_80k_ade20k/fcn_r101-d8_512x512_80k_ade20k_20200615_014143-bc1809f7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_80k_ade20k/fcn_r101-d8_512x512_80k_ade20k_20200615_014143.log.json) | +| FCN | R-50-D8 | 512x512 | 160000 | - | - | 36.10 | 38.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_160k_ade20k/fcn_r50-d8_512x512_160k_ade20k_20200615_100713-4edbc3b4.pth) \|
[log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_160k_ade20k/fcn_r50-d8_512x512_160k_ade20k_20200615_100713.log.json) | +| FCN | R-101-D8 | 512x512 | 160000 | - | - | 39.91 | 41.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_160k_ade20k/fcn_r101-d8_512x512_160k_ade20k_20200615_105816-fd192bd5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_160k_ade20k/fcn_r101-d8_512x512_160k_ade20k_20200615_105816.log.json) | ### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| FCN | R-50-D8 | 512x512 | 20000 | 5.7 | 23.28 | 67.08 | 69.94 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_20k_voc12aug/fcn_r50-d8_512x512_20k_voc12aug_20200617_010715-52dc5306.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_20k_voc12aug/fcn_r50-d8_512x512_20k_voc12aug_20200617_010715.log.json) | -| FCN | R-101-D8 | 512x512 | 20000 | 9.2 | 14.81 | 71.16 | 73.57 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_20k_voc12aug/fcn_r101-d8_512x512_20k_voc12aug_20200617_010842-0bb4e798.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_20k_voc12aug/fcn_r101-d8_512x512_20k_voc12aug_20200617_010842.log.json) | -| FCN | R-50-D8 | 512x512 | 40000 | - | - | 66.97 | 69.04 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_40k_voc12aug/fcn_r50-d8_512x512_40k_voc12aug_20200613_161222-5e2dbf40.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_40k_voc12aug/fcn_r50-d8_512x512_40k_voc12aug_20200613_161222.log.json) | -| FCN | R-101-D8 | 512x512 | 40000 | - | - | 69.91 | 72.38 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_40k_voc12aug/fcn_r101-d8_512x512_40k_voc12aug_20200613_161240-4c8bcefd.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_40k_voc12aug/fcn_r101-d8_512x512_40k_voc12aug_20200613_161240.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | R-50-D8 | 
512x512 | 20000 | 5.7 | 23.28 | 67.08 | 69.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r50-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_20k_voc12aug/fcn_r50-d8_512x512_20k_voc12aug_20200617_010715-52dc5306.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_20k_voc12aug/fcn_r50-d8_512x512_20k_voc12aug_20200617_010715.log.json) | +| FCN | R-101-D8 | 512x512 | 20000 | 9.2 | 14.81 | 71.16 | 73.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_20k_voc12aug/fcn_r101-d8_512x512_20k_voc12aug_20200617_010842-0bb4e798.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_20k_voc12aug/fcn_r101-d8_512x512_20k_voc12aug_20200617_010842.log.json) | +| FCN | R-50-D8 | 512x512 | 40000 | - | - | 66.97 | 69.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r50-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_40k_voc12aug/fcn_r50-d8_512x512_40k_voc12aug_20200613_161222-5e2dbf40.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_40k_voc12aug/fcn_r50-d8_512x512_40k_voc12aug_20200613_161222.log.json) | +| FCN | R-101-D8 | 512x512 | 40000 | - | - | 69.91 | 72.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_40k_voc12aug/fcn_r101-d8_512x512_40k_voc12aug_20200613_161240-4c8bcefd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_40k_voc12aug/fcn_r101-d8_512x512_40k_voc12aug_20200613_161240.log.json) | + +### Pascal Context + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | R-101-D8 | 480x480 | 40000 | - | 9.93 | 44.43 | 45.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb4-40k_pascal-context-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context/fcn_r101-d8_480x480_40k_pascal_context_20210421_154757-b5e97937.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context/fcn_r101-d8_480x480_40k_pascal_context-20210421_154757.log.json) | +| FCN | R-101-D8 | 480x480 | 80000 | - | - | 44.13 | 45.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb4-80k_pascal-context-480x480.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context/fcn_r101-d8_480x480_80k_pascal_context_20210421_163310-4711813f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context/fcn_r101-d8_480x480_80k_pascal_context-20210421_163310.log.json) | + +### Pascal Context 59 + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | R-101-D8 | 480x480 | 40000 | - | - | 48.42 | 50.4 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb4-40k_pascal-context-59-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context_59/fcn_r101-d8_480x480_40k_pascal_context_59_20210415_230724-8cf83682.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context_59/fcn_r101-d8_480x480_40k_pascal_context_59-20210415_230724.log.json) | +| FCN | R-101-D8 | 480x480 | 80000 | - | - | 49.35 | 51.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/fcn/fcn_r101-d8_4xb4-80k_pascal-context-59-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context_59/fcn_r101-d8_480x480_80k_pascal_context_59_20210416_110804-9a6f2c94.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context_59/fcn_r101-d8_480x480_80k_pascal_context_59-20210416_110804.log.json) | + +Note: + +- `FP16` means Mixed Precision (FP16) is adopted in training. +- `FCN D6` means the dilation rate of the convolution operator in FCN is 6.
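The tables above pair each config under `configs/fcn/` with a released checkpoint. As a quick orientation for reviewers, the sketch below shows how one such pair could be exercised with the MMSegmentation 1.x Python API; it is illustrative only and not part of this diff, and the demo image path and output filename are placeholders.

```python
# Minimal sketch: run the R-50-D8 Cityscapes checkpoint from the first table.
# Assumes MMSegmentation dev-1.x and a CUDA device; paths marked as placeholders.
from mmseg.apis import inference_model, init_model, show_result_pyplot

config_file = 'configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-512x1024.py'
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth'

# Build the model from the config and load the released weights.
model = init_model(config_file, checkpoint_file, device='cuda:0')

# Run inference on a single image and save the colored prediction.
result = inference_model(model, 'demo/demo.png')  # placeholder input image
show_result_pyplot(model, 'demo/demo.png', result, show=False,
                   out_file='fcn_pred.png')  # placeholder output path
```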
diff --git a/configs/fcn/fcn-d6_r101-d16_4xb2-40k_cityscapes-512x1024.py b/configs/fcn/fcn-d6_r101-d16_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..8f2cd02b00 --- /dev/null +++ b/configs/fcn/fcn-d6_r101-d16_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './fcn-d6_r50-d16_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn-d6_r101-d16_4xb2-40k_cityscapes-769x769.py b/configs/fcn/fcn-d6_r101-d16_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..4782b30377 --- /dev/null +++ b/configs/fcn/fcn-d6_r101-d16_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './fcn-d6_r50-d16_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn-d6_r101-d16_4xb2-80k_cityscapes-512x1024.py b/configs/fcn/fcn-d6_r101-d16_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..5f654b4bbd --- /dev/null +++ b/configs/fcn/fcn-d6_r101-d16_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './fcn-d6_r50-d16_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn-d6_r101-d16_4xb2-80k_cityscapes-769x769.py b/configs/fcn/fcn-d6_r101-d16_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..91eca1c52e --- /dev/null +++ b/configs/fcn/fcn-d6_r101-d16_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './fcn-d6_r50-d16_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn-d6_r101b-d16_4xb2-80k_cityscapes-512x1024.py b/configs/fcn/fcn-d6_r101b-d16_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..62e6127799 --- /dev/null +++ b/configs/fcn/fcn-d6_r101b-d16_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,4 @@ +_base_ = './fcn-d6_r50-d16_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/configs/fcn/fcn-d6_r101b-d16_4xb2-80k_cityscapes-769x769.py b/configs/fcn/fcn-d6_r101b-d16_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..1b8d24799e --- /dev/null +++ b/configs/fcn/fcn-d6_r101b-d16_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,4 @@ +_base_ = './fcn-d6_r50b-d16_4xb2-80k_cityscapes-769x769.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/configs/fcn/fcn-d6_r50-d16_4xb2-40k_cityscapes-512x1024.py b/configs/fcn/fcn-d6_r50-d16_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..9a1efb41d5 --- /dev/null +++ b/configs/fcn/fcn-d6_r50-d16_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict(dilations=(1, 1, 1, 2), strides=(1, 2, 2, 1)), + decode_head=dict(dilation=6), + auxiliary_head=dict(dilation=6)) diff --git a/configs/fcn/fcn-d6_r50-d16_4xb2-40k_cityscapes-769x769.py b/configs/fcn/fcn-d6_r50-d16_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..2b2a6f4537 --- /dev/null +++ b/configs/fcn/fcn-d6_r50-d16_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,13 @@ +_base_ = [ + 
'../_base_/models/fcn_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict(dilations=(1, 1, 1, 2), strides=(1, 2, 2, 1)), + decode_head=dict(align_corners=True, dilation=6), + auxiliary_head=dict(align_corners=True, dilation=6), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/fcn/fcn-d6_r50-d16_4xb2-80k_cityscapes-512x1024.py b/configs/fcn/fcn-d6_r50-d16_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..e6cca006f3 --- /dev/null +++ b/configs/fcn/fcn-d6_r50-d16_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict(dilations=(1, 1, 1, 2), strides=(1, 2, 2, 1)), + decode_head=dict(dilation=6), + auxiliary_head=dict(dilation=6)) diff --git a/configs/fcn/fcn-d6_r50-d16_4xb2-80k_cityscapes-769x769.py b/configs/fcn/fcn-d6_r50-d16_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..990ff9c58e --- /dev/null +++ b/configs/fcn/fcn-d6_r50-d16_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict(dilations=(1, 1, 1, 2), strides=(1, 2, 2, 1)), + decode_head=dict(align_corners=True, dilation=6), + auxiliary_head=dict(align_corners=True, dilation=6), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/fcn/fcn-d6_r50b-d16_4xb2-80k_cityscapes-512x1024.py b/configs/fcn/fcn-d6_r50b-d16_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..7d470a50be --- /dev/null +++ b/configs/fcn/fcn-d6_r50b-d16_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './fcn-d6_r50-d16_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/configs/fcn/fcn-d6_r50b-d16_4xb2-80k_cityscapes-769x769.py b/configs/fcn/fcn-d6_r50b-d16_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..e9093ea2dc --- /dev/null +++ b/configs/fcn/fcn-d6_r50b-d16_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './fcn-d6_r50-d16_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/configs/fcn/fcn.yml b/configs/fcn/fcn.yml new file mode 100644 index 0000000000..71c4b2d122 --- /dev/null +++ b/configs/fcn/fcn.yml @@ -0,0 +1,827 @@ +Collections: +- Name: FCN + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + - Pascal Context + - Pascal Context 59 + Paper: + URL: https://arxiv.org/abs/1411.4038 + Title: Fully Convolutional Networks for Semantic Segmentation + README: configs/fcn/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/fcn_head.py#L11 + Version: v0.17.0 + Converted From: + Code: https://github.com/BVLC/caffe/wiki/Model-Zoo#fcn +Models: +- Name: 
fcn_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 239.81 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 72.25 + mIoU(ms+flip): 73.36 + Config: configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth +- Name: fcn_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 375.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.45 + mIoU(ms+flip): 76.58 + Config: configs/fcn/fcn_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_40k_cityscapes/fcn_r101-d8_512x1024_40k_cityscapes_20200604_181852-a883d3a1.pth +- Name: fcn_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 555.56 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 71.47 + mIoU(ms+flip): 72.54 + Config: configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_40k_cityscapes/fcn_r50-d8_769x769_40k_cityscapes_20200606_113104-977b5d02.pth +- Name: fcn_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 840.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.93 + mIoU(ms+flip): 75.14 + Config: configs/fcn/fcn_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_40k_cityscapes/fcn_r101-d8_769x769_40k_cityscapes_20200606_113208-7d4ab69c.pth +- Name: fcn_r18-d8_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-18-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 68.26 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 71.11 + mIoU(ms+flip): 72.91 + Config: configs/fcn/fcn_r18-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_512x1024_80k_cityscapes/fcn_r18-d8_512x1024_80k_cityscapes_20201225_021327-6c50f8b4.pth +- Name: fcn_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.61 + mIoU(ms+flip): 74.24 + Config: configs/fcn/fcn_r50-d8_4xb2-80k_cityscapes-512x1024.py + 
Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_80k_cityscapes/fcn_r50-d8_512x1024_80k_cityscapes_20200606_113019-03aa804d.pth +- Name: fcn_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.13 + mIoU(ms+flip): 75.94 + Config: configs/fcn/fcn_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_80k_cityscapes/fcn_r101-d8_512x1024_80k_cityscapes_20200606_113038-3fb937eb.pth +- Name: fcn_r101-d8_4xb2-amp-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 115.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (512,1024) + Training Memory (GB): 5.37 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.8 + Config: configs/fcn/fcn_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_fp16_512x1024_80k_cityscapes/fcn_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230921-fb13e883.pth +- Name: fcn_r18-d8_4xb2-80k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-18-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 156.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 1.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 70.8 + mIoU(ms+flip): 73.16 + Config: configs/fcn/fcn_r18-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_769x769_80k_cityscapes/fcn_r18-d8_769x769_80k_cityscapes_20201225_021451-9739d1b8.pth +- Name: fcn_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 72.64 + mIoU(ms+flip): 73.32 + Config: configs/fcn/fcn_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_80k_cityscapes/fcn_r50-d8_769x769_80k_cityscapes_20200606_195749-f5caeabc.pth +- Name: fcn_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.52 + mIoU(ms+flip): 76.61 + Config: configs/fcn/fcn_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_80k_cityscapes/fcn_r101-d8_769x769_80k_cityscapes_20200606_214354-45cbac68.pth +- Name: fcn_r18b-d8_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-18b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 59.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 70.24 + mIoU(ms+flip): 72.77 + Config: configs/fcn/fcn_r18b-d8_4xb2-80k_cityscapes-512x1024.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_512x1024_80k_cityscapes/fcn_r18b-d8_512x1024_80k_cityscapes_20201225_230143-92c0f445.pth +- Name: fcn_r50b-d8_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-50b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 238.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.65 + mIoU(ms+flip): 77.59 + Config: configs/fcn/fcn_r50b-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_512x1024_80k_cityscapes/fcn_r50b-d8_512x1024_80k_cityscapes_20201225_094221-82957416.pth +- Name: fcn_r101b-d8_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-101b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 366.3 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.37 + mIoU(ms+flip): 78.77 + Config: configs/fcn/fcn_r101b-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_512x1024_80k_cityscapes/fcn_r101b-d8_512x1024_80k_cityscapes_20201226_160213-4543858f.pth +- Name: fcn_r18b-d8_4xb2-80k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-18b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 149.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 69.66 + mIoU(ms+flip): 72.07 + Config: configs/fcn/fcn_r18b-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_769x769_80k_cityscapes/fcn_r18b-d8_769x769_80k_cityscapes_20201226_004430-32d504e5.pth +- Name: fcn_r50b-d8_4xb2-80k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-50b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 549.45 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.83 + mIoU(ms+flip): 76.6 + Config: configs/fcn/fcn_r50b-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_769x769_80k_cityscapes/fcn_r50b-d8_769x769_80k_cityscapes_20201225_094223-94552d38.pth +- Name: fcn_r101b-d8_4xb2-80k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-101b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 869.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.02 + mIoU(ms+flip): 78.67 + Config: configs/fcn/fcn_r101b-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_769x769_80k_cityscapes/fcn_r101b-d8_769x769_80k_cityscapes_20201226_170012-82be37e2.pth +- Name: fcn-d6_r50-d16_4xb2-40k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-50-D16 + crop size: (512,1024) + lr schd: 
40000 + inference time (ms/im): + - value: 97.85 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.06 + mIoU(ms+flip): 78.85 + Config: configs/fcn/fcn-d6_r50-d16_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes/fcn_d6_r50-d16_512x1024_40k_cityscapes_20210305_130133-98d5d1bc.pth +- Name: fcn-d6_r50-d16_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-50-D16 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 96.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.27 + mIoU(ms+flip): 78.88 + Config: configs/fcn/fcn-d6_r50-d16_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes/fcn_d6_r50-d16_512x1024_80k_cityscapes_20210306_115604-133c292f.pth +- Name: fcn-d6_r50-d16_4xb2-40k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-50-D16 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 239.81 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 3.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.82 + mIoU(ms+flip): 78.22 + Config: configs/fcn/fcn-d6_r50-d16_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes/fcn_d6_r50-d16_769x769_40k_cityscapes_20210305_185744-1aab18ed.pth +- Name: fcn-d6_r50-d16_4xb2-80k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-50-D16 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 240.96 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.04 + mIoU(ms+flip): 78.4 + Config: configs/fcn/fcn-d6_r50-d16_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes/fcn_d6_r50-d16_769x769_80k_cityscapes_20210305_200413-109d88eb.pth +- Name: fcn-d6_r101-d16_4xb2-40k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-101-D16 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 124.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 4.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.36 + mIoU(ms+flip): 79.18 + Config: configs/fcn/fcn-d6_r101-d16_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes/fcn_d6_r101-d16_512x1024_40k_cityscapes_20210305_130337-9cf2b450.pth +- Name: fcn-d6_r101-d16_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-101-D16 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 121.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.46 + mIoU(ms+flip): 80.42 + Config: 
configs/fcn/fcn-d6_r101-d16_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes/fcn_d6_r101-d16_512x1024_80k_cityscapes_20210308_102747-cb336445.pth +- Name: fcn-d6_r101-d16_4xb2-40k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-101-D16 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 320.51 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 5.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.28 + mIoU(ms+flip): 78.95 + Config: configs/fcn/fcn-d6_r101-d16_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes/fcn_d6_r101-d16_769x769_40k_cityscapes_20210308_102453-60b114e9.pth +- Name: fcn-d6_r101-d16_4xb2-80k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-101-D16 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 311.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.06 + mIoU(ms+flip): 79.58 + Config: configs/fcn/fcn-d6_r101-d16_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes/fcn_d6_r101-d16_769x769_80k_cityscapes_20210306_120016-e33adc4f.pth +- Name: fcn-d6_r50b-d16_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-50b-D16 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 98.43 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.99 + mIoU(ms+flip): 79.03 + Config: configs/fcn/fcn-d6_r50b-d16_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b-d16_512x1024_80k_cityscapes/fcn_d6_r50b-d16_512x1024_80k_cityscapes_20210311_125550-6a0b62e9.pth +- Name: fcn-d6_r50b-d16_4xb2-80k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-50b-D16 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 239.81 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 3.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.86 + mIoU(ms+flip): 78.52 + Config: configs/fcn/fcn-d6_r50b-d16_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes/fcn_d6_r50b-d16_769x769_80k_cityscapes_20210311_131012-d665f231.pth +- Name: fcn-d6_r101b-d16_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: R-101b-D16 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 118.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 4.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.72 + mIoU(ms+flip): 79.53 + Config: configs/fcn/fcn-d6_r101b-d16_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b-d16_512x1024_80k_cityscapes/fcn_d6_r101b-d16_512x1024_80k_cityscapes_20210311_144305-3f2eb5b4.pth +- Name: 
fcn-d6_r101b-d16_4xb2-80k_cityscapes-769x769 + In Collection: FCN + Metadata: + backbone: R-101b-D16 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 301.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 4.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.34 + mIoU(ms+flip): 78.91 + Config: configs/fcn/fcn-d6_r101b-d16_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes/fcn_d6_r101b-d16_769x769_80k_cityscapes_20210311_154527-c4d8bfbc.pth +- Name: fcn_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 42.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 35.94 + mIoU(ms+flip): 37.94 + Config: configs/fcn/fcn_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_80k_ade20k/fcn_r50-d8_512x512_80k_ade20k_20200614_144016-f8ac5082.pth +- Name: fcn_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 67.66 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 39.61 + mIoU(ms+flip): 40.83 + Config: configs/fcn/fcn_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_80k_ade20k/fcn_r101-d8_512x512_80k_ade20k_20200615_014143-bc1809f7.pth +- Name: fcn_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 36.1 + mIoU(ms+flip): 38.08 + Config: configs/fcn/fcn_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_160k_ade20k/fcn_r50-d8_512x512_160k_ade20k_20200615_100713-4edbc3b4.pth +- Name: fcn_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 39.91 + mIoU(ms+flip): 41.4 + Config: configs/fcn/fcn_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_160k_ade20k/fcn_r101-d8_512x512_160k_ade20k_20200615_105816-fd192bd5.pth +- Name: fcn_r50-d8_4xb4-20k_voc12aug-512x512 + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 42.96 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.7 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 67.08 + mIoU(ms+flip): 69.94 + Config: configs/fcn/fcn_r50-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_20k_voc12aug/fcn_r50-d8_512x512_20k_voc12aug_20200617_010715-52dc5306.pth +- Name: fcn_r101-d8_4xb4-20k_voc12aug-512x512 + In Collection: FCN 
+ Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 67.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 71.16 + mIoU(ms+flip): 73.57 + Config: configs/fcn/fcn_r101-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_20k_voc12aug/fcn_r101-d8_512x512_20k_voc12aug_20200617_010842-0bb4e798.pth +- Name: fcn_r50-d8_4xb4-40k_voc12aug-512x512 + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 66.97 + mIoU(ms+flip): 69.04 + Config: configs/fcn/fcn_r50-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_40k_voc12aug/fcn_r50-d8_512x512_40k_voc12aug_20200613_161222-5e2dbf40.pth +- Name: fcn_r101-d8_4xb4-40k_voc12aug-512x512 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 69.91 + mIoU(ms+flip): 72.38 + Config: configs/fcn/fcn_r101-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_40k_voc12aug/fcn_r101-d8_512x512_40k_voc12aug_20200613_161240-4c8bcefd.pth +- Name: fcn_r101-d8_4xb4-40k_pascal-context-480x480 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + inference time (ms/im): + - value: 100.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (480,480) + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 44.43 + mIoU(ms+flip): 45.63 + Config: configs/fcn/fcn_r101-d8_4xb4-40k_pascal-context-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context/fcn_r101-d8_480x480_40k_pascal_context_20210421_154757-b5e97937.pth +- Name: fcn_r101-d8_4xb4-80k_pascal-context-480x480 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 44.13 + mIoU(ms+flip): 45.26 + Config: configs/fcn/fcn_r101-d8_4xb4-80k_pascal-context-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context/fcn_r101-d8_480x480_80k_pascal_context_20210421_163310-4711813f.pth +- Name: fcn_r101-d8_4xb4-40k_pascal-context-59-480x480 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 48.42 + mIoU(ms+flip): 50.4 + Config: configs/fcn/fcn_r101-d8_4xb4-40k_pascal-context-59-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context_59/fcn_r101-d8_480x480_40k_pascal_context_59_20210415_230724-8cf83682.pth +- Name: fcn_r101-d8_4xb4-80k_pascal-context-59-480x480 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 49.35 + mIoU(ms+flip): 51.38 + Config: 
configs/fcn/fcn_r101-d8_4xb4-80k_pascal-context-59-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context_59/fcn_r101-d8_480x480_80k_pascal_context_59_20210416_110804-9a6f2c94.pth diff --git a/configs/fcn/fcn_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/fcn/fcn_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..b3ec0a742c --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/fcn/fcn_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..1f83fe2078 --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/fcn/fcn_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..4527b3b8a0 --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/fcn/fcn_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..6ce112484d --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py b/configs/fcn/fcn_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..b4d94878c8 --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py @@ -0,0 +1,6 @@ +_base_ = './fcn_r101-d8_4xb2-80k_cityscapes-512x1024.py' +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005), + loss_scale=512.) 
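The `fcn_r101-d8_4xb2-amp-80k` variant above swaps out the whole optimizer wrapper rather than merging into it: `_delete_=True` tells the config system to discard the `optim_wrapper` inherited from the base schedule, and `AmpOptimWrapper` turns on mixed-precision training with a static loss scale of 512. A minimal sketch of how the merged result can be inspected, assuming mmengine is installed and the command runs from the repository root:

```python
# Sketch only: show how `_base_` inheritance and `_delete_=True` resolve.
from mmengine import Config

cfg = Config.fromfile(
    'configs/fcn/fcn_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py')

# The SGD OptimWrapper defined in the base schedule is replaced wholesale,
# so only the keys written in the AMP config survive.
print(cfg.optim_wrapper['type'])        # AmpOptimWrapper
print(cfg.optim_wrapper['loss_scale'])  # 512.0

# Keys without `_delete_` are still merged as usual, e.g. the backbone depth
# set by the r101 base config.
print(cfg.model['backbone']['depth'])   # 101
```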
diff --git a/configs/fcn/fcn_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/fcn/fcn_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..b1f5c5c785 --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_4xb4-20k_voc12aug-512x512.py b/configs/fcn/fcn_r101-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..61ee96f94e --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_4xb4-40k_pascal-context-480x480.py b/configs/fcn/fcn_r101-d8_4xb4-40k_pascal-context-480x480.py new file mode 100644 index 0000000000..1161193adb --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb4-40k_pascal-context-480x480.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb4-40k_pascal-context-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_4xb4-40k_pascal-context-59-480x480.py b/configs/fcn/fcn_r101-d8_4xb4-40k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..f3a6dbc9ab --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb4-40k_pascal-context-59-480x480.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb4-40k_pascal-context-59-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_4xb4-40k_voc12aug-512x512.py b/configs/fcn/fcn_r101-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..b68b6e0407 --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb4-40k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/fcn/fcn_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..3facce30dc --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_4xb4-80k_pascal-context-480x480.py b/configs/fcn/fcn_r101-d8_4xb4-80k_pascal-context-480x480.py new file mode 100644 index 0000000000..1161193adb --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb4-80k_pascal-context-480x480.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb4-40k_pascal-context-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_4xb4-80k_pascal-context-59-480x480.py b/configs/fcn/fcn_r101-d8_4xb4-80k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..cebe33082a --- /dev/null +++ b/configs/fcn/fcn_r101-d8_4xb4-80k_pascal-context-59-480x480.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb4-80k_pascal-context-59-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_512x1024_40k_cityscapes.py b/configs/fcn/fcn_r101-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 7918dd10d0..0000000000 --- a/configs/fcn/fcn_r101-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = 
'./fcn_r50-d8_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_512x1024_80k_cityscapes.py b/configs/fcn/fcn_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 528110dc73..0000000000 --- a/configs/fcn/fcn_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_512x512_160k_ade20k.py b/configs/fcn/fcn_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index 1bf6780f2c..0000000000 --- a/configs/fcn/fcn_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fcn_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_512x512_20k_voc12aug.py b/configs/fcn/fcn_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 09a5fe5468..0000000000 --- a/configs/fcn/fcn_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fcn_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_512x512_40k_voc12aug.py b/configs/fcn/fcn_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index eafefaa675..0000000000 --- a/configs/fcn/fcn_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fcn_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py b/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index 6d0294530f..0000000000 --- a/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fcn_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py b/configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 6b4cc57129..0000000000 --- a/configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fcn_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101-d8_769x769_80k_cityscapes.py b/configs/fcn/fcn_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 3503c76935..0000000000 --- a/configs/fcn/fcn_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fcn_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/fcn/fcn_r101b-d8_4xb2-80k_cityscapes-512x1024.py b/configs/fcn/fcn_r101b-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..e53751b144 --- /dev/null +++ b/configs/fcn/fcn_r101b-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,4 @@ +_base_ = './fcn_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/configs/fcn/fcn_r101b-d8_4xb2-80k_cityscapes-769x769.py b/configs/fcn/fcn_r101b-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..daa6502610 --- /dev/null +++ b/configs/fcn/fcn_r101b-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,4 @@ +_base_ = './fcn_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = 
dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/configs/fcn/fcn_r18-d8_4xb2-80k_cityscapes-512x1024.py b/configs/fcn/fcn_r18-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..4073148122 --- /dev/null +++ b/configs/fcn/fcn_r18-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = './fcn_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/fcn/fcn_r18-d8_4xb2-80k_cityscapes-769x769.py b/configs/fcn/fcn_r18-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..2c1d2b6df0 --- /dev/null +++ b/configs/fcn/fcn_r18-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,9 @@ +_base_ = './fcn_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/fcn/fcn_r18b-d8_4xb2-80k_cityscapes-512x1024.py b/configs/fcn/fcn_r18b-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..08ab467573 --- /dev/null +++ b/configs/fcn/fcn_r18b-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = './fcn_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/fcn/fcn_r18b-d8_4xb2-80k_cityscapes-769x769.py b/configs/fcn/fcn_r18b-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..c591ebe972 --- /dev/null +++ b/configs/fcn/fcn_r18b-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,9 @@ +_base_ = './fcn_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..4fba72333d --- /dev/null +++ b/configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..d57afe1c22 --- /dev/null +++ b/configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/fcn/fcn_r50-d8_4xb2-80k_cityscapes-512x1024.py 
b/configs/fcn/fcn_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..6b1fdae809 --- /dev/null +++ b/configs/fcn/fcn_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/fcn/fcn_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/fcn/fcn_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..8a713fd309 --- /dev/null +++ b/configs/fcn/fcn_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/fcn/fcn_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/fcn/fcn_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..258b9fb579 --- /dev/null +++ b/configs/fcn/fcn_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/fcn/fcn_r50-d8_4xb4-20k_voc12aug-512x512.py b/configs/fcn/fcn_r50-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..eac86d5389 --- /dev/null +++ b/configs/fcn/fcn_r50-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/fcn/fcn_r50-d8_4xb4-40k_pascal-context-480x480.py b/configs/fcn/fcn_r50-d8_4xb4-40k_pascal-context-480x480.py new file mode 100644 index 0000000000..d99cb0dc36 --- /dev/null +++ b/configs/fcn/fcn_r50-d8_4xb4-40k_pascal-context-480x480.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_context.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/fcn/fcn_r50-d8_4xb4-40k_pascal-context-59-480x480.py b/configs/fcn/fcn_r50-d8_4xb4-40k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..64c9410521 --- /dev/null +++ b/configs/fcn/fcn_r50-d8_4xb4-40k_pascal-context-59-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + 
'../_base_/models/fcn_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/fcn/fcn_r50-d8_4xb4-40k_voc12aug-512x512.py b/configs/fcn/fcn_r50-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..42edb46e94 --- /dev/null +++ b/configs/fcn/fcn_r50-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/fcn/fcn_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/fcn/fcn_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..099f6affa5 --- /dev/null +++ b/configs/fcn/fcn_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/fcn/fcn_r50-d8_4xb4-80k_pascal-context-480x480.py b/configs/fcn/fcn_r50-d8_4xb4-80k_pascal-context-480x480.py new file mode 100644 index 0000000000..1eeafb8a53 --- /dev/null +++ b/configs/fcn/fcn_r50-d8_4xb4-80k_pascal-context-480x480.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_context.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/fcn/fcn_r50-d8_4xb4-80k_pascal-context-59-480x480.py b/configs/fcn/fcn_r50-d8_4xb4-80k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..c11a9bbb6d --- /dev/null +++ b/configs/fcn/fcn_r50-d8_4xb4-80k_pascal-context-59-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py 
b/configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 401c6ea733..0000000000 --- a/configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/fcn/fcn_r50-d8_512x1024_80k_cityscapes.py b/configs/fcn/fcn_r50-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 990a085eda..0000000000 --- a/configs/fcn/fcn_r50-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/fcn/fcn_r50-d8_512x512_160k_ade20k.py b/configs/fcn/fcn_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index db272d6b5b..0000000000 --- a/configs/fcn/fcn_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) -test_cfg = dict(mode='whole') diff --git a/configs/fcn/fcn_r50-d8_512x512_20k_voc12aug.py b/configs/fcn/fcn_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 17206a5171..0000000000 --- a/configs/fcn/fcn_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/fcn/fcn_r50-d8_512x512_40k_voc12aug.py b/configs/fcn/fcn_r50-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 8cec429c3e..0000000000 --- a/configs/fcn/fcn_r50-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py b/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 64997c26f7..0000000000 --- a/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) -test_cfg = dict(mode='whole') diff --git a/configs/fcn/fcn_r50-d8_769x769_40k_cityscapes.py b/configs/fcn/fcn_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 9a91f9cc96..0000000000 --- a/configs/fcn/fcn_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/fcn/fcn_r50-d8_769x769_80k_cityscapes.py b/configs/fcn/fcn_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 
bbde29e8e9..0000000000 --- a/configs/fcn/fcn_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/fcn/fcn_r50b-d8_4xb2-80k_cityscapes-512x1024.py b/configs/fcn/fcn_r50b-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..44821fd7d3 --- /dev/null +++ b/configs/fcn/fcn_r50b-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/configs/fcn/fcn_r50b-d8_4xb2-80k_cityscapes-769x769.py b/configs/fcn/fcn_r50b-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..a85b39197e --- /dev/null +++ b/configs/fcn/fcn_r50b-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/configs/gcnet/README.md b/configs/gcnet/README.md index 44c4a40511..fa37f76468 100644 --- a/configs/gcnet/README.md +++ b/configs/gcnet/README.md @@ -1,7 +1,30 @@ -# GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond +# GCNet + +[GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond](https://arxiv.org/abs/1904.11492) ## Introduction -``` + + + +Official Repo + +Code Snippet + +## Abstract + + + +The Non-Local Network (NLNet) presents a pioneering approach for capturing long-range dependencies, via aggregating query-specific global context to each query position. However, through a rigorous empirical analysis, we have found that the global contexts modeled by non-local network are almost the same for different query positions within an image. In this paper, we take advantage of this finding to create a simplified network based on a query-independent formulation, which maintains the accuracy of NLNet but with significantly less computation. We further observe that this simplified design shares similar structure with Squeeze-Excitation Network (SENet). Hence we unify them into a three-step general framework for global context modeling. Within the general framework, we design a better instantiation, called the global context (GC) block, which is lightweight and can effectively model the global context. The lightweight property allows us to apply it for multiple layers in a backbone network to construct a global context network (GCNet), which generally outperforms both simplified NLNet and SENet on major benchmarks for various recognition tasks. The code and configurations are released at [this https URL](https://github.com/xvjiarui/GCNet). + + + +
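The abstract describes the GC block as a three-step pipeline: query-independent context modeling, a bottleneck transform, and broadcast additive fusion. The snippet below is a self-contained illustration of that pipeline only; it is not the `GCHead`/mmcv implementation shipped in this repository, and the class and parameter names are invented for the example.

```python
# Illustrative GC block: context modeling -> transform -> fusion.
import torch
import torch.nn as nn


class GlobalContextBlock(nn.Module):

    def __init__(self, channels: int, reduction: int = 16):
        super().__init__()
        mid = max(channels // reduction, 1)
        # 1) context modeling: one attention map shared by every query position
        self.attn = nn.Conv2d(channels, 1, kernel_size=1)
        # 2) transform: bottleneck with LayerNorm, as described in the paper
        self.transform = nn.Sequential(
            nn.Conv2d(channels, mid, kernel_size=1),
            nn.LayerNorm([mid, 1, 1]),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid, channels, kernel_size=1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        n, c, h, w = x.shape
        # softmax over all spatial positions -> (N, 1, H*W)
        weights = self.attn(x).view(n, 1, h * w).softmax(dim=-1)
        # weighted sum of features -> global context vector (N, C, 1, 1)
        context = torch.bmm(weights, x.view(n, c, h * w).transpose(1, 2))
        context = context.transpose(1, 2).view(n, c, 1, 1)
        # 3) fusion: broadcast-add the transformed context to every position
        return x + self.transform(context)


feat = torch.randn(2, 64, 32, 32)
out = GlobalContextBlock(64)(feat)
assert out.shape == feat.shape
```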
+ +## Citation + +```bibtex @inproceedings{cao2019gcnet, title={Gcnet: Non-local networks meet squeeze-excitation networks and beyond}, author={Cao, Yue and Xu, Jiarui and Lin, Stephen and Wei, Fangyun and Hu, Han}, @@ -14,29 +37,32 @@ ## Results and models ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| GCNet | R-50-D8 | 512x1024 | 40000 | 5.8 | 3.93 | 77.69 | 78.56 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes/gcnet_r50-d8_512x1024_40k_cityscapes_20200618_074436-4b0fd17b.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes/gcnet_r50-d8_512x1024_40k_cityscapes_20200618_074436.log.json) | -| GCNet | R-101-D8 | 512x1024 | 40000 | 9.2 | 2.61 | 78.28 | 79.34 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes/gcnet_r101-d8_512x1024_40k_cityscapes_20200618_074436-5e62567f.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes/gcnet_r101-d8_512x1024_40k_cityscapes_20200618_074436.log.json) | -| GCNet | R-50-D8 | 769x769 | 40000 | 6.5 | 1.67 | 78.12 | 80.09 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_40k_cityscapes/gcnet_r50-d8_769x769_40k_cityscapes_20200618_182814-a26f4471.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_40k_cityscapes/gcnet_r50-d8_769x769_40k_cityscapes_20200618_182814.log.json) | -| GCNet | R-101-D8 | 769x769 | 40000 | 10.5 | 1.13 | 78.95 | 80.71 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_40k_cityscapes/gcnet_r101-d8_769x769_40k_cityscapes_20200619_092550-ca4f0a84.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_40k_cityscapes/gcnet_r101-d8_769x769_40k_cityscapes_20200619_092550.log.json) | -| GCNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.48 | 80.01 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes/gcnet_r50-d8_512x1024_80k_cityscapes_20200618_074450-ef8f069b.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes/gcnet_r50-d8_512x1024_80k_cityscapes_20200618_074450.log.json) | -| GCNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.03 | 79.84 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes/gcnet_r101-d8_512x1024_80k_cityscapes_20200618_074450-778ebf69.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes/gcnet_r101-d8_512x1024_80k_cityscapes_20200618_074450.log.json) | -| GCNet | R-50-D8 | 769x769 | 80000 | - | - | 78.68 | 80.66 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_80k_cityscapes/gcnet_r50-d8_769x769_80k_cityscapes_20200619_092516-4839565b.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_80k_cityscapes/gcnet_r50-d8_769x769_80k_cityscapes_20200619_092516.log.json) | -| GCNet | R-101-D8 | 769x769 | 80000 | - | - | 79.18 | 80.71 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_80k_cityscapes/gcnet_r101-d8_769x769_80k_cityscapes_20200619_092628-8e043423.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_80k_cityscapes/gcnet_r101-d8_769x769_80k_cityscapes_20200619_092628.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| GCNet | R-50-D8 | 512x1024 | 40000 | 5.8 | 3.93 | 77.69 | 78.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes/gcnet_r50-d8_512x1024_40k_cityscapes_20200618_074436-4b0fd17b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes/gcnet_r50-d8_512x1024_40k_cityscapes_20200618_074436.log.json) | +| GCNet | R-101-D8 | 512x1024 | 40000 | 9.2 | 2.61 | 78.28 | 79.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes/gcnet_r101-d8_512x1024_40k_cityscapes_20200618_074436-5e62567f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes/gcnet_r101-d8_512x1024_40k_cityscapes_20200618_074436.log.json) | +| GCNet | R-50-D8 | 769x769 | 40000 | 6.5 | 1.67 | 78.12 | 80.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_40k_cityscapes/gcnet_r50-d8_769x769_40k_cityscapes_20200618_182814-a26f4471.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_40k_cityscapes/gcnet_r50-d8_769x769_40k_cityscapes_20200618_182814.log.json) | +| GCNet | R-101-D8 | 769x769 | 40000 | 10.5 | 1.13 | 78.95 | 80.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_40k_cityscapes/gcnet_r101-d8_769x769_40k_cityscapes_20200619_092550-ca4f0a84.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_40k_cityscapes/gcnet_r101-d8_769x769_40k_cityscapes_20200619_092550.log.json) | +| GCNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.48 | 80.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes/gcnet_r50-d8_512x1024_80k_cityscapes_20200618_074450-ef8f069b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes/gcnet_r50-d8_512x1024_80k_cityscapes_20200618_074450.log.json) | +| GCNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.03 | 79.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes/gcnet_r101-d8_512x1024_80k_cityscapes_20200618_074450-778ebf69.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes/gcnet_r101-d8_512x1024_80k_cityscapes_20200618_074450.log.json) | +| GCNet | R-50-D8 | 769x769 | 80000 | - | - | 78.68 | 80.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_80k_cityscapes/gcnet_r50-d8_769x769_80k_cityscapes_20200619_092516-4839565b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_80k_cityscapes/gcnet_r50-d8_769x769_80k_cityscapes_20200619_092516.log.json) | +| GCNet | R-101-D8 | 769x769 | 80000 | - | - | 79.18 | 80.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_80k_cityscapes/gcnet_r101-d8_769x769_80k_cityscapes_20200619_092628-8e043423.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_80k_cityscapes/gcnet_r101-d8_769x769_80k_cityscapes_20200619_092628.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| GCNet | R-50-D8 | 512x512 | 80000 | 8.5 | 23.38 | 41.47 | 42.85 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_80k_ade20k/gcnet_r50-d8_512x512_80k_ade20k_20200614_185146-91a6da41.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_80k_ade20k/gcnet_r50-d8_512x512_80k_ade20k_20200614_185146.log.json) | -| GCNet | R-101-D8 | 512x512 | 80000 | 12 | 15.20 | 42.82 | 44.54 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_80k_ade20k/gcnet_r101-d8_512x512_80k_ade20k_20200615_020811-c3fcb6dd.pth) |
[log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_80k_ade20k/gcnet_r101-d8_512x512_80k_ade20k_20200615_020811.log.json) | -| GCNet | R-50-D8 | 512x512 | 160000 | - | - | 42.37 | 43.52 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_160k_ade20k/gcnet_r50-d8_512x512_160k_ade20k_20200615_224122-d95f3e1f.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_160k_ade20k/gcnet_r50-d8_512x512_160k_ade20k_20200615_224122.log.json) | -| GCNet | R-101-D8 | 512x512 | 160000 | - | - | 43.69 | 45.21 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_160k_ade20k/gcnet_r101-d8_512x512_160k_ade20k_20200615_225406-615528d7.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_160k_ade20k/gcnet_r101-d8_512x512_160k_ade20k_20200615_225406.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| GCNet | R-50-D8 | 512x512 | 80000 | 8.5 | 23.38 | 41.47 | 42.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_80k_ade20k/gcnet_r50-d8_512x512_80k_ade20k_20200614_185146-91a6da41.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_80k_ade20k/gcnet_r50-d8_512x512_80k_ade20k_20200614_185146.log.json) | +| GCNet | R-101-D8 | 512x512 | 80000 | 12 | 15.20 | 42.82 | 44.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_80k_ade20k/gcnet_r101-d8_512x512_80k_ade20k_20200615_020811-c3fcb6dd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_80k_ade20k/gcnet_r101-d8_512x512_80k_ade20k_20200615_020811.log.json) | +| GCNet | R-50-D8 | 512x512 | 160000 | - | - | 42.37 | 43.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_160k_ade20k/gcnet_r50-d8_512x512_160k_ade20k_20200615_224122-d95f3e1f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_160k_ade20k/gcnet_r50-d8_512x512_160k_ade20k_20200615_224122.log.json) | +| GCNet | R-101-D8 | 512x512 | 160000 | - | - | 43.69 | 45.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r101-d8_4xb4-160k_ade20k-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_160k_ade20k/gcnet_r101-d8_512x512_160k_ade20k_20200615_225406-615528d7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_160k_ade20k/gcnet_r101-d8_512x512_160k_ade20k_20200615_225406.log.json) | ### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| GCNet | R-50-D8 | 512x512 | 20000 | 5.8 | 23.35 | 76.42 | 77.51 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_20k_voc12aug/gcnet_r50-d8_512x512_20k_voc12aug_20200617_165701-3cbfdab1.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_20k_voc12aug/gcnet_r50-d8_512x512_20k_voc12aug_20200617_165701.log.json) | -| GCNet | R-101-D8 | 512x512 | 20000 | 9.2 | 14.80 | 77.41 | 78.56 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_20k_voc12aug/gcnet_r101-d8_512x512_20k_voc12aug_20200617_165713-6c720aa9.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_20k_voc12aug/gcnet_r101-d8_512x512_20k_voc12aug_20200617_165713.log.json) | -| GCNet | R-50-D8 | 512x512 | 40000 | - | - | 76.24 | 77.63 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_40k_voc12aug/gcnet_r50-d8_512x512_40k_voc12aug_20200613_195105-9797336d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_40k_voc12aug/gcnet_r50-d8_512x512_40k_voc12aug_20200613_195105.log.json) | -| GCNet | R-101-D8 | 512x512 | 40000 | - | - | 77.84 | 78.59 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_40k_voc12aug/gcnet_r101-d8_512x512_40k_voc12aug_20200613_185806-1e38208d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_40k_voc12aug/gcnet_r101-d8_512x512_40k_voc12aug_20200613_185806.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| GCNet | R-50-D8 | 512x512 | 20000 | 5.8 | 23.35 | 76.42 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r50-d8_4xb4-20k_voc12aug-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_20k_voc12aug/gcnet_r50-d8_512x512_20k_voc12aug_20200617_165701-3cbfdab1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_20k_voc12aug/gcnet_r50-d8_512x512_20k_voc12aug_20200617_165701.log.json) | +| GCNet | R-101-D8 | 512x512 | 20000 | 9.2 | 14.80 | 77.41 | 78.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r101-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_20k_voc12aug/gcnet_r101-d8_512x512_20k_voc12aug_20200617_165713-6c720aa9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_20k_voc12aug/gcnet_r101-d8_512x512_20k_voc12aug_20200617_165713.log.json) | +| GCNet | R-50-D8 | 512x512 | 40000 | - | - | 76.24 | 77.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r50-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_40k_voc12aug/gcnet_r50-d8_512x512_40k_voc12aug_20200613_195105-9797336d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_40k_voc12aug/gcnet_r50-d8_512x512_40k_voc12aug_20200613_195105.log.json) | +| GCNet | R-101-D8 | 512x512 | 40000 | - | - | 77.84 | 78.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/gcnet/gcnet_r101-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_40k_voc12aug/gcnet_r101-d8_512x512_40k_voc12aug_20200613_185806-1e38208d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_40k_voc12aug/gcnet_r101-d8_512x512_40k_voc12aug_20200613_185806.log.json) | diff --git a/configs/gcnet/gcnet.yml b/configs/gcnet/gcnet.yml new file mode 100644 index 0000000000..dfd8cf56c4 --- /dev/null +++ b/configs/gcnet/gcnet.yml @@ -0,0 +1,305 @@ +Collections: +- Name: GCNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1904.11492 + Title: 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond' + README: configs/gcnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/gc_head.py#L10 + Version: v0.17.0 + Converted From: + Code: https://github.com/xvjiarui/GCNet +Models: +- Name: gcnet_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 254.45 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.69 + mIoU(ms+flip): 78.56 + Config: configs/gcnet/gcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes/gcnet_r50-d8_512x1024_40k_cityscapes_20200618_074436-4b0fd17b.pth +- Name: gcnet_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 383.14 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + 
Dataset: Cityscapes + Metrics: + mIoU: 78.28 + mIoU(ms+flip): 79.34 + Config: configs/gcnet/gcnet_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes/gcnet_r101-d8_512x1024_40k_cityscapes_20200618_074436-5e62567f.pth +- Name: gcnet_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 598.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.12 + mIoU(ms+flip): 80.09 + Config: configs/gcnet/gcnet_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_40k_cityscapes/gcnet_r50-d8_769x769_40k_cityscapes_20200618_182814-a26f4471.pth +- Name: gcnet_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 884.96 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.95 + mIoU(ms+flip): 80.71 + Config: configs/gcnet/gcnet_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_40k_cityscapes/gcnet_r101-d8_769x769_40k_cityscapes_20200619_092550-ca4f0a84.pth +- Name: gcnet_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.48 + mIoU(ms+flip): 80.01 + Config: configs/gcnet/gcnet_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes/gcnet_r50-d8_512x1024_80k_cityscapes_20200618_074450-ef8f069b.pth +- Name: gcnet_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.03 + mIoU(ms+flip): 79.84 + Config: configs/gcnet/gcnet_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes/gcnet_r101-d8_512x1024_80k_cityscapes_20200618_074450-778ebf69.pth +- Name: gcnet_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.68 + mIoU(ms+flip): 80.66 + Config: configs/gcnet/gcnet_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_80k_cityscapes/gcnet_r50-d8_769x769_80k_cityscapes_20200619_092516-4839565b.pth +- Name: gcnet_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.18 + mIoU(ms+flip): 80.71 + Config: configs/gcnet/gcnet_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_80k_cityscapes/gcnet_r101-d8_769x769_80k_cityscapes_20200619_092628-8e043423.pth +- Name: gcnet_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 42.77 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.47 + mIoU(ms+flip): 42.85 + Config: configs/gcnet/gcnet_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_80k_ade20k/gcnet_r50-d8_512x512_80k_ade20k_20200614_185146-91a6da41.pth +- Name: gcnet_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 65.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.82 + mIoU(ms+flip): 44.54 + Config: configs/gcnet/gcnet_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_80k_ade20k/gcnet_r101-d8_512x512_80k_ade20k_20200615_020811-c3fcb6dd.pth +- Name: gcnet_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.37 + mIoU(ms+flip): 43.52 + Config: configs/gcnet/gcnet_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_160k_ade20k/gcnet_r50-d8_512x512_160k_ade20k_20200615_224122-d95f3e1f.pth +- Name: gcnet_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.69 + mIoU(ms+flip): 45.21 + Config: configs/gcnet/gcnet_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_160k_ade20k/gcnet_r101-d8_512x512_160k_ade20k_20200615_225406-615528d7.pth +- Name: gcnet_r50-d8_4xb4-20k_voc12aug-512x512 + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 42.83 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.8 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.42 + mIoU(ms+flip): 77.51 + Config: configs/gcnet/gcnet_r50-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_20k_voc12aug/gcnet_r50-d8_512x512_20k_voc12aug_20200617_165701-3cbfdab1.pth +- Name: gcnet_r101-d8_4xb4-20k_voc12aug-512x512 + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 67.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.41 + mIoU(ms+flip): 78.56 + Config: configs/gcnet/gcnet_r101-d8_4xb4-20k_voc12aug-512x512.py 
+ Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_20k_voc12aug/gcnet_r101-d8_512x512_20k_voc12aug_20200617_165713-6c720aa9.pth +- Name: gcnet_r50-d8_4xb4-40k_voc12aug-512x512 + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.24 + mIoU(ms+flip): 77.63 + Config: configs/gcnet/gcnet_r50-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_40k_voc12aug/gcnet_r50-d8_512x512_40k_voc12aug_20200613_195105-9797336d.pth +- Name: gcnet_r101-d8_4xb4-40k_voc12aug-512x512 + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.84 + mIoU(ms+flip): 78.59 + Config: configs/gcnet/gcnet_r101-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_40k_voc12aug/gcnet_r101-d8_512x512_40k_voc12aug_20200613_185806-1e38208d.pth diff --git a/configs/gcnet/gcnet_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/gcnet/gcnet_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..e8f7c552fb --- /dev/null +++ b/configs/gcnet/gcnet_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/gcnet/gcnet_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..887d17b71d --- /dev/null +++ b/configs/gcnet/gcnet_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/gcnet/gcnet_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..aa47578d16 --- /dev/null +++ b/configs/gcnet/gcnet_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/gcnet/gcnet_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..ddf4ad7bbc --- /dev/null +++ b/configs/gcnet/gcnet_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/gcnet/gcnet_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..45285c0183 --- /dev/null +++ b/configs/gcnet/gcnet_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_4xb4-20k_voc12aug-512x512.py b/configs/gcnet/gcnet_r101-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..b466c409e8 --- /dev/null +++ b/configs/gcnet/gcnet_r101-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = 
'./gcnet_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_4xb4-40k_voc12aug-512x512.py b/configs/gcnet/gcnet_r101-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..9c7f741f05 --- /dev/null +++ b/configs/gcnet/gcnet_r101-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_4xb4-40k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/gcnet/gcnet_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..61337dbda2 --- /dev/null +++ b/configs/gcnet/gcnet_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes.py b/configs/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 27bd9422da..0000000000 --- a/configs/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './gcnet_r50-d8_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes.py b/configs/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 7f0f83fe39..0000000000 --- a/configs/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './gcnet_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_512x512_160k_ade20k.py b/configs/gcnet/gcnet_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index 9888120f65..0000000000 --- a/configs/gcnet/gcnet_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './gcnet_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py b/configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 1b70ca8e46..0000000000 --- a/configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './gcnet_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py b/configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index b17c7a12b5..0000000000 --- a/configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './gcnet_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py b/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index a2183fc2db..0000000000 --- a/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './gcnet_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py b/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 08a6031f20..0000000000 --- a/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py +++ 
/dev/null @@ -1,2 +0,0 @@ -_base_ = './gcnet_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py b/configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 5efb61339c..0000000000 --- a/configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './gcnet_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/gcnet/gcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/gcnet/gcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..f976bd907a --- /dev/null +++ b/configs/gcnet/gcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/gcnet/gcnet_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/gcnet/gcnet_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..34ce822c59 --- /dev/null +++ b/configs/gcnet/gcnet_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/gcnet/gcnet_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/gcnet/gcnet_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..5088929047 --- /dev/null +++ b/configs/gcnet/gcnet_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/gcnet/gcnet_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/gcnet/gcnet_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..f886f170fc --- /dev/null +++ b/configs/gcnet/gcnet_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/gcnet/gcnet_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/gcnet/gcnet_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..d3f5631319 --- /dev/null +++ b/configs/gcnet/gcnet_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size 
= (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/gcnet/gcnet_r50-d8_4xb4-20k_voc12aug-512x512.py b/configs/gcnet/gcnet_r50-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..356b088236 --- /dev/null +++ b/configs/gcnet/gcnet_r50-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/gcnet/gcnet_r50-d8_4xb4-40k_voc12aug-512x512.py b/configs/gcnet/gcnet_r50-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..802b7668e0 --- /dev/null +++ b/configs/gcnet/gcnet_r50-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/gcnet/gcnet_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/gcnet/gcnet_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..7327934289 --- /dev/null +++ b/configs/gcnet/gcnet_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py b/configs/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 610467c072..0000000000 --- a/configs/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes.py b/configs/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 155e28f421..0000000000 --- a/configs/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py b/configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index 1549a4d5bf..0000000000 --- a/configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/configs/gcnet/gcnet_r50-d8_512x512_20k_voc12aug.py b/configs/gcnet/gcnet_r50-d8_512x512_20k_voc12aug.py 
deleted file mode 100644 index a496204bdb..0000000000 --- a/configs/gcnet/gcnet_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/gcnet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/gcnet/gcnet_r50-d8_512x512_40k_voc12aug.py b/configs/gcnet/gcnet_r50-d8_512x512_40k_voc12aug.py deleted file mode 100644 index d85cf6550f..0000000000 --- a/configs/gcnet/gcnet_r50-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/gcnet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py b/configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 89d5e1ae0f..0000000000 --- a/configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/configs/gcnet/gcnet_r50-d8_769x769_40k_cityscapes.py b/configs/gcnet/gcnet_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index ac9826ad92..0000000000 --- a/configs/gcnet/gcnet_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/gcnet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/gcnet/gcnet_r50-d8_769x769_80k_cityscapes.py b/configs/gcnet/gcnet_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index cacf24e4f3..0000000000 --- a/configs/gcnet/gcnet_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/gcnet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/hrnet/README.md b/configs/hrnet/README.md index 4bb016e441..f85683b63f 100644 --- a/configs/hrnet/README.md +++ b/configs/hrnet/README.md @@ -1,7 +1,30 @@ -# Deep High-Resolution Representation Learning for Human Pose Estimation +# HRNet + +[Deep High-Resolution Representation Learning for Human Pose Estimation](https://arxiv.org/abs/1908.07919) ## Introduction -``` + + + +Official Repo + +Code Snippet + +## Abstract + + + +High-resolution representations are essential for position-sensitive vision problems, such as human pose estimation, semantic segmentation, and object detection. 
Existing state-of-the-art frameworks first encode the input image as a low-resolution representation through a subnetwork that is formed by connecting high-to-low resolution convolutions *in series* (e.g., ResNet, VGGNet), and then recover the high-resolution representation from the encoded low-resolution representation. Instead, our proposed network, named as High-Resolution Network (HRNet), maintains high-resolution representations through the whole process. There are two key characteristics: (i) Connect the high-to-low resolution convolution streams *in parallel*; (ii) Repeatedly exchange the information across resolutions. The benefit is that the resulting representation is semantically richer and spatially more precise. We show the superiority of the proposed HRNet in a wide range of applications, including human pose estimation, semantic segmentation, and object detection, suggesting that the HRNet is a stronger backbone for computer vision problems. All the codes are available at [this https URL](https://github.com/HRNet). + + + +
+ +
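As a reading aid for the renamed configs and checkpoints listed in the tables further down, the short sketch below builds one of the HRNet models from a config added in this PR. It is only an illustrative example and not part of the diff: it assumes an MMSegmentation 1.x (dev-1.x) installation, is run from the repository root, and uses `demo/demo.png`, the sample image shipped with the repository; the config path and checkpoint URL are copied from the Cityscapes table.

```python
# Illustrative sketch only (not part of this change): load the FCN + HRNetV2p-W18
# Cityscapes model from the config and checkpoint listed in the table below.
# Assumes MMSegmentation 1.x (dev-1.x) is installed and this runs from the repo root.
from mmseg.apis import inference_model, init_model

config_file = 'configs/hrnet/fcn_hr18_4xb2-40k_cityscapes-512x1024.py'
checkpoint_url = (
    'https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/'
    'fcn_hr18_512x1024_40k_cityscapes/'
    'fcn_hr18_512x1024_40k_cityscapes_20200601_014216-f196fb4e.pth')

# Build the model and load the pretrained weights (downloaded on first use).
model = init_model(config_file, checkpoint_url, device='cpu')  # or 'cuda:0'

# Run inference on the sample image shipped with the repository.
result = inference_model(model, 'demo/demo.png')
print(result.pred_sem_seg.data.shape)  # (1, H, W) tensor of class indices
```

The new file names follow the OpenMMLab 2.0 convention used throughout this PR: `4xb2-40k` means 4 GPUs with a batch size of 2 per GPU for 40k iterations, and the trailing `512x1024` is the training crop size.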
+ +## Citation + +```bibtext @inproceedings{SunXLW19, title={Deep High-Resolution Representation Learning for Human Pose Estimation}, author={Ke Sun and Bin Xiao and Dong Liu and Jingdong Wang}, @@ -13,34 +36,87 @@ ## Results and models ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|--------------------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| FCN | HRNetV2p-W18-Small | 512x1024 | 40000 | 1.7 | 23.74 | 73.86 | 75.91 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216-93db27d0.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216.log.json) | -| FCN | HRNetV2p-W18 | 512x1024 | 40000 | 2.9 | 12.97 | 77.19 | 78.92 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216-f196fb4e.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216.log.json) | -| FCN | HRNetV2p-W48 | 512x1024 | 40000 | 6.2 | 6.42 | 78.48 | 79.69 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240-a989b146.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240.log.json) | -| FCN | HRNetV2p-W18-Small | 512x1024 | 80000 | - | - | 75.31 | 77.48 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700-1462b75d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700.log.json) | -| FCN | HRNetV2p-W18 | 512x1024 | 80000 | - | - | 78.65 | 80.35 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255-4e7b345e.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255.log.json) | -| FCN | HRNetV2p-W48 | 512x1024 | 80000 | - | - | 79.93 | 80.72 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606-58ea95d6.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606.log.json) | -| FCN | HRNetV2p-W18-Small | 512x1024 | 160000 | - | - | 76.31 | 78.31 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901.log.json) | -| FCN | HRNetV2p-W18 | 512x1024 | 160000 | - | - | 78.80 | 80.74 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822-221e4a4f.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822.log.json) | -| FCN | HRNetV2p-W48 | 512x1024 | 160000 | - | - | 80.65 | 81.92 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | HRNetV2p-W18-Small | 512x1024 | 40000 | 1.7 | 23.74 | 73.86 | 75.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18s_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216-93db27d0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216.log.json) | +| FCN | HRNetV2p-W18 | 512x1024 | 40000 | 2.9 | 12.97 | 77.19 | 78.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216-f196fb4e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216.log.json) | +| FCN | HRNetV2p-W48 | 512x1024 | 40000 | 6.2 | 6.42 | 78.48 | 79.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240-a989b146.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240.log.json) | +| FCN | HRNetV2p-W18-Small | 512x1024 | 80000 | - | - | 75.31 | 77.48 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18s_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700-1462b75d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700.log.json) | +| FCN | HRNetV2p-W18 | 512x1024 | 80000 | - | - | 78.65 | 80.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255-4e7b345e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255.log.json) | +| FCN | HRNetV2p-W48 | 512x1024 | 80000 | - | - | 79.93 | 80.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606-58ea95d6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606.log.json) | +| FCN | HRNetV2p-W18-Small | 512x1024 | 160000 | - | - | 76.31 | 78.31 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18s_4xb2-160k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901.log.json) | +| FCN | HRNetV2p-W18 | 512x1024 | 160000 | - | - | 78.80 | 80.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18_4xb2-160k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822-221e4a4f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822.log.json) | +| FCN | HRNetV2p-W48 | 512x1024 | 160000 | - | - | 80.65 | 81.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb2-160k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | 
-|--------|--------------------|-----------|--------:|----------|----------------|------:|--------------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| FCN | HRNetV2p-W18-Small | 512x512 | 80000 | 3.8 | 38.66 | 31.38 | 32.45 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345-77fc814a.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345.log.json) | -| FCN | HRNetV2p-W18 | 512x512 | 80000 | 4.9 | 22.57 | 35.51 | 36.80 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20200614_185145-66f20cb7.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20200614_185145.log.json) | -| FCN | HRNetV2p-W48 | 512x512 | 80000 | 8.2 | 21.23 | 41.90 | 43.27 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946-7ba5258d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946.log.json) | -| FCN | HRNetV2p-W18-Small | 512x512 | 160000 | - | - | 33.00 | 34.55 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20200614_214413-870f65ac.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20200614_214413.log.json) | -| FCN | HRNetV2p-W18 | 512x512 | 160000 | - | - | 36.79 | 38.58 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426-ca961836.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426.log.json) | -| FCN | HRNetV2p-W48 | 512x512 | 160000 | - | - | 42.02 | 43.86 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 
FCN | HRNetV2p-W18-Small | 512x512 | 80000 | 3.8 | 38.66 | 31.38 | 32.45 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18s_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345-77fc814a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 80000 | 4.9 | 22.57 | 36.27 | 37.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20210827_114910-6c9382c0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20210827_114910.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 80000 | 8.2 | 21.23 | 41.90 | 43.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946-7ba5258d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946.log.json) | +| FCN | HRNetV2p-W18-Small | 512x512 | 160000 | - | - | 33.07 | 34.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18s_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20210829_174739-f1e7c2e7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20210829_174739.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 160000 | - | - | 36.79 | 38.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426-ca961836.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 160000 | - | - | 42.02 | 43.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407.log.json) | ### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | 
-|--------|--------------------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| FCN | HRNetV2p-W18-Small | 512x512 | 20000 | 1.8 | 43.36 | 65.20 | 68.55 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20200617_224503-56e36088.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20200617_224503.log.json) | -| FCN | HRNetV2p-W18 | 512x512 | 20000 | 2.9 | 23.48 | 72.30 | 74.71 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503-488d45f7.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503.log.json) | -| FCN | HRNetV2p-W48 | 512x512 | 20000 | 6.2 | 22.05 | 75.87 | 78.58 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419-89de05cd.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419.log.json) | -| FCN | HRNetV2p-W18-Small | 512x512 | 40000 | - | - | 66.61 | 70.00 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648-4f8d6e7f.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648.log.json) | -| FCN | HRNetV2p-W18 | 512x512 | 40000 | - | - | 72.90 | 75.59 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401-1b4b76cd.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401.log.json) | -| FCN | HRNetV2p-W48 | 512x512 | 40000 | - | - | 76.24 | 78.49 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111-1b0f18bc.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------ | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | HRNetV2p-W18-Small | 512x512 | 20000 | 1.8 | 43.36 | 65.5 | 68.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18s_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20210829_174910-0aceadb4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20210829_174910.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 20000 | 2.9 | 23.48 | 72.30 | 74.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503-488d45f7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 20000 | 6.2 | 22.05 | 75.87 | 78.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419-89de05cd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419.log.json) | +| FCN | HRNetV2p-W18-Small | 512x512 | 40000 | - | - | 66.61 | 70.00 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18s_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648-4f8d6e7f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 40000 | - | - | 72.90 | 75.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401-1b4b76cd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 40000 | - | - | 76.24 | 78.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111-1b0f18bc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111.log.json) | + +### Pascal Context + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------ | --------- | ------: | -------- | -------------- | 
----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | HRNetV2p-W48 | 480x480 | 40000 | 6.1 | 8.86 | 45.14 | 47.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb4-40k_pascal-context-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context/fcn_hr48_480x480_40k_pascal_context_20200911_164852-667d00b0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context/fcn_hr48_480x480_40k_pascal_context-20200911_164852.log.json) | +| FCN | HRNetV2p-W48 | 480x480 | 80000 | - | - | 45.84 | 47.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb4-80k_pascal-context-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context/fcn_hr48_480x480_80k_pascal_context_20200911_155322-847a6711.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context/fcn_hr48_480x480_80k_pascal_context-20200911_155322.log.json) | + +### Pascal Context 59 + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | HRNetV2p-W48 | 480x480 | 40000 | - | - | 50.33 | 52.83 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb4-40k_pascal-context-59-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context_59/fcn_hr48_480x480_40k_pascal_context_59_20210410_122738-b808b8b2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context_59/fcn_hr48_480x480_40k_pascal_context_59-20210410_122738.log.json) | +| FCN | HRNetV2p-W48 | 480x480 | 80000 | - | - | 51.12 | 53.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb4-80k_pascal-context-59-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context_59/fcn_hr48_480x480_80k_pascal_context_59_20210411_003240-3ae7081e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context_59/fcn_hr48_480x480_80k_pascal_context_59-20210411_003240.log.json) | + +### LoveDA + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | 
-------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | HRNetV2p-W18-Small | 512x512 | 80000 | 1.59 | 24.87 | 49.28 | 49.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18s_4xb4-80k_loveda-512x512.pyy) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_loveda/fcn_hr18s_512x512_80k_loveda_20211210_203228-60a86a7a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_loveda/fcn_hr18s_512x512_80k_loveda_20211210_203228.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 80000 | 2.76 | 12.92 | 50.81 | 50.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18_4xb4-80k_loveda-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_loveda/fcn_hr18_512x512_80k_loveda_20211210_203952-93d9c3b3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_loveda/fcn_hr18_512x512_80k_loveda_20211210_203952.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 80000 | 6.20 | 9.61 | 51.42 | 51.64 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb4-80k_loveda-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_loveda/fcn_hr48_512x512_80k_loveda_20211211_044756-67072f55.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_loveda/fcn_hr48_512x512_80k_loveda_20211211_044756.log.json) | + +### Potsdam + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | HRNetV2p-W18-Small | 512x512 | 80000 | 1.58 | 36.00 | 77.64 | 78.8 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18s_4xb4-80k_potsdam-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_potsdam/fcn_hr18s_512x512_80k_potsdam_20211218_205517-ba32af63.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_potsdam/fcn_hr18s_512x512_80k_potsdam_20211218_205517.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 80000 | 2.76 | 19.25 | 78.26 | 79.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18_4xb4-80k_potsdam-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_potsdam/fcn_hr18_512x512_80k_potsdam_20211218_205517-5d0387ad.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_potsdam/fcn_hr18_512x512_80k_potsdam_20211218_205517.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 80000 | 6.20 | 16.42 | 78.39 | 79.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb4-80k_potsdam-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_potsdam/fcn_hr48_512x512_80k_potsdam_20211219_020601-97434c78.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_potsdam/fcn_hr48_512x512_80k_potsdam_20211219_020601.log.json) | + +### Vaihingen + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | HRNetV2p-W18-Small | 512x512 | 80000 | 1.58 | 38.11 | 71.81 | 73.1 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18s_4xb4-80k_vaihingen-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen/fcn_hr18s_4x4_512x512_80k_vaihingen_20211231_230909-b23aae02.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen/fcn_hr18s_4x4_512x512_80k_vaihingen_20211231_230909.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 80000 | 2.76 | 19.55 | 72.57 | 74.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18_4xb4-80k_vaihingen-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen/fcn_hr18_4x4_512x512_80k_vaihingen_20211231_231216-2ec3ae8a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen/fcn_hr18_4x4_512x512_80k_vaihingen_20211231_231216.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 80000 | 6.20 | 17.25 | 72.50 | 73.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb4-80k_vaihingen-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen/fcn_hr48_4x4_512x512_80k_vaihingen_20211231_231244-7133cb22.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen/fcn_hr48_4x4_512x512_80k_vaihingen_20211231_231244.log.json) | + +### iSAID + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | HRNetV2p-W18-Small | 896x896 | 80000 | 4.95 | 13.84 | 62.30 | 62.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18s_4xb4-80k_isaid-896x896.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_896x896_80k_isaid/fcn_hr18s_4x4_896x896_80k_isaid_20220118_001603-3cc0769b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_896x896_80k_isaid/fcn_hr18s_4x4_896x896_80k_isaid_20220118_001603.log.json) | +| FCN | HRNetV2p-W18 | 896x896 | 80000 | 8.30 | 7.71 | 65.06 | 65.60 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr18_4xb4-80k_isaid-896x896.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_896x896_80k_isaid/fcn_hr18_4x4_896x896_80k_isaid_20220110_182230-49bf752e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_896x896_80k_isaid/fcn_hr18_4x4_896x896_80k_isaid_20220110_182230.log.json) | +| FCN | HRNetV2p-W48 | 896x896 | 80000 | 16.89 | 7.34 | 67.80 | 68.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/hrnet/fcn_hr48_4xb4-80k_isaid-896x896.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_896x896_80k_isaid/fcn_hr48_4x4_896x896_80k_isaid_20220114_174643-547fc420.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_896x896_80k_isaid/fcn_hr48_4x4_896x896_80k_isaid_20220114_174643.log.json) | + +Note: + +- `896x896` is the Crop Size of iSAID dataset, which is followed by the implementation of [PointFlow: Flowing Semantics Through Points for Aerial Image Segmentation](https://arxiv.org/pdf/2103.06564.pdf) diff --git a/configs/hrnet/fcn_hr18_4xb2-160k_cityscapes-512x1024.py b/configs/hrnet/fcn_hr18_4xb2-160k_cityscapes-512x1024.py new file mode 100644 index 0000000000..0b374632b8 --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb2-160k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/hrnet/fcn_hr18_4xb2-40k_cityscapes-512x1024.py b/configs/hrnet/fcn_hr18_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..598b938a3f --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/hrnet/fcn_hr18_4xb2-80k_cityscapes-512x1024.py b/configs/hrnet/fcn_hr18_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..eb7da49dbc --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', 
'../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/hrnet/fcn_hr18_4xb4-160k_ade20k-512x512.py b/configs/hrnet/fcn_hr18_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..c4f732cd89 --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, decode_head=dict(num_classes=150)) diff --git a/configs/hrnet/fcn_hr18_4xb4-20k_voc12aug-512x512.py b/configs/hrnet/fcn_hr18_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..107df6b13b --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_voc12_aug.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, decode_head=dict(num_classes=21)) diff --git a/configs/hrnet/fcn_hr18_4xb4-40k_pascal-context-480x480.py b/configs/hrnet/fcn_hr18_4xb4-40k_pascal-context-480x480.py new file mode 100644 index 0000000000..f744baec7b --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb4-40k_pascal-context-480x480.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/hrnet/fcn_hr18_4xb4-40k_pascal-context-59-480x480.py b/configs/hrnet/fcn_hr18_4xb4-40k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..0daaa35ebc --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb4-40k_pascal-context-59-480x480.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context_59.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/hrnet/fcn_hr18_4xb4-40k_voc12aug-512x512.py b/configs/hrnet/fcn_hr18_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..2aa16b124d --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_voc12_aug.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, decode_head=dict(num_classes=21)) diff --git a/configs/hrnet/fcn_hr18_4xb4-80k_ade20k-512x512.py 
b/configs/hrnet/fcn_hr18_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..029b7d0e9a --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, decode_head=dict(num_classes=150)) diff --git a/configs/hrnet/fcn_hr18_4xb4-80k_isaid-896x896.py b/configs/hrnet/fcn_hr18_4xb4-80k_isaid-896x896.py new file mode 100644 index 0000000000..33a6ac70a6 --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb4-80k_isaid-896x896.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/isaid.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (896, 896) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, decode_head=dict(num_classes=16)) diff --git a/configs/hrnet/fcn_hr18_4xb4-80k_loveda-512x512.py b/configs/hrnet/fcn_hr18_4xb4-80k_loveda-512x512.py new file mode 100644 index 0000000000..1a918b2ece --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb4-80k_loveda-512x512.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/loveda.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, decode_head=dict(num_classes=7)) diff --git a/configs/hrnet/fcn_hr18_4xb4-80k_pascal-context-480x480.py b/configs/hrnet/fcn_hr18_4xb4-80k_pascal-context-480x480.py new file mode 100644 index 0000000000..4f37e8ade7 --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb4-80k_pascal-context-480x480.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/hrnet/fcn_hr18_4xb4-80k_pascal-context-59-480x480.py b/configs/hrnet/fcn_hr18_4xb4-80k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..2c35cb9c32 --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb4-80k_pascal-context-59-480x480.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context_59.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/hrnet/fcn_hr18_4xb4-80k_potsdam-512x512.py b/configs/hrnet/fcn_hr18_4xb4-80k_potsdam-512x512.py new file mode 100644 index 0000000000..181c03d379 --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb4-80k_potsdam-512x512.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', 
'../_base_/datasets/potsdam.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, decode_head=dict(num_classes=6)) diff --git a/configs/hrnet/fcn_hr18_4xb4-80k_vaihingen-512x512.py b/configs/hrnet/fcn_hr18_4xb4-80k_vaihingen-512x512.py new file mode 100644 index 0000000000..6303bb65c3 --- /dev/null +++ b/configs/hrnet/fcn_hr18_4xb4-80k_vaihingen-512x512.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/vaihingen.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, decode_head=dict(num_classes=6)) diff --git a/configs/hrnet/fcn_hr18_512x1024_160k_cityscapes.py b/configs/hrnet/fcn_hr18_512x1024_160k_cityscapes.py deleted file mode 100644 index 9f04e935c3..0000000000 --- a/configs/hrnet/fcn_hr18_512x1024_160k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] diff --git a/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py b/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py deleted file mode 100644 index 99760c36d8..0000000000 --- a/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py b/configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py deleted file mode 100644 index a653dda192..0000000000 --- a/configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/hrnet/fcn_hr18_512x512_160k_ade20k.py b/configs/hrnet/fcn_hr18_512x512_160k_ade20k.py deleted file mode 100644 index 45ed99b681..0000000000 --- a/configs/hrnet/fcn_hr18_512x512_160k_ade20k.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_hr18.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict(decode_head=dict(num_classes=150)) diff --git a/configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py b/configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py deleted file mode 100644 index f06448b168..0000000000 --- a/configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_voc12_aug.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' -] -model = dict(decode_head=dict(num_classes=21)) diff --git a/configs/hrnet/fcn_hr18_512x512_40k_voc12aug.py b/configs/hrnet/fcn_hr18_512x512_40k_voc12aug.py deleted file mode 100644 index d74e95943a..0000000000 --- a/configs/hrnet/fcn_hr18_512x512_40k_voc12aug.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_voc12_aug.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] -model = dict(decode_head=dict(num_classes=21)) diff --git a/configs/hrnet/fcn_hr18_512x512_80k_ade20k.py b/configs/hrnet/fcn_hr18_512x512_80k_ade20k.py deleted file mode 
100644 index 52bc9f5e91..0000000000 --- a/configs/hrnet/fcn_hr18_512x512_80k_ade20k.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_hr18.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict(decode_head=dict(num_classes=150)) diff --git a/configs/hrnet/fcn_hr18s_4xb2-160k_cityscapes-512x1024.py b/configs/hrnet/fcn_hr18s_4xb2-160k_cityscapes-512x1024.py new file mode 100644 index 0000000000..6ca631cbee --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb2-160k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb2-160k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb2-40k_cityscapes-512x1024.py b/configs/hrnet/fcn_hr18s_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..ba7e9c696e --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb2-40k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb2-80k_cityscapes-512x1024.py b/configs/hrnet/fcn_hr18s_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..26ab6210dd --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb4-160k_ade20k-512x512.py b/configs/hrnet/fcn_hr18s_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..29cbd10cbf --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb4-160k_ade20k-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb4-20k_voc12aug-512x512.py b/configs/hrnet/fcn_hr18s_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..9dd1933349 --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb4-20k_voc12aug-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb4-40k_pascal-context-480x480.py b/configs/hrnet/fcn_hr18s_4xb4-40k_pascal-context-480x480.py new file mode 100644 index 0000000000..5f88f532a3 --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb4-40k_pascal-context-480x480.py @@ -0,0 
+1,9 @@ +_base_ = './fcn_hr18_4xb4-40k_pascal-context-480x480.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb4-40k_pascal-context-59-480x480.py b/configs/hrnet/fcn_hr18s_4xb4-40k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..b616fad8c2 --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb4-40k_pascal-context-59-480x480.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb4-40k_pascal-context-59-480x480.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb4-40k_voc12aug-512x512.py b/configs/hrnet/fcn_hr18s_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..b10b282dd8 --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb4-40k_voc12aug-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb4-80k_ade20k-512x512.py b/configs/hrnet/fcn_hr18s_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..f9f49360bf --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb4-80k_ade20k-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb4-80k_isaid-896x896.py b/configs/hrnet/fcn_hr18s_4xb4-80k_isaid-896x896.py new file mode 100644 index 0000000000..ab2d2414dd --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb4-80k_isaid-896x896.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb4-80k_isaid-896x896.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb4-80k_loveda-512x512.py b/configs/hrnet/fcn_hr18s_4xb4-80k_loveda-512x512.py new file mode 100644 index 0000000000..dd17076c3f --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb4-80k_loveda-512x512.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb4-80k_loveda-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb4-80k_pascal-context-480x480.py b/configs/hrnet/fcn_hr18s_4xb4-80k_pascal-context-480x480.py new file mode 100644 index 0000000000..b7b52331c7 --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb4-80k_pascal-context-480x480.py @@ -0,0 +1,9 
@@ +_base_ = './fcn_hr18_4xb4-80k_pascal-context-480x480.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb4-80k_pascal-context-59-480x480.py b/configs/hrnet/fcn_hr18s_4xb4-80k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..ccf1040d13 --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb4-80k_pascal-context-59-480x480.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb4-80k_pascal-context-59-480x480.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb4-80k_potsdam-512x512.py b/configs/hrnet/fcn_hr18s_4xb4-80k_potsdam-512x512.py new file mode 100644 index 0000000000..3a5726f5d1 --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb4-80k_potsdam-512x512.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb4-80k_potsdam-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_4xb4-80k_vaihingen-512x512.py b/configs/hrnet/fcn_hr18s_4xb4-80k_vaihingen-512x512.py new file mode 100644 index 0000000000..720c1732b0 --- /dev/null +++ b/configs/hrnet/fcn_hr18s_4xb4-80k_vaihingen-512x512.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4xb4-80k_vaihingen-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py b/configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py deleted file mode 100644 index ddbe3801f9..0000000000 --- a/configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './fcn_hr18_512x1024_160k_cityscapes.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py b/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py deleted file mode 100644 index 4e31d26e09..0000000000 --- a/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './fcn_hr18_512x1024_40k_cityscapes.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_512x1024_80k_cityscapes.py b/configs/hrnet/fcn_hr18s_512x1024_80k_cityscapes.py deleted file mode 100644 index ee2831d99d..0000000000 --- a/configs/hrnet/fcn_hr18s_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ 
-_base_ = './fcn_hr18_512x1024_80k_cityscapes.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py b/configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py deleted file mode 100644 index 22a3ce0b38..0000000000 --- a/configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './fcn_hr18_512x512_160k_ade20k.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_512x512_20k_voc12aug.py b/configs/hrnet/fcn_hr18s_512x512_20k_voc12aug.py deleted file mode 100644 index d0de5df752..0000000000 --- a/configs/hrnet/fcn_hr18s_512x512_20k_voc12aug.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './fcn_hr18_512x512_20k_voc12aug.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_512x512_40k_voc12aug.py b/configs/hrnet/fcn_hr18s_512x512_40k_voc12aug.py deleted file mode 100644 index 409db3c628..0000000000 --- a/configs/hrnet/fcn_hr18s_512x512_40k_voc12aug.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './fcn_hr18_512x512_40k_voc12aug.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr18s_512x512_80k_ade20k.py b/configs/hrnet/fcn_hr18s_512x512_80k_ade20k.py deleted file mode 100644 index a8400979b1..0000000000 --- a/configs/hrnet/fcn_hr18s_512x512_80k_ade20k.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './fcn_hr18_512x512_80k_ade20k.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/hrnet/fcn_hr48_4xb2-160k_cityscapes-512x1024.py b/configs/hrnet/fcn_hr48_4xb2-160k_cityscapes-512x1024.py new file mode 100644 index 0000000000..4aa5d94d1e --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb2-160k_cityscapes-512x1024.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb2-160k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb2-40k_cityscapes-512x1024.py b/configs/hrnet/fcn_hr48_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..7cb795250d --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb2-40k_cityscapes-512x1024.py' 
+model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb2-80k_cityscapes-512x1024.py b/configs/hrnet/fcn_hr48_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..3e2ce034b2 --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb4-160k_ade20k-512x512.py b/configs/hrnet/fcn_hr48_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..89b1f04651 --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb4-160k_ade20k-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb4-20k_voc12aug-512x512.py b/configs/hrnet/fcn_hr48_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..7ca38a9a79 --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb4-20k_voc12aug-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb4-40k_pascal-context-480x480.py b/configs/hrnet/fcn_hr48_4xb4-40k_pascal-context-480x480.py new file mode 100644 index 0000000000..379be1d67e --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb4-40k_pascal-context-480x480.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb4-40k_pascal-context-480x480.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb4-40k_pascal-context-59-480x480.py b/configs/hrnet/fcn_hr48_4xb4-40k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..12730dd533 --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb4-40k_pascal-context-59-480x480.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb4-40k_pascal-context-59-480x480.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb4-40k_voc12aug-512x512.py 
b/configs/hrnet/fcn_hr48_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..3e1b920c59 --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb4-40k_voc12aug-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb4-80k_ade20k-512x512.py b/configs/hrnet/fcn_hr48_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..14fd663e87 --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb4-80k_ade20k-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb4-80k_isaid-896x896.py b/configs/hrnet/fcn_hr48_4xb4-80k_isaid-896x896.py new file mode 100644 index 0000000000..81815efa8d --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb4-80k_isaid-896x896.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb4-80k_isaid-896x896.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb4-80k_loveda-512x512.py b/configs/hrnet/fcn_hr48_4xb4-80k_loveda-512x512.py new file mode 100644 index 0000000000..34d23af163 --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb4-80k_loveda-512x512.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb4-80k_loveda-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb4-80k_pascal-context-480x480.py b/configs/hrnet/fcn_hr48_4xb4-80k_pascal-context-480x480.py new file mode 100644 index 0000000000..4d193d9042 --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb4-80k_pascal-context-480x480.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb4-80k_pascal-context-480x480.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb4-80k_pascal-context-59-480x480.py b/configs/hrnet/fcn_hr48_4xb4-80k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..d8b4c4aa8e --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb4-80k_pascal-context-59-480x480.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb4-80k_pascal-context-59-480x480.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 
192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb4-80k_potsdam-512x512.py b/configs/hrnet/fcn_hr48_4xb4-80k_potsdam-512x512.py new file mode 100644 index 0000000000..58a650004d --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb4-80k_potsdam-512x512.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb4-80k_potsdam-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_4xb4-80k_vaihingen-512x512.py b/configs/hrnet/fcn_hr48_4xb4-80k_vaihingen-512x512.py new file mode 100644 index 0000000000..db91ed83ef --- /dev/null +++ b/configs/hrnet/fcn_hr48_4xb4-80k_vaihingen-512x512.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4xb4-80k_vaihingen-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py b/configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py deleted file mode 100644 index 394a61c99f..0000000000 --- a/configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './fcn_hr18_512x1024_160k_cityscapes.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=dict( - in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py b/configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py deleted file mode 100644 index d37ab1d09e..0000000000 --- a/configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './fcn_hr18_512x1024_40k_cityscapes.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=dict( - in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py b/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py deleted file mode 100644 index a9bab32b52..0000000000 --- a/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './fcn_hr18_512x1024_80k_cityscapes.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=dict( - in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_512x512_160k_ade20k.py b/configs/hrnet/fcn_hr48_512x512_160k_ade20k.py deleted file mode 100644 index dff4fea85c..0000000000 --- a/configs/hrnet/fcn_hr48_512x512_160k_ade20k.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './fcn_hr18_512x512_160k_ade20k.py' -model = dict( - 
pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=dict( - in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py b/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py deleted file mode 100644 index a8d1deb986..0000000000 --- a/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './fcn_hr18_512x512_20k_voc12aug.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=dict( - in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_512x512_40k_voc12aug.py b/configs/hrnet/fcn_hr48_512x512_40k_voc12aug.py deleted file mode 100644 index 1084a57e97..0000000000 --- a/configs/hrnet/fcn_hr48_512x512_40k_voc12aug.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './fcn_hr18_512x512_40k_voc12aug.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=dict( - in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/fcn_hr48_512x512_80k_ade20k.py b/configs/hrnet/fcn_hr48_512x512_80k_ade20k.py deleted file mode 100644 index 7eca7fa4b8..0000000000 --- a/configs/hrnet/fcn_hr48_512x512_80k_ade20k.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './fcn_hr18_512x512_80k_ade20k.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=dict( - in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/configs/hrnet/hrnet.yml b/configs/hrnet/hrnet.yml new file mode 100644 index 0000000000..77f556e17a --- /dev/null +++ b/configs/hrnet/hrnet.yml @@ -0,0 +1,695 @@ +Models: +- Name: fcn_hr18s_4xb2-40k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 42.12 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.86 + mIoU(ms+flip): 75.91 + Config: configs/hrnet/fcn_hr18s_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216-93db27d0.pth +- Name: fcn_hr18_4xb2-40k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 77.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 2.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.19 + mIoU(ms+flip): 78.92 + Config: configs/hrnet/fcn_hr18_4xb2-40k_cityscapes-512x1024.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216-f196fb4e.pth +- Name: fcn_hr48_4xb2-40k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 155.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.48 + mIoU(ms+flip): 79.69 + Config: configs/hrnet/fcn_hr48_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240-a989b146.pth +- Name: fcn_hr18s_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.31 + mIoU(ms+flip): 77.48 + Config: configs/hrnet/fcn_hr18s_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700-1462b75d.pth +- Name: fcn_hr18_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.65 + mIoU(ms+flip): 80.35 + Config: configs/hrnet/fcn_hr18_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255-4e7b345e.pth +- Name: fcn_hr48_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.93 + mIoU(ms+flip): 80.72 + Config: configs/hrnet/fcn_hr48_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606-58ea95d6.pth +- Name: fcn_hr18s_4xb2-160k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.31 + mIoU(ms+flip): 78.31 + Config: configs/hrnet/fcn_hr18s_4xb2-160k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth +- Name: fcn_hr18_4xb2-160k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.8 + mIoU(ms+flip): 80.74 + Config: configs/hrnet/fcn_hr18_4xb2-160k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822-221e4a4f.pth +- Name: fcn_hr48_4xb2-160k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.65 + mIoU(ms+flip): 81.92 + Config: 
configs/hrnet/fcn_hr48_4xb2-160k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth +- Name: fcn_hr18s_4xb4-80k_ade20k-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 25.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 3.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 31.38 + mIoU(ms+flip): 32.45 + Config: configs/hrnet/fcn_hr18s_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345-77fc814a.pth +- Name: fcn_hr18_4xb4-80k_ade20k-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 44.31 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.9 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 36.27 + mIoU(ms+flip): 37.28 + Config: configs/hrnet/fcn_hr18_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20210827_114910-6c9382c0.pth +- Name: fcn_hr48_4xb4-80k_ade20k-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.9 + mIoU(ms+flip): 43.27 + Config: configs/hrnet/fcn_hr48_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946-7ba5258d.pth +- Name: fcn_hr18s_4xb4-160k_ade20k-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 33.07 + mIoU(ms+flip): 34.56 + Config: configs/hrnet/fcn_hr18s_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20210829_174739-f1e7c2e7.pth +- Name: fcn_hr18_4xb4-160k_ade20k-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 36.79 + mIoU(ms+flip): 38.58 + Config: configs/hrnet/fcn_hr18_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426-ca961836.pth +- Name: fcn_hr48_4xb4-160k_ade20k-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.02 + mIoU(ms+flip): 43.86 + Config: configs/hrnet/fcn_hr48_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth +- Name: fcn_hr18s_4xb4-20k_voc12aug-512x512 + 
In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 23.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.8 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 65.5 + mIoU(ms+flip): 68.89 + Config: configs/hrnet/fcn_hr18s_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20210829_174910-0aceadb4.pth +- Name: fcn_hr18_4xb4-20k_voc12aug-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 42.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.9 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 72.3 + mIoU(ms+flip): 74.71 + Config: configs/hrnet/fcn_hr18_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503-488d45f7.pth +- Name: fcn_hr48_4xb4-20k_voc12aug-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 45.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 75.87 + mIoU(ms+flip): 78.58 + Config: configs/hrnet/fcn_hr48_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419-89de05cd.pth +- Name: fcn_hr18s_4xb4-40k_voc12aug-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 66.61 + mIoU(ms+flip): 70.0 + Config: configs/hrnet/fcn_hr18s_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648-4f8d6e7f.pth +- Name: fcn_hr18_4xb4-40k_voc12aug-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 72.9 + mIoU(ms+flip): 75.59 + Config: configs/hrnet/fcn_hr18_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401-1b4b76cd.pth +- Name: fcn_hr48_4xb4-40k_voc12aug-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.24 + mIoU(ms+flip): 78.49 + Config: configs/hrnet/fcn_hr48_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111-1b0f18bc.pth +- Name: fcn_hr48_4xb4-40k_pascal-context-480x480 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (480,480) + lr schd: 40000 + inference time (ms/im): + - value: 112.87 + 
hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (480,480) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 45.14 + mIoU(ms+flip): 47.42 + Config: configs/hrnet/fcn_hr48_4xb4-40k_pascal-context-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context/fcn_hr48_480x480_40k_pascal_context_20200911_164852-667d00b0.pth +- Name: fcn_hr48_4xb4-80k_pascal-context-480x480 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 45.84 + mIoU(ms+flip): 47.84 + Config: configs/hrnet/fcn_hr48_4xb4-80k_pascal-context-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context/fcn_hr48_480x480_80k_pascal_context_20200911_155322-847a6711.pth +- Name: fcn_hr48_4xb4-40k_pascal-context-59-480x480 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (480,480) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 50.33 + mIoU(ms+flip): 52.83 + Config: configs/hrnet/fcn_hr48_4xb4-40k_pascal-context-59-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context_59/fcn_hr48_480x480_40k_pascal_context_59_20210410_122738-b808b8b2.pth +- Name: fcn_hr48_4xb4-80k_pascal-context-59-480x480 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 51.12 + mIoU(ms+flip): 53.56 + Config: configs/hrnet/fcn_hr48_4xb4-80k_pascal-context-59-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context_59/fcn_hr48_480x480_80k_pascal_context_59_20210411_003240-3ae7081e.pth +- Name: fcn_hr18s_4xb4-80k_loveda-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 40.21 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.59 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 49.28 + mIoU(ms+flip): 49.42 + Config: configs/hrnet/fcn_hr18s_4xb4-80k_loveda-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_loveda/fcn_hr18s_512x512_80k_loveda_20211210_203228-60a86a7a.pth +- Name: fcn_hr18_4xb4-80k_loveda-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 77.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.76 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 50.81 + mIoU(ms+flip): 50.95 + Config: configs/hrnet/fcn_hr18_4xb4-80k_loveda-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_loveda/fcn_hr18_512x512_80k_loveda_20211210_203952-93d9c3b3.pth +- Name: fcn_hr48_4xb4-80k_loveda-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 104.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory 
(GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 51.42 + mIoU(ms+flip): 51.64 + Config: configs/hrnet/fcn_hr48_4xb4-80k_loveda-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_loveda/fcn_hr48_512x512_80k_loveda_20211211_044756-67072f55.pth +- Name: fcn_hr18s_4xb4-80k_potsdam-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 27.78 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.58 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 77.64 + mIoU(ms+flip): 78.8 + Config: configs/hrnet/fcn_hr18s_4xb4-80k_potsdam-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_potsdam/fcn_hr18s_512x512_80k_potsdam_20211218_205517-ba32af63.pth +- Name: fcn_hr18_4xb4-80k_potsdam-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 51.95 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.76 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 78.26 + mIoU(ms+flip): 79.24 + Config: configs/hrnet/fcn_hr18_4xb4-80k_potsdam-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_potsdam/fcn_hr18_512x512_80k_potsdam_20211218_205517-5d0387ad.pth +- Name: fcn_hr48_4xb4-80k_potsdam-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 60.9 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 78.39 + mIoU(ms+flip): 79.34 + Config: configs/hrnet/fcn_hr48_4xb4-80k_potsdam-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_potsdam/fcn_hr48_512x512_80k_potsdam_20211219_020601-97434c78.pth +- Name: fcn_hr18s_4xb4-80k_vaihingen-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 26.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.58 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 71.81 + mIoU(ms+flip): 73.1 + Config: configs/hrnet/fcn_hr18s_4xb4-80k_vaihingen-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen/fcn_hr18s_4x4_512x512_80k_vaihingen_20211231_230909-b23aae02.pth +- Name: fcn_hr18_4xb4-80k_vaihingen-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 51.15 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.76 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 72.57 + mIoU(ms+flip): 74.09 + Config: configs/hrnet/fcn_hr18_4xb4-80k_vaihingen-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen/fcn_hr18_4x4_512x512_80k_vaihingen_20211231_231216-2ec3ae8a.pth +- Name: 
fcn_hr48_4xb4-80k_vaihingen-512x512 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 57.97 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 72.5 + mIoU(ms+flip): 73.52 + Config: configs/hrnet/fcn_hr48_4xb4-80k_vaihingen-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen/fcn_hr48_4x4_512x512_80k_vaihingen_20211231_231244-7133cb22.pth +- Name: fcn_hr18s_4xb4-80k_isaid-896x896 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 72.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 4.95 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 62.3 + mIoU(ms+flip): 62.97 + Config: configs/hrnet/fcn_hr18s_4xb4-80k_isaid-896x896.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_896x896_80k_isaid/fcn_hr18s_4x4_896x896_80k_isaid_20220118_001603-3cc0769b.pth +- Name: fcn_hr18_4xb4-80k_isaid-896x896 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 129.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 8.3 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 65.06 + mIoU(ms+flip): 65.6 + Config: configs/hrnet/fcn_hr18_4xb4-80k_isaid-896x896.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_896x896_80k_isaid/fcn_hr18_4x4_896x896_80k_isaid_20220110_182230-49bf752e.pth +- Name: fcn_hr48_4xb4-80k_isaid-896x896 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 136.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 16.89 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 67.8 + mIoU(ms+flip): 68.53 + Config: configs/hrnet/fcn_hr48_4xb4-80k_isaid-896x896.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_896x896_80k_isaid/fcn_hr48_4x4_896x896_80k_isaid_20220114_174643-547fc420.pth diff --git a/configs/icnet/README.md b/configs/icnet/README.md new file mode 100644 index 0000000000..134f53b29f --- /dev/null +++ b/configs/icnet/README.md @@ -0,0 +1,56 @@ +# ICNet + +[ICNet for Real-time Semantic Segmentation on High-resolution Images](https://arxiv.org/abs/1704.08545) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +We focus on the challenging task of real-time semantic segmentation in this paper. It finds many practical applications and yet is with fundamental difficulty of reducing a large portion of computation for pixel-wise label inference. We propose an image cascade network (ICNet) that incorporates multi-resolution branches under proper label guidance to address this challenge. We provide in-depth analysis of our framework and introduce the cascade feature fusion unit to quickly achieve high-quality segmentation. 
Our system yields real-time inference on a single GPU card with decent quality results evaluated on challenging datasets like Cityscapes, CamVid and COCO-Stuff. + + + +
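For reference, a minimal usage sketch of the ICNet configs added in this patch with the MMSegmentation 1.x Python API is shown below. It is not part of the patch itself: it assumes `mmsegmentation` (dev-1.x) is installed, that the R-18-D8 80k checkpoint from the results table further down has been downloaded locally, and the input image path is illustrative only.

```python
# Minimal inference sketch (assumptions: mmsegmentation dev-1.x is installed,
# the checkpoint below was downloaded from the model zoo, and the image path
# is illustrative -- none of this is part of the patch).
from mmseg.apis import init_model, inference_model

config_file = 'configs/icnet/icnet_r18-d8_4xb2-80k_cityscapes-832x832.py'
checkpoint_file = 'icnet_r18-d8_832x832_80k_cityscapes_20210925_225521-2e36638d.pth'

# Build the segmentor from the config and load the trained weights.
model = init_model(config_file, checkpoint_file, device='cuda:0')

# Run inference on a single image; the result holds per-pixel class predictions.
result = inference_model(model, 'demo/demo.png')
```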
+ +## Citation + +```bibtext +@inproceedings{zhao2018icnet, + title={Icnet for real-time semantic segmentation on high-resolution images}, + author={Zhao, Hengshuang and Qi, Xiaojuan and Shen, Xiaoyong and Shi, Jianping and Jia, Jiaya}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={405--420}, + year={2018} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ICNet | R-18-D8 | 832x832 | 80000 | 1.70 | 27.12 | 68.14 | 70.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/icnet/icnet_r18-d8_4xb2-80k_cityscapes-832x832.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_80k_cityscapes/icnet_r18-d8_832x832_80k_cityscapes_20210925_225521-2e36638d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_80k_cityscapes/icnet_r18-d8_832x832_80k_cityscapes_20210925_225521.log.json) | +| ICNet | R-18-D8 | 832x832 | 160000 | - | - | 71.64 | 74.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/icnet/icnet_r18-d8_4xb2-160k_cityscapes-832x832.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_160k_cityscapes/icnet_r18-d8_832x832_160k_cityscapes_20210925_230153-2c6eb6e0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_160k_cityscapes/icnet_r18-d8_832x832_160k_cityscapes_20210925_230153.log.json) | +| ICNet (in1k-pre) | R-18-D8 | 832x832 | 80000 | - | - | 72.51 | 74.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/icnet/icnet_r18-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes_20210925_230354-1cbe3022.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes_20210925_230354.log.json) | +| ICNet (in1k-pre) | R-18-D8 | 832x832 | 160000 | - | - | 74.43 | 76.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/icnet/icnet_r18-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes_20210926_052702-619c8ae1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes_20210926_052702.log.json) | +| ICNet | R-50-D8 | 832x832 | 80000 | 2.53 | 20.08 | 68.91 | 69.72 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/icnet/icnet_r50-d8_4xb2-80k_cityscapes-832x832.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_80k_cityscapes/icnet_r50-d8_832x832_80k_cityscapes_20210926_044625-c6407341.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_80k_cityscapes/icnet_r50-d8_832x832_80k_cityscapes_20210926_044625.log.json) | +| ICNet | R-50-D8 | 832x832 | 160000 | - | - | 73.82 | 75.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/icnet/icnet_r50-d8_4xb2-160k_cityscapes-832x832.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_160k_cityscapes/icnet_r50-d8_832x832_160k_cityscapes_20210925_232612-a95f0d4e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_160k_cityscapes/icnet_r50-d8_832x832_160k_cityscapes_20210925_232612.log.json) | +| ICNet (in1k-pre) | R-50-D8 | 832x832 | 80000 | - | - | 74.58 | 76.41 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/icnet/icnet_r50-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes_20210926_032943-1743dc7b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes_20210926_032943.log.json) | +| ICNet (in1k-pre) | R-50-D8 | 832x832 | 160000 | - | - | 76.29 | 78.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/icnet/icnet_r50-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes_20210926_042715-ce310aea.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes_20210926_042715.log.json) | +| ICNet | R-101-D8 | 832x832 | 80000 | 3.08 | 16.95 | 70.28 | 71.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/icnet/icnet_r101-d8_4xb2-80k_cityscapes-832x832.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_80k_cityscapes/icnet_r101-d8_832x832_80k_cityscapes_20210926_072447-b52f936e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_80k_cityscapes/icnet_r101-d8_832x832_80k_cityscapes_20210926_072447.log.json) | +| ICNet | R-101-D8 | 832x832 | 160000 | - | - | 73.80 | 76.10 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/icnet/icnet_r101-d8_4xb2-160k_cityscapes-832x832.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_160k_cityscapes/icnet_r101-d8_832x832_160k_cityscapes_20210926_092350-3a1ebf1a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_160k_cityscapes/icnet_r101-d8_832x832_160k_cityscapes_20210926_092350.log.json) | +| ICNet (in1k-pre) | R-101-D8 | 832x832 | 80000 | - | - | 75.57 | 77.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/icnet/icnet_r101-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes_20210926_020414-7ceb12c5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes_20210926_020414.log.json) | +| ICNet (in1k-pre) | R-101-D8 | 832x832 | 160000 | - | - | 76.15 | 77.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/icnet/icnet_r101-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes_20210925_232612-9484ae8a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes_20210925_232612.log.json) | + +Note: `in1k-pre` means pretrained model is used. diff --git a/configs/icnet/icnet.yml b/configs/icnet/icnet.yml new file mode 100644 index 0000000000..5ded544726 --- /dev/null +++ b/configs/icnet/icnet.yml @@ -0,0 +1,207 @@ +Collections: +- Name: ICNet + Metadata: + Training Data: + - Cityscapes + Paper: + URL: https://arxiv.org/abs/1704.08545 + Title: ICNet for Real-time Semantic Segmentation on High-resolution Images + README: configs/icnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/necks/ic_neck.py#L77 + Version: v0.18.0 + Converted From: + Code: https://github.com/hszhao/ICNet +Models: +- Name: icnet_r18-d8_4xb2-80k_cityscapes-832x832 + In Collection: ICNet + Metadata: + backbone: R-18-D8 + crop size: (832,832) + lr schd: 80000 + inference time (ms/im): + - value: 36.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (832,832) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 68.14 + mIoU(ms+flip): 70.16 + Config: configs/icnet/icnet_r18-d8_4xb2-80k_cityscapes-832x832.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_80k_cityscapes/icnet_r18-d8_832x832_80k_cityscapes_20210925_225521-2e36638d.pth +- Name: icnet_r18-d8_4xb2-160k_cityscapes-832x832 + In Collection: ICNet + Metadata: + backbone: R-18-D8 + crop size: (832,832) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 71.64 + mIoU(ms+flip): 74.18 + Config: configs/icnet/icnet_r18-d8_4xb2-160k_cityscapes-832x832.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_160k_cityscapes/icnet_r18-d8_832x832_160k_cityscapes_20210925_230153-2c6eb6e0.pth +- Name: icnet_r18-d8-in1k-pre_4xb2-80k_cityscapes-832x832 + In Collection: ICNet + Metadata: + backbone: R-18-D8 + crop size: (832,832) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 72.51 + mIoU(ms+flip): 74.78 + Config: configs/icnet/icnet_r18-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes_20210925_230354-1cbe3022.pth +- Name: icnet_r18-d8-in1k-pre_4xb2-160k_cityscapes-832x832 + In Collection: ICNet + Metadata: + backbone: R-18-D8 + crop size: (832,832) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.43 + 
mIoU(ms+flip): 76.72 + Config: configs/icnet/icnet_r18-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes_20210926_052702-619c8ae1.pth +- Name: icnet_r50-d8_4xb2-80k_cityscapes-832x832 + In Collection: ICNet + Metadata: + backbone: R-50-D8 + crop size: (832,832) + lr schd: 80000 + inference time (ms/im): + - value: 49.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (832,832) + Training Memory (GB): 2.53 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 68.91 + mIoU(ms+flip): 69.72 + Config: configs/icnet/icnet_r50-d8_4xb2-80k_cityscapes-832x832.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_80k_cityscapes/icnet_r50-d8_832x832_80k_cityscapes_20210926_044625-c6407341.pth +- Name: icnet_r50-d8_4xb2-160k_cityscapes-832x832 + In Collection: ICNet + Metadata: + backbone: R-50-D8 + crop size: (832,832) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.82 + mIoU(ms+flip): 75.67 + Config: configs/icnet/icnet_r50-d8_4xb2-160k_cityscapes-832x832.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_160k_cityscapes/icnet_r50-d8_832x832_160k_cityscapes_20210925_232612-a95f0d4e.pth +- Name: icnet_r50-d8-in1k-pre_4xb2-80k_cityscapes-832x832 + In Collection: ICNet + Metadata: + backbone: R-50-D8 + crop size: (832,832) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.58 + mIoU(ms+flip): 76.41 + Config: configs/icnet/icnet_r50-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes_20210926_032943-1743dc7b.pth +- Name: icnet_r50-d8-in1k-pre_4xb2-160k_cityscapes-832x832 + In Collection: ICNet + Metadata: + backbone: R-50-D8 + crop size: (832,832) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.29 + mIoU(ms+flip): 78.09 + Config: configs/icnet/icnet_r50-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes_20210926_042715-ce310aea.pth +- Name: icnet_r101-d8_4xb2-80k_cityscapes-832x832 + In Collection: ICNet + Metadata: + backbone: R-101-D8 + crop size: (832,832) + lr schd: 80000 + inference time (ms/im): + - value: 59.0 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (832,832) + Training Memory (GB): 3.08 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 70.28 + mIoU(ms+flip): 71.95 + Config: configs/icnet/icnet_r101-d8_4xb2-80k_cityscapes-832x832.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_80k_cityscapes/icnet_r101-d8_832x832_80k_cityscapes_20210926_072447-b52f936e.pth +- Name: icnet_r101-d8_4xb2-160k_cityscapes-832x832 + In Collection: ICNet + Metadata: + backbone: R-101-D8 + crop size: (832,832) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.8 + mIoU(ms+flip): 76.1 + Config: configs/icnet/icnet_r101-d8_4xb2-160k_cityscapes-832x832.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_160k_cityscapes/icnet_r101-d8_832x832_160k_cityscapes_20210926_092350-3a1ebf1a.pth +- Name: icnet_r101-d8-in1k-pre_4xb2-80k_cityscapes-832x832 + In Collection: ICNet + Metadata: + backbone: R-101-D8 + crop size: (832,832) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.57 + mIoU(ms+flip): 77.86 + Config: configs/icnet/icnet_r101-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes_20210926_020414-7ceb12c5.pth +- Name: icnet_r101-d8-in1k-pre_4xb2-160k_cityscapes-832x832 + In Collection: ICNet + Metadata: + backbone: R-101-D8 + crop size: (832,832) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.15 + mIoU(ms+flip): 77.98 + Config: configs/icnet/icnet_r101-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes_20210925_232612-9484ae8a.pth diff --git a/configs/icnet/icnet_r101-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py b/configs/icnet/icnet_r101-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py new file mode 100644 index 0000000000..a6840a1155 --- /dev/null +++ b/configs/icnet/icnet_r101-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py @@ -0,0 +1,7 @@ +_base_ = './icnet_r50-d8_4xb2-160k_cityscapes-832x832.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet101_v1c')))) diff --git a/configs/icnet/icnet_r101-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py b/configs/icnet/icnet_r101-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py new file mode 100644 index 0000000000..ca81df8c7b --- /dev/null +++ b/configs/icnet/icnet_r101-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py @@ -0,0 +1,7 @@ +_base_ = './icnet_r50-d8_4xb2-80k_cityscapes-832x832.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet101_v1c')))) diff --git a/configs/icnet/icnet_r101-d8_4xb2-160k_cityscapes-832x832.py b/configs/icnet/icnet_r101-d8_4xb2-160k_cityscapes-832x832.py new file mode 100644 index 0000000000..ef60446bc5 --- /dev/null +++ b/configs/icnet/icnet_r101-d8_4xb2-160k_cityscapes-832x832.py @@ -0,0 +1,2 @@ +_base_ = './icnet_r50-d8_4xb2-160k_cityscapes-832x832.py' +model = dict(backbone=dict(backbone_cfg=dict(depth=101))) diff --git a/configs/icnet/icnet_r101-d8_4xb2-80k_cityscapes-832x832.py b/configs/icnet/icnet_r101-d8_4xb2-80k_cityscapes-832x832.py new file mode 100644 index 0000000000..5173d2d6f8 --- /dev/null +++ b/configs/icnet/icnet_r101-d8_4xb2-80k_cityscapes-832x832.py @@ -0,0 +1,2 @@ +_base_ = './icnet_r50-d8_4xb2-80k_cityscapes-832x832.py' +model = dict(backbone=dict(backbone_cfg=dict(depth=101))) diff --git a/configs/icnet/icnet_r18-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py b/configs/icnet/icnet_r18-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py new file mode 100644 index 0000000000..5f72daab65 --- /dev/null +++ b/configs/icnet/icnet_r18-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py @@ -0,0 +1,8 @@ +_base_ = './icnet_r50-d8_4xb2-160k_cityscapes-832x832.py' +model = dict( + backbone=dict( + layer_channels=(128, 512), + backbone_cfg=dict( + depth=18, + init_cfg=dict( + type='Pretrained', 
checkpoint='open-mmlab://resnet18_v1c')))) diff --git a/configs/icnet/icnet_r18-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py b/configs/icnet/icnet_r18-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py new file mode 100644 index 0000000000..2fc79ab197 --- /dev/null +++ b/configs/icnet/icnet_r18-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py @@ -0,0 +1,8 @@ +_base_ = './icnet_r50-d8_4xb2-80k_cityscapes-832x832.py' +model = dict( + backbone=dict( + layer_channels=(128, 512), + backbone_cfg=dict( + depth=18, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')))) diff --git a/configs/icnet/icnet_r18-d8_4xb2-160k_cityscapes-832x832.py b/configs/icnet/icnet_r18-d8_4xb2-160k_cityscapes-832x832.py new file mode 100644 index 0000000000..2c70e94810 --- /dev/null +++ b/configs/icnet/icnet_r18-d8_4xb2-160k_cityscapes-832x832.py @@ -0,0 +1,3 @@ +_base_ = './icnet_r50-d8_4xb2-160k_cityscapes-832x832.py' +model = dict( + backbone=dict(layer_channels=(128, 512), backbone_cfg=dict(depth=18))) diff --git a/configs/icnet/icnet_r18-d8_4xb2-80k_cityscapes-832x832.py b/configs/icnet/icnet_r18-d8_4xb2-80k_cityscapes-832x832.py new file mode 100644 index 0000000000..23c7ac2990 --- /dev/null +++ b/configs/icnet/icnet_r18-d8_4xb2-80k_cityscapes-832x832.py @@ -0,0 +1,3 @@ +_base_ = './icnet_r50-d8_4xb2-80k_cityscapes-832x832.py' +model = dict( + backbone=dict(layer_channels=(128, 512), backbone_cfg=dict(depth=18))) diff --git a/configs/icnet/icnet_r50-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py b/configs/icnet/icnet_r50-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py new file mode 100644 index 0000000000..f9ab863402 --- /dev/null +++ b/configs/icnet/icnet_r50-d8-in1k-pre_4xb2-160k_cityscapes-832x832.py @@ -0,0 +1,6 @@ +_base_ = './icnet_r50-d8_4xb2-160k_cityscapes-832x832.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet50_v1c')))) diff --git a/configs/icnet/icnet_r50-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py b/configs/icnet/icnet_r50-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py new file mode 100644 index 0000000000..9a085d4f61 --- /dev/null +++ b/configs/icnet/icnet_r50-d8-in1k-pre_4xb2-80k_cityscapes-832x832.py @@ -0,0 +1,6 @@ +_base_ = './icnet_r50-d8_4xb2-80k_cityscapes-832x832.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet50_v1c')))) diff --git a/configs/icnet/icnet_r50-d8_4xb2-160k_cityscapes-832x832.py b/configs/icnet/icnet_r50-d8_4xb2-160k_cityscapes-832x832.py new file mode 100644 index 0000000000..1b7b1884f7 --- /dev/null +++ b/configs/icnet/icnet_r50-d8_4xb2-160k_cityscapes-832x832.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/icnet_r50-d8.py', + '../_base_/datasets/cityscapes_832x832.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (832, 832) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/icnet/icnet_r50-d8_4xb2-80k_cityscapes-832x832.py b/configs/icnet/icnet_r50-d8_4xb2-80k_cityscapes-832x832.py new file mode 100644 index 0000000000..001dbcaf7f --- /dev/null +++ b/configs/icnet/icnet_r50-d8_4xb2-80k_cityscapes-832x832.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/icnet_r50-d8.py', + '../_base_/datasets/cityscapes_832x832.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (832, 832) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff 
--git a/configs/isanet/README.md b/configs/isanet/README.md new file mode 100644 index 0000000000..db93dae234 --- /dev/null +++ b/configs/isanet/README.md @@ -0,0 +1,80 @@ +# ISANet + +[Interlaced Sparse Self-Attention for Semantic Segmentation](https://arxiv.org/abs/1907.12273) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +In this paper, we present a so-called interlaced sparse self-attention approach to improve the efficiency of the \\emph{self-attention} mechanism for semantic segmentation. The main idea is that we factorize the dense affinity matrix as the product of two sparse affinity matrices. There are two successive attention modules each estimating a sparse affinity matrix. The first attention module is used to estimate the affinities within a subset of positions that have long spatial interval distances and the second attention module is used to estimate the affinities within a subset of positions that have short spatial interval distances. These two attention modules are designed so that each position is able to receive the information from all the other positions. In contrast to the original self-attention module, our approach decreases the computation and memory complexity substantially especially when processing high-resolution feature maps. We empirically verify the effectiveness of our approach on six challenging semantic segmentation benchmarks. + + + +
+ +
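The factorization described in the abstract can be sketched in a few lines of PyTorch. This is an illustration of the idea only, not the implementation shipped in `mmseg/models/decode_heads/isa_head.py`; `attend` is a placeholder for any per-group self-attention module mapping `(N, C, h, w)` to the same shape, and the partition sizes `p_h`, `p_w` are assumed to divide the feature-map height and width.

```python
# Minimal sketch of interlaced sparse self-attention (illustrative only).
import torch


def interlaced_attention(x: torch.Tensor, attend, p_h: int = 8, p_w: int = 8):
    b, c, h, w = x.shape
    q_h, q_w = h // p_h, w // p_w

    # Long-range step: positions sharing the same within-block offset are
    # grouped together, so members of a group are p_h (or p_w) pixels apart.
    out = x.reshape(b, c, q_h, p_h, q_w, p_w)
    out = out.permute(0, 3, 5, 1, 2, 4).reshape(b * p_h * p_w, c, q_h, q_w)
    out = attend(out)
    out = out.reshape(b, p_h, p_w, c, q_h, q_w)
    out = out.permute(0, 3, 4, 1, 5, 2).reshape(b, c, h, w)

    # Short-range step: each block of neighbouring positions forms a group,
    # so after the two passes every position has (indirectly) received
    # information from all other positions.
    out = out.reshape(b, c, q_h, p_h, q_w, p_w)
    out = out.permute(0, 2, 4, 1, 3, 5).reshape(b * q_h * q_w, c, p_h, p_w)
    out = attend(out)
    out = out.reshape(b, q_h, q_w, c, p_h, p_w)
    out = out.permute(0, 3, 1, 4, 2, 5).reshape(b, c, h, w)
    return out
```

Compared with a dense affinity over all H×W positions, each attention call above only spans q_h×q_w or p_h×p_w positions, which is where the memory and compute savings on high-resolution feature maps come from.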
+ +## Citation + +```bibtex +@article{huang2019isa, + title={Interlaced Sparse Self-Attention for Semantic Segmentation}, + author={Huang, Lang and Yuan, Yuhui and Guo, Jianyuan and Zhang, Chao and Chen, Xilin and Wang, Jingdong}, + journal={arXiv preprint arXiv:1907.12273}, + year={2019} +} +``` + +The technical report above is also presented at: + +```bibtex +@article{yuan2021ocnet, + title={OCNet: Object Context for Semantic Segmentation}, + author={Yuan, Yuhui and Huang, Lang and Guo, Jianyuan and Zhang, Chao and Chen, Xilin and Wang, Jingdong}, + journal={International Journal of Computer Vision}, + pages={1--24}, + year={2021}, + publisher={Springer} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------- | -------: | -------------- | ----- | ------------: | --------------------------------------------------------------------------------------------------------------------------------: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ISANet | R-50-D8 | 512x1024 | 40000 | 5.869 | 2.91 | 78.49 | 79.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_40k_cityscapes/isanet_r50-d8_512x1024_40k_cityscapes_20210901_054739-981bd763.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_40k_cityscapes/isanet_r50-d8_512x1024_40k_cityscapes_20210901_054739.log.json) | +| ISANet | R-50-D8 | 512x1024 | 80000 | 5.869 | 2.91 | 78.68 | 80.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_80k_cityscapes/isanet_r50-d8_512x1024_80k_cityscapes_20210901_074202-89384497.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_80k_cityscapes/isanet_r50-d8_512x1024_80k_cityscapes_20210901_074202.log.json) | +| ISANet | R-50-D8 | 769x769 | 40000 | 6.759 | 1.54 | 78.70 | 80.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_40k_cityscapes/isanet_r50-d8_769x769_40k_cityscapes_20210903_050200-4ae7e65b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_40k_cityscapes/isanet_r50-d8_769x769_40k_cityscapes_20210903_050200.log.json) | +| ISANet | R-50-D8 | 769x769 | 80000 | 6.759 | 1.54 | 79.29 | 80.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_80k_cityscapes/isanet_r50-d8_769x769_80k_cityscapes_20210903_101126-99b54519.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_80k_cityscapes/isanet_r50-d8_769x769_80k_cityscapes_20210903_101126.log.json) | +| ISANet | R-101-D8 | 512x1024 | 40000 | 9.425 | 2.35 | 79.58 | 81.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_40k_cityscapes/isanet_r101-d8_512x1024_40k_cityscapes_20210901_145553-293e6bd6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_40k_cityscapes/isanet_r101-d8_512x1024_40k_cityscapes_20210901_145553.log.json) | +| ISANet | R-101-D8 | 512x1024 | 80000 | 9.425 | 2.35 | 80.32 | 81.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_80k_cityscapes/isanet_r101-d8_512x1024_80k_cityscapes_20210901_145243-5b99c9b2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_80k_cityscapes/isanet_r101-d8_512x1024_80k_cityscapes_20210901_145243.log.json) | +| ISANet | R-101-D8 | 769x769 | 40000 | 10.815 | 0.92 | 79.68 | 80.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_40k_cityscapes/isanet_r101-d8_769x769_40k_cityscapes_20210903_111320-509e7224.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_40k_cityscapes/isanet_r101-d8_769x769_40k_cityscapes_20210903_111320.log.json) | +| ISANet | R-101-D8 | 769x769 | 80000 | 10.815 | 0.92 | 80.61 | 81.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_80k_cityscapes/isanet_r101-d8_769x769_80k_cityscapes_20210903_111319-24f71dfa.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_80k_cityscapes/isanet_r101-d8_769x769_80k_cityscapes_20210903_111319.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------- | -------: | -------------- | ----- | ------------: | ----------------------------------------------------------------------------------------------------------------------------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ISANet | R-50-D8 | 512x512 | 80000 | 9.0 | 22.55 | 41.12 | 42.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_80k_ade20k/isanet_r50-d8_512x512_80k_ade20k_20210903_124557-6ed83a0c.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_80k_ade20k/isanet_r50-d8_512x512_80k_ade20k_20210903_124557.log.json) | +| ISANet | R-50-D8 | 512x512 | 160000 | 9.0 | 22.55 | 42.59 | 43.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_160k_ade20k/isanet_r50-d8_512x512_160k_ade20k_20210903_104850-f752d0a3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_160k_ade20k/isanet_r50-d8_512x512_160k_ade20k_20210903_104850.log.json) | +| ISANet | R-101-D8 | 512x512 | 80000 | 12.562 | 10.56 | 43.51 | 44.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_80k_ade20k/isanet_r101-d8_512x512_80k_ade20k_20210903_162056-68b235c2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_80k_ade20k/isanet_r101-d8_512x512_80k_ade20k_20210903_162056.log.json) | +| ISANet | R-101-D8 | 512x512 | 160000 | 12.562 | 10.56 | 43.80 | 45.4 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_160k_ade20k/isanet_r101-d8_512x512_160k_ade20k_20210903_211431-a7879dcd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_160k_ade20k/isanet_r101-d8_512x512_160k_ade20k_20210903_211431.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------- | -------: | -------------- | ----- | ------------: | -----------------------------------------------------------------------------------------------------------------------------: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ISANet | R-50-D8 | 512x512 | 20000 | 5.9 | 23.08 | 76.78 | 77.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r50-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_20k_voc12aug/isanet_r50-d8_512x512_20k_voc12aug_20210901_164838-79d59b80.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_20k_voc12aug/isanet_r50-d8_512x512_20k_voc12aug_20210901_164838.log.json) | +| ISANet | R-50-D8 | 512x512 | 40000 | 5.9 | 23.08 | 76.20 | 77.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r50-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_40k_voc12aug/isanet_r50-d8_512x512_40k_voc12aug_20210901_151349-7d08a54e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_40k_voc12aug/isanet_r50-d8_512x512_40k_voc12aug_20210901_151349.log.json) | +| ISANet | R-101-D8 | 512x512 | 20000 | 
9.465 | 7.42 | 78.46 | 79.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r101-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_20k_voc12aug/isanet_r101-d8_512x512_20k_voc12aug_20210901_115805-3ccbf355.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_20k_voc12aug/isanet_r101-d8_512x512_20k_voc12aug_20210901_115805.log.json) | +| ISANet | R-101-D8 | 512x512 | 40000 | 9.465 | 7.42 | 78.12 | 79.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/isanet/isanet_r101-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_40k_voc12aug/isanet_r101-d8_512x512_40k_voc12aug_20210901_145814-bc71233b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_40k_voc12aug/isanet_r101-d8_512x512_40k_voc12aug_20210901_145814.log.json) | diff --git a/configs/isanet/isanet.yml b/configs/isanet/isanet.yml new file mode 100644 index 0000000000..405b3c1231 --- /dev/null +++ b/configs/isanet/isanet.yml @@ -0,0 +1,369 @@ +Collections: +- Name: ISANet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1907.12273 + Title: Interlaced Sparse Self-Attention for Semantic Segmentation + README: configs/isanet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/decode_heads/isa_head.py#L58 + Version: v0.18.0 + Converted From: + Code: https://github.com/openseg-group/openseg.pytorch +Models: +- Name: isanet_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 343.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.869 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.49 + mIoU(ms+flip): 79.44 + Config: configs/isanet/isanet_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_40k_cityscapes/isanet_r50-d8_512x1024_40k_cityscapes_20210901_054739-981bd763.pth +- Name: isanet_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 343.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.869 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.68 + mIoU(ms+flip): 80.25 + Config: configs/isanet/isanet_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_80k_cityscapes/isanet_r50-d8_512x1024_80k_cityscapes_20210901_074202-89384497.pth +- Name: isanet_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 649.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.759 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.7 + mIoU(ms+flip): 80.28 + Config: configs/isanet/isanet_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_40k_cityscapes/isanet_r50-d8_769x769_40k_cityscapes_20210903_050200-4ae7e65b.pth +- Name: isanet_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 649.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.759 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.29 + mIoU(ms+flip): 80.53 + Config: configs/isanet/isanet_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_80k_cityscapes/isanet_r50-d8_769x769_80k_cityscapes_20210903_101126-99b54519.pth +- Name: isanet_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 425.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.425 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.58 + mIoU(ms+flip): 81.05 + Config: configs/isanet/isanet_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_40k_cityscapes/isanet_r101-d8_512x1024_40k_cityscapes_20210901_145553-293e6bd6.pth +- Name: isanet_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 425.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.425 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.32 + mIoU(ms+flip): 81.58 + Config: configs/isanet/isanet_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_80k_cityscapes/isanet_r101-d8_512x1024_80k_cityscapes_20210901_145243-5b99c9b2.pth +- Name: isanet_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 1086.96 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.815 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.68 + mIoU(ms+flip): 80.95 + Config: configs/isanet/isanet_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_40k_cityscapes/isanet_r101-d8_769x769_40k_cityscapes_20210903_111320-509e7224.pth +- Name: isanet_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 1086.96 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.815 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.61 + mIoU(ms+flip): 81.59 + Config: configs/isanet/isanet_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_80k_cityscapes/isanet_r101-d8_769x769_80k_cityscapes_20210903_111319-24f71dfa.pth +- Name: 
isanet_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 44.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.12 + mIoU(ms+flip): 42.35 + Config: configs/isanet/isanet_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_80k_ade20k/isanet_r50-d8_512x512_80k_ade20k_20210903_124557-6ed83a0c.pth +- Name: isanet_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 44.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.59 + mIoU(ms+flip): 43.07 + Config: configs/isanet/isanet_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_160k_ade20k/isanet_r50-d8_512x512_160k_ade20k_20210903_104850-f752d0a3.pth +- Name: isanet_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 94.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.562 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.51 + mIoU(ms+flip): 44.38 + Config: configs/isanet/isanet_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_80k_ade20k/isanet_r101-d8_512x512_80k_ade20k_20210903_162056-68b235c2.pth +- Name: isanet_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 94.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.562 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.8 + mIoU(ms+flip): 45.4 + Config: configs/isanet/isanet_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_160k_ade20k/isanet_r101-d8_512x512_160k_ade20k_20210903_211431-a7879dcd.pth +- Name: isanet_r50-d8_4xb4-20k_voc12aug-512x512 + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 43.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.9 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.78 + mIoU(ms+flip): 77.79 + Config: configs/isanet/isanet_r50-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_20k_voc12aug/isanet_r50-d8_512x512_20k_voc12aug_20210901_164838-79d59b80.pth +- Name: isanet_r50-d8_4xb4-40k_voc12aug-512x512 + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + inference time (ms/im): + - value: 43.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.9 + 
Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.2 + mIoU(ms+flip): 77.22 + Config: configs/isanet/isanet_r50-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_40k_voc12aug/isanet_r50-d8_512x512_40k_voc12aug_20210901_151349-7d08a54e.pth +- Name: isanet_r101-d8_4xb4-20k_voc12aug-512x512 + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 134.77 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.465 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.46 + mIoU(ms+flip): 79.16 + Config: configs/isanet/isanet_r101-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_20k_voc12aug/isanet_r101-d8_512x512_20k_voc12aug_20210901_115805-3ccbf355.pth +- Name: isanet_r101-d8_4xb4-40k_voc12aug-512x512 + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + inference time (ms/im): + - value: 134.77 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.465 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.12 + mIoU(ms+flip): 79.04 + Config: configs/isanet/isanet_r101-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_40k_voc12aug/isanet_r101-d8_512x512_40k_voc12aug_20210901_145814-bc71233b.pth diff --git a/configs/isanet/isanet_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/isanet/isanet_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..6093aeb4f7 --- /dev/null +++ b/configs/isanet/isanet_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/isanet/isanet_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/isanet/isanet_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..dc14c76dfb --- /dev/null +++ b/configs/isanet/isanet_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/isanet/isanet_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/isanet/isanet_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..1735f89d41 --- /dev/null +++ b/configs/isanet/isanet_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/isanet/isanet_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/isanet/isanet_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..b1a6371b76 --- /dev/null +++ b/configs/isanet/isanet_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/isanet/isanet_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/isanet/isanet_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 
100644 index 0000000000..c2fb09e374 --- /dev/null +++ b/configs/isanet/isanet_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/isanet/isanet_r101-d8_4xb4-20k_voc12aug-512x512.py b/configs/isanet/isanet_r101-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..7c225cfe3a --- /dev/null +++ b/configs/isanet/isanet_r101-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/isanet/isanet_r101-d8_4xb4-40k_voc12aug-512x512.py b/configs/isanet/isanet_r101-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..5e86ee584f --- /dev/null +++ b/configs/isanet/isanet_r101-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_4xb4-40k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/isanet/isanet_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/isanet/isanet_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..090e86f243 --- /dev/null +++ b/configs/isanet/isanet_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/isanet/isanet_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/isanet/isanet_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..f03365e224 --- /dev/null +++ b/configs/isanet/isanet_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/isanet/isanet_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/isanet/isanet_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..f073a7b691 --- /dev/null +++ b/configs/isanet/isanet_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/isanet/isanet_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/isanet/isanet_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..4be445d5cf --- /dev/null +++ b/configs/isanet/isanet_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/isanet/isanet_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/isanet/isanet_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..0278ad852a --- 
/dev/null +++ b/configs/isanet/isanet_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/isanet/isanet_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/isanet/isanet_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..1f4af8d0ba --- /dev/null +++ b/configs/isanet/isanet_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/isanet/isanet_r50-d8_4xb4-20k_voc12aug-512x512.py b/configs/isanet/isanet_r50-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..591df42a87 --- /dev/null +++ b/configs/isanet/isanet_r50-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/isanet/isanet_r50-d8_4xb4-40k_voc12aug-512x512.py b/configs/isanet/isanet_r50-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..a59879b0fc --- /dev/null +++ b/configs/isanet/isanet_r50-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/isanet/isanet_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/isanet/isanet_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..7df05c3781 --- /dev/null +++ b/configs/isanet/isanet_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/knet/README.md b/configs/knet/README.md new file mode 100644 index 0000000000..ed5bc06257 --- /dev/null +++ b/configs/knet/README.md @@ -0,0 +1,50 @@ +# K-Net + +[K-Net: Towards Unified Image Segmentation](https://arxiv.org/abs/2106.14855) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Semantic, instance, and panoptic segmentations have been addressed using different and specialized frameworks despite their underlying 
connections. This paper presents a unified, simple, and effective framework for these essentially similar tasks. The framework, named K-Net, segments both instances and semantic categories consistently by a group of learnable kernels, where each kernel is responsible for generating a mask for either a potential instance or a stuff class. To remedy the difficulties of distinguishing various instances, we propose a kernel update strategy that enables each kernel dynamic and conditional on its meaningful group in the input image. K-Net can be trained in an end-to-end manner with bipartite matching, and its training and inference are naturally NMS-free and box-free. Without bells and whistles, K-Net surpasses all previous published state-of-the-art single-model results of panoptic segmentation on MS COCO test-dev split and semantic segmentation on ADE20K val split with 55.2% PQ and 54.3% mIoU, respectively. Its instance segmentation performance is also on par with Cascade Mask R-CNN on MS COCO with 60%-90% faster inference speeds. Code and models will be released at [this https URL](https://github.com/ZwwWayne/K-Net/). + + + +
+ +
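For intuition, the two ingredients described in the abstract can be sketched in a few lines of PyTorch. This is a simplified toy with made-up shapes and a stand-in update rule, not the `IterativeDecodeHead`/`KernelUpdateHead` implementation used by the configs below.

```python
# Toy sketch of kernel-based mask prediction plus a kernel-update step
# (illustrative only; the real update rule is adaptive and learned).
import torch

batch, channels, height, width = 2, 512, 64, 64
num_kernels = 150                                     # e.g. one kernel per ADE20K class

feats = torch.randn(batch, channels, height, width)   # decoder feature map
kernels = torch.randn(num_kernels, channels)          # learnable mask kernels

# Each kernel is dotted with every spatial feature to produce its mask logits.
masks = torch.einsum('nc,bchw->bnhw', kernels, feats)

# Kernel update: gather the features currently assigned to each kernel
# (its "group") and use them to refine the kernel before the next stage,
# making the kernels dynamic and conditioned on the input image.
group_feats = torch.einsum('bnhw,bchw->bnc', masks.sigmoid(), feats)
updated_kernels = kernels + group_feats.mean(dim=0)   # stand-in for the real update
```

## Citation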
+ +```bibtex +@inproceedings{zhang2021knet, + title={{K-Net: Towards} Unified Image Segmentation}, + author={Wenwei Zhang and Jiangmiao Pang and Kai Chen and Chen Change Loy}, + year={2021}, + booktitle={NeurIPS}, +} +``` + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------------- | -------- | --------- | ------- | -------- | -------------- | ----- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| KNet + FCN | R-50-D8 | 512x512 | 80000 | 7.01 | 19.24 | 43.60 | 45.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/knet/knet-s3_r50-d8_fcn_8xb2-adamw-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_043751-abcab920.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_043751.log.json) | +| KNet + PSPNet | R-50-D8 | 512x512 | 80000 | 6.98 | 20.04 | 44.18 | 45.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/knet/knet-s3_r50-d8_pspnet_8xb2-adamw-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_054634-d2c72240.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_054634.log.json) | +| KNet + DeepLabV3 | R-50-D8 | 512x512 | 80000 | 7.42 | 12.10 | 45.06 | 46.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/knet/knet-s3_r50-d8_deeplabv3_8xb2-adamw-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_041642-00c8fbeb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_041642.log.json) | +| KNet + UperNet | R-50-D8 | 512x512 | 80000 | 7.34 | 17.11 | 43.45 | 44.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/knet/knet-s3_r50-d8_upernet_8xb2-adamw-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220304_125657-215753b0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220304_125657.log.json) | +| KNet + UperNet | Swin-T | 512x512 | 80000 | 7.57 | 15.56 | 45.84 | 46.27 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/knet/knet-s3_swin-t_upernet_8xb2-adamw-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k_20220303_133059-7545e1dc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k_20220303_133059.log.json) | +| KNet + UperNet | Swin-L | 512x512 | 80000 | 13.5 | 8.29 | 52.05 | 53.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/knet/knet-s3_swin-l_upernet_8xb2-adamw-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k_20220303_154559-d8da9a90.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k_20220303_154559.log.json) | +| KNet + UperNet | Swin-L | 640x640 | 80000 | 13.54 | 8.29 | 52.21 | 53.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/knet/knet-s3_swin-l_upernet_8xb2-adamw-80k_ade20k-640x640.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k_20220301_220747-8787fc71.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k_20220301_220747.log.json) | + +Note: + +- All experiments of K-Net are implemented with 8 V100 (32G) GPUs with 2 samplers per GPU. 
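For reference, a minimal inference sketch with one of the checkpoints above. This assumes an mmsegmentation 1.x installation and its `mmseg.apis.init_model` / `inference_model` API, and that the checkpoint file has been downloaded locally; the input image path is a placeholder.

```python
from mmseg.apis import inference_model, init_model

config_file = 'configs/knet/knet-s3_r50-d8_fcn_8xb2-adamw-80k_ade20k-512x512.py'
# checkpoint name taken from the ADE20K table above
checkpoint_file = 'knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_043751-abcab920.pth'

model = init_model(config_file, checkpoint_file, device='cuda:0')
result = inference_model(model, 'demo.png')  # placeholder input image
```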
diff --git a/configs/knet/knet-s3_r50-d8_deeplabv3_8xb2-adamw-80k_ade20k-512x512.py b/configs/knet/knet-s3_r50-d8_deeplabv3_8xb2-adamw-80k_ade20k-512x512.py new file mode 100644 index 0000000000..7946cca067 --- /dev/null +++ b/configs/knet/knet-s3_r50-d8_deeplabv3_8xb2-adamw-80k_ade20k-512x512.py @@ -0,0 +1,111 @@ +_base_ = [ + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + size=crop_size, + seg_pad_val=255) +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +num_stages = 3 +conv_kernel_size = 1 +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='IterativeDecodeHead', + num_stages=num_stages, + kernel_update_head=[ + dict( + type='KernelUpdateHead', + num_classes=150, + num_ffn_fcs=2, + num_heads=8, + num_mask_fcs=1, + feedforward_channels=2048, + in_channels=512, + out_channels=512, + dropout=0.0, + conv_kernel_size=conv_kernel_size, + ffn_act_cfg=dict(type='ReLU', inplace=True), + with_ffn=True, + feat_transform_cfg=dict( + conv_cfg=dict(type='Conv2d'), act_cfg=None), + kernel_updator_cfg=dict( + type='KernelUpdator', + in_channels=256, + feat_channels=256, + out_channels=256, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'))) for _ in range(num_stages) + ], + kernel_generate_head=dict( + type='ASPPHead', + in_channels=2048, + in_index=3, + channels=512, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) + +# optimizer +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0005), + clip_grad=dict(max_norm=1, norm_type=2)) +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=1000, + end=80000, + milestones=[60000, 72000], + by_epoch=False, + ) +] +# In K-Net implementation we use batch size 2 per GPU as default +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/knet/knet-s3_r50-d8_fcn_8xb2-adamw-80k_ade20k-512x512.py b/configs/knet/knet-s3_r50-d8_fcn_8xb2-adamw-80k_ade20k-512x512.py new file mode 100644 index 0000000000..497cd04bf5 --- /dev/null +++ b/configs/knet/knet-s3_r50-d8_fcn_8xb2-adamw-80k_ade20k-512x512.py @@ -0,0 +1,112 @@ +_base_ = [ + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) 
+data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + size=crop_size, + seg_pad_val=255) +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +num_stages = 3 +conv_kernel_size = 1 +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='IterativeDecodeHead', + num_stages=num_stages, + kernel_update_head=[ + dict( + type='KernelUpdateHead', + num_classes=150, + num_ffn_fcs=2, + num_heads=8, + num_mask_fcs=1, + feedforward_channels=2048, + in_channels=512, + out_channels=512, + dropout=0.0, + conv_kernel_size=conv_kernel_size, + ffn_act_cfg=dict(type='ReLU', inplace=True), + with_ffn=True, + feat_transform_cfg=dict( + conv_cfg=dict(type='Conv2d'), act_cfg=None), + kernel_updator_cfg=dict( + type='KernelUpdator', + in_channels=256, + feat_channels=256, + out_channels=256, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'))) for _ in range(num_stages) + ], + kernel_generate_head=dict( + type='FCNHead', + in_channels=2048, + in_index=3, + channels=512, + num_convs=2, + concat_input=True, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) +# optimizer +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0005), + clip_grad=dict(max_norm=1, norm_type=2)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=1000, + end=80000, + milestones=[60000, 72000], + by_epoch=False, + ) +] +# In K-Net implementation we use batch size 2 per GPU as default +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/knet/knet-s3_r50-d8_pspnet_8xb2-adamw-80k_ade20k-512x512.py b/configs/knet/knet-s3_r50-d8_pspnet_8xb2-adamw-80k_ade20k-512x512.py new file mode 100644 index 0000000000..b918671bfc --- /dev/null +++ b/configs/knet/knet-s3_r50-d8_pspnet_8xb2-adamw-80k_ade20k-512x512.py @@ -0,0 +1,110 @@ +_base_ = [ + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + size=crop_size, + seg_pad_val=255) +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +num_stages = 3 +conv_kernel_size = 1 +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + 
depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='IterativeDecodeHead', + num_stages=num_stages, + kernel_update_head=[ + dict( + type='KernelUpdateHead', + num_classes=150, + num_ffn_fcs=2, + num_heads=8, + num_mask_fcs=1, + feedforward_channels=2048, + in_channels=512, + out_channels=512, + dropout=0.0, + conv_kernel_size=conv_kernel_size, + ffn_act_cfg=dict(type='ReLU', inplace=True), + with_ffn=True, + feat_transform_cfg=dict( + conv_cfg=dict(type='Conv2d'), act_cfg=None), + kernel_updator_cfg=dict( + type='KernelUpdator', + in_channels=256, + feat_channels=256, + out_channels=256, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'))) for _ in range(num_stages) + ], + kernel_generate_head=dict( + type='PSPHead', + in_channels=2048, + in_index=3, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) +# optimizer +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0005), + clip_grad=dict(max_norm=1, norm_type=2)) +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=1000, + end=80000, + milestones=[60000, 72000], + by_epoch=False, + ) +] +# In K-Net implementation we use batch size 2 per GPU as default +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/knet/knet-s3_r50-d8_upernet_8xb2-adamw-80k_ade20k-512x512.py b/configs/knet/knet-s3_r50-d8_upernet_8xb2-adamw-80k_ade20k-512x512.py new file mode 100644 index 0000000000..a0a66c57dd --- /dev/null +++ b/configs/knet/knet-s3_r50-d8_upernet_8xb2-adamw-80k_ade20k-512x512.py @@ -0,0 +1,111 @@ +_base_ = [ + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + size=crop_size, + seg_pad_val=255) +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +num_stages = 3 +conv_kernel_size = 1 + +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='IterativeDecodeHead', + num_stages=num_stages, + kernel_update_head=[ + dict( + type='KernelUpdateHead', + num_classes=150, + num_ffn_fcs=2, + num_heads=8, + num_mask_fcs=1, + feedforward_channels=2048, + in_channels=512, + out_channels=512, + 
dropout=0.0, + conv_kernel_size=conv_kernel_size, + ffn_act_cfg=dict(type='ReLU', inplace=True), + with_ffn=True, + feat_transform_cfg=dict( + conv_cfg=dict(type='Conv2d'), act_cfg=None), + kernel_updator_cfg=dict( + type='KernelUpdator', + in_channels=256, + feat_channels=256, + out_channels=256, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'))) for _ in range(num_stages) + ], + kernel_generate_head=dict( + type='UPerHead', + in_channels=[256, 512, 1024, 2048], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) +# optimizer +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0005), + clip_grad=dict(max_norm=1, norm_type=2)) +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=1000, + end=80000, + milestones=[60000, 72000], + by_epoch=False, + ) +] +# In K-Net implementation we use batch size 2 per GPU as default +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/knet/knet-s3_swin-l_upernet_8xb2-adamw-80k_ade20k-512x512.py b/configs/knet/knet-s3_swin-l_upernet_8xb2-adamw-80k_ade20k-512x512.py new file mode 100644 index 0000000000..c6f4eb6ae2 --- /dev/null +++ b/configs/knet/knet-s3_swin-l_upernet_8xb2-adamw-80k_ade20k-512x512.py @@ -0,0 +1,21 @@ +_base_ = 'knet-s3_swin-t_upernet_8xb2-adamw-80k_ade20k-512x512.py' + +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window7_224_22k_20220308-d5bdebaf.pth' # noqa +# model settings +model = dict( + pretrained=checkpoint_file, + backbone=dict( + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=7, + use_abs_pos_embed=False, + drop_path_rate=0.3, + patch_norm=True), + decode_head=dict( + kernel_generate_head=dict(in_channels=[192, 384, 768, 1536])), + auxiliary_head=dict(in_channels=768)) +# In K-Net implementation we use batch size 2 per GPU as default +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/knet/knet-s3_swin-l_upernet_8xb2-adamw-80k_ade20k-640x640.py b/configs/knet/knet-s3_swin-l_upernet_8xb2-adamw-80k_ade20k-640x640.py new file mode 100644 index 0000000000..84c3d8cc6a --- /dev/null +++ b/configs/knet/knet-s3_swin-l_upernet_8xb2-adamw-80k_ade20k-640x640.py @@ -0,0 +1,57 @@ +_base_ = 'knet-s3_swin-t_upernet_8xb2-adamw-80k_ade20k-512x512.py' + +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window7_224_22k_20220308-d5bdebaf.pth' # noqa +# model settings +crop_size = (640, 640) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + 
pad_val=0, + size=crop_size, + seg_pad_val=255) +model = dict( + data_preprocessor=data_preprocessor, + pretrained=checkpoint_file, + backbone=dict( + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=7, + use_abs_pos_embed=False, + drop_path_rate=0.4, + patch_norm=True), + decode_head=dict( + kernel_generate_head=dict(in_channels=[192, 384, 768, 1536])), + auxiliary_head=dict(in_channels=768)) + +crop_size = (640, 640) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict( + type='RandomResize', + scale=(2048, 640), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 640), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader +# In K-Net implementation we use batch size 2 per GPU as default +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/knet/knet-s3_swin-t_upernet_8xb2-adamw-80k_ade20k-512x512.py b/configs/knet/knet-s3_swin-t_upernet_8xb2-adamw-80k_ade20k-512x512.py new file mode 100644 index 0000000000..a7acec4996 --- /dev/null +++ b/configs/knet/knet-s3_swin-t_upernet_8xb2-adamw-80k_ade20k-512x512.py @@ -0,0 +1,63 @@ +_base_ = 'knet-s3_r50-d8_upernet_8xb2-adamw-80k_ade20k-512x512.py' + +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_tiny_patch4_window7_224_20220308-f41b89d3.pth' # noqa + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +num_stages = 3 +conv_kernel_size = 1 + +model = dict( + type='EncoderDecoder', + pretrained=checkpoint_file, + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + use_abs_pos_embed=False, + patch_norm=True, + out_indices=(0, 1, 2, 3)), + decode_head=dict( + kernel_generate_head=dict(in_channels=[96, 192, 384, 768])), + auxiliary_head=dict(in_channels=384)) + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + # modify learning rate following the official implementation of Swin Transformer # noqa + optimizer=dict( + type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.0005), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) 
+ }), + clip_grad=dict(max_norm=1, norm_type=2)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=1000, + end=80000, + milestones=[60000, 72000], + by_epoch=False, + ) +] +# In K-Net implementation we use batch size 2 per GPU as default +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/knet/knet.yml b/configs/knet/knet.yml new file mode 100644 index 0000000000..1c98e4703c --- /dev/null +++ b/configs/knet/knet.yml @@ -0,0 +1,169 @@ +Collections: +- Name: KNet + Metadata: + Training Data: + - ADE20K + Paper: + URL: https://arxiv.org/abs/2106.14855 + Title: 'K-Net: Towards Unified Image Segmentation' + README: configs/knet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.23.0/mmseg/models/decode_heads/knet_head.py#L392 + Version: v0.23.0 + Converted From: + Code: https://github.com/ZwwWayne/K-Net/ +Models: +- Name: knet-s3_r50-d8_fcn_8xb2-adamw-80k_ade20k-512x512 + In Collection: KNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 51.98 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.01 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.6 + mIoU(ms+flip): 45.12 + Config: configs/knet/knet-s3_r50-d8_fcn_8xb2-adamw-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_043751-abcab920.pth +- Name: knet-s3_r50-d8_pspnet_8xb2-adamw-80k_ade20k-512x512 + In Collection: KNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 49.9 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.98 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.18 + mIoU(ms+flip): 45.58 + Config: configs/knet/knet-s3_r50-d8_pspnet_8xb2-adamw-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_054634-d2c72240.pth +- Name: knet-s3_r50-d8_deeplabv3_8xb2-adamw-80k_ade20k-512x512 + In Collection: KNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 82.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.42 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.06 + mIoU(ms+flip): 46.11 + Config: configs/knet/knet-s3_r50-d8_deeplabv3_8xb2-adamw-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_041642-00c8fbeb.pth +- Name: knet-s3_r50-d8_upernet_8xb2-adamw-80k_ade20k-512x512 + In Collection: KNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 58.45 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.34 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + 
Metrics: + mIoU: 43.45 + mIoU(ms+flip): 44.07 + Config: configs/knet/knet-s3_r50-d8_upernet_8xb2-adamw-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220304_125657-215753b0.pth +- Name: knet-s3_swin-t_upernet_8xb2-adamw-80k_ade20k-512x512 + In Collection: KNet + Metadata: + backbone: Swin-T + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 64.27 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.57 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.84 + mIoU(ms+flip): 46.27 + Config: configs/knet/knet-s3_swin-t_upernet_8xb2-adamw-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k_20220303_133059-7545e1dc.pth +- Name: knet-s3_swin-l_upernet_8xb2-adamw-80k_ade20k-512x512 + In Collection: KNet + Metadata: + backbone: Swin-L + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 120.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 52.05 + mIoU(ms+flip): 53.24 + Config: configs/knet/knet-s3_swin-l_upernet_8xb2-adamw-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k_20220303_154559-d8da9a90.pth +- Name: knet-s3_swin-l_upernet_8xb2-adamw-80k_ade20k-640x640 + In Collection: KNet + Metadata: + backbone: Swin-L + crop size: (640,640) + lr schd: 80000 + inference time (ms/im): + - value: 120.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640,640) + Training Memory (GB): 13.54 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 52.21 + mIoU(ms+flip): 53.34 + Config: configs/knet/knet-s3_swin-l_upernet_8xb2-adamw-80k_ade20k-640x640.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k_20220301_220747-8787fc71.pth diff --git a/configs/mae/README.md b/configs/mae/README.md new file mode 100644 index 0000000000..330749732e --- /dev/null +++ b/configs/mae/README.md @@ -0,0 +1,82 @@ +# MAE + +[Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +This paper shows that masked autoencoders (MAE) are scalable self-supervised learners for computer vision. Our MAE approach is simple: we mask random patches of the input image and reconstruct the missing pixels. It is based on two core designs. First, we develop an asymmetric encoder-decoder architecture, with an encoder that operates only on the visible subset of patches (without mask tokens), along with a lightweight decoder that reconstructs the original image from the latent representation and mask tokens. Second, we find that masking a high proportion of the input image, e.g., 75%, yields a nontrivial and meaningful self-supervisory task. 
Coupling these two designs enables us to train large models efficiently and effectively: we accelerate training (by 3x or more) and improve accuracy. Our scalable approach allows for learning high-capacity models that generalize well: e.g., a vanilla ViT-Huge model achieves the best accuracy (87.8%) among methods that use only ImageNet-1K data. Transfer performance in downstream tasks outperforms supervised pre-training and shows promising scaling behavior. + + + +
+ +
+ +## Citation + +```bibtex +@article{he2021masked, + title={Masked autoencoders are scalable vision learners}, + author={He, Kaiming and Chen, Xinlei and Xie, Saining and Li, Yanghao and Doll{\'a}r, Piotr and Girshick, Ross}, + journal={arXiv preprint arXiv:2111.06377}, + year={2021} +} +``` + +## Usage + +To use other repositories' pre-trained models, it is necessary to convert keys. + +We provide a script [`beit2mmseg.py`](../../tools/model_converters/beit2mmseg.py) in the tools directory to convert the keys of an MAE model from [the official repo](https://github.com/facebookresearch/mae) to MMSegmentation style. + +```shell +python tools/model_converters/beit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +E.g. + +```shell +python tools/model_converters/beit2mmseg.py https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth pretrain/mae_pretrain_vit_base_mmcls.pth +``` + +This script converts the model at `PRETRAIN_PATH` and stores the converted model at `STORE_PATH`. + +In our default setting, the pretrained models and their corresponding original models are listed below: + +| pretrained models | original models | +| ------------------------------- | ------------------------------------------------------------------------------------------------ | +| mae_pretrain_vit_base_mmcls.pth | ['mae_pretrain_vit_base'](https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth) | + +Verify the single-scale results of the model: + +```shell +sh tools/dist_test.sh \ +configs/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k.py \ +upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752-f92a2975.pth $GPUS --eval mIoU +``` + +Since the relative position embedding requires the input height and width to be equal, sliding-window inference is adopted for multi-scale testing, and we set min_size=512, i.e. the shortest edge is 512. Multi-scale inference is therefore run with a separate config instead of the '--aug-test' flag.
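+Concretely, the sliding-window evaluation comes from the `test_cfg` of the released config (the same values appear in `mae-base_upernet_8xb2-amp-160k_ade20k-512x512.py` below); a minimal sketch of the relevant setting:
+
+```python
+# sliding-window inference: square 512x512 crops moved with a stride of 341,
+# so neighbouring windows overlap and the equal-height/width requirement of
+# the relative position embedding is satisfied
+model = dict(
+    test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)))
+```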
For multi-scale inference: + +```shell +sh tools/dist_test.sh \ +configs/mae/upernet_mae-base_fp16_512x512_160k_ade20k_ms.py \ +upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752-f92a2975.pth $GPUS --eval mIoU +``` + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | pretrain | pretrain img size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | -------- | --------- | ----------- | ----------------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| UPerNet | ViT-B | 512x512 | ImageNet-1K | 224x224 | 16 | 160000 | 9.96 | 7.14 | 48.13 | 48.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mae/mae-base_upernet_8xb2-amp-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k/upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752-f92a2975.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k/upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752.log.json) | diff --git a/configs/mae/mae-base_upernet_8xb2-amp-160k_ade20k-512x512-ms.py b/configs/mae/mae-base_upernet_8xb2-amp-160k_ade20k-512x512-ms.py new file mode 100644 index 0000000000..ec32fea54b --- /dev/null +++ b/configs/mae/mae-base_upernet_8xb2-amp-160k_ade20k-512x512-ms.py @@ -0,0 +1,16 @@ +_base_ = './mae-base_upernet_8xb2-amp-160k_ade20k-512x512.py' + +test_pipeline = [ + dict(type='LoadImageFromFile'), + # TODO: Refactor 'MultiScaleFlipAug' which supports + # `min_size` feature in `Resize` class + # img_ratios is [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] + # original image scale is (2048, 512) + dict(type='Resize', scale=(2048, 512), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs') +] +val_dataloader = dict(batch_size=1, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/configs/mae/mae-base_upernet_8xb2-amp-160k_ade20k-512x512.py b/configs/mae/mae-base_upernet_8xb2-amp-160k_ade20k-512x512.py new file mode 100644 index 0000000000..b8eae174e9 --- /dev/null +++ b/configs/mae/mae-base_upernet_8xb2-amp-160k_ade20k-512x512.py @@ -0,0 +1,54 @@ +_base_ = [ + '../_base_/models/upernet_mae.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='./pretrain/mae_pretrain_vit_base_mmcls.pth', + backbone=dict( + type='MAE', + img_size=(512, 512), + patch_size=16, + embed_dims=768, + num_layers=12, + num_heads=12, + mlp_ratio=4, + init_values=1.0, + drop_path_rate=0.1, + out_indices=[3, 5, 7, 11]), + neck=dict(embed_dim=768, rescales=[4, 2, 1, 0.5]), + decode_head=dict( + 
in_channels=[768, 768, 768, 768], num_classes=150, channels=768), + auxiliary_head=dict(in_channels=768, num_classes=150), + test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341))) + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=1e-4, betas=(0.9, 0.999), weight_decay=0.05), + paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.65), + constructor='LayerDecayOptimizerConstructor') + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + eta_min=0.0, + power=1.0, + begin=1500, + end=160000, + by_epoch=False, + ) +] + +# mixed precision +fp16 = dict(loss_scale='dynamic') + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/mae/mae.yml b/configs/mae/mae.yml new file mode 100644 index 0000000000..72b2cc7f12 --- /dev/null +++ b/configs/mae/mae.yml @@ -0,0 +1,23 @@ +Models: +- Name: mae-base_upernet_8xb2-amp-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: ViT-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 140.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (512,512) + Training Memory (GB): 9.96 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.13 + mIoU(ms+flip): 48.7 + Config: configs/mae/mae-base_upernet_8xb2-amp-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k/upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752-f92a2975.pth diff --git a/configs/mask2former/README.md b/configs/mask2former/README.md new file mode 100644 index 0000000000..8881b0d66c --- /dev/null +++ b/configs/mask2former/README.md @@ -0,0 +1,72 @@ +# Mask2Former + +[Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Image segmentation is about grouping pixels with different semantics, e.g., category or instance membership, where each choice of semantics defines a task. While only the semantics of each task differ, current research focuses on designing specialized architectures for each task. We present Masked-attention Mask Transformer (Mask2Former), a new architecture capable of addressing any image segmentation task (panoptic, instance or semantic). Its key components include masked attention, which extracts localized features by constraining cross-attention within predicted mask regions. In addition to reducing the research effort by at least three times, it outperforms the best specialized architectures by a significant margin on four popular datasets. Most notably, Mask2Former sets a new state-of-the-art for panoptic segmentation (57.8 PQ on COCO), instance segmentation (50.1 AP on COCO) and semantic segmentation (57.7 mIoU on ADE20K). + +```bibtex +@inproceedings{cheng2021mask2former, + title={Masked-attention Mask Transformer for Universal Image Segmentation}, + author={Bowen Cheng and Ishan Misra and Alexander G. Schwing and Alexander Kirillov and Rohit Girdhar}, + journal={CVPR}, + year={2022} +} +@inproceedings{cheng2021maskformer, + title={Per-Pixel Classification is Not All You Need for Semantic Segmentation}, + author={Bowen Cheng and Alexander G. 
Schwing and Alexander Kirillov}, + journal={NeurIPS}, + year={2021} +} +``` + +### Usage + +- Mask2Former model needs to install [MMDetection](https://github.com/open-mmlab/mmdetection) first. + +```shell +pip install "mmdet>=3.0.0rc4" +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ----------- | -------------- | --------- | ------- | -------: | -------------- | ----- | ------------: | -----------------------------------------------------------------------------------------------------------------------------------------------------------: | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Mask2Former | R-50-D32 | 512x1024 | 90000 | 5806 | 9.17 | 80.44 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_r50_8xb2-90k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_r50_8xb2-90k_cityscapes-512x1024/mask2former_r50_8xb2-90k_cityscapes-512x1024_20221202_140802-2ff5ffa0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_r50_8xb2-90k_cityscapes-512x1024/mask2former_r50_8xb2-90k_cityscapes-512x1024_20221202_140802.json) | +| Mask2Former | R-101-D32 | 512x1024 | 90000 | 6971 | 7.11 | 80.80 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_r101_8xb2-90k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_r101_8xb2-90k_cityscapes-512x1024/mask2former_r101_8xb2-90k_cityscapes-512x1024_20221130_031628-8ad528ea.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_r101_8xb2-90k_cityscapes-512x1024/mask2former_r101_8xb2-90k_cityscapes-512x1024_20221130_031628.json)) | +| Mask2Former | Swin-T | 512x1024 | 90000 | 6511 | 7.18 | 81.71 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_swin-t_8xb2-90k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-t_8xb2-90k_cityscapes-512x1024/mask2former_swin-t_8xb2-90k_cityscapes-512x1024_20221127_144501-290b34af.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-t_8xb2-90k_cityscapes-512x1024/mask2former_swin-t_8xb2-90k_cityscapes-512x1024_20221127_144501.json)) | +| Mask2Former | Swin-S | 512x1024 | 90000 | 8282 | 5.57 | 82.57 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_swin-s_8xb2-90k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-s_8xb2-90k_cityscapes-512x1024/mask2former_swin-s_8xb2-90k_cityscapes-512x1024_20221127_143802-7c98854a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-s_8xb2-90k_cityscapes-512x1024/mask2former_swin-s_8xb2-90k_cityscapes-512x1024_20221127_143802.json)) | +| Mask2Former | Swin-B (in22k) | 
512x1024 | 90000 | 11152 | 4.32 | 83.52 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024/mask2former_swin-b-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024_20221203_045030-59a4379a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024/mask2former_swin-b-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024_20221203_045030.json)) | +| Mask2Former | Swin-L (in22k) | 512x1024 | 90000 | 16207 | 2.86 | 83.65 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024/mask2former_swin-l-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024_20221202_141901-dc2c2ddd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024/mask2former_swin-l-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024_20221202_141901.json)) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ----------- | -------------- | --------- | ------- | -------: | -------------- | ----- | ------------: | -------------------------------------------------------------------------------------------------------------------------------------------------------: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Mask2Former | R-50-D32 | 512x512 | 160000 | 3385 | 26.59 | 47.87 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_r50_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_r50_8xb2-160k_ade20k-512x512/mask2former_r50_8xb2-160k_ade20k-512x512_20221204_000055-4c62652d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_r50_8xb2-160k_ade20k-512x512/mask2former_r50_8xb2-160k_ade20k-512x512_20221204_000055.json)) | +| Mask2Former | R-101-D32 | 512x512 | 160000 | 4190 | 22.97 | 48.60 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_r101_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_r101_8xb2-160k_ade20k-512x512/mask2former_r101_8xb2-160k_ade20k-512x512_20221203_233905-b1169bc0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_r101_8xb2-160k_ade20k-512x512/mask2former_r101_8xb2-160k_ade20k-512x512_20221203_233905.json)) | +| Mask2Former | Swin-T | 512x512 | 160000 | 3826 | 23.82 | 48.66 | - | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_swin-t_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-t_8xb2-160k_ade20k-512x512/mask2former_swin-t_8xb2-160k_ade20k-512x512_20221203_234230-4341520b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-t_8xb2-160k_ade20k-512x512/mask2former_swin-t_8xb2-160k_ade20k-512x512_20221203_234230.json)) | +| Mask2Former | Swin-S | 512x512 | 160000 | 5034 | 19.69 | 51.24 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_swin-s_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-s_8xb2-160k_ade20k-512x512/mask2former_swin-s_8xb2-160k_ade20k-512x512_20221204_143905-ab263c11.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-s_8xb2-160k_ade20k-512x512/mask2former_swin-s_8xb2-160k_ade20k-512x512_20221204_143905.json)) | +| Mask2Former | Swin-B | 640x640 | 160000 | 5795 | 12.48 | 52.44 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640/mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640_20221129_125118-35e3a2c7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640/mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640_20221129_125118.json)) | +| Mask2Former | Swin-B (in22k) | 640x640 | 160000 | 5795 | 12.43 | 53.90 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-160k_ade20k-640x640.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-160k_ade20k-640x640/mask2former_swin-b-in22k-384x384-pre_8xb2-160k_ade20k-640x640_20221203_235230-622e093b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-160k_ade20k-640x640/mask2former_swin-b-in22k-384x384-pre_8xb2-160k_ade20k-640x640_20221203_235230.json)) | +| Mask2Former | Swin-L (in22k) | 640x640 | 160000 | 9077 | 8.81 | 56.01 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640_20221203_235933-5cc76a78.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640_20221203_235933.json)) | + +Note: + +- All experiments of Mask2Former are implemented with 8 A100 GPUs with 2 samplers per GPU. +- As mentioned at [the official repo](https://github.com/facebookresearch/Mask2Former/issues/5), the results of Mask2Former are relatively not stable, the result of Mask2Former(swin-s) on ADE20K dataset in the table is the medium result obtained by training 5 times following the suggestion of the author. 
+- The ResNet backbones utilized in MaskFormer models are standard `ResNet` rather than `ResNetV1c`. +- Test time augmentation is not supported in MMSegmentation 1.x version yet, we would add "ms+flip" results as soon as possible. diff --git a/configs/mask2former/mask2former.yml b/configs/mask2former/mask2former.yml new file mode 100644 index 0000000000..78655fc52f --- /dev/null +++ b/configs/mask2former/mask2former.yml @@ -0,0 +1,290 @@ +Collections: +- Name: Mask2Former + Metadata: + Training Data: + - Usage + - Cityscapes + - ADE20K + Paper: + URL: https://arxiv.org/abs/2112.01527 + Title: Masked-attention Mask Transformer for Universal Image Segmentation + README: configs/mask2former/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/3.x/mmdet/models/dense_heads/mask2former_head.py + Version: 3.x + Converted From: + Code: https://github.com/facebookresearch/Mask2Former +Models: +- Name: mask2former_r50_8xb2-90k_cityscapes-512x1024 + In Collection: Mask2Former + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 90000 + inference time (ms/im): + - value: 109.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5806.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.44 + Config: configs/mask2former/mask2former_r50_8xb2-90k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_r50_8xb2-90k_cityscapes-512x1024/mask2former_r50_8xb2-90k_cityscapes-512x1024_20221202_140802-2ff5ffa0.pth +- Name: mask2former_r101_8xb2-90k_cityscapes-512x1024 + In Collection: Mask2Former + Metadata: + backbone: R-101-D32 + crop size: (512,1024) + lr schd: 90000 + inference time (ms/im): + - value: 140.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6971.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.8 + Config: configs/mask2former/mask2former_r101_8xb2-90k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_r101_8xb2-90k_cityscapes-512x1024/mask2former_r101_8xb2-90k_cityscapes-512x1024_20221130_031628-8ad528ea.pth +- Name: mask2former_swin-t_8xb2-90k_cityscapes-512x1024 + In Collection: Mask2Former + Metadata: + backbone: Swin-T + crop size: (512,1024) + lr schd: 90000 + inference time (ms/im): + - value: 139.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6511.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 81.71 + Config: configs/mask2former/mask2former_swin-t_8xb2-90k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-t_8xb2-90k_cityscapes-512x1024/mask2former_swin-t_8xb2-90k_cityscapes-512x1024_20221127_144501-290b34af.pth +- Name: mask2former_swin-s_8xb2-90k_cityscapes-512x1024 + In Collection: Mask2Former + Metadata: + backbone: Swin-S + crop size: (512,1024) + lr schd: 90000 + inference time (ms/im): + - value: 179.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8282.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 82.57 + Config: configs/mask2former/mask2former_swin-s_8xb2-90k_cityscapes-512x1024.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-s_8xb2-90k_cityscapes-512x1024/mask2former_swin-s_8xb2-90k_cityscapes-512x1024_20221127_143802-7c98854a.pth +- Name: mask2former_swin-b-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024 + In Collection: Mask2Former + Metadata: + backbone: Swin-B (in22k) + crop size: (512,1024) + lr schd: 90000 + inference time (ms/im): + - value: 231.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 11152.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 83.52 + Config: configs/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024/mask2former_swin-b-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024_20221203_045030-59a4379a.pth +- Name: mask2former_swin-l-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024 + In Collection: Mask2Former + Metadata: + backbone: Swin-L (in22k) + crop size: (512,1024) + lr schd: 90000 + inference time (ms/im): + - value: 349.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 16207.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 83.65 + Config: configs/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024/mask2former_swin-l-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024_20221202_141901-dc2c2ddd.pth +- Name: mask2former_r50_8xb2-160k_ade20k-512x512 + In Collection: Mask2Former + Metadata: + backbone: R-50-D32 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 37.61 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 3385.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.87 + Config: configs/mask2former/mask2former_r50_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_r50_8xb2-160k_ade20k-512x512/mask2former_r50_8xb2-160k_ade20k-512x512_20221204_000055-4c62652d.pth +- Name: mask2former_r101_8xb2-160k_ade20k-512x512 + In Collection: Mask2Former + Metadata: + backbone: R-101-D32 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 43.54 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4190.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.6 + Config: configs/mask2former/mask2former_r101_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_r101_8xb2-160k_ade20k-512x512/mask2former_r101_8xb2-160k_ade20k-512x512_20221203_233905-b1169bc0.pth +- Name: mask2former_swin-t_8xb2-160k_ade20k-512x512 + In Collection: Mask2Former + Metadata: + backbone: Swin-T + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 41.98 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 3826.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.66 + Config: configs/mask2former/mask2former_swin-t_8xb2-160k_ade20k-512x512.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-t_8xb2-160k_ade20k-512x512/mask2former_swin-t_8xb2-160k_ade20k-512x512_20221203_234230-4341520b.pth +- Name: mask2former_swin-s_8xb2-160k_ade20k-512x512 + In Collection: Mask2Former + Metadata: + backbone: Swin-S + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 50.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5034.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 51.24 + Config: configs/mask2former/mask2former_swin-s_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-s_8xb2-160k_ade20k-512x512/mask2former_swin-s_8xb2-160k_ade20k-512x512_20221204_143905-ab263c11.pth +- Name: mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640 + In Collection: Mask2Former + Metadata: + backbone: Swin-B + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 80.13 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640,640) + Training Memory (GB): 5795.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 52.44 + Config: configs/mask2former/mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640/mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640_20221129_125118-35e3a2c7.pth +- Name: mask2former_swin-b-in22k-384x384-pre_8xb2-160k_ade20k-640x640 + In Collection: Mask2Former + Metadata: + backbone: Swin-B (in22k) + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 80.45 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640,640) + Training Memory (GB): 5795.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 53.9 + Config: configs/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-160k_ade20k-640x640.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-160k_ade20k-640x640/mask2former_swin-b-in22k-384x384-pre_8xb2-160k_ade20k-640x640_20221203_235230-622e093b.pth +- Name: mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640 + In Collection: Mask2Former + Metadata: + backbone: Swin-L (in22k) + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 113.51 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640,640) + Training Memory (GB): 9077.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 56.01 + Config: configs/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640_20221203_235933-5cc76a78.pth diff --git a/configs/mask2former/mask2former_r101_8xb2-160k_ade20k-512x512.py b/configs/mask2former/mask2former_r101_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..48f6c12d13 --- /dev/null +++ b/configs/mask2former/mask2former_r101_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,7 @@ +_base_ = ['./mask2former_r50_8xb2-160k_ade20k-512x512.py'] + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + 
checkpoint='torchvision://resnet101'))) diff --git a/configs/mask2former/mask2former_r101_8xb2-90k_cityscapes-512x1024.py b/configs/mask2former/mask2former_r101_8xb2-90k_cityscapes-512x1024.py new file mode 100644 index 0000000000..275a7dab52 --- /dev/null +++ b/configs/mask2former/mask2former_r101_8xb2-90k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = ['./mask2former_r50_8xb2-90k_cityscapes-512x1024.py'] + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/configs/mask2former/mask2former_r50_8xb2-160k_ade20k-512x512.py b/configs/mask2former/mask2former_r50_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..598cabfb6d --- /dev/null +++ b/configs/mask2former/mask2former_r50_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,207 @@ +_base_ = ['../_base_/default_runtime.py', '../_base_/datasets/ade20k.py'] + +custom_imports = dict(imports='mmdet.models', allow_failed_imports=False) + +crop_size = (512, 512) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255, + size=crop_size, + test_cfg=dict(size_divisor=32)) +num_classes = 150 +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='ResNet', + depth=50, + deep_stem=False, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='SyncBN', requires_grad=False), + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[256, 512, 1024, 2048], + strides=[4, 8, 16, 32], + feat_channels=256, + out_channels=256, + num_classes=num_classes, + num_queries=100, + num_transformer_feat_level=3, + align_corners=False, + pixel_decoder=dict( + type='mmdet.MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='mmdet.DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='mmdet.BaseTransformerLayer', + attn_cfgs=dict( + type='mmdet.MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True)), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='mmdet.SinePositionalEncoding', + num_feats=128, + normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='mmdet.SinePositionalEncoding', num_feats=128, + normalize=True), + transformer_decoder=dict( + type='mmdet.DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='mmdet.DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='mmdet.MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + 
use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[1.0] * num_classes + [0.1]), + loss_mask=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='mmdet.DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='mmdet.HungarianAssigner', + match_costs=[ + dict(type='mmdet.ClassificationCost', weight=2.0), + dict( + type='mmdet.CrossEntropyLossCost', + weight=5.0, + use_sigmoid=True), + dict( + type='mmdet.DiceCost', + weight=5.0, + pred_act=True, + eps=1.0) + ]), + sampler=dict(type='mmdet.MaskPseudoSampler'))), + train_cfg=dict(), + test_cfg=dict(mode='whole')) + +# dataset config +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict( + type='RandomChoiceResize', + scales=[int(512 * x * 0.1) for x in range(5, 21)], + resize_type='ResizeShortestEdge', + max_size=2048), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +train_dataloader = dict(batch_size=2, dataset=dict(pipeline=train_pipeline)) + +# optimizer +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +optimizer = dict( + type='AdamW', lr=0.0001, weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999)) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=optimizer, + clip_grad=dict(max_norm=0.01, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi, + }, + norm_decay_mult=0.0)) +# learning policy +param_scheduler = [ + dict( + type='PolyLR', + eta_min=0, + power=0.9, + begin=0, + end=160000, + by_epoch=False) +] + +# training schedule for 160k +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=160000, val_interval=5000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict( + type='CheckpointHook', by_epoch=False, interval=5000, + save_best='mIoU'), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='SegVisualizationHook')) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/configs/mask2former/mask2former_r50_8xb2-90k_cityscapes-512x1024.py b/configs/mask2former/mask2former_r50_8xb2-90k_cityscapes-512x1024.py new file mode 100644 index 0000000000..f92dda98a6 --- /dev/null +++ b/configs/mask2former/mask2former_r50_8xb2-90k_cityscapes-512x1024.py @@ -0,0 +1,206 @@ +_base_ = ['../_base_/default_runtime.py', '../_base_/datasets/cityscapes.py'] + +custom_imports = dict(imports='mmdet.models', allow_failed_imports=False) + +crop_size = (512, 1024) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255, + size=crop_size, + test_cfg=dict(size_divisor=32)) +num_classes = 19 +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='ResNet', + depth=50, + deep_stem=False, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='SyncBN', requires_grad=False), + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[256, 512, 1024, 2048], + strides=[4, 8, 16, 32], + feat_channels=256, + out_channels=256, + num_classes=num_classes, + num_queries=100, + num_transformer_feat_level=3, + align_corners=False, + pixel_decoder=dict( + type='mmdet.MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='mmdet.DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='mmdet.BaseTransformerLayer', + attn_cfgs=dict( + type='mmdet.MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True)), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='mmdet.SinePositionalEncoding', + num_feats=128, + normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='mmdet.SinePositionalEncoding', num_feats=128, + normalize=True), + transformer_decoder=dict( + type='mmdet.DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='mmdet.DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='mmdet.MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[1.0] * num_classes + [0.1]), + loss_mask=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='mmdet.DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + 
assigner=dict( + type='mmdet.HungarianAssigner', + match_costs=[ + dict(type='mmdet.ClassificationCost', weight=2.0), + dict( + type='mmdet.CrossEntropyLossCost', + weight=5.0, + use_sigmoid=True), + dict( + type='mmdet.DiceCost', + weight=5.0, + pred_act=True, + eps=1.0) + ]), + sampler=dict(type='mmdet.MaskPseudoSampler'))), + train_cfg=dict(), + test_cfg=dict(mode='whole')) + +# dataset config +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomChoiceResize', + scales=[int(1024 * x * 0.1) for x in range(5, 21)], + resize_type='ResizeShortestEdge', + max_size=4096), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# optimizer +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +optimizer = dict( + type='AdamW', lr=0.0001, weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999)) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=optimizer, + clip_grad=dict(max_norm=0.01, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi, + }, + norm_decay_mult=0.0)) +# learning policy +param_scheduler = [ + dict( + type='PolyLR', + eta_min=0, + power=0.9, + begin=0, + end=90000, + by_epoch=False) +] + +# training schedule for 90k +train_cfg = dict(type='IterBasedTrainLoop', max_iters=90000, val_interval=5000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict( + type='CheckpointHook', by_epoch=False, interval=5000, + save_best='mIoU'), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='SegVisualizationHook')) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/configs/mask2former/mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640.py b/configs/mask2former/mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640.py new file mode 100644 index 0000000000..56112dfa3e --- /dev/null +++ b/configs/mask2former/mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640.py @@ -0,0 +1,237 @@ +_base_ = [ + '../_base_/default_runtime.py', '../_base_/datasets/ade20k_640x640.py' +] + +pretrained = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window12_384_20220317-55b0104a.pth' # noqa +custom_imports = dict(imports='mmdet.models', allow_failed_imports=False) + +crop_size = (640, 640) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255, + size=crop_size) +num_classes = 150 + +depths = [2, 2, 18, 2] +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=128, + depths=depths, + num_heads=[4, 8, 16, 32], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + frozen_stages=-1, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[128, 256, 512, 1024], + strides=[4, 8, 16, 32], + feat_channels=256, + out_channels=256, + num_classes=num_classes, + num_queries=100, + num_transformer_feat_level=3, + align_corners=False, + pixel_decoder=dict( + type='mmdet.MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='mmdet.DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='mmdet.BaseTransformerLayer', + attn_cfgs=dict( + type='mmdet.MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True)), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='mmdet.SinePositionalEncoding', + num_feats=128, + normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='mmdet.SinePositionalEncoding', num_feats=128, + normalize=True), + transformer_decoder=dict( + type='mmdet.DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='mmdet.DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='mmdet.MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[1.0] * num_classes + [0.1]), + loss_mask=dict( + 
type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='mmdet.DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='mmdet.HungarianAssigner', + match_costs=[ + dict(type='mmdet.ClassificationCost', weight=2.0), + dict( + type='mmdet.CrossEntropyLossCost', + weight=5.0, + use_sigmoid=True), + dict( + type='mmdet.DiceCost', + weight=5.0, + pred_act=True, + eps=1.0) + ]), + sampler=dict(type='mmdet.MaskPseudoSampler'))), + train_cfg=dict(), + test_cfg=dict(mode='whole')) + +# dataset config +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict( + type='RandomChoiceResize', + scales=[int(x * 0.1 * 640) for x in range(5, 21)], + resize_type='ResizeShortestEdge', + max_size=2560), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +train_dataloader = dict(batch_size=2, dataset=dict(pipeline=train_pipeline)) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optimizer = dict( + type='AdamW', lr=0.0001, weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999)) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=optimizer, + clip_grad=dict(max_norm=0.01, norm_type=2), + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) + +# learning policy +param_scheduler = [ + dict( + type='PolyLR', + eta_min=0, + power=0.9, + begin=0, + end=160000, + by_epoch=False) +] + +# training schedule for 160k +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=160000, val_interval=5000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict( + type='CheckpointHook', by_epoch=False, interval=5000, + save_best='mIoU'), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='SegVisualizationHook')) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/configs/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-160k_ade20k-640x640.py b/configs/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-160k_ade20k-640x640.py new file mode 100644 index 0000000000..f39a3c5906 --- /dev/null +++ b/configs/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-160k_ade20k-640x640.py @@ -0,0 +1,5 @@ +_base_ = ['./mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640.py'] + +pretrained = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window12_384_22k_20220317-e5c09f74.pth' # noqa +model = dict( + backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained))) diff --git a/configs/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024.py b/configs/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024.py new file mode 100644 index 0000000000..0c229c145d --- /dev/null +++ b/configs/mask2former/mask2former_swin-b-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024.py @@ -0,0 +1,42 @@ +_base_ = ['./mask2former_swin-t_8xb2-90k_cityscapes-512x1024.py'] +pretrained = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window12_384_22k_20220317-e5c09f74.pth' # noqa + +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + pretrain_img_size=384, + embed_dims=128, + depths=depths, + num_heads=[4, 8, 16, 32], + window_size=12, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + decode_head=dict(in_channels=[128, 256, 512, 1024])) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optim_wrapper = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/configs/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640.py b/configs/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640.py new file mode 100644 index 0000000000..f2657e8842 --- /dev/null +++ b/configs/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640.py @@ -0,0 +1,9 @@ +_base_ = ['./mask2former_swin-b-in1k-384x384-pre_8xb2-160k_ade20k-640x640.py'] +pretrained = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window12_384_22k_20220412-6580f57d.pth' # noqa + +model = dict( + backbone=dict( + embed_dims=192, + num_heads=[6, 12, 24, 48], + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + decode_head=dict(num_queries=100, in_channels=[192, 384, 768, 1536])) diff --git 
a/configs/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024.py b/configs/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024.py new file mode 100644 index 0000000000..01a7b9988f --- /dev/null +++ b/configs/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-90k_cityscapes-512x1024.py @@ -0,0 +1,42 @@ +_base_ = ['./mask2former_swin-t_8xb2-90k_cityscapes-512x1024.py'] +pretrained = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window12_384_22k_20220412-6580f57d.pth' # noqa + +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + pretrain_img_size=384, + embed_dims=192, + depths=depths, + num_heads=[6, 12, 24, 48], + window_size=12, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + decode_head=dict(in_channels=[192, 384, 768, 1536])) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optim_wrapper = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/configs/mask2former/mask2former_swin-s_8xb2-160k_ade20k-512x512.py b/configs/mask2former/mask2former_swin-s_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..a7796d5693 --- /dev/null +++ b/configs/mask2former/mask2former_swin-s_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,37 @@ +_base_ = ['./mask2former_swin-t_8xb2-160k_ade20k-512x512.py'] +pretrained = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_small_patch4_window7_224_20220317-7ba6d6dd.pth' # noqa + +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + depths=depths, init_cfg=dict(type='Pretrained', + checkpoint=pretrained))) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) 
+# optimizer +optim_wrapper = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/configs/mask2former/mask2former_swin-s_8xb2-90k_cityscapes-512x1024.py b/configs/mask2former/mask2former_swin-s_8xb2-90k_cityscapes-512x1024.py new file mode 100644 index 0000000000..5f75544b1a --- /dev/null +++ b/configs/mask2former/mask2former_swin-s_8xb2-90k_cityscapes-512x1024.py @@ -0,0 +1,37 @@ +_base_ = ['./mask2former_swin-t_8xb2-90k_cityscapes-512x1024.py'] +pretrained = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_small_patch4_window7_224_20220317-7ba6d6dd.pth' # noqa + +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + depths=depths, init_cfg=dict(type='Pretrained', + checkpoint=pretrained))) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optim_wrapper = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/configs/mask2former/mask2former_swin-t_8xb2-160k_ade20k-512x512.py b/configs/mask2former/mask2former_swin-t_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..9de3d242eb --- /dev/null +++ b/configs/mask2former/mask2former_swin-t_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,52 @@ +_base_ = ['./mask2former_r50_8xb2-160k_ade20k-512x512.py'] +pretrained = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_tiny_patch4_window7_224_20220317-1cdeb081.pth' # noqa +depths = [2, 2, 6, 2] +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=depths, + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + frozen_stages=-1, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + decode_head=dict(in_channels=[96, 192, 384, 768])) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + 
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optim_wrapper = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/configs/mask2former/mask2former_swin-t_8xb2-90k_cityscapes-512x1024.py b/configs/mask2former/mask2former_swin-t_8xb2-90k_cityscapes-512x1024.py new file mode 100644 index 0000000000..0abda6430c --- /dev/null +++ b/configs/mask2former/mask2former_swin-t_8xb2-90k_cityscapes-512x1024.py @@ -0,0 +1,52 @@ +_base_ = ['./mask2former_r50_8xb2-90k_cityscapes-512x1024.py'] +pretrained = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_tiny_patch4_window7_224_20220317-1cdeb081.pth' # noqa +depths = [2, 2, 6, 2] +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=depths, + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + frozen_stages=-1, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + decode_head=dict(in_channels=[96, 192, 384, 768])) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optim_wrapper = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/configs/maskformer/README.md b/configs/maskformer/README.md new file mode 100644 index 0000000000..5e33d17afb --- /dev/null +++ b/configs/maskformer/README.md @@ -0,0 +1,60 @@ +# MaskFormer + +[MaskFormer: Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Modern approaches typically formulate semantic segmentation as a per-pixel classification task, while instance-level segmentation is handled with an alternative mask classification. Our key insight: mask classification is sufficiently general to solve both semantic- and instance-level segmentation tasks in a unified manner using the exact same model, loss, and training procedure. Following this observation, we propose MaskFormer, a simple mask classification model which predicts a set of binary masks, each associated with a single global class label prediction. 
Overall, the proposed mask classification-based method simplifies the landscape of effective approaches to semantic and panoptic segmentation tasks and shows excellent empirical results. In particular, we observe that MaskFormer outperforms per-pixel classification baselines when the number of classes is large. Our mask classification-based method outperforms both current state-of-the-art semantic (55.6 mIoU on ADE20K) and panoptic segmentation (52.7 PQ on COCO) models. + + + +
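As a rough sketch of the mask classification formulation above, the snippet below combines per-query class probabilities with per-query binary masks to obtain a per-pixel semantic prediction; the tensor shapes and helper name are illustrative only and are not taken from the MMSegmentation/MMDetection implementation.

```python
import torch


def queries_to_semantic_map(cls_logits, mask_logits):
    """Combine per-query class and mask predictions into per-pixel scores.

    cls_logits:  (num_queries, num_classes + 1); the extra slot is "no object".
    mask_logits: (num_queries, H, W) binary mask logits.
    Returns a (num_classes, H, W) map of per-pixel class scores.
    """
    cls_probs = cls_logits.softmax(dim=-1)[:, :-1]  # drop the "no object" slot
    mask_probs = mask_logits.sigmoid()
    # every query's mask votes for its predicted classes, summed over queries
    return torch.einsum('qc,qhw->chw', cls_probs, mask_probs)


# toy example: 100 queries, 150 classes (ADE20K-like), a 128x128 output
scores = queries_to_semantic_map(torch.randn(100, 151), torch.randn(100, 128, 128))
print(scores.shape, scores.argmax(dim=0).shape)  # (150, 128, 128), (128, 128)
```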
+ +
+ +```bibtex +@article{cheng2021per, + title={Per-pixel classification is not all you need for semantic segmentation}, + author={Cheng, Bowen and Schwing, Alex and Kirillov, Alexander}, + journal={Advances in Neural Information Processing Systems}, + volume={34}, + pages={17864--17875}, + year={2021} +} +``` + +### Usage + +- MaskFormer model needs to install [MMDetection](https://github.com/open-mmlab/mmdetection) first. + +```shell +pip install "mmdet>=3.0.0rc4" +``` + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | --------- | --------- | ------- | -------- | -------------- | ----- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| MaskFormer | R-50-D32 | 512x512 | 160000 | 3.29 | 42.20 | 44.29 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/maskformer/maskformer_r50-d32_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/maskformer/maskformer_r50-d32_8xb2-160k_ade20k-512x512/maskformer_r50-d32_8xb2-160k_ade20k-512x512_20221030_182724-cbd39cc1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/maskformer/maskformer_r50-d32_8xb2-160k_ade20k-512x512/maskformer_r50-d32_8xb2-160k_ade20k-512x512_20221030_182724.json) | +| MaskFormer | R-101-D32 | 512x512 | 160000 | 4.12 | 34.90 | 45.11 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/maskformer/maskformer_r101-d32_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/maskformer/maskformer_r101-d32_8xb2-160k_ade20k-512x512/maskformer_r101-d32_8xb2-160k_ade20k-512x512_20221031_223053-c8e0931d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/maskformer/maskformer_r101-d32_8xb2-160k_ade20k-512x512/maskformer_r101-d32_8xb2-160k_ade20k-512x512_20221031_223053.json) | +| MaskFormer | Swin-T | 512x512 | 160000 | 3.73 | 40.53 | 46.69 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/maskformer/maskformer_swin-t_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/maskformer/maskformer_swin-t_upernet_8xb2-160k_ade20k-512x512/maskformer_swin-t_upernet_8xb2-160k_ade20k-512x512_20221114_232813-03550716.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/maskformer/maskformer_swin-t_upernet_8xb2-160k_ade20k-512x512/maskformer_swin-t_upernet_8xb2-160k_ade20k-512x512_20221114_232813.json) | +| MaskFormer | Swin-S | 512x512 | 160000 | 5.33 | 26.98 | 49.36 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/maskformer/maskformer_swin-s_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/maskformer/maskformer_swin-s_upernet_8xb2-160k_ade20k-512x512/maskformer_swin-s_upernet_8xb2-160k_ade20k-512x512_20221115_114710-5ab67e58.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/maskformer/maskformer_swin-s_upernet_8xb2-160k_ade20k-512x512/maskformer_swin-s_upernet_8xb2-160k_ade20k-512x512_20221115_114710.json) | + +Note: + +- All experiments of MaskFormer are implemented with 8 V100 (32G) GPUs with 2 samples per GPU. +- The results of MaskFormer are relatively unstable: the accuracy (mIoU) of the model with `R-101-D32` ranges from 44.7 to 46.0, and with `Swin-S` from 49.0 to 49.8. +- The ResNet backbones utilized in MaskFormer models are standard `ResNet` rather than `ResNetV1c`. +- Test-time augmentation is not yet supported in MMSegmentation 1.x; we will add "ms+flip" results as soon as possible. diff --git a/configs/maskformer/maskformer.yml b/configs/maskformer/maskformer.yml new file mode 100644 index 0000000000..1b3d398e34 --- /dev/null +++ b/configs/maskformer/maskformer.yml @@ -0,0 +1,101 @@ +Collections: +- Name: MaskFormer + Metadata: + Training Data: + - Usage + - ADE20K + Paper: + URL: https://arxiv.org/abs/2107.06278 + Title: 'MaskFormer: Per-Pixel Classification is Not All You Need for Semantic + Segmentation' + README: configs/maskformer/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/dev-3.x/mmdet/models/dense_heads/maskformer_head.py#L21 + Version: dev-3.x + Converted From: + Code: https://github.com/facebookresearch/MaskFormer/ +Models: +- Name: maskformer_r50-d32_8xb2-160k_ade20k-512x512 + In Collection: MaskFormer + Metadata: + backbone: R-50-D32 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 23.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 3.29 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.29 + Config: configs/maskformer/maskformer_r50-d32_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/maskformer/maskformer_r50-d32_8xb2-160k_ade20k-512x512/maskformer_r50-d32_8xb2-160k_ade20k-512x512_20221030_182724-cbd39cc1.pth +- Name: maskformer_r101-d32_8xb2-160k_ade20k-512x512 + In Collection: MaskFormer + Metadata: + backbone: R-101-D32 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 28.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.12 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.11 + Config: configs/maskformer/maskformer_r101-d32_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/maskformer/maskformer_r101-d32_8xb2-160k_ade20k-512x512/maskformer_r101-d32_8xb2-160k_ade20k-512x512_20221031_223053-c8e0931d.pth +- Name: maskformer_swin-t_upernet_8xb2-160k_ade20k-512x512 + In Collection: MaskFormer + Metadata: + backbone: Swin-T + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 24.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 3.73 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.69 + Config: configs/maskformer/maskformer_swin-t_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/maskformer/maskformer_swin-t_upernet_8xb2-160k_ade20k-512x512/maskformer_swin-t_upernet_8xb2-160k_ade20k-512x512_20221114_232813-03550716.pth +- Name: maskformer_swin-s_upernet_8xb2-160k_ade20k-512x512 + In Collection: MaskFormer + Metadata: + backbone: Swin-S + crop 
size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 37.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.33 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 49.36 + Config: configs/maskformer/maskformer_swin-s_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/maskformer/maskformer_swin-s_upernet_8xb2-160k_ade20k-512x512/maskformer_swin-s_upernet_8xb2-160k_ade20k-512x512_20221115_114710-5ab67e58.pth diff --git a/configs/maskformer/maskformer_r101-d32_8xb2-160k_ade20k-512x512.py b/configs/maskformer/maskformer_r101-d32_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..04bd37546a --- /dev/null +++ b/configs/maskformer/maskformer_r101-d32_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,7 @@ +_base_ = './maskformer_r50-d32_8xb2-160k_ade20k-512x512.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/configs/maskformer/maskformer_r50-d32_8xb2-160k_ade20k-512x512.py b/configs/maskformer/maskformer_r50-d32_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..7d8f657221 --- /dev/null +++ b/configs/maskformer/maskformer_r50-d32_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,143 @@ +_base_ = [ + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +crop_size = (512, 512) +data_preprocessor = dict( + type='SegDataPreProcessor', + size=crop_size, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +# model_cfg +num_classes = 150 +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=True, + style='pytorch', + contract_dilation=True, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + decode_head=dict( + type='MaskFormerHead', + in_channels=[256, 512, 1024, + 2048], # input channels of pixel_decoder modules + feat_channels=256, + in_index=[0, 1, 2, 3], + num_classes=150, + out_channels=256, + num_queries=100, + pixel_decoder=dict( + type='mmdet.PixelDecoder', + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU')), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='mmdet.SinePositionalEncoding', num_feats=128, + normalize=True), + transformer_decoder=dict( + type='mmdet.DetrTransformerDecoder', + return_intermediate=True, + num_layers=6, + transformerlayers=dict( + type='mmdet.DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='mmdet.MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.1, + proj_drop=0.1, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.1, + dropout_layer=None, + add_identity=True), + # the following parameter was not used, + # just make current api happy + feedforward_channels=2048, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0, + reduction='mean', + class_weight=[1.0] * num_classes + [0.1]), + loss_mask=dict( + 
type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=20.0), + loss_dice=dict( + type='mmdet.DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=1.0), + train_cfg=dict( + assigner=dict( + type='mmdet.HungarianAssigner', + match_costs=[ + dict(type='mmdet.ClassificationCost', weight=1.0), + dict( + type='mmdet.FocalLossCost', + weight=20.0, + binary_input=True), + dict( + type='mmdet.DiceCost', + weight=1.0, + pred_act=True, + eps=1.0) + ]), + sampler=dict(type='mmdet.MaskPseudoSampler'))), + # training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole'), +) +# optimizer +optimizer = dict( + type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.0001) +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=optimizer, + clip_grad=dict(max_norm=0.01, norm_type=2), + paramwise_cfg=dict(custom_keys={ + 'backbone': dict(lr_mult=0.1), + })) +# learning policy +param_scheduler = [ + dict( + type='PolyLR', + eta_min=0, + power=0.9, + begin=0, + end=160000, + by_epoch=False) +] + +# In MaskFormer implementation we use batch size 2 per GPU as default +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/maskformer/maskformer_swin-s_upernet_8xb2-160k_ade20k-512x512.py b/configs/maskformer/maskformer_swin-s_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..2cbc038ac2 --- /dev/null +++ b/configs/maskformer/maskformer_swin-s_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,79 @@ +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_small_patch4_window7_224_20220317-7ba6d6dd.pth' # noqa +_base_ = './maskformer_r50-d32_8xb2-160k_ade20k-512x512.py' +backbone_norm_cfg = dict(type='LN', requires_grad=True) +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=224, + embed_dims=96, + patch_size=4, + window_size=7, + mlp_ratio=4, + depths=depths, + num_heads=[3, 6, 12, 24], + strides=(4, 2, 2, 2), + out_indices=(0, 1, 2, 3), + qkv_bias=True, + qk_scale=None, + patch_norm=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + use_abs_pos_embed=False, + act_cfg=dict(type='GELU'), + norm_cfg=backbone_norm_cfg, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file)), + decode_head=dict( + type='MaskFormerHead', + in_channels=[96, 192, 384, + 768], # input channels of pixel_decoder modules + )) + +# optimizer +optimizer = dict( + type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01) +# set all layers in backbone to lr_mult=1.0 +# set all norm layers, position_embeding, +# query_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=1.0, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +embed_multi = dict(decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer 
+optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=optimizer, + clip_grad=dict(max_norm=0.01, norm_type=2), + paramwise_cfg=dict(custom_keys=custom_keys)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + eta_min=0.0, + power=1.0, + begin=1500, + end=160000, + by_epoch=False, + ) +] diff --git a/configs/maskformer/maskformer_swin-t_upernet_8xb2-160k_ade20k-512x512.py b/configs/maskformer/maskformer_swin-t_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..aa242dbe31 --- /dev/null +++ b/configs/maskformer/maskformer_swin-t_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,81 @@ +_base_ = './maskformer_r50-d32_8xb2-160k_ade20k-512x512.py' + +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_tiny_patch4_window7_224_20220317-1cdeb081.pth' # noqa +backbone_norm_cfg = dict(type='LN', requires_grad=True) +depths = [2, 2, 6, 2] +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=224, + embed_dims=96, + patch_size=4, + window_size=7, + mlp_ratio=4, + depths=depths, + num_heads=[3, 6, 12, 24], + strides=(4, 2, 2, 2), + out_indices=(0, 1, 2, 3), + qkv_bias=True, + qk_scale=None, + patch_norm=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + use_abs_pos_embed=False, + act_cfg=dict(type='GELU'), + norm_cfg=backbone_norm_cfg, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file)), + decode_head=dict( + type='MaskFormerHead', + in_channels=[96, 192, 384, + 768], # input channels of pixel_decoder modules + )) + +# optimizer +optimizer = dict( + type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01) + +# set all layers in backbone to lr_mult=1.0 +# set all norm layers, position_embeding, +# query_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=1.0, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +embed_multi = dict(decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=optimizer, + clip_grad=dict(max_norm=0.01, norm_type=2), + paramwise_cfg=dict(custom_keys=custom_keys)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + eta_min=0.0, + power=1.0, + begin=1500, + end=160000, + by_epoch=False, + ) +] diff --git a/configs/mobilenet_v2/README.md b/configs/mobilenet_v2/README.md new file mode 100644 index 0000000000..c1010044a9 --- /dev/null +++ b/configs/mobilenet_v2/README.md @@ -0,0 +1,56 @@ +# MobileNetV2 + +[MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +In this paper we describe a new mobile architecture, MobileNetV2, that improves the state of the art performance of mobile models on multiple tasks 
and benchmarks as well as across a spectrum of different model sizes. We also describe efficient ways of applying these mobile models to object detection in a novel framework we call SSDLite. Additionally, we demonstrate how to build mobile semantic segmentation models through a reduced form of DeepLabv3 which we call Mobile DeepLabv3. +The MobileNetV2 architecture is based on an inverted residual structure where the input and output of the residual block are thin bottleneck layers, opposite to traditional residual models which use expanded representations in the input. MobileNetV2 uses lightweight depthwise convolutions to filter features in the intermediate expansion layer. Additionally, we find that it is important to remove non-linearities in the narrow layers in order to maintain representational power. We demonstrate that this improves performance and provide an intuition that led to this design. Finally, our approach allows decoupling of the input/output domains from the expressiveness of the transformation, which provides a convenient framework for further analysis. We measure our performance on ImageNet classification, COCO object detection, VOC image segmentation. We evaluate the trade-offs between accuracy, and number of operations measured by multiply-adds (MAdd), as well as the number of parameters. + + +
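To make the inverted residual and linear bottleneck design described above concrete, the following is a minimal PyTorch sketch of such a block (1x1 expansion, 3x3 depthwise filtering, linear 1x1 projection); the layer choices and defaults are illustrative and do not reproduce the `MobileNetV2` backbone used by the configs below.

```python
import torch
import torch.nn as nn


class InvertedResidual(nn.Module):
    """Inverted residual block: 1x1 expand -> 3x3 depthwise -> linear 1x1 project."""

    def __init__(self, in_ch, out_ch, stride=1, expand_ratio=6):
        super().__init__()
        hidden = in_ch * expand_ratio
        self.use_residual = stride == 1 and in_ch == out_ch
        layers = []
        if expand_ratio != 1:  # expand the thin bottleneck to a wider representation
            layers += [
                nn.Conv2d(in_ch, hidden, 1, bias=False),
                nn.BatchNorm2d(hidden),
                nn.ReLU6(inplace=True),
            ]
        layers += [
            # depthwise 3x3 convolution filters features in the expanded space
            nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU6(inplace=True),
            # linear 1x1 projection back to a thin bottleneck (no non-linearity)
            nn.Conv2d(hidden, out_ch, 1, bias=False),
            nn.BatchNorm2d(out_ch),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        out = self.block(x)
        return x + out if self.use_residual else out


x = torch.randn(1, 24, 64, 64)
print(InvertedResidual(24, 24)(x).shape)  # torch.Size([1, 24, 64, 64])
```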
+ +
+ +## Citation + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | M-V2-D8 | 512x1024 | 80000 | 3.4 | 14.2 | 61.54 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mobilenet_v2/mobilenet-v2-d8_fcn_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes/fcn_m-v2-d8_512x1024_80k_cityscapes_20200825_124817-d24c28c1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes/fcn_m-v2-d8_512x1024_80k_cityscapes-20200825_124817.log.json) | +| PSPNet | M-V2-D8 | 512x1024 | 80000 | 3.6 | 11.2 | 70.23 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mobilenet_v2/mobilenet-v2-d8_pspnet_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes/pspnet_m-v2-d8_512x1024_80k_cityscapes_20200825_124817-19e81d51.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes/pspnet_m-v2-d8_512x1024_80k_cityscapes-20200825_124817.log.json) | +| DeepLabV3 | M-V2-D8 | 512x1024 | 80000 | 3.9 | 8.4 | 73.84 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes/deeplabv3_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-bef03590.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes/deeplabv3_m-v2-d8_512x1024_80k_cityscapes-20200825_124836.log.json) | +| DeepLabV3+ | M-V2-D8 | 512x1024 | 80000 | 5.1 | 8.4 | 75.20 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3plus_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-d256dd4b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes-20200825_124836.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr 
schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | M-V2-D8 | 512x512 | 160000 | 6.5 | 64.4 | 19.71 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mobilenet_v2/mobilenet-v2-d8_fcn_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k/fcn_m-v2-d8_512x512_160k_ade20k_20200825_214953-c40e1095.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k/fcn_m-v2-d8_512x512_160k_ade20k-20200825_214953.log.json) | +| PSPNet | M-V2-D8 | 512x512 | 160000 | 6.5 | 57.7 | 29.68 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mobilenet_v2/mobilenet-v2-d8_pspnet_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k/pspnet_m-v2-d8_512x512_160k_ade20k_20200825_214953-f5942f7a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k/pspnet_m-v2-d8_512x512_160k_ade20k-20200825_214953.log.json) | +| DeepLabV3 | M-V2-D8 | 512x512 | 160000 | 6.8 | 39.9 | 34.08 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k/deeplabv3_m-v2-d8_512x512_160k_ade20k_20200825_223255-63986343.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k/deeplabv3_m-v2-d8_512x512_160k_ade20k-20200825_223255.log.json) | +| DeepLabV3+ | M-V2-D8 | 512x512 | 160000 | 8.2 | 43.1 | 34.02 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3plus_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k/deeplabv3plus_m-v2-d8_512x512_160k_ade20k_20200825_223255-465a01d4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k/deeplabv3plus_m-v2-d8_512x512_160k_ade20k-20200825_223255.log.json) | diff --git a/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3_4xb2-80k_cityscapes-512x1024.py b/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..ece9b0bf8f --- /dev/null +++ b/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,13 @@ +_base_ = '../deeplabv3/deeplabv3_r101-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + 
out_indices=(1, 2, 4, 6), + norm_cfg=dict(type='SyncBN', requires_grad=True)), + decode_head=dict(in_channels=320), + auxiliary_head=dict(in_channels=96)) diff --git a/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3_4xb4-160k_ade20k-512x512.py b/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..86eec0d948 --- /dev/null +++ b/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,13 @@ +_base_ = '../deeplabv3/deeplabv3_r101-d8_4xb4-160k_ade20k-512x512.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6), + norm_cfg=dict(type='SyncBN', requires_grad=True)), + decode_head=dict(in_channels=320), + auxiliary_head=dict(in_channels=96)) diff --git a/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3plus_4xb2-80k_cityscapes-512x1024.py b/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3plus_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..195046edc4 --- /dev/null +++ b/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3plus_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,15 @@ +_base_ = [ + '../deeplabv3plus/deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024.py' +] +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6), + norm_cfg=dict(type='SyncBN', requires_grad=True)), + decode_head=dict(in_channels=320, c1_in_channels=24), + auxiliary_head=dict(in_channels=96)) diff --git a/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3plus_4xb4-160k_ade20k-512x512.py b/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3plus_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..d4f669f163 --- /dev/null +++ b/configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3plus_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,13 @@ +_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_4xb4-160k_ade20k-512x512.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6), + norm_cfg=dict(type='SyncBN', requires_grad=True)), + decode_head=dict(in_channels=320, c1_in_channels=24), + auxiliary_head=dict(in_channels=96)) diff --git a/configs/mobilenet_v2/mobilenet-v2-d8_fcn_4xb2-80k_cityscapes-512x1024.py b/configs/mobilenet_v2/mobilenet-v2-d8_fcn_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..0829f438a7 --- /dev/null +++ b/configs/mobilenet_v2/mobilenet-v2-d8_fcn_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,13 @@ +_base_ = '../fcn/fcn_r101-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6), + norm_cfg=dict(type='SyncBN', requires_grad=True)), + decode_head=dict(in_channels=320), + auxiliary_head=dict(in_channels=96)) diff --git a/configs/mobilenet_v2/mobilenet-v2-d8_fcn_4xb4-160k_ade20k-512x512.py b/configs/mobilenet_v2/mobilenet-v2-d8_fcn_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..015fa6f201 --- /dev/null +++ b/configs/mobilenet_v2/mobilenet-v2-d8_fcn_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,13 @@ 
+_base_ = '../fcn/fcn_r101-d8_4xb4-160k_ade20k-512x512.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6), + norm_cfg=dict(type='SyncBN', requires_grad=True)), + decode_head=dict(in_channels=320), + auxiliary_head=dict(in_channels=96)) diff --git a/configs/mobilenet_v2/mobilenet-v2-d8_pspnet_4xb2-80k_cityscapes-512x1024.py b/configs/mobilenet_v2/mobilenet-v2-d8_pspnet_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..8542e02669 --- /dev/null +++ b/configs/mobilenet_v2/mobilenet-v2-d8_pspnet_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,13 @@ +_base_ = '../pspnet/pspnet_r101-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6), + norm_cfg=dict(type='SyncBN', requires_grad=True)), + decode_head=dict(in_channels=320), + auxiliary_head=dict(in_channels=96)) diff --git a/configs/mobilenet_v2/mobilenet-v2-d8_pspnet_4xb4-160k_ade20k-512x512.py b/configs/mobilenet_v2/mobilenet-v2-d8_pspnet_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..73db59beae --- /dev/null +++ b/configs/mobilenet_v2/mobilenet-v2-d8_pspnet_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,13 @@ +_base_ = '../pspnet/pspnet_r101-d8_4xb4-160k_ade20k-512x512.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6), + norm_cfg=dict(type='SyncBN', requires_grad=True)), + decode_head=dict(in_channels=320), + auxiliary_head=dict(in_channels=96)) diff --git a/configs/mobilenet_v2/mobilenet_v2.yml b/configs/mobilenet_v2/mobilenet_v2.yml new file mode 100644 index 0000000000..69d73d568a --- /dev/null +++ b/configs/mobilenet_v2/mobilenet_v2.yml @@ -0,0 +1,169 @@ +Models: +- Name: mobilenet-v2-d8_fcn_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: M-V2-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 70.42 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 61.54 + Config: configs/mobilenet_v2/mobilenet-v2-d8_fcn_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes/fcn_m-v2-d8_512x1024_80k_cityscapes_20200825_124817-d24c28c1.pth +- Name: mobilenet-v2-d8_pspnet_4xb2-80k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: M-V2-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 89.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 70.23 + Config: configs/mobilenet_v2/mobilenet-v2-d8_pspnet_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes/pspnet_m-v2-d8_512x1024_80k_cityscapes_20200825_124817-19e81d51.pth +- Name: mobilenet-v2-d8_deeplabv3_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3 + 
Metadata: + backbone: M-V2-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 119.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.84 + Config: configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes/deeplabv3_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-bef03590.pth +- Name: mobilenet-v2-d8_deeplabv3plus_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: M-V2-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 119.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.2 + Config: configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3plus_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-d256dd4b.pth +- Name: mobilenet-v2-d8_fcn_4xb4-160k_ade20k-512x512 + In Collection: FCN + Metadata: + backbone: M-V2-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 15.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 19.71 + Config: configs/mobilenet_v2/mobilenet-v2-d8_fcn_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k/fcn_m-v2-d8_512x512_160k_ade20k_20200825_214953-c40e1095.pth +- Name: mobilenet-v2-d8_pspnet_4xb4-160k_ade20k-512x512 + In Collection: PSPNet + Metadata: + backbone: M-V2-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 17.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 29.68 + Config: configs/mobilenet_v2/mobilenet-v2-d8_pspnet_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k/pspnet_m-v2-d8_512x512_160k_ade20k_20200825_214953-f5942f7a.pth +- Name: mobilenet-v2-d8_deeplabv3_4xb4-160k_ade20k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: M-V2-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 25.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 34.08 + Config: configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k/deeplabv3_m-v2-d8_512x512_160k_ade20k_20200825_223255-63986343.pth +- Name: mobilenet-v2-d8_deeplabv3plus_4xb4-160k_ade20k-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: M-V2-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 23.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + 
resolution: (512,512) + Training Memory (GB): 8.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 34.02 + Config: configs/mobilenet_v2/mobilenet-v2-d8_deeplabv3plus_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k/deeplabv3plus_m-v2-d8_512x512_160k_ade20k_20200825_223255-465a01d4.pth diff --git a/configs/mobilenet_v3/README.md b/configs/mobilenet_v3/README.md new file mode 100644 index 0000000000..c2fed06ccd --- /dev/null +++ b/configs/mobilenet_v3/README.md @@ -0,0 +1,50 @@ +# MobileNetV3 + +[Searching for MobileNetV3](https://arxiv.org/abs/1905.02244) + +## Introduction + + + + + +Official Repo + +Code Snippet + +## Abstract + + + +We present the next generation of MobileNets based on a combination of complementary search techniques as well as a novel architecture design. MobileNetV3 is tuned to mobile phone CPUs through a combination of hardware-aware network architecture search (NAS) complemented by the NetAdapt algorithm and then subsequently improved through novel architecture advances. This paper starts the exploration of how automated search algorithms and network design can work together to harness complementary approaches improving the overall state of the art. Through this process we create two new MobileNet models for release: MobileNetV3-Large and MobileNetV3-Small which are targeted for high and low resource use cases. These models are then adapted and applied to the tasks of object detection and semantic segmentation. For the task of semantic segmentation (or any dense pixel prediction), we propose a new efficient segmentation decoder Lite Reduced Atrous Spatial Pyramid Pooling (LR-ASPP). We achieve new state of the art results for mobile classification, detection and segmentation. MobileNetV3-Large is 3.2% more accurate on ImageNet classification while reducing latency by 15% compared to MobileNetV2. MobileNetV3-Small is 4.6% more accurate while reducing latency by 5% compared to MobileNetV2. MobileNetV3-Large detection is 25% faster at roughly the same accuracy as MobileNetV2 on COCO detection. MobileNetV3-Large LR-ASPP is 30% faster than MobileNetV2 R-ASPP at similar accuracy for Cityscapes segmentation. + + + +
+ +
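To make the LR-ASPP decoder mentioned in the abstract concrete, here is a rough two-branch sketch of its gating idea in PyTorch. It is an illustration only, not the `LRASPPHead` used by the configs added in this PR; the channel sizes (16 low-level, 576 high-level) are borrowed from the MobileNetV3-Small configs below, and every other name and shape here is an assumption.

```python
# Minimal sketch of the LR-ASPP idea: project the high-level feature map with a 1x1
# conv, gate it with a global-pooling + 1x1 conv + sigmoid branch, upsample, and fuse
# with a low-level feature map. NOT the mmseg LRASPPHead implementation.
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyLRASPP(nn.Module):
    def __init__(self, high_channels=576, low_channels=16,
                 inter_channels=128, num_classes=19):
        super().__init__()
        self.project = nn.Sequential(
            nn.Conv2d(high_channels, inter_channels, 1, bias=False),
            nn.BatchNorm2d(inter_channels),
            nn.ReLU(inplace=True))
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(high_channels, inter_channels, 1),
            nn.Sigmoid())
        self.cls_high = nn.Conv2d(inter_channels, num_classes, 1)
        self.cls_low = nn.Conv2d(low_channels, num_classes, 1)

    def forward(self, low, high):
        x = self.project(high) * self.gate(high)  # gated high-level features
        x = F.interpolate(x, size=low.shape[-2:], mode='bilinear',
                          align_corners=False)
        return self.cls_high(x) + self.cls_low(low)  # fuse with low-level branch
```

The actual head configured below differs in detail; for instance, it consumes three backbone outputs (`in_channels=(16, 16, 576)` with `input_transform='multiple_select'`) rather than the two used in this simplified sketch.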
+ +## Citation + +```bibtex +@inproceedings{Howard_2019_ICCV, + title={Searching for MobileNetV3}, + author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and Le, Quoc V. and Adam, Hartwig}, + booktitle={The IEEE International Conference on Computer Vision (ICCV)}, + pages={1314-1324}, + month={October}, + year={2019}, + doi={10.1109/ICCV.2019.00140}} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------: | -------------- | ----: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| LRASPP | M-V3-D8 | 512x1024 | 320000 | 8.9 | 15.22 | 69.54 | 70.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mobilenet_v3/mobilenet-v3-d8_lraspp_4xb4-320k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes/lraspp_m-v3-d8_512x1024_320k_cityscapes_20201224_220337-cfe8fb07.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes/lraspp_m-v3-d8_512x1024_320k_cityscapes-20201224_220337.log.json) | +| LRASPP | M-V3-D8 (scratch) | 512x1024 | 320000 | 8.9 | 14.77 | 67.87 | 69.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mobilenet_v3/mobilenet-v3-d8-scratch_lraspp_4xb4-320k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes_20201224_220337-9f29cd72.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes-20201224_220337.log.json) | +| LRASPP | M-V3s-D8 | 512x1024 | 320000 | 5.3 | 23.64 | 64.11 | 66.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mobilenet_v3/mobilenet-v3-d8-s_lraspp_4xb4-320k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes/lraspp_m-v3s-d8_512x1024_320k_cityscapes_20201224_223935-61565b34.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes/lraspp_m-v3s-d8_512x1024_320k_cityscapes-20201224_223935.log.json) | +| LRASPP | M-V3s-D8 (scratch) | 512x1024 | 320000 | 5.3 | 24.50 | 62.74 | 65.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/mobilenet_v3/mobilenet-v3-d8-scratch-s_lraspp_4xb4-320k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes_20201224_223935-03daeabb.pth) 
\| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes-20201224_223935.log.json) | diff --git a/configs/mobilenet_v3/mobilenet-v3-d8-s_lraspp_4xb4-320k_cityscapes-512x1024.py b/configs/mobilenet_v3/mobilenet-v3-d8-s_lraspp_4xb4-320k_cityscapes-512x1024.py new file mode 100644 index 0000000000..bc6322fe40 --- /dev/null +++ b/configs/mobilenet_v3/mobilenet-v3-d8-s_lraspp_4xb4-320k_cityscapes-512x1024.py @@ -0,0 +1,23 @@ +_base_ = './mobilenet-v3-d8_lraspp_4xb4-320k_cityscapes-512x1024.py' +norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://contrib/mobilenet_v3_small', + backbone=dict( + type='MobileNetV3', + arch='small', + out_indices=(0, 1, 12), + norm_cfg=norm_cfg), + decode_head=dict( + type='LRASPPHead', + in_channels=(16, 16, 576), + in_index=(0, 1, 2), + channels=128, + input_transform='multiple_select', + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) diff --git a/configs/mobilenet_v3/mobilenet-v3-d8-scratch-s_lraspp_4xb4-320k_cityscapes-512x1024.py b/configs/mobilenet_v3/mobilenet-v3-d8-scratch-s_lraspp_4xb4-320k_cityscapes-512x1024.py new file mode 100644 index 0000000000..7260936e60 --- /dev/null +++ b/configs/mobilenet_v3/mobilenet-v3-d8-scratch-s_lraspp_4xb4-320k_cityscapes-512x1024.py @@ -0,0 +1,22 @@ +_base_ = './mobilenet-v3-d8-scratch_lraspp_4xb4-320k_cityscapes-512x1024.py' +norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='MobileNetV3', + arch='small', + out_indices=(0, 1, 12), + norm_cfg=norm_cfg), + decode_head=dict( + type='LRASPPHead', + in_channels=(16, 16, 576), + in_index=(0, 1, 2), + channels=128, + input_transform='multiple_select', + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) diff --git a/configs/mobilenet_v3/mobilenet-v3-d8-scratch_lraspp_4xb4-320k_cityscapes-512x1024.py b/configs/mobilenet_v3/mobilenet-v3-d8-scratch_lraspp_4xb4-320k_cityscapes-512x1024.py new file mode 100644 index 0000000000..8dcbc3395f --- /dev/null +++ b/configs/mobilenet_v3/mobilenet-v3-d8-scratch_lraspp_4xb4-320k_cityscapes-512x1024.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/lraspp_m-v3-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +# Re-config the data sampler. 
+model = dict(data_preprocessor=data_preprocessor) +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader + +runner = dict(type='IterBasedRunner', max_iters=320000) diff --git a/configs/mobilenet_v3/mobilenet-v3-d8_lraspp_4xb4-320k_cityscapes-512x1024.py b/configs/mobilenet_v3/mobilenet-v3-d8_lraspp_4xb4-320k_cityscapes-512x1024.py new file mode 100644 index 0000000000..cd84265f32 --- /dev/null +++ b/configs/mobilenet_v3/mobilenet-v3-d8_lraspp_4xb4-320k_cityscapes-512x1024.py @@ -0,0 +1,16 @@ +_base_ = [ + '../_base_/models/lraspp_m-v3-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://contrib/mobilenet_v3_large') + +# Re-config the data sampler. +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader + +runner = dict(type='IterBasedRunner', max_iters=320000) diff --git a/configs/mobilenet_v3/mobilenet_v3.yml b/configs/mobilenet_v3/mobilenet_v3.yml new file mode 100644 index 0000000000..067a150cea --- /dev/null +++ b/configs/mobilenet_v3/mobilenet_v3.yml @@ -0,0 +1,103 @@ +Collections: +- Name: LRASPP + Metadata: + Training Data: + - Cityscapes + Paper: + URL: https://arxiv.org/abs/1905.02244 + Title: Searching for MobileNetV3 + README: configs/mobilenet_v3/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/mobilenet_v3.py#L15 + Version: v0.17.0 + Converted From: + Code: https://github.com/tensorflow/models/tree/master/research/deeplab +Models: +- Name: mobilenet-v3-d8_lraspp_4xb4-320k_cityscapes-512x1024 + In Collection: LRASPP + Metadata: + backbone: M-V3-D8 + crop size: (512,1024) + lr schd: 320000 + inference time (ms/im): + - value: 65.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 69.54 + mIoU(ms+flip): 70.89 + Config: configs/mobilenet_v3/mobilenet-v3-d8_lraspp_4xb4-320k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes/lraspp_m-v3-d8_512x1024_320k_cityscapes_20201224_220337-cfe8fb07.pth +- Name: mobilenet-v3-d8-scratch_lraspp_4xb4-320k_cityscapes-512x1024 + In Collection: LRASPP + Metadata: + backbone: M-V3-D8 (scratch) + crop size: (512,1024) + lr schd: 320000 + inference time (ms/im): + - value: 67.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 67.87 + mIoU(ms+flip): 69.78 + Config: configs/mobilenet_v3/mobilenet-v3-d8-scratch_lraspp_4xb4-320k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes_20201224_220337-9f29cd72.pth +- Name: mobilenet-v3-d8-s_lraspp_4xb4-320k_cityscapes-512x1024 + In Collection: LRASPP + Metadata: + backbone: M-V3s-D8 + crop size: (512,1024) + lr schd: 320000 + inference time (ms/im): + - value: 42.3 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + 
Training Memory (GB): 5.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 64.11 + mIoU(ms+flip): 66.42 + Config: configs/mobilenet_v3/mobilenet-v3-d8-s_lraspp_4xb4-320k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes/lraspp_m-v3s-d8_512x1024_320k_cityscapes_20201224_223935-61565b34.pth +- Name: mobilenet-v3-d8-scratch-s_lraspp_4xb4-320k_cityscapes-512x1024 + In Collection: LRASPP + Metadata: + backbone: M-V3s-D8 (scratch) + crop size: (512,1024) + lr schd: 320000 + inference time (ms/im): + - value: 40.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 62.74 + mIoU(ms+flip): 65.01 + Config: configs/mobilenet_v3/mobilenet-v3-d8-scratch-s_lraspp_4xb4-320k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes_20201224_223935-03daeabb.pth diff --git a/configs/nonlocal_net/README.md b/configs/nonlocal_net/README.md index dbd924dfe8..80d45ab589 100644 --- a/configs/nonlocal_net/README.md +++ b/configs/nonlocal_net/README.md @@ -1,7 +1,30 @@ -# Non-local Neural Networks +# NonLocal Net + +[Non-local Neural Networks](https://arxiv.org/abs/1711.07971) ## Introduction -``` + + + +Official Repo + +Code Snippet + +## Abstract + + + +Both convolutional and recurrent operations are building blocks that process one local neighborhood at a time. In this paper, we present non-local operations as a generic family of building blocks for capturing long-range dependencies. Inspired by the classical non-local means method in computer vision, our non-local operation computes the response at a position as a weighted sum of the features at all positions. This building block can be plugged into many computer vision architectures. On the task of video classification, even without any bells and whistles, our non-local models can compete or outperform current competition winners on both Kinetics and Charades datasets. In static image recognition, our non-local models improve object detection/segmentation and pose estimation on the COCO suite of tasks. Code is available at [this https URL](https://github.com/facebookresearch/video-nonlocal-net). + + + +
+ +
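For reference, the non-local operation that the abstract summarizes ("a weighted sum of the features at all positions") is, in the paper's notation,

```latex
y_i = \frac{1}{\mathcal{C}(x)} \sum_{\forall j} f(x_i, x_j)\, g(x_j),
\qquad \text{e.g. } f(x_i, x_j) = e^{\theta(x_i)^{\top} \phi(x_j)},\quad
\mathcal{C}(x) = \sum_{\forall j} f(x_i, x_j),
```

where `i` indexes the output position, `j` ranges over all positions, `g` is a learned embedding of the input features, and `f` scores pairwise affinity (the embedded-Gaussian form shown is one common instantiation).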
+ +## Citation + +```bibtex @inproceedings{wang2018non, title={Non-local neural networks}, author={Wang, Xiaolong and Girshick, Ross and Gupta, Abhinav and He, Kaiming}, @@ -14,29 +37,32 @@ ## Results and models ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|----------|----------|-----------|--------:|----------|----------------|------:|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| NonLocal | R-50-D8 | 512x1024 | 40000 | 7.4 | 2.72 | 78.24 | - | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748-c75e81e3.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748.log.json) | -| NonLocal | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.95 | 78.66 | - | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748-d63729fa.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748.log.json) | -| NonLocal | R-50-D8 | 769x769 | 40000 | 8.9 | 1.52 | 78.33 | 79.92 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243-82ef6749.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243.log.json) | -| NonLocal | R-101-D8 | 769x769 | 40000 | 12.8 | 1.05 | 78.57 | 80.29 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348-8fe9a9dc.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348.log.json) | -| NonLocal | R-50-D8 | 512x1024 | 80000 | - | - | 78.01 | - | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518-d6839fae.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518.log.json) | -| NonLocal | R-101-D8 | 512x1024 | 80000 | - | - | 78.93 | - | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411-32700183.pth) | 
[log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411.log.json) | -| NonLocal | R-50-D8 | 769x769 | 80000 | - | - | 79.05 | 80.68 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506-1f9792f6.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506.log.json) | -| NonLocal | R-101-D8 | 769x769 | 80000 | - | - | 79.40 | 80.85 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428-0e1fa4f9.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ----------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| NonLocalNet | R-50-D8 | 512x1024 | 40000 | 7.4 | 2.72 | 78.24 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748-c75e81e3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748.log.json) | +| NonLocalNet | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.95 | 78.66 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748-d63729fa.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748.log.json) | +| NonLocalNet | R-50-D8 | 769x769 | 40000 | 8.9 | 1.52 | 78.33 | 79.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243-82ef6749.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243.log.json) | +| NonLocalNet | R-101-D8 | 769x769 | 40000 | 12.8 | 1.05 | 78.57 | 80.29 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348-8fe9a9dc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348.log.json) | +| NonLocalNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.01 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518-d6839fae.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518.log.json) | +| NonLocalNet | R-101-D8 | 512x1024 | 80000 | - | - | 78.93 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411-32700183.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411.log.json) | +| NonLocalNet | R-50-D8 | 769x769 | 80000 | - | - | 79.05 | 80.68 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506-1f9792f6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506.log.json) | +| NonLocalNet | R-101-D8 | 769x769 | 80000 | - | - | 79.40 | 80.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428-0e1fa4f9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | 
-|----------|----------|-----------|--------:|----------|----------------|------:|--------------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| NonLocal | R-50-D8 | 512x512 | 80000 | 9.1 | 21.37 | 40.75 | 42.05 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801-5ae0aa33.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801.log.json) | -| NonLocal | R-101-D8 | 512x512 | 80000 | 12.6 | 13.97 | 42.90 | 44.27 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758-24105919.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758.log.json) | -| NonLocal | R-50-D8 | 512x512 | 160000 | - | - | 42.03 | 43.04 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410-baef45e3.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410.log.json) | -| NonLocal | R-101-D8 | 512x512 | 160000 | - | - | 43.36 | 44.83 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20200616_003422-affd0f8d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20200616_003422.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ----------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| NonLocalNet | R-50-D8 | 512x512 | 80000 | 9.1 | 21.37 | 40.75 | 42.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801-5ae0aa33.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801.log.json) | +| 
NonLocalNet | R-101-D8 | 512x512 | 80000 | 12.6 | 13.97 | 42.90 | 44.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758-24105919.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758.log.json) | +| NonLocalNet | R-50-D8 | 512x512 | 160000 | - | - | 42.03 | 43.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410-baef45e3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410.log.json) | +| NonLocalNet | R-101-D8 | 512x512 | 160000 | - | - | 44.63 | 45.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20210827_221502-7881aa1a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20210827_221502.log.json) | ### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|----------|----------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| NonLocal | R-50-D8 | 512x512 | 20000 | 6.4 | 21.21 | 76.20 | 77.12 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613-07f2a57c.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613.log.json) | -| NonLocal | R-101-D8 | 512x512 | 20000 | 9.8 | 14.01 | 78.15 | 78.86 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615-948c68ab.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615.log.json) | -| NonLocal | R-50-D8 | 512x512 | 40000 | - | - | 76.65 | 77.47 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028-0139d4a9.pth) | 
[log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028.log.json) | -| NonLocal | R-101-D8 | 512x512 | 40000 | - | - | 78.27 | 79.12 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028-7e5ff470.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ----------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| NonLocalNet | R-50-D8 | 512x512 | 20000 | 6.4 | 21.21 | 76.20 | 77.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r50-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613-07f2a57c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613.log.json) | +| NonLocalNet | R-101-D8 | 512x512 | 20000 | 9.8 | 14.01 | 78.15 | 78.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r101-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615-948c68ab.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615.log.json) | +| NonLocalNet | R-50-D8 | 512x512 | 40000 | - | - | 76.65 | 77.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r50-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028-0139d4a9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028.log.json) | +| NonLocalNet | R-101-D8 | 512x512 | 40000 | - | - | 78.27 | 79.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/nonlocal_net/nonlocal_r101-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028-7e5ff470.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028.log.json) | diff --git a/configs/nonlocal_net/nonlocal_net.yml b/configs/nonlocal_net/nonlocal_net.yml new file mode 100644 index 0000000000..22f32c5abb --- /dev/null +++ b/configs/nonlocal_net/nonlocal_net.yml @@ -0,0 +1,301 @@ +Collections: +- Name: NonLocalNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1711.07971 + Title: Non-local Neural Networks + README: configs/nonlocal_net/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/nl_head.py#L10 + Version: v0.17.0 + Converted From: + Code: https://github.com/facebookresearch/video-nonlocal-net +Models: +- Name: nonlocal_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 367.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.24 + Config: configs/nonlocal_net/nonlocal_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748-c75e81e3.pth +- Name: nonlocal_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 512.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 10.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.66 + Config: configs/nonlocal_net/nonlocal_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748-d63729fa.pth +- Name: nonlocal_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 657.89 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.33 + mIoU(ms+flip): 79.92 + Config: configs/nonlocal_net/nonlocal_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243-82ef6749.pth +- Name: nonlocal_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 952.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.57 + mIoU(ms+flip): 80.29 + Config: configs/nonlocal_net/nonlocal_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348-8fe9a9dc.pth +- Name: nonlocal_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.01 + Config: configs/nonlocal_net/nonlocal_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518-d6839fae.pth +- Name: nonlocal_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.93 + Config: configs/nonlocal_net/nonlocal_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411-32700183.pth +- Name: nonlocal_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.05 + mIoU(ms+flip): 80.68 + Config: configs/nonlocal_net/nonlocal_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506-1f9792f6.pth +- Name: nonlocal_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.4 + mIoU(ms+flip): 80.85 + Config: configs/nonlocal_net/nonlocal_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428-0e1fa4f9.pth +- Name: nonlocal_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 46.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 40.75 + mIoU(ms+flip): 42.05 + Config: configs/nonlocal_net/nonlocal_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801-5ae0aa33.pth +- Name: nonlocal_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 71.58 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.9 + mIoU(ms+flip): 44.27 + Config: configs/nonlocal_net/nonlocal_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758-24105919.pth +- Name: nonlocal_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.03 + mIoU(ms+flip): 43.04 + Config: configs/nonlocal_net/nonlocal_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410-baef45e3.pth +- Name: nonlocal_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.63 + mIoU(ms+flip): 45.79 + Config: configs/nonlocal_net/nonlocal_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20210827_221502-7881aa1a.pth +- Name: nonlocal_r50-d8_4xb4-20k_voc12aug-512x512 + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 47.15 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.4 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.2 + mIoU(ms+flip): 77.12 + Config: configs/nonlocal_net/nonlocal_r50-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613-07f2a57c.pth +- Name: nonlocal_r101-d8_4xb4-20k_voc12aug-512x512 + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 71.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.8 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.15 + mIoU(ms+flip): 78.86 + Config: configs/nonlocal_net/nonlocal_r101-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615-948c68ab.pth +- Name: nonlocal_r50-d8_4xb4-40k_voc12aug-512x512 + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.65 + mIoU(ms+flip): 77.47 + Config: configs/nonlocal_net/nonlocal_r50-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028-0139d4a9.pth +- Name: nonlocal_r101-d8_4xb4-40k_voc12aug-512x512 + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.27 + mIoU(ms+flip): 79.12 + Config: configs/nonlocal_net/nonlocal_r101-d8_4xb4-40k_voc12aug-512x512.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028-7e5ff470.pth diff --git a/configs/nonlocal_net/nonlocal_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/nonlocal_net/nonlocal_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..5fcf7bcb16 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/nonlocal_net/nonlocal_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..ee984c2bbd --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/nonlocal_net/nonlocal_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..aca80d676a --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/nonlocal_net/nonlocal_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..8a7aeea7f6 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/nonlocal_net/nonlocal_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..0cdb3caaf3 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_4xb4-20k_voc12aug-512x512.py b/configs/nonlocal_net/nonlocal_r101-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..a7cacea517 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r101-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_4xb4-40k_voc12aug-512x512.py b/configs/nonlocal_net/nonlocal_r101-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..ec475443e8 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r101-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_4xb4-40k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/nonlocal_net/nonlocal_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..ca79f6fdc0 --- /dev/null +++ 
b/configs/nonlocal_net/nonlocal_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py b/configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index ef7b06dd38..0000000000 --- a/configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './nonlocal_r50-d8_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py b/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 7a1e66cf1c..0000000000 --- a/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './nonlocal_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py b/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index df9c2aca9c..0000000000 --- a/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './nonlocal_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py b/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 490f9873a2..0000000000 --- a/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './nonlocal_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py b/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 40d9190fba..0000000000 --- a/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './nonlocal_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py b/configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index 0c6f60dac7..0000000000 --- a/configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './nonlocal_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py b/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 23e6da7f23..0000000000 --- a/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './nonlocal_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py b/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 0627e2b5a7..0000000000 --- a/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = 
'./nonlocal_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/nonlocal_net/nonlocal_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..f4d5fd22f9 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/nonlocal_net/nonlocal_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..17423f2658 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/nonlocal_net/nonlocal_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..7cc752c2c7 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/nonlocal_net/nonlocal_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..f855a814e5 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/nonlocal_net/nonlocal_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..848be4a233 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_4xb4-20k_voc12aug-512x512.py 
b/configs/nonlocal_net/nonlocal_r50-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..cd840a03d1 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r50-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_4xb4-40k_voc12aug-512x512.py b/configs/nonlocal_net/nonlocal_r50-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..0efb9d0969 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r50-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/nonlocal_net/nonlocal_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..52783bcc98 --- /dev/null +++ b/configs/nonlocal_net/nonlocal_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py b/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 9d4dc73903..0000000000 --- a/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py b/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index b0672b687a..0000000000 --- a/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k.py b/configs/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index b1adfbab88..0000000000 --- a/configs/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py b/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 2e808d8072..0000000000 
--- a/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/nonlocal_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug.py b/configs/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 66b443abec..0000000000 --- a/configs/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/nonlocal_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k.py b/configs/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 8a7a2f509b..0000000000 --- a/configs/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py b/configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 3f0d47238f..0000000000 --- a/configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/nonlocal_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py b/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 5d448c730a..0000000000 --- a/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/nonlocal_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/ocrnet/README.md b/configs/ocrnet/README.md index fe9e05aaac..5cbfbabfce 100644 --- a/configs/ocrnet/README.md +++ b/configs/ocrnet/README.md @@ -1,46 +1,89 @@ -# Object-Contextual Representations for Semantic Segmentation +# OCRNet + +[Object-Contextual Representations for Semantic Segmentation](https://arxiv.org/abs/1909.11065) ## Introduction -``` -@article{yuan2019ocr, + + + +Official Repo + +Code Snippet + +## Abstract + + + +In this paper, we address the problem of semantic segmentation and focus on the context aggregation strategy for robust segmentation. Our motivation is that the label of a pixel is the category of the object that the pixel belongs to. 
We present a simple yet effective approach, object-contextual representations, characterizing a pixel by exploiting the representation of the corresponding object class. First, we construct object regions based on a feature map supervised by the ground-truth segmentation, and then compute the object region representations. Second, we compute the representation similarity between each pixel and each object region, and augment the representation of each pixel with an object contextual representation, which is a weighted aggregation of all the object region representations according to their similarities with the pixel. We empirically demonstrate that the proposed approach achieves competitive performance on six challenging semantic segmentation benchmarks: Cityscapes, ADE20K, LIP, PASCAL VOC 2012, PASCAL-Context and COCO-Stuff. Notably, we achieved the 2nd place on the Cityscapes leader-board with a single model. + + +
+ +
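> Editor's note: the two steps in the abstract (build object-region representations, then augment every pixel with a similarity-weighted aggregation of those representations) can be sketched in a few lines of PyTorch. The snippet below is only an illustrative reading with assumed tensor names and shapes, not the code added in this PR; the actual implementation is `OCRHead` in `mmseg/models/decode_heads/ocr_head.py`.

```python
# Minimal sketch of object-contextual aggregation (illustrative only).
import torch
import torch.nn.functional as F


def object_contextual_representation(pixel_feats, soft_regions):
    """pixel_feats: (B, C, H, W) backbone features.
    soft_regions: (B, K, H, W) coarse per-class scores, e.g. the output of
    the auxiliary FCN head that is supervised by the ground truth."""
    b, c, h, w = pixel_feats.shape
    feats = pixel_feats.flatten(2)                     # (B, C, N), N = H*W
    regions = soft_regions.flatten(2).softmax(dim=-1)  # (B, K, N)

    # 1) object region representations: soft-weighted average of pixel features
    region_repr = torch.einsum('bkn,bcn->bkc', regions, feats)   # (B, K, C)

    # 2) pixel-to-region similarity, then a weighted aggregation of the
    #    region representations for every pixel (the "object context")
    sim = torch.einsum('bcn,bkc->bnk', feats, region_repr)       # (B, N, K)
    sim = F.softmax(sim / c ** 0.5, dim=-1)
    context = torch.einsum('bnk,bkc->bcn', sim, region_repr)     # (B, C, N)
    context = context.reshape(b, c, h, w)

    # each pixel representation is augmented with its object context
    return torch.cat([pixel_feats, context], dim=1)              # (B, 2C, H, W)


if __name__ == '__main__':
    x = torch.randn(2, 64, 32, 32)
    coarse = torch.randn(2, 19, 32, 32)  # e.g. 19 Cityscapes classes
    print(object_contextual_representation(x, coarse).shape)
```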
+ +## Citation + +```bibtex +@article{YuanW18, + title={Ocnet: Object context network for scene parsing}, + author={Yuhui Yuan and Jingdong Wang}, + booktitle={arXiv preprint arXiv:1809.00916}, + year={2018} +} + +@article{YuanCW20, title={Object-Contextual Representations for Semantic Segmentation}, - author={Yuan Yuhui and Chen Xilin and Wang Jingdong}, - journal={arXiv preprint arXiv:1909.11065}, - year={2019} + author={Yuhui Yuan and Xilin Chen and Jingdong Wang}, + booktitle={ECCV}, + year={2020} } ``` ## Results and models ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|--------------------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| OCRNet | HRNetV2p-W18-Small | 512x1024 | 40000 | 3.5 | 10.45 | 74.30 | 75.95 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304-fa2436c2.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304.log.json) | -| OCRNet | HRNetV2p-W18 | 512x1024 | 40000 | 4.7 | 7.50 | 77.72 | 79.49 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320-401c5bdd.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320.log.json) | -| OCRNet | HRNetV2p-W48 | 512x1024 | 40000 | 8 | 4.22 | 80.58 | 81.79 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336-55b32491.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336.log.json) | -| OCRNet | HRNetV2p-W18-Small | 512x1024 | 80000 | - | - | 77.16 | 78.66 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735-55979e63.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735.log.json) | -| OCRNet | HRNetV2p-W18 | 512x1024 | 80000 | - | - | 78.57 | 80.46 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521-c2e1dd4a.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521.log.json) | -| OCRNet | HRNetV2p-W48 | 512x1024 | 80000 | - | - | 80.70 | 81.87 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752-9076bcdf.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752.log.json) | -| OCRNet | HRNetV2p-W18-Small | 512x1024 | 160000 | - | - | 78.45 | 79.97 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005-f4a7af28.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005.log.json) | -| OCRNet | HRNetV2p-W18 | 512x1024 | 160000 | - | - | 79.47 | 80.91 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001-b9172d0c.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001.log.json) | -| OCRNet | HRNetV2p-W48 | 512x1024 | 160000 | - | - | 81.35 | 82.70 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037-dfbf1b0c.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037.log.json) | + +#### HRNet backbone + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| OCRNet | HRNetV2p-W18-Small | 512x1024 | 40000 | 3.5 | 10.45 | 74.30 | 75.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18s_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304-fa2436c2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304.log.json) | +| OCRNet | HRNetV2p-W18 | 512x1024 | 40000 | 4.7 | 7.50 | 77.72 | 79.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320-401c5bdd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320.log.json) | +| OCRNet | HRNetV2p-W48 | 512x1024 | 40000 | 8 | 
4.22 | 80.58 | 81.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr48_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336-55b32491.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336.log.json) | +| OCRNet | HRNetV2p-W18-Small | 512x1024 | 80000 | - | - | 77.16 | 78.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18s_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735-55979e63.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735.log.json) | +| OCRNet | HRNetV2p-W18 | 512x1024 | 80000 | - | - | 78.57 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521-c2e1dd4a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521.log.json) | +| OCRNet | HRNetV2p-W48 | 512x1024 | 80000 | - | - | 80.70 | 81.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr48_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752-9076bcdf.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752.log.json) | +| OCRNet | HRNetV2p-W18-Small | 512x1024 | 160000 | - | - | 78.45 | 79.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18s_4xb2-160k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005-f4a7af28.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005.log.json) | +| OCRNet | HRNetV2p-W18 | 512x1024 | 160000 | - | - | 79.47 | 80.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18_4xb2-160k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001-b9172d0c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001.log.json) | +| OCRNet | HRNetV2p-W48 | 512x1024 | 160000 | - | - | 81.35 | 82.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr48_4xb2-160k_cityscapes-512x1024.py) |
[model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037-dfbf1b0c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037.log.json) | + +#### ResNet backbone + +| Method | Backbone | Crop Size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | --------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| OCRNet | R-101-D8 | 512x1024 | 8 | 40000 | - | - | 80.09 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes/ocrnet_r101-d8_512x1024_40k_b8_cityscapes_20200717_110721-02ac0f13.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes/ocrnet_r101-d8_512x1024_40k_b8_cityscapes_20200717_110721.log.json) | +| OCRNet | R-101-D8 | 512x1024 | 16 | 40000 | 8.8 | 3.02 | 80.30 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_r101-d8_8xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes/ocrnet_r101-d8_512x1024_40k_b16_cityscapes_20200723_193726-db500f80.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes/ocrnet_r101-d8_512x1024_40k_b16_cityscapes_20200723_193726.log.json) | +| OCRNet | R-101-D8 | 512x1024 | 16 | 80000 | 8.8 | 3.02 | 80.81 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_r101-d8_8xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes/ocrnet_r101-d8_512x1024_80k_b16_cityscapes_20200723_192421-78688424.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes/ocrnet_r101-d8_512x1024_80k_b16_cityscapes_20200723_192421.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|--------------------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| OCRNet | HRNetV2p-W18-Small | 512x512 | 80000 | 6.7 | 28.98 | 35.06 | 35.80 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600-e80b62af.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600.log.json) | -| OCRNet | HRNetV2p-W18 | 512x512 | 80000 | 7.9 | 18.93 | 37.79 | 39.16 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157-d173d83b.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157.log.json) | -| OCRNet | HRNetV2p-W48 | 512x512 | 80000 | 11.2 | 16.99 | 43.00 | 44.30 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518-d168c2d1.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518.log.json) | -| OCRNet | HRNetV2p-W18-Small | 512x512 | 160000 | - | - | 37.19 | 38.40 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505-8e913058.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505.log.json) | -| OCRNet | HRNetV2p-W18 | 512x512 | 160000 | - | - | 39.32 | 40.80 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940-d8fcd9d1.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940.log.json) | -| OCRNet | HRNetV2p-W48 | 512x512 | 160000 | - | - | 43.25 | 44.88 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705-a073726d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| OCRNet | HRNetV2p-W18-Small | 512x512 | 80000 | 6.7 | 28.98 | 35.06 | 35.80 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18s_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600-e80b62af.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600.log.json) | +| OCRNet | HRNetV2p-W18 | 512x512 | 80000 | 7.9 | 18.93 | 37.79 | 39.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157-d173d83b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157.log.json) | +| OCRNet | HRNetV2p-W48 | 512x512 | 80000 | 11.2 | 16.99 | 43.00 | 44.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr48_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518-d168c2d1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518.log.json) | +| OCRNet | HRNetV2p-W18-Small | 512x512 | 160000 | - | - | 37.19 | 38.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18s_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505-8e913058.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505.log.json) | +| OCRNet | HRNetV2p-W18 | 512x512 | 160000 | - | - | 39.32 | 40.80 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940-d8fcd9d1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940.log.json) | +| OCRNet | HRNetV2p-W48 | 512x512 | 160000 | - | - | 43.25 | 44.88 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr48_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705-a073726d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705.log.json) | ### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|--------------------|-----------|--------:|----------|----------------|------:|--------------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| OCRNet | HRNetV2p-W18-Small | 512x512 | 20000 | 3.5 | 31.55 | 71.70 | 73.84 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913-02b04fcb.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913.log.json) | -| OCRNet | HRNetV2p-W18 | 512x512 | 20000 | 4.7 | 19.91 | 74.75 | 77.11 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932-8954cbb7.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932.log.json) | -| OCRNet | HRNetV2p-W48 | 512x512 | 20000 | 8.1 | 17.83 | 77.72 | 79.87 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932-9e82080a.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932.log.json) | -| OCRNet | HRNetV2p-W18-Small | 512x512 | 40000 | - | - | 72.76 | 74.60 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025-42b587ac.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025.log.json) | -| OCRNet | HRNetV2p-W18 | 512x512 | 40000 | - | - | 74.98 | 77.40 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958-714302be.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958.log.json) | -| OCRNet | HRNetV2p-W48 | 512x512 | 40000 | - | - | 77.14 | 79.71 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958-255bc5ce.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| OCRNet | HRNetV2p-W18-Small | 512x512 | 20000 | 3.5 | 31.55 | 71.70 | 73.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18s_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913-02b04fcb.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913.log.json) | +| OCRNet | HRNetV2p-W18 | 512x512 | 20000 | 4.7 | 19.91 | 74.75 | 77.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932-8954cbb7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932.log.json) | +| OCRNet | HRNetV2p-W48 | 512x512 | 20000 | 8.1 | 17.83 | 77.72 | 79.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr48_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932-9e82080a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932.log.json) | +| OCRNet | HRNetV2p-W18-Small | 512x512 | 40000 | - | - | 72.76 | 74.60 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18s_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025-42b587ac.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025.log.json) | +| OCRNet | HRNetV2p-W18 | 512x512 | 40000 | - | - | 74.98 | 77.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr18_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958-714302be.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958.log.json) | +| OCRNet | HRNetV2p-W48 | 512x512 | 40000 | - | - | 77.14 | 79.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/ocrnet/ocrnet_hr48_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958-255bc5ce.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958.log.json) | diff --git a/configs/ocrnet/ocrnet.yml b/configs/ocrnet/ocrnet.yml new file mode 100644 index 0000000000..a81aec2c75 --- /dev/null +++ b/configs/ocrnet/ocrnet.yml @@ -0,0 +1,438 @@ +Collections: +- Name: OCRNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1909.11065 + Title: Object-Contextual Representations for Semantic Segmentation + README: configs/ocrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ocr_head.py#L86 + Version: v0.17.0 + Converted From: + Code: https://github.com/openseg-group/OCNet.pytorch +Models: +- Name: ocrnet_hr18s_4xb2-40k_cityscapes-512x1024 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + 
crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 95.69 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.3 + mIoU(ms+flip): 75.95 + Config: configs/ocrnet/ocrnet_hr18s_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304-fa2436c2.pth +- Name: ocrnet_hr18_4xb2-40k_cityscapes-512x1024 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 133.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 4.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.72 + mIoU(ms+flip): 79.49 + Config: configs/ocrnet/ocrnet_hr18_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320-401c5bdd.pth +- Name: ocrnet_hr48_4xb2-40k_cityscapes-512x1024 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 236.97 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.58 + mIoU(ms+flip): 81.79 + Config: configs/ocrnet/ocrnet_hr48_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336-55b32491.pth +- Name: ocrnet_hr18s_4xb2-80k_cityscapes-512x1024 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.16 + mIoU(ms+flip): 78.66 + Config: configs/ocrnet/ocrnet_hr18s_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735-55979e63.pth +- Name: ocrnet_hr18_4xb2-80k_cityscapes-512x1024 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.57 + mIoU(ms+flip): 80.46 + Config: configs/ocrnet/ocrnet_hr18_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521-c2e1dd4a.pth +- Name: ocrnet_hr48_4xb2-80k_cityscapes-512x1024 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.7 + mIoU(ms+flip): 81.87 + Config: configs/ocrnet/ocrnet_hr48_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752-9076bcdf.pth +- Name: ocrnet_hr18s_4xb2-160k_cityscapes-512x1024 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,1024) + lr 
schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.45 + mIoU(ms+flip): 79.97 + Config: configs/ocrnet/ocrnet_hr18s_4xb2-160k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005-f4a7af28.pth +- Name: ocrnet_hr18_4xb2-160k_cityscapes-512x1024 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.47 + mIoU(ms+flip): 80.91 + Config: configs/ocrnet/ocrnet_hr18_4xb2-160k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001-b9172d0c.pth +- Name: ocrnet_hr48_4xb2-160k_cityscapes-512x1024 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 81.35 + mIoU(ms+flip): 82.7 + Config: configs/ocrnet/ocrnet_hr48_4xb2-160k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037-dfbf1b0c.pth +- Name: ocrnet_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: OCRNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.09 + Config: configs/ocrnet/ocrnet_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes/ocrnet_r101-d8_512x1024_40k_b8_cityscapes_20200717_110721-02ac0f13.pth +- Name: ocrnet_r101-d8_8xb2-40k_cityscapes-512x1024 + In Collection: OCRNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 331.13 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.3 + Config: configs/ocrnet/ocrnet_r101-d8_8xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes/ocrnet_r101-d8_512x1024_40k_b16_cityscapes_20200723_193726-db500f80.pth +- Name: ocrnet_r101-d8_8xb2-80k_cityscapes-512x1024 + In Collection: OCRNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 331.13 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.81 + Config: configs/ocrnet/ocrnet_r101-d8_8xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes/ocrnet_r101-d8_512x1024_80k_b16_cityscapes_20200723_192421-78688424.pth +- Name: ocrnet_hr18s_4xb4-80k_ade20k-512x512 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 34.51 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.7 + Results: + - Task: Semantic 
Segmentation + Dataset: ADE20K + Metrics: + mIoU: 35.06 + mIoU(ms+flip): 35.8 + Config: configs/ocrnet/ocrnet_hr18s_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600-e80b62af.pth +- Name: ocrnet_hr18_4xb4-80k_ade20k-512x512 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 52.83 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.9 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 37.79 + mIoU(ms+flip): 39.16 + Config: configs/ocrnet/ocrnet_hr18_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157-d173d83b.pth +- Name: ocrnet_hr48_4xb4-80k_ade20k-512x512 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 58.86 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 11.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.0 + mIoU(ms+flip): 44.3 + Config: configs/ocrnet/ocrnet_hr48_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518-d168c2d1.pth +- Name: ocrnet_hr18s_4xb4-160k_ade20k-512x512 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 37.19 + mIoU(ms+flip): 38.4 + Config: configs/ocrnet/ocrnet_hr18s_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505-8e913058.pth +- Name: ocrnet_hr18_4xb4-160k_ade20k-512x512 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 39.32 + mIoU(ms+flip): 40.8 + Config: configs/ocrnet/ocrnet_hr18_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940-d8fcd9d1.pth +- Name: ocrnet_hr48_4xb4-160k_ade20k-512x512 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.25 + mIoU(ms+flip): 44.88 + Config: configs/ocrnet/ocrnet_hr48_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705-a073726d.pth +- Name: ocrnet_hr18s_4xb4-20k_voc12aug-512x512 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 31.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 3.5 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 71.7 + mIoU(ms+flip): 73.84 + Config: configs/ocrnet/ocrnet_hr18s_4xb4-20k_voc12aug-512x512.py + Weights:
https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913-02b04fcb.pth +- Name: ocrnet_hr18_4xb4-20k_voc12aug-512x512 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 50.23 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.7 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 74.75 + mIoU(ms+flip): 77.11 + Config: configs/ocrnet/ocrnet_hr18_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932-8954cbb7.pth +- Name: ocrnet_hr48_4xb4-20k_voc12aug-512x512 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 56.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.1 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.72 + mIoU(ms+flip): 79.87 + Config: configs/ocrnet/ocrnet_hr48_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932-9e82080a.pth +- Name: ocrnet_hr18s_4xb4-40k_voc12aug-512x512 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 72.76 + mIoU(ms+flip): 74.6 + Config: configs/ocrnet/ocrnet_hr18s_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025-42b587ac.pth +- Name: ocrnet_hr18_4xb4-40k_voc12aug-512x512 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 74.98 + mIoU(ms+flip): 77.4 + Config: configs/ocrnet/ocrnet_hr18_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958-714302be.pth +- Name: ocrnet_hr48_4xb4-40k_voc12aug-512x512 + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.14 + mIoU(ms+flip): 79.71 + Config: configs/ocrnet/ocrnet_hr48_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958-255bc5ce.pth diff --git a/configs/ocrnet/ocrnet_hr18_4xb2-160k_cityscapes-512x1024.py b/configs/ocrnet/ocrnet_hr18_4xb2-160k_cityscapes-512x1024.py new file mode 100644 index 0000000000..659217cf69 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18_4xb2-160k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) 
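> Editor's note on the `configs/ocrnet/ocrnet.yml` metafile added above: it is plain model-index YAML, so it can be consumed with any YAML parser. The following is a minimal, hypothetical sketch (PyYAML assumed; not part of this PR) that lists each model's reported mIoU together with its config path and checkpoint URL, using the field names introduced in this diff.

```python
# List every OCRNet entry in the metafile added by this PR (illustrative only).
import yaml

with open('configs/ocrnet/ocrnet.yml') as f:
    metafile = yaml.safe_load(f)

for model in metafile['Models']:
    metrics = model['Results'][0]['Metrics']
    print(f"{model['Name']}: mIoU={metrics.get('mIoU')}")
    print(f"  config:  {model['Config']}")
    print(f"  weights: {model['Weights']}")
```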
diff --git a/configs/ocrnet/ocrnet_hr18_4xb2-40k_cityscapes-512x1024.py b/configs/ocrnet/ocrnet_hr18_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..d401c4b1e7 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/ocrnet/ocrnet_hr18_4xb2-80k_cityscapes-512x1024.py b/configs/ocrnet/ocrnet_hr18_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..44426a28e1 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/ocrnet/ocrnet_hr18_4xb4-160k_ade20k-512x512.py b/configs/ocrnet/ocrnet_hr18_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..353005b57b --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=[ + dict( + type='FCNHead', + in_channels=[18, 36, 72, 144], + channels=sum([18, 36, 72, 144]), + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + kernel_size=1, + num_convs=1, + concat_input=False, + dropout_ratio=-1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[18, 36, 72, 144], + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + channels=512, + ocr_channels=256, + dropout_ratio=-1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + ]) diff --git a/configs/ocrnet/ocrnet_hr18_4xb4-20k_voc12aug-512x512.py b/configs/ocrnet/ocrnet_hr18_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..c696c21e96 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,40 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=[ + dict( + type='FCNHead', + in_channels=[18, 36, 72, 144], + channels=sum([18, 36, 72, 144]), + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + kernel_size=1, + num_convs=1, + concat_input=False, + dropout_ratio=-1, + num_classes=21, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[18, 36, 72, 144], + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + 
channels=512, + ocr_channels=256, + dropout_ratio=-1, + num_classes=21, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + ]) diff --git a/configs/ocrnet/ocrnet_hr18_4xb4-40k_voc12aug-512x512.py b/configs/ocrnet/ocrnet_hr18_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..c6b69ea632 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,40 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=[ + dict( + type='FCNHead', + in_channels=[18, 36, 72, 144], + channels=sum([18, 36, 72, 144]), + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + kernel_size=1, + num_convs=1, + concat_input=False, + dropout_ratio=-1, + num_classes=21, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[18, 36, 72, 144], + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + channels=512, + ocr_channels=256, + dropout_ratio=-1, + num_classes=21, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + ]) diff --git a/configs/ocrnet/ocrnet_hr18_4xb4-80k_ade20k-512x512.py b/configs/ocrnet/ocrnet_hr18_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..ceca8df696 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=[ + dict( + type='FCNHead', + in_channels=[18, 36, 72, 144], + channels=sum([18, 36, 72, 144]), + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + kernel_size=1, + num_convs=1, + concat_input=False, + dropout_ratio=-1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[18, 36, 72, 144], + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + channels=512, + ocr_channels=256, + dropout_ratio=-1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + ]) diff --git a/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py b/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py deleted file mode 100644 index 1c86eba17c..0000000000 --- a/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] diff --git a/configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py b/configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py deleted file mode 100644 index 2c73b3839c..0000000000 --- a/configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = 
[ - '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py b/configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py deleted file mode 100644 index 506ad9319a..0000000000 --- a/configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py b/configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py deleted file mode 100644 index fe5d20ffb0..0000000000 --- a/configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py +++ /dev/null @@ -1,35 +0,0 @@ -_base_ = [ - '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict(decode_head=[ - dict( - type='FCNHead', - in_channels=[18, 36, 72, 144], - channels=sum([18, 36, 72, 144]), - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - kernel_size=1, - num_convs=1, - concat_input=False, - drop_out_ratio=-1, - num_classes=150, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[18, 36, 72, 144], - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - channels=512, - ocr_channels=256, - drop_out_ratio=-1, - num_classes=150, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), -]) diff --git a/configs/ocrnet/ocrnet_hr18_512x512_20k_voc12aug.py b/configs/ocrnet/ocrnet_hr18_512x512_20k_voc12aug.py deleted file mode 100644 index 71e70dcec1..0000000000 --- a/configs/ocrnet/ocrnet_hr18_512x512_20k_voc12aug.py +++ /dev/null @@ -1,36 +0,0 @@ -_base_ = [ - '../_base_/models/ocrnet_hr18.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict(decode_head=[ - dict( - type='FCNHead', - in_channels=[18, 36, 72, 144], - channels=sum([18, 36, 72, 144]), - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - kernel_size=1, - num_convs=1, - concat_input=False, - drop_out_ratio=-1, - num_classes=21, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[18, 36, 72, 144], - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - channels=512, - ocr_channels=256, - drop_out_ratio=-1, - num_classes=21, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), -]) diff --git a/configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py b/configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py deleted file mode 100644 index b3fd747211..0000000000 --- a/configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py +++ /dev/null @@ -1,36 +0,0 @@ -_base_ = [ - '../_base_/models/ocrnet_hr18.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict(decode_head=[ - dict( - type='FCNHead', - in_channels=[18, 36, 72, 144], - 
channels=sum([18, 36, 72, 144]), - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - kernel_size=1, - num_convs=1, - concat_input=False, - drop_out_ratio=-1, - num_classes=21, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[18, 36, 72, 144], - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - channels=512, - ocr_channels=256, - drop_out_ratio=-1, - num_classes=21, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), -]) diff --git a/configs/ocrnet/ocrnet_hr18_512x512_80k_ade20k.py b/configs/ocrnet/ocrnet_hr18_512x512_80k_ade20k.py deleted file mode 100644 index e41eaf8ac5..0000000000 --- a/configs/ocrnet/ocrnet_hr18_512x512_80k_ade20k.py +++ /dev/null @@ -1,35 +0,0 @@ -_base_ = [ - '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict(decode_head=[ - dict( - type='FCNHead', - in_channels=[18, 36, 72, 144], - channels=sum([18, 36, 72, 144]), - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - kernel_size=1, - num_convs=1, - concat_input=False, - drop_out_ratio=-1, - num_classes=150, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[18, 36, 72, 144], - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - channels=512, - ocr_channels=256, - drop_out_ratio=-1, - num_classes=150, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), -]) diff --git a/configs/ocrnet/ocrnet_hr18s_4xb2-160k_cityscapes-512x1024.py b/configs/ocrnet/ocrnet_hr18s_4xb2-160k_cityscapes-512x1024.py new file mode 100644 index 0000000000..c5388fb751 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18s_4xb2-160k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_4xb2-160k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_4xb2-40k_cityscapes-512x1024.py b/configs/ocrnet/ocrnet_hr18s_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..2335f3b762 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18s_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_4xb2-40k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_4xb2-80k_cityscapes-512x1024.py b/configs/ocrnet/ocrnet_hr18s_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..b2d1a8fa84 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18s_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + 
stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_4xb4-160k_ade20k-512x512.py b/configs/ocrnet/ocrnet_hr18s_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..fabf5826cd --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18s_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_4xb4-160k_ade20k-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_4xb4-20k_voc12aug-512x512.py b/configs/ocrnet/ocrnet_hr18s_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..0eca655cfc --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18s_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_4xb4-20k_voc12aug-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_4xb4-40k_voc12aug-512x512.py b/configs/ocrnet/ocrnet_hr18s_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..13b02b9df6 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18s_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_4xb4-40k_voc12aug-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_4xb4-80k_ade20k-512x512.py b/configs/ocrnet/ocrnet_hr18s_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..60c79c2dc5 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr18s_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_4xb4-80k_ade20k-512x512.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes.py b/configs/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes.py deleted file mode 100644 index fc7909785f..0000000000 --- a/configs/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './ocrnet_hr18_512x1024_160k_cityscapes.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py b/configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py deleted file mode 100644 index 923731f74f..0000000000 --- a/configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './ocrnet_hr18_512x1024_40k_cityscapes.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( 
- stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes.py b/configs/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes.py deleted file mode 100644 index be6bf16a2f..0000000000 --- a/configs/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './ocrnet_hr18_512x1024_80k_cityscapes.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py b/configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py deleted file mode 100644 index 81f3d5cb91..0000000000 --- a/configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './ocrnet_hr18_512x512_160k_ade20k.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug.py b/configs/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug.py deleted file mode 100644 index ceb944815b..0000000000 --- a/configs/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './ocrnet_hr18_512x512_20k_voc12aug.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug.py b/configs/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug.py deleted file mode 100644 index 70babc91c9..0000000000 --- a/configs/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './ocrnet_hr18_512x512_40k_voc12aug.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py b/configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py deleted file mode 100644 index 36e77219ac..0000000000 --- a/configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './ocrnet_hr18_512x512_80k_ade20k.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/configs/ocrnet/ocrnet_hr48_4xb2-160k_cityscapes-512x1024.py b/configs/ocrnet/ocrnet_hr48_4xb2-160k_cityscapes-512x1024.py new file mode 100644 index 0000000000..184d38dd2c --- /dev/null +++ b/configs/ocrnet/ocrnet_hr48_4xb2-160k_cityscapes-512x1024.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_4xb2-160k_cityscapes-512x1024.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', 
+ backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/configs/ocrnet/ocrnet_hr48_4xb2-40k_cityscapes-512x1024.py b/configs/ocrnet/ocrnet_hr48_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..7025ee9e77 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr48_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_4xb2-40k_cityscapes-512x1024.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/configs/ocrnet/ocrnet_hr48_4xb2-80k_cityscapes-512x1024.py b/configs/ocrnet/ocrnet_hr48_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..9c68a15fc5 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr48_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_4xb2-80k_cityscapes-512x1024.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff 
--git a/configs/ocrnet/ocrnet_hr48_4xb4-160k_ade20k-512x512.py b/configs/ocrnet/ocrnet_hr48_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..e74976c805 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr48_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_4xb4-160k_ade20k-512x512.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/configs/ocrnet/ocrnet_hr48_4xb4-20k_voc12aug-512x512.py b/configs/ocrnet/ocrnet_hr48_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..f015b920e1 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr48_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_4xb4-20k_voc12aug-512x512.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=21, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=21, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/configs/ocrnet/ocrnet_hr48_4xb4-40k_voc12aug-512x512.py b/configs/ocrnet/ocrnet_hr48_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..baafa380d4 --- /dev/null +++ b/configs/ocrnet/ocrnet_hr48_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_4xb4-40k_voc12aug-512x512.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=21, + align_corners=False, + 
loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=21, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/configs/ocrnet/ocrnet_hr48_4xb4-80k_ade20k-512x512.py b/configs/ocrnet/ocrnet_hr48_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..85514b9d7e --- /dev/null +++ b/configs/ocrnet/ocrnet_hr48_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_4xb4-80k_ade20k-512x512.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/configs/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes.py b/configs/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes.py deleted file mode 100644 index 70c1ce5b5b..0000000000 --- a/configs/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './ocrnet_hr18_512x1024_160k_cityscapes.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[48, 96, 192, 384], - channels=sum([48, 96, 192, 384]), - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - kernel_size=1, - num_convs=1, - norm_cfg=norm_cfg, - concat_input=False, - drop_out_ratio=-1, - num_classes=19, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[48, 96, 192, 384], - channels=512, - ocr_channels=256, - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - norm_cfg=norm_cfg, - drop_out_ratio=-1, - num_classes=19, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) - ]) diff --git a/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py b/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py deleted file mode 100644 index cd777e89bf..0000000000 --- a/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './ocrnet_hr18_512x1024_40k_cityscapes.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - 
stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[48, 96, 192, 384], - channels=sum([48, 96, 192, 384]), - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - kernel_size=1, - num_convs=1, - norm_cfg=norm_cfg, - concat_input=False, - drop_out_ratio=-1, - num_classes=19, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[48, 96, 192, 384], - channels=512, - ocr_channels=256, - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - norm_cfg=norm_cfg, - drop_out_ratio=-1, - num_classes=19, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) - ]) diff --git a/configs/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes.py b/configs/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes.py deleted file mode 100644 index 6ed60096a1..0000000000 --- a/configs/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './ocrnet_hr18_512x1024_80k_cityscapes.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[48, 96, 192, 384], - channels=sum([48, 96, 192, 384]), - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - kernel_size=1, - num_convs=1, - norm_cfg=norm_cfg, - concat_input=False, - drop_out_ratio=-1, - num_classes=19, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[48, 96, 192, 384], - channels=512, - ocr_channels=256, - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - norm_cfg=norm_cfg, - drop_out_ratio=-1, - num_classes=19, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) - ]) diff --git a/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py b/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py deleted file mode 100644 index f6cd20e642..0000000000 --- a/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './ocrnet_hr18_512x512_160k_ade20k.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[48, 96, 192, 384], - channels=sum([48, 96, 192, 384]), - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - kernel_size=1, - num_convs=1, - norm_cfg=norm_cfg, - concat_input=False, - drop_out_ratio=-1, - num_classes=150, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[48, 96, 192, 384], - channels=512, - ocr_channels=256, - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - norm_cfg=norm_cfg, - drop_out_ratio=-1, - num_classes=150, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) - ]) diff --git a/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py b/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py 
deleted file mode 100644 index 3149cfc371..0000000000 --- a/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './ocrnet_hr18_512x512_20k_voc12aug.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[48, 96, 192, 384], - channels=sum([48, 96, 192, 384]), - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - kernel_size=1, - num_convs=1, - norm_cfg=norm_cfg, - concat_input=False, - drop_out_ratio=-1, - num_classes=21, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[48, 96, 192, 384], - channels=512, - ocr_channels=256, - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - norm_cfg=norm_cfg, - drop_out_ratio=-1, - num_classes=21, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) - ]) diff --git a/configs/ocrnet/ocrnet_hr48_512x512_40k_voc12aug.py b/configs/ocrnet/ocrnet_hr48_512x512_40k_voc12aug.py deleted file mode 100644 index f97260039b..0000000000 --- a/configs/ocrnet/ocrnet_hr48_512x512_40k_voc12aug.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './ocrnet_hr18_512x512_40k_voc12aug.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[48, 96, 192, 384], - channels=sum([48, 96, 192, 384]), - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - kernel_size=1, - num_convs=1, - norm_cfg=norm_cfg, - concat_input=False, - drop_out_ratio=-1, - num_classes=21, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[48, 96, 192, 384], - channels=512, - ocr_channels=256, - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - norm_cfg=norm_cfg, - drop_out_ratio=-1, - num_classes=21, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) - ]) diff --git a/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py b/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py deleted file mode 100644 index 94dbe90298..0000000000 --- a/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './ocrnet_hr18_512x512_80k_ade20k.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[48, 96, 192, 384], - channels=sum([48, 96, 192, 384]), - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - kernel_size=1, - num_convs=1, - norm_cfg=norm_cfg, - concat_input=False, - drop_out_ratio=-1, - num_classes=150, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[48, 96, 192, 384], - channels=512, - 
ocr_channels=256, - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - norm_cfg=norm_cfg, - drop_out_ratio=-1, - num_classes=150, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) - ]) diff --git a/configs/ocrnet/ocrnet_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/ocrnet/ocrnet_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..a94597bf35 --- /dev/null +++ b/configs/ocrnet/ocrnet_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/ocrnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet101_v1c', + backbone=dict(depth=101)) diff --git a/configs/ocrnet/ocrnet_r101-d8_8xb2-40k_cityscapes-512x1024.py b/configs/ocrnet/ocrnet_r101-d8_8xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..88e5ad08fd --- /dev/null +++ b/configs/ocrnet/ocrnet_r101-d8_8xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,21 @@ +_base_ = [ + '../_base_/models/ocrnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet101_v1c', + backbone=dict(depth=101)) +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +param_scheduler = [ + dict( + type='PolyLR', + eta_min=2e-4, + power=0.9, + begin=0, + end=40000, + by_epoch=False) +] diff --git a/configs/ocrnet/ocrnet_r101-d8_8xb2-80k_cityscapes-512x1024.py b/configs/ocrnet/ocrnet_r101-d8_8xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..a3b420909c --- /dev/null +++ b/configs/ocrnet/ocrnet_r101-d8_8xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,21 @@ +_base_ = [ + '../_base_/models/ocrnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet101_v1c', + backbone=dict(depth=101)) +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +param_scheduler = [ + dict( + type='PolyLR', + eta_min=2e-4, + power=0.9, + begin=0, + end=40000, + by_epoch=False) +] diff --git a/configs/point_rend/README.md b/configs/point_rend/README.md new file mode 100644 index 0000000000..2690e7b9e6 --- /dev/null +++ b/configs/point_rend/README.md @@ -0,0 +1,51 @@ +# PointRend + +[PointRend: Image Segmentation as Rendering](https://arxiv.org/abs/1912.08193) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +We present a new method for efficient high-quality image segmentation of objects and scenes. By analogizing classical computer graphics methods for efficient rendering with over- and undersampling challenges faced in pixel labeling tasks, we develop a unique perspective of image segmentation as a rendering problem. 
From this vantage, we present the PointRend (Point-based Rendering) neural network module: a module that performs point-based segmentation predictions at adaptively selected locations based on an iterative subdivision algorithm. PointRend can be flexibly applied to both instance and semantic segmentation tasks by building on top of existing state-of-the-art models. While many concrete implementations of the general idea are possible, we show that a simple design already achieves excellent results. Qualitatively, PointRend outputs crisp object boundaries in regions that are over-smoothed by previous methods. Quantitatively, PointRend yields significant gains on COCO and Cityscapes, for both instance and semantic segmentation. PointRend's efficiency enables output resolutions that are otherwise impractical in terms of memory or computation compared to existing approaches. Code has been made available at [this https URL](https://github.com/facebookresearch/detectron2/tree/main/projects/PointRend). + + + +
+ +
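To make the refinement idea in the abstract above concrete, here is a small, self-contained sketch of one PointRend-style subdivision step: upsample the coarse logits, select the most uncertain grid points, and re-predict only those points with a lightweight point head. This is an illustrative toy, not the detectron2 or MMSegmentation `PointHead`; the helper names (`uncertainty`, `refine_once`), the 2x subdivision factor, and the tensor shapes in the demo are assumptions made for the example.

```python
import torch
import torch.nn.functional as F


def uncertainty(logits):
    # Negative margin between the top-2 class scores: closer to zero = more uncertain.
    top2 = logits.topk(2, dim=1).values
    return top2[:, 1] - top2[:, 0]                      # (N, H, W)


def refine_once(coarse_logits, fine_feats, point_head, num_points=1024):
    """One subdivision step: upsample, pick uncertain points, re-predict them."""
    coarse_up = F.interpolate(
        coarse_logits, scale_factor=2, mode='bilinear', align_corners=False)
    n, c, h, w = coarse_up.shape
    # Pick the most uncertain positions on the upsampled grid.
    unc = uncertainty(coarse_up).view(n, -1)
    idx = unc.topk(num_points, dim=1).indices           # (N, P)
    ys = torch.div(idx, w, rounding_mode='floor')
    xs = idx % w
    grid = torch.stack([xs / (w - 1), ys / (h - 1)], dim=-1) * 2 - 1
    grid = grid.unsqueeze(2)                            # (N, P, 1, 2) in [-1, 1]
    # Point-wise features: coarse prediction + fine-grained backbone features.
    point_coarse = F.grid_sample(coarse_up, grid, align_corners=True).squeeze(-1)
    point_fine = F.grid_sample(fine_feats, grid, align_corners=True).squeeze(-1)
    # Re-predict only the selected points and write them back into the map.
    new_logits = point_head(torch.cat([point_fine, point_coarse], dim=1))  # (N, C, P)
    out = coarse_up.view(n, c, -1)
    out.scatter_(2, idx.unsqueeze(1).expand(-1, c, -1), new_logits)
    return out.view(n, c, h, w)


if __name__ == '__main__':
    num_classes = 19
    point_head = torch.nn.Conv1d(256 + num_classes, num_classes, kernel_size=1)
    coarse = torch.randn(1, num_classes, 64, 128)        # coarse segmentation logits
    feats = torch.randn(1, 256, 128, 256)                # higher-resolution features
    print(refine_once(coarse, feats, point_head).shape)  # torch.Size([1, 19, 128, 256])
```

In the actual models this step is applied iteratively at inference time until the desired output resolution is reached, while during training points are sampled with a bias toward uncertain regions.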
+ +## Citation + +```bibtex +@inproceedings{kirillov2020pointrend, + title={Pointrend: Image segmentation as rendering}, + author={Kirillov, Alexander and Wu, Yuxin and He, Kaiming and Girshick, Ross}, + booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition}, + pages={9799--9808}, + year={2020} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PointRend | R-50 | 512x1024 | 80000 | 3.1 | 8.48 | 76.47 | 78.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/point_rend/pointrend_r50_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r50_512x1024_80k_cityscapes/pointrend_r50_512x1024_80k_cityscapes_20200711_015821-bb1ff523.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r50_512x1024_80k_cityscapes/pointrend_r50_512x1024_80k_cityscapes-20200715_214714.log.json) | +| PointRend | R-101 | 512x1024 | 80000 | 4.2 | 7.00 | 78.30 | 79.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/point_rend/pointrend_r101_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r101_512x1024_80k_cityscapes/pointrend_r101_512x1024_80k_cityscapes_20200711_170850-d0ca84be.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r101_512x1024_80k_cityscapes/pointrend_r101_512x1024_80k_cityscapes-20200715_214824.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PointRend | R-50 | 512x512 | 160000 | 5.1 | 17.31 | 37.64 | 39.17 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/point_rend/pointrend_r50_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r50_512x512_160k_ade20k/pointrend_r50_512x512_160k_ade20k_20200807_232644-ac3febf2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r50_512x512_160k_ade20k/pointrend_r50_512x512_160k_ade20k-20200807_232644.log.json) | +| PointRend | R-101 | 512x512 | 160000 | 6.1 | 15.50 | 40.02 | 41.60 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/point_rend/pointrend_r101_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r101_512x512_160k_ade20k/pointrend_r101_512x512_160k_ade20k_20200808_030852-8834902a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r101_512x512_160k_ade20k/pointrend_r101_512x512_160k_ade20k-20200808_030852.log.json) | diff --git a/configs/point_rend/point_rend.yml b/configs/point_rend/point_rend.yml new file mode 100644 index 0000000000..a4539081f3 --- /dev/null +++ b/configs/point_rend/point_rend.yml @@ -0,0 +1,104 @@ +Collections: +- Name: PointRend + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://arxiv.org/abs/1912.08193 + Title: 'PointRend: Image Segmentation as Rendering' + README: configs/point_rend/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/point_head.py#L36 + Version: v0.17.0 + Converted From: + Code: https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend +Models: +- Name: pointrend_r50_4xb2-80k_cityscapes-512x1024 + In Collection: PointRend + Metadata: + backbone: R-50 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 117.92 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.47 + mIoU(ms+flip): 78.13 + Config: configs/point_rend/pointrend_r50_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r50_512x1024_80k_cityscapes/pointrend_r50_512x1024_80k_cityscapes_20200711_015821-bb1ff523.pth +- Name: pointrend_r101_4xb2-80k_cityscapes-512x1024 + In Collection: PointRend + Metadata: + backbone: R-101 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 142.86 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 4.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.3 + mIoU(ms+flip): 79.97 + Config: configs/point_rend/pointrend_r101_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r101_512x1024_80k_cityscapes/pointrend_r101_512x1024_80k_cityscapes_20200711_170850-d0ca84be.pth +- Name: pointrend_r50_4xb4-160k_ade20k-512x512 + In Collection: PointRend + Metadata: + backbone: R-50 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 57.77 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 37.64 + mIoU(ms+flip): 39.17 + Config: configs/point_rend/pointrend_r50_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r50_512x512_160k_ade20k/pointrend_r50_512x512_160k_ade20k_20200807_232644-ac3febf2.pth +- Name: pointrend_r101_4xb4-160k_ade20k-512x512 + In Collection: PointRend + Metadata: + backbone: R-101 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 64.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 
40.02 + mIoU(ms+flip): 41.6 + Config: configs/point_rend/pointrend_r101_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r101_512x512_160k_ade20k/pointrend_r101_512x512_160k_ade20k_20200808_030852-8834902a.pth diff --git a/configs/point_rend/pointrend_r101_4xb2-80k_cityscapes-512x1024.py b/configs/point_rend/pointrend_r101_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..ca2a19a196 --- /dev/null +++ b/configs/point_rend/pointrend_r101_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './pointrend_r50_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/point_rend/pointrend_r101_4xb4-160k_ade20k-512x512.py b/configs/point_rend/pointrend_r101_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..6729d3b672 --- /dev/null +++ b/configs/point_rend/pointrend_r101_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pointrend_r50_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/point_rend/pointrend_r50_4xb2-80k_cityscapes-512x1024.py b/configs/point_rend/pointrend_r50_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..fb005d8bee --- /dev/null +++ b/configs/point_rend/pointrend_r50_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/pointrend_r50.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) +param_scheduler = [ + dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=200), + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=200, + end=80000, + by_epoch=False, + ) +] diff --git a/configs/point_rend/pointrend_r50_4xb4-160k_ade20k-512x512.py b/configs/point_rend/pointrend_r50_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..d350fa686b --- /dev/null +++ b/configs/point_rend/pointrend_r50_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,46 @@ +_base_ = [ + '../_base_/models/pointrend_r50.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=[ + dict( + type='FPNHead', + in_channels=[256, 256, 256, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=-1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='PointHead', + in_channels=[256], + in_index=[0], + channels=256, + num_fcs=3, + coarse_pred_each_layer=True, + dropout_ratio=-1, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) +param_scheduler = [ + dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=200), + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=200, + end=160000, + by_epoch=False, + ) +] diff --git a/configs/poolformer/README.md b/configs/poolformer/README.md new file mode 100644 index 0000000000..3bdd2ba3f1 --- /dev/null +++ b/configs/poolformer/README.md @@ -0,0 +1,65 @@ +# PoolFormer + +[MetaFormer is 
Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Transformers have shown great potential in computer vision tasks. A common belief is their attention-based token mixer module contributes most to their competence. However, recent works show the attention-based module in transformers can be replaced by spatial MLPs and the resulted models still perform quite well. Based on this observation, we hypothesize that the general architecture of the transformers, instead of the specific token mixer module, is more essential to the model's performance. To verify this, we deliberately replace the attention module in transformers with an embarrassingly simple spatial pooling operator to conduct only the most basic token mixing. Surprisingly, we observe that the derived model, termed as PoolFormer, achieves competitive performance on multiple computer vision tasks. For example, on ImageNet-1K, PoolFormer achieves 82.1% top-1 accuracy, surpassing well-tuned vision transformer/MLP-like baselines DeiT-B/ResMLP-B24 by 0.3%/1.1% accuracy with 35%/52% fewer parameters and 48%/60% fewer MACs. The effectiveness of PoolFormer verifies our hypothesis and urges us to initiate the concept of "MetaFormer", a general architecture abstracted from transformers without specifying the token mixer. Based on the extensive experiments, we argue that MetaFormer is the key player in achieving superior results for recent transformer and MLP-like models on vision tasks. This work calls for more future research dedicated to improving MetaFormer instead of focusing on the token mixer modules. Additionally, our proposed PoolFormer could serve as a starting baseline for future MetaFormer architecture design. Code is available at [this https URL](https://github.com/sail-sg/poolformer) + + + +
+ +
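As a rough illustration of the abstract's claim that an extremely simple pooling operator can serve as the token mixer, below is a minimal PoolFormer-style block (a toy sketch, not the MMClassification `PoolFormer` backbone; LayerScale, stochastic depth and the patch-embedding stem are omitted, and the layer names are assumptions).

```python
import torch
import torch.nn as nn


class PoolTokenMixer(nn.Module):
    """Token mixing via plain average pooling (minus identity); no attention."""

    def __init__(self, pool_size=3):
        super().__init__()
        self.pool = nn.AvgPool2d(
            pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, x):                # x: (N, C, H, W)
        return self.pool(x) - x          # subtract the identity, as in the paper


class PoolFormerBlock(nn.Module):
    """MetaFormer structure: norm -> token mixer -> residual, norm -> MLP -> residual."""

    def __init__(self, dim, mlp_ratio=4, pool_size=3):
        super().__init__()
        self.norm1 = nn.GroupNorm(1, dim)            # per-sample channel normalization
        self.token_mixer = PoolTokenMixer(pool_size)
        self.norm2 = nn.GroupNorm(1, dim)
        hidden = dim * mlp_ratio
        self.mlp = nn.Sequential(                    # 1x1 convs act as a per-token MLP
            nn.Conv2d(dim, hidden, 1), nn.GELU(), nn.Conv2d(hidden, dim, 1))

    def forward(self, x):
        x = x + self.token_mixer(self.norm1(x))
        x = x + self.mlp(self.norm2(x))
        return x


if __name__ == '__main__':
    block = PoolFormerBlock(dim=64)
    feats = torch.randn(2, 64, 56, 56)
    print(block(feats).shape)                        # torch.Size([2, 64, 56, 56])
```

The `neck=dict(in_channels=[...])` overrides in the configs below only adapt the FPN neck to the channel widths of the chosen PoolFormer variant.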
+ +## Citation + +```bibtex +@inproceedings{yu2022metaformer, + title={Metaformer is actually what you need for vision}, + author={Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10819--10829}, + year={2022} +} +``` + +### Usage + +- PoolFormer backbone needs to install [MMClassification](https://github.com/open-mmlab/mmclassification) first, which has abundant backbones for downstream tasks. + +```shell +pip install "mmcls>=1.0.0rc0" +``` + +- The pretrained models could also be downloaded from [PoolFormer config of MMClassification](https://github.com/open-mmlab/mmclassification/tree/master/configs/poolformer). + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | pretrain | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | mIoU\* | mIoU\*(ms+flip) | config | download | +| ------ | -------------- | --------- | ----------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | ------ | --------------: | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FPN | PoolFormer-S12 | 512x512 | ImageNet-1K | 32 | 40000 | 4.17 | 23.48 | 36.68 | - | 37.07 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/poolformer/fpn_poolformer_s12_8xb4-40k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_s12_8x4_512x512_40k_ade20k/fpn_poolformer_s12_8x4_512x512_40k_ade20k_20220501_115154-b5aa2f49.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_s12_8x4_512x512_40k_ade20k/fpn_poolformer_s12_8x4_512x512_40k_ade20k_20220501_115154.log.json) | +| FPN | PoolFormer-S24 | 512x512 | ImageNet-1K | 32 | 40000 | 5.47 | 15.74 | 40.12 | - | 40.36 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/poolformer/fpn_poolformer_s24_8xb4-40k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_s24_8x4_512x512_40k_ade20k/fpn_poolformer_s24_8x4_512x512_40k_ade20k_20220503_222049-394a7cf7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_s24_8x4_512x512_40k_ade20k/fpn_poolformer_s24_8x4_512x512_40k_ade20k_20220503_222049.log.json) | +| FPN | PoolFormer-S36 | 512x512 | ImageNet-1K | 32 | 40000 | 6.77 | 11.34 | 41.61 | - | 41.81 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/poolformer/fpn_poolformer_s36_8xb4-40k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_s36_8x4_512x512_40k_ade20k/fpn_poolformer_s36_8x4_512x512_40k_ade20k_20220501_151122-b47e607d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_s36_8x4_512x512_40k_ade20k/fpn_poolformer_s36_8x4_512x512_40k_ade20k_20220501_151122.log.json) | +| FPN | PoolFormer-M36 | 512x512 | ImageNet-1K | 32 | 40000 | 8.59 | 
8.97 | 41.95 | - | 42.35 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/poolformer/fpn_poolformer_m36_8xb4-40k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_m36_8x4_512x512_40k_ade20k/fpn_poolformer_m36_8x4_512x512_40k_ade20k_20220501_164230-3dc83921.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_m36_8x4_512x512_40k_ade20k/fpn_poolformer_m36_8x4_512x512_40k_ade20k_20220501_164230.log.json) | +| FPN | PoolFormer-M48 | 512x512 | ImageNet-1K | 32 | 40000 | 10.48 | 6.69 | 42.43 | - | 42.76 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/poolformer/fpn_poolformer_m48_8xb4-40k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_m48_8x4_512x512_40k_ade20k/fpn_poolformer_m48_8x4_512x512_40k_ade20k_20220504_003923-64168d3b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_m48_8x4_512x512_40k_ade20k/fpn_poolformer_m48_8x4_512x512_40k_ade20k_20220504_003923.log.json) | + +Note: + +- We replace `AlignedResize` in original PoolFormer implementation to `Resize + ResizeToMultiple`. + +- `mIoU` with * is collected when `Resize + ResizeToMultiple` is adopted in `test_pipeline`, so do `mIoU` in logs. + +- The Test Time Augmentation i.e., "ms+flip" in MMSegmentation v1.x is developing, stay tuned! diff --git a/configs/poolformer/fpn_poolformer_m36_8xb4-40k_ade20k-512x512.py b/configs/poolformer/fpn_poolformer_m36_8xb4-40k_ade20k-512x512.py new file mode 100644 index 0000000000..4100eb9923 --- /dev/null +++ b/configs/poolformer/fpn_poolformer_m36_8xb4-40k_ade20k-512x512.py @@ -0,0 +1,11 @@ +_base_ = './fpn_poolformer_s12_8xb4-40k_ade20k-512x512.py' +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m36_3rdparty_32xb128_in1k_20220414-c55e0949.pth' # noqa + +# model settings +model = dict( + backbone=dict( + arch='m36', + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + neck=dict(in_channels=[96, 192, 384, 768])) diff --git a/configs/poolformer/fpn_poolformer_m48_8xb4-40k_ade20k-512x512.py b/configs/poolformer/fpn_poolformer_m48_8xb4-40k_ade20k-512x512.py new file mode 100644 index 0000000000..cfc49ccbdb --- /dev/null +++ b/configs/poolformer/fpn_poolformer_m48_8xb4-40k_ade20k-512x512.py @@ -0,0 +1,11 @@ +_base_ = './fpn_poolformer_s12_8xb4-40k_ade20k-512x512.py' +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m48_3rdparty_32xb128_in1k_20220414-9378f3eb.pth' # noqa + +# model settings +model = dict( + backbone=dict( + arch='m48', + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + neck=dict(in_channels=[96, 192, 384, 768])) diff --git a/configs/poolformer/fpn_poolformer_s12_8xb4-40k_ade20k-512x512.py b/configs/poolformer/fpn_poolformer_s12_8xb4-40k_ade20k-512x512.py new file mode 100644 index 0000000000..c0b15312fe --- /dev/null +++ b/configs/poolformer/fpn_poolformer_s12_8xb4-40k_ade20k-512x512.py @@ -0,0 +1,91 @@ +_base_ = [ + '../_base_/models/fpn_poolformer_s12.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] + +# dataset settings +dataset_type = 'ADE20KDataset' +data_root = 'data/ade/ADEChallengeData2016' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +data_preprocessor = 
dict(size=crop_size) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict( + type='RandomResize', + scale=(2048, 512), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 512), keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs') +] + +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=50, + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/training', + seg_map_path='annotations/training'), + pipeline=train_pipeline))) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/validation', + seg_map_path='annotations/validation'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator + +# model settings +model = dict( + data_preprocessor=data_preprocessor, + neck=dict(in_channels=[64, 128, 320, 512]), + decode_head=dict(num_classes=150)) + +# optimizer +# optimizer = dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.0001) +# optimizer_config = dict() +# # learning policy +# lr_config = dict(policy='poly', power=0.9, min_lr=0.0, by_epoch=False) +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001)) +param_scheduler = [ + dict( + type='PolyLR', + power=0.9, + begin=0, + end=40000, + eta_min=0.0, + by_epoch=False, + ) +] diff --git a/configs/poolformer/fpn_poolformer_s24_8xb4-40k_ade20k-512x512.py b/configs/poolformer/fpn_poolformer_s24_8xb4-40k_ade20k-512x512.py new file mode 100644 index 0000000000..1f9d24cd41 --- /dev/null +++ b/configs/poolformer/fpn_poolformer_s24_8xb4-40k_ade20k-512x512.py @@ -0,0 +1,9 @@ +_base_ = './fpn_poolformer_s12_8xb4-40k_ade20k-512x512.py' +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s24_3rdparty_32xb128_in1k_20220414-d7055904.pth' # noqa +# model settings +model = dict( + backbone=dict( + arch='s24', + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.'))) diff --git a/configs/poolformer/fpn_poolformer_s36_8x4_512x512_40k_ade20k.py b/configs/poolformer/fpn_poolformer_s36_8x4_512x512_40k_ade20k.py new file mode 100644 index 0000000000..231dcf6c20 --- /dev/null +++ b/configs/poolformer/fpn_poolformer_s36_8x4_512x512_40k_ade20k.py @@ -0,0 +1,10 @@ +_base_ = './fpn_poolformer_s12_8xb4-40k_ade20k-512x512.py' +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s36_3rdparty_32xb128_in1k_20220414-d78ff3e8.pth' # noqa + +# model settings +model = dict( + backbone=dict( + arch='s36', + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.'))) diff --git 
a/configs/poolformer/poolformer.yml b/configs/poolformer/poolformer.yml new file mode 100644 index 0000000000..fa5fc30125 --- /dev/null +++ b/configs/poolformer/poolformer.yml @@ -0,0 +1,106 @@ +Models: +- Name: fpn_poolformer_s12_8xb4-40k_ade20k-512x512 + In Collection: FPN + Metadata: + backbone: PoolFormer-S12 + crop size: (512,512) + lr schd: 40000 + inference time (ms/im): + - value: 42.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.17 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 36.68 + Config: configs/poolformer/fpn_poolformer_s12_8xb4-40k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_s12_8x4_512x512_40k_ade20k/fpn_poolformer_s12_8x4_512x512_40k_ade20k_20220501_115154-b5aa2f49.pth +- Name: fpn_poolformer_s24_8xb4-40k_ade20k-512x512 + In Collection: FPN + Metadata: + backbone: PoolFormer-S24 + crop size: (512,512) + lr schd: 40000 + inference time (ms/im): + - value: 63.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.47 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 40.12 + Config: configs/poolformer/fpn_poolformer_s24_8xb4-40k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_s24_8x4_512x512_40k_ade20k/fpn_poolformer_s24_8x4_512x512_40k_ade20k_20220503_222049-394a7cf7.pth +- Name: '' + In Collection: FPN + Metadata: + backbone: PoolFormer-S36 + crop size: (512,512) + lr schd: 40000 + inference time (ms/im): + - value: 88.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.77 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.61 + Config: '' + Weights: '' +- Name: fpn_poolformer_m36_8xb4-40k_ade20k-512x512 + In Collection: FPN + Metadata: + backbone: PoolFormer-M36 + crop size: (512,512) + lr schd: 40000 + inference time (ms/im): + - value: 111.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.59 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.95 + Config: configs/poolformer/fpn_poolformer_m36_8xb4-40k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_m36_8x4_512x512_40k_ade20k/fpn_poolformer_m36_8x4_512x512_40k_ade20k_20220501_164230-3dc83921.pth +- Name: fpn_poolformer_m48_8xb4-40k_ade20k-512x512 + In Collection: FPN + Metadata: + backbone: PoolFormer-M48 + crop size: (512,512) + lr schd: 40000 + inference time (ms/im): + - value: 149.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.48 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.43 + Config: configs/poolformer/fpn_poolformer_m48_8xb4-40k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/poolformer/fpn_poolformer_m48_8x4_512x512_40k_ade20k/fpn_poolformer_m48_8x4_512x512_40k_ade20k_20220504_003923-64168d3b.pth diff --git a/configs/psanet/README.md b/configs/psanet/README.md index d6d94e36d2..7182e500a1 100644 --- a/configs/psanet/README.md +++ b/configs/psanet/README.md @@ -1,7 +1,30 @@ -# PSANet: Point-wise Spatial Attention Network for Scene Parsing +# PSANet + +[PSANet: Point-wise Spatial Attention Network for Scene 
Parsing](https://openaccess.thecvf.com/content_ECCV_2018/papers/Hengshuang_Zhao_PSANet_Point-wise_Spatial_ECCV_2018_paper.pdf) ## Introduction -``` + + + +Official Repo + +Code Snippet + +## Abstract + + + +We notice information flow in convolutional neural networks is restricted inside local neighborhood regions due to the physical design of convolutional filters, which limits the overall understanding of complex scenes. In this paper, we propose the point-wise spatial attention network (PSANet) to relax the local neighborhood constraint. Each position on the feature map is connected to all the other ones through a self-adaptively learned attention mask. Moreover, information propagation in bi-direction for scene parsing is enabled. Information at other positions can be collected to help the prediction of the current position and vice versa, information at the current position can be distributed to assist the prediction of other ones. Our proposed approach achieves top performance on various competitive scene parsing datasets, including ADE20K, PASCAL VOC 2012 and Cityscapes, demonstrating its effectiveness and generality. + + + +
+ +
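As a rough illustration of the idea in the abstract above (each position attending to every other position through a learned mask), here is a minimal PyTorch sketch of the "collect" direction only. It is not the `PSAHead` used by these configs; the class name, fixed `mask_size`, and the 1x1 mask convolution are simplifications made up for this example.

```python
# Toy sketch of point-wise spatial attention ("collect" branch), assuming a
# fixed feature-map size; NOT the mmseg PSAHead implementation.
import torch
import torch.nn as nn
import torch.nn.functional as F


class ToyPointwiseSpatialAttention(nn.Module):

    def __init__(self, channels: int, mask_size: tuple):
        super().__init__()
        self.mask_size = mask_size  # (H, W) the module expects
        num_positions = mask_size[0] * mask_size[1]
        # For every position, predict one attention logit towards each position.
        self.mask_conv = nn.Conv2d(channels, num_positions, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        n, c, h, w = x.shape
        assert (h, w) == self.mask_size, 'toy version only supports a fixed size'
        # logits[n, k, p]: how much position p attends to source position k
        logits = self.mask_conv(x).view(n, h * w, h * w)
        attn = F.softmax(logits, dim=1)  # normalize over source positions k
        feats = x.view(n, c, h * w)      # (N, C, H*W)
        # Collect: each position p aggregates features from every position k.
        collected = torch.einsum('nkp,nck->ncp', attn, feats)
        return collected.view(n, c, h, w)


if __name__ == '__main__':
    module = ToyPointwiseSpatialAttention(channels=16, mask_size=(8, 8))
    out = module(torch.randn(2, 16, 8, 8))
    print(out.shape)  # torch.Size([2, 16, 8, 8])
```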
+ +## Citation + +```bibtex @inproceedings{zhao2018psanet, title={Psanet: Point-wise spatial attention network for scene parsing}, author={Zhao, Hengshuang and Zhang, Yi and Liu, Shu and Shi, Jianping and Change Loy, Chen and Lin, Dahua and Jia, Jiaya}, @@ -14,29 +37,32 @@ ## Results and models ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| PSANet | R-50-D8 | 512x1024 | 40000 | 7 | 3.17 | 77.63 | 79.04 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_40k_cityscapes/psanet_r50-d8_512x1024_40k_cityscapes_20200606_103117-99fac37c.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_40k_cityscapes/psanet_r50-d8_512x1024_40k_cityscapes_20200606_103117.log.json) | -| PSANet | R-101-D8 | 512x1024 | 40000 | 10.5 | 2.20 | 79.14 | 80.19 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_40k_cityscapes/psanet_r101-d8_512x1024_40k_cityscapes_20200606_001418-27b9cfa7.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_40k_cityscapes/psanet_r101-d8_512x1024_40k_cityscapes_20200606_001418.log.json) | -| PSANet | R-50-D8 | 769x769 | 40000 | 7.9 | 1.40 | 77.99 | 79.64 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_40k_cityscapes/psanet_r50-d8_769x769_40k_cityscapes_20200530_033717-d5365506.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_40k_cityscapes/psanet_r50-d8_769x769_40k_cityscapes_20200530_033717.log.json) | -| PSANet | R-101-D8 | 769x769 | 40000 | 11.9 | 0.98 | 78.43 | 80.26 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_40k_cityscapes/psanet_r101-d8_769x769_40k_cityscapes_20200530_035107-997da1e6.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_40k_cityscapes/psanet_r101-d8_769x769_40k_cityscapes_20200530_035107.log.json) | -| PSANet | R-50-D8 | 512x1024 | 80000 | - | - | 77.24 | 78.69 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_80k_cityscapes/psanet_r50-d8_512x1024_80k_cityscapes_20200606_161842-ab60a24f.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_80k_cityscapes/psanet_r50-d8_512x1024_80k_cityscapes_20200606_161842.log.json) | -| PSANet | R-101-D8 | 512x1024 | 80000 | - | - | 79.31 | 80.53 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_80k_cityscapes/psanet_r101-d8_512x1024_80k_cityscapes_20200606_161823-0f73a169.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_80k_cityscapes/psanet_r101-d8_512x1024_80k_cityscapes_20200606_161823.log.json) 
| -| PSANet | R-50-D8 | 769x769 | 80000 | - | - | 79.31 | 80.91 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_80k_cityscapes/psanet_r50-d8_769x769_80k_cityscapes_20200606_225134-fe42f49e.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_80k_cityscapes/psanet_r50-d8_769x769_80k_cityscapes_20200606_225134.log.json) | -| PSANet | R-101-D8 | 769x769 | 80000 | - | - | 79.69 | 80.89 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_80k_cityscapes/psanet_r101-d8_769x769_80k_cityscapes_20200606_214550-7665827b.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_80k_cityscapes/psanet_r101-d8_769x769_80k_cityscapes_20200606_214550.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSANet | R-50-D8 | 512x1024 | 40000 | 7 | 3.17 | 77.63 | 79.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_40k_cityscapes/psanet_r50-d8_512x1024_40k_cityscapes_20200606_103117-99fac37c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_40k_cityscapes/psanet_r50-d8_512x1024_40k_cityscapes_20200606_103117.log.json) | +| PSANet | R-101-D8 | 512x1024 | 40000 | 10.5 | 2.20 | 79.14 | 80.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_40k_cityscapes/psanet_r101-d8_512x1024_40k_cityscapes_20200606_001418-27b9cfa7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_40k_cityscapes/psanet_r101-d8_512x1024_40k_cityscapes_20200606_001418.log.json) | +| PSANet | R-50-D8 | 769x769 | 40000 | 7.9 | 1.40 | 77.99 | 79.64 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_40k_cityscapes/psanet_r50-d8_769x769_40k_cityscapes_20200530_033717-d5365506.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_40k_cityscapes/psanet_r50-d8_769x769_40k_cityscapes_20200530_033717.log.json) | +| PSANet | R-101-D8 | 769x769 | 40000 | 11.9 | 0.98 | 78.43 | 80.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r101-d8_4xb2-40k_cityscapes-769x769.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_40k_cityscapes/psanet_r101-d8_769x769_40k_cityscapes_20200530_035107-997da1e6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_40k_cityscapes/psanet_r101-d8_769x769_40k_cityscapes_20200530_035107.log.json) | +| PSANet | R-50-D8 | 512x1024 | 80000 | - | - | 77.24 | 78.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_80k_cityscapes/psanet_r50-d8_512x1024_80k_cityscapes_20200606_161842-ab60a24f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_80k_cityscapes/psanet_r50-d8_512x1024_80k_cityscapes_20200606_161842.log.json) | +| PSANet | R-101-D8 | 512x1024 | 80000 | - | - | 79.31 | 80.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_80k_cityscapes/psanet_r101-d8_512x1024_80k_cityscapes_20200606_161823-0f73a169.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_80k_cityscapes/psanet_r101-d8_512x1024_80k_cityscapes_20200606_161823.log.json) | +| PSANet | R-50-D8 | 769x769 | 80000 | - | - | 79.31 | 80.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_80k_cityscapes/psanet_r50-d8_769x769_80k_cityscapes_20200606_225134-fe42f49e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_80k_cityscapes/psanet_r50-d8_769x769_80k_cityscapes_20200606_225134.log.json) | +| PSANet | R-101-D8 | 769x769 | 80000 | - | - | 79.69 | 80.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_80k_cityscapes/psanet_r101-d8_769x769_80k_cityscapes_20200606_214550-7665827b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_80k_cityscapes/psanet_r101-d8_769x769_80k_cityscapes_20200606_214550.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| PSANet | R-50-D8 | 512x512 | 80000 | 9 | 18.91 | 41.14 | 41.91 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_80k_ade20k/psanet_r50-d8_512x512_80k_ade20k_20200614_144141-835e4b97.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_80k_ade20k/psanet_r50-d8_512x512_80k_ade20k_20200614_144141.log.json) | -| PSANet | R-101-D8 | 512x512 | 80000 | 
12.5 | 13.13 | 43.80 | 44.75 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_80k_ade20k/psanet_r101-d8_512x512_80k_ade20k_20200614_185117-1fab60d4.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_80k_ade20k/psanet_r101-d8_512x512_80k_ade20k_20200614_185117.log.json) | -| PSANet | R-50-D8 | 512x512 | 160000 | - | - | 41.67 | 42.95 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_160k_ade20k/psanet_r50-d8_512x512_160k_ade20k_20200615_161258-148077dd.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_160k_ade20k/psanet_r50-d8_512x512_160k_ade20k_20200615_161258.log.json) | -| PSANet | R-101-D8 | 512x512 | 160000 | - | - | 43.74 | 45.38 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_160k_ade20k/psanet_r101-d8_512x512_160k_ade20k_20200615_161537-dbfa564c.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_160k_ade20k/psanet_r101-d8_512x512_160k_ade20k_20200615_161537.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSANet | R-50-D8 | 512x512 | 80000 | 9 | 18.91 | 41.14 | 41.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_80k_ade20k/psanet_r50-d8_512x512_80k_ade20k_20200614_144141-835e4b97.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_80k_ade20k/psanet_r50-d8_512x512_80k_ade20k_20200614_144141.log.json) | +| PSANet | R-101-D8 | 512x512 | 80000 | 12.5 | 13.13 | 43.80 | 44.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_80k_ade20k/psanet_r101-d8_512x512_80k_ade20k_20200614_185117-1fab60d4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_80k_ade20k/psanet_r101-d8_512x512_80k_ade20k_20200614_185117.log.json) | +| PSANet | R-50-D8 | 512x512 | 160000 | - | - | 41.67 | 42.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_160k_ade20k/psanet_r50-d8_512x512_160k_ade20k_20200615_161258-148077dd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_160k_ade20k/psanet_r50-d8_512x512_160k_ade20k_20200615_161258.log.json) | +| PSANet | R-101-D8 | 512x512 | 160000 | - | - | 43.74 | 45.38 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_160k_ade20k/psanet_r101-d8_512x512_160k_ade20k_20200615_161537-dbfa564c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_160k_ade20k/psanet_r101-d8_512x512_160k_ade20k_20200615_161537.log.json) | ### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| PSANet | R-50-D8 | 512x512 | 20000 | 6.9 | 18.24 | 76.39 | 77.34 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_20k_voc12aug/psanet_r50-d8_512x512_20k_voc12aug_20200617_102413-2f1bbaa1.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_20k_voc12aug/psanet_r50-d8_512x512_20k_voc12aug_20200617_102413.log.json) | -| PSANet | R-101-D8 | 512x512 | 20000 | 10.4 | 12.63 | 77.91 | 79.30 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_20k_voc12aug/psanet_r101-d8_512x512_20k_voc12aug_20200617_110624-946fef11.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_20k_voc12aug/psanet_r101-d8_512x512_20k_voc12aug_20200617_110624.log.json) | -| PSANet | R-50-D8 | 512x512 | 40000 | - | - | 76.30 | 77.35 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_40k_voc12aug/psanet_r50-d8_512x512_40k_voc12aug_20200613_161946-f596afb5.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_40k_voc12aug/psanet_r50-d8_512x512_40k_voc12aug_20200613_161946.log.json) | -| PSANet | R-101-D8 | 512x512 | 40000 | - | - | 77.73 | 79.05 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_40k_voc12aug/psanet_r101-d8_512x512_40k_voc12aug_20200613_161946-1f560f9e.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_40k_voc12aug/psanet_r101-d8_512x512_40k_voc12aug_20200613_161946.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSANet | R-50-D8 | 512x512 | 20000 | 6.9 | 18.24 | 76.39 | 77.34 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r50-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_20k_voc12aug/psanet_r50-d8_512x512_20k_voc12aug_20200617_102413-2f1bbaa1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_20k_voc12aug/psanet_r50-d8_512x512_20k_voc12aug_20200617_102413.log.json) | +| PSANet | R-101-D8 | 512x512 | 20000 | 10.4 | 12.63 | 77.91 | 79.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r101-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_20k_voc12aug/psanet_r101-d8_512x512_20k_voc12aug_20200617_110624-946fef11.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_20k_voc12aug/psanet_r101-d8_512x512_20k_voc12aug_20200617_110624.log.json) | +| PSANet | R-50-D8 | 512x512 | 40000 | - | - | 76.30 | 77.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r50-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_40k_voc12aug/psanet_r50-d8_512x512_40k_voc12aug_20200613_161946-f596afb5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_40k_voc12aug/psanet_r50-d8_512x512_40k_voc12aug_20200613_161946.log.json) | +| PSANet | R-101-D8 | 512x512 | 40000 | - | - | 77.73 | 79.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/psanet/psanet_r101-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_40k_voc12aug/psanet_r101-d8_512x512_40k_voc12aug_20200613_161946-1f560f9e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_40k_voc12aug/psanet_r101-d8_512x512_40k_voc12aug_20200613_161946.log.json) | diff --git a/configs/psanet/psanet.yml b/configs/psanet/psanet.yml new file mode 100644 index 0000000000..fca1ac1b40 --- /dev/null +++ b/configs/psanet/psanet.yml @@ -0,0 +1,305 @@ +Collections: +- Name: PSANet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://openaccess.thecvf.com/content_ECCV_2018/papers/Hengshuang_Zhao_PSANet_Point-wise_Spatial_ECCV_2018_paper.pdf + Title: 'PSANet: Point-wise Spatial Attention Network for Scene Parsing' + README: configs/psanet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/psa_head.py#L18 + Version: v0.17.0 + Converted From: + Code: https://github.com/hszhao/PSANet +Models: +- Name: psanet_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 315.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.63 + mIoU(ms+flip): 79.04 + Config: configs/psanet/psanet_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_40k_cityscapes/psanet_r50-d8_512x1024_40k_cityscapes_20200606_103117-99fac37c.pth +- Name: psanet_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: PSANet + Metadata: + backbone: 
R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 454.55 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 10.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.14 + mIoU(ms+flip): 80.19 + Config: configs/psanet/psanet_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_40k_cityscapes/psanet_r101-d8_512x1024_40k_cityscapes_20200606_001418-27b9cfa7.pth +- Name: psanet_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 714.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 7.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.99 + mIoU(ms+flip): 79.64 + Config: configs/psanet/psanet_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_40k_cityscapes/psanet_r50-d8_769x769_40k_cityscapes_20200530_033717-d5365506.pth +- Name: psanet_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 1020.41 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 11.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.43 + mIoU(ms+flip): 80.26 + Config: configs/psanet/psanet_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_40k_cityscapes/psanet_r101-d8_769x769_40k_cityscapes_20200530_035107-997da1e6.pth +- Name: psanet_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.24 + mIoU(ms+flip): 78.69 + Config: configs/psanet/psanet_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_80k_cityscapes/psanet_r50-d8_512x1024_80k_cityscapes_20200606_161842-ab60a24f.pth +- Name: psanet_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.31 + mIoU(ms+flip): 80.53 + Config: configs/psanet/psanet_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_80k_cityscapes/psanet_r101-d8_512x1024_80k_cityscapes_20200606_161823-0f73a169.pth +- Name: psanet_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.31 + mIoU(ms+flip): 80.91 + Config: configs/psanet/psanet_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_80k_cityscapes/psanet_r50-d8_769x769_80k_cityscapes_20200606_225134-fe42f49e.pth +- Name: psanet_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: 
(769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.69 + mIoU(ms+flip): 80.89 + Config: configs/psanet/psanet_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_80k_cityscapes/psanet_r101-d8_769x769_80k_cityscapes_20200606_214550-7665827b.pth +- Name: psanet_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 52.88 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.14 + mIoU(ms+flip): 41.91 + Config: configs/psanet/psanet_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_80k_ade20k/psanet_r50-d8_512x512_80k_ade20k_20200614_144141-835e4b97.pth +- Name: psanet_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 76.16 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.8 + mIoU(ms+flip): 44.75 + Config: configs/psanet/psanet_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_80k_ade20k/psanet_r101-d8_512x512_80k_ade20k_20200614_185117-1fab60d4.pth +- Name: psanet_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.67 + mIoU(ms+flip): 42.95 + Config: configs/psanet/psanet_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_160k_ade20k/psanet_r50-d8_512x512_160k_ade20k_20200615_161258-148077dd.pth +- Name: psanet_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.74 + mIoU(ms+flip): 45.38 + Config: configs/psanet/psanet_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_160k_ade20k/psanet_r101-d8_512x512_160k_ade20k_20200615_161537-dbfa564c.pth +- Name: psanet_r50-d8_4xb4-20k_voc12aug-512x512 + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 54.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.9 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.39 + mIoU(ms+flip): 77.34 + Config: configs/psanet/psanet_r50-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_20k_voc12aug/psanet_r50-d8_512x512_20k_voc12aug_20200617_102413-2f1bbaa1.pth +- Name: psanet_r101-d8_4xb4-20k_voc12aug-512x512 + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 79.18 + hardware: V100 + backend: PyTorch + batch 
size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.4 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.91 + mIoU(ms+flip): 79.3 + Config: configs/psanet/psanet_r101-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_20k_voc12aug/psanet_r101-d8_512x512_20k_voc12aug_20200617_110624-946fef11.pth +- Name: psanet_r50-d8_4xb4-40k_voc12aug-512x512 + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.3 + mIoU(ms+flip): 77.35 + Config: configs/psanet/psanet_r50-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_40k_voc12aug/psanet_r50-d8_512x512_40k_voc12aug_20200613_161946-f596afb5.pth +- Name: psanet_r101-d8_4xb4-40k_voc12aug-512x512 + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.73 + mIoU(ms+flip): 79.05 + Config: configs/psanet/psanet_r101-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_40k_voc12aug/psanet_r101-d8_512x512_40k_voc12aug_20200613_161946-1f560f9e.pth diff --git a/configs/psanet/psanet_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/psanet/psanet_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..e69cf42703 --- /dev/null +++ b/configs/psanet/psanet_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/psanet/psanet_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..e543099842 --- /dev/null +++ b/configs/psanet/psanet_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/psanet/psanet_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..b8636384d0 --- /dev/null +++ b/configs/psanet/psanet_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/psanet/psanet_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..097b1c58ce --- /dev/null +++ b/configs/psanet/psanet_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/psanet/psanet_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..ac86306cb6 --- /dev/null +++ b/configs/psanet/psanet_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', 
backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_4xb4-20k_voc12aug-512x512.py b/configs/psanet/psanet_r101-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..abd8e56512 --- /dev/null +++ b/configs/psanet/psanet_r101-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_4xb4-40k_voc12aug-512x512.py b/configs/psanet/psanet_r101-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..d3154a8f14 --- /dev/null +++ b/configs/psanet/psanet_r101-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_4xb4-40k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/psanet/psanet_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..b34d4248e8 --- /dev/null +++ b/configs/psanet/psanet_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_512x1024_40k_cityscapes.py b/configs/psanet/psanet_r101-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 69d212f158..0000000000 --- a/configs/psanet/psanet_r101-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './psanet_r50-d8_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py b/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index bc25d6aaf6..0000000000 --- a/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './psanet_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_512x512_160k_ade20k.py b/configs/psanet/psanet_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index 7f6795e5ef..0000000000 --- a/configs/psanet/psanet_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './psanet_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py b/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 1a3c43495b..0000000000 --- a/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './psanet_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_512x512_40k_voc12aug.py b/configs/psanet/psanet_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index f62eef9773..0000000000 --- a/configs/psanet/psanet_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './psanet_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py b/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index f8865a7c4d..0000000000 --- a/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ 
= './psanet_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_769x769_40k_cityscapes.py b/configs/psanet/psanet_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index ffc99f0109..0000000000 --- a/configs/psanet/psanet_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './psanet_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py b/configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 6a9efc55ad..0000000000 --- a/configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './psanet_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/psanet/psanet_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/psanet/psanet_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..82463aaeba --- /dev/null +++ b/configs/psanet/psanet_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/psanet/psanet_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/psanet/psanet_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..af44b3080f --- /dev/null +++ b/configs/psanet/psanet_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/psanet/psanet_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/psanet/psanet_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..5e5052f8c5 --- /dev/null +++ b/configs/psanet/psanet_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/psanet/psanet_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/psanet/psanet_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..0eaf830703 --- /dev/null +++ b/configs/psanet/psanet_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git 
a/configs/psanet/psanet_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/psanet/psanet_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..de13296aff --- /dev/null +++ b/configs/psanet/psanet_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(mask_size=(66, 66), num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/psanet/psanet_r50-d8_4xb4-20k_voc12aug-512x512.py b/configs/psanet/psanet_r50-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..45d8762a09 --- /dev/null +++ b/configs/psanet/psanet_r50-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/psanet/psanet_r50-d8_4xb4-40k_voc12aug-512x512.py b/configs/psanet/psanet_r50-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..b5d99d1000 --- /dev/null +++ b/configs/psanet/psanet_r50-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/psanet/psanet_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/psanet/psanet_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..c3b65287e5 --- /dev/null +++ b/configs/psanet/psanet_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(mask_size=(66, 66), num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py b/configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 6671fcb4bf..0000000000 --- a/configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/psanet/psanet_r50-d8_512x1024_80k_cityscapes.py b/configs/psanet/psanet_r50-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index a441013a4c..0000000000 --- a/configs/psanet/psanet_r50-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/psanet/psanet_r50-d8_512x512_160k_ade20k.py b/configs/psanet/psanet_r50-d8_512x512_160k_ade20k.py 
deleted file mode 100644 index d177d17e17..0000000000 --- a/configs/psanet/psanet_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = [ - '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(mask_size=(66, 66), num_classes=150), - auxiliary_head=dict(num_classes=150)) -test_cfg = dict(mode='whole') diff --git a/configs/psanet/psanet_r50-d8_512x512_20k_voc12aug.py b/configs/psanet/psanet_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index af06cb66cc..0000000000 --- a/configs/psanet/psanet_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/psanet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/psanet/psanet_r50-d8_512x512_40k_voc12aug.py b/configs/psanet/psanet_r50-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 803c42da35..0000000000 --- a/configs/psanet/psanet_r50-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/psanet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py b/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 58a18a043a..0000000000 --- a/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = [ - '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(mask_size=(66, 66), num_classes=150), - auxiliary_head=dict(num_classes=150)) -test_cfg = dict(mode='whole') diff --git a/configs/psanet/psanet_r50-d8_769x769_40k_cityscapes.py b/configs/psanet/psanet_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 2068667b0f..0000000000 --- a/configs/psanet/psanet_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/psanet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py b/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 8745f5dbad..0000000000 --- a/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/psanet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/pspnet/README.md b/configs/pspnet/README.md index ec31feeb8a..2194a28e53 100644 --- a/configs/pspnet/README.md +++ b/configs/pspnet/README.md @@ -1,7 +1,30 @@ -# Pyramid Scene Parsing Network +# PSPNet + +[Pyramid Scene Parsing 
Network](https://arxiv.org/abs/1612.01105) ## Introduction -``` + + + +Official Repo + +Code Snippet + +## Abstract + + + +Scene parsing is challenging for unrestricted open vocabulary and diverse scenes. In this paper, we exploit the capability of global context information by different-region-based context aggregation through our pyramid pooling module together with the proposed pyramid scene parsing network (PSPNet). Our global prior representation is effective to produce good quality results on the scene parsing task, while PSPNet provides a superior framework for pixel-level prediction tasks. The proposed approach achieves state-of-the-art performance on various datasets. It came first in ImageNet scene parsing challenge 2016, PASCAL VOC 2012 benchmark and Cityscapes benchmark. A single PSPNet yields new record of mIoU accuracy 85.4% on PASCAL VOC 2012 and accuracy 80.2% on Cityscapes. + + + +
+ +
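To make the "different-region-based context aggregation" in the abstract above concrete, here is a minimal sketch of a pyramid pooling module, assuming the common (1, 2, 3, 6) bin sizes. It is an illustration only, not the `PSPHead` these configs actually use.

```python
# Minimal pyramid pooling sketch: pool at several scales, project, upsample,
# and concatenate with the original feature map.
import torch
import torch.nn as nn
import torch.nn.functional as F


class ToyPyramidPooling(nn.Module):

    def __init__(self, in_channels: int, pool_scales=(1, 2, 3, 6)):
        super().__init__()
        self.stages = nn.ModuleList([
            nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),  # context pooled into scale x scale bins
                nn.Conv2d(in_channels, in_channels // len(pool_scales), 1),
                nn.ReLU(inplace=True),
            ) for scale in pool_scales
        ])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h, w = x.shape[2:]
        # Upsample each pooled branch back to the input resolution.
        pyramids = [
            F.interpolate(
                stage(x), size=(h, w), mode='bilinear', align_corners=False)
            for stage in self.stages
        ]
        return torch.cat([x] + pyramids, dim=1)


if __name__ == '__main__':
    ppm = ToyPyramidPooling(in_channels=64)
    out = ppm(torch.randn(2, 64, 32, 32))
    print(out.shape)  # torch.Size([2, 128, 32, 32])
```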
+ +## Citation + +```bibtex @inproceedings{zhao2017pspnet, title={Pyramid Scene Parsing Network}, author={Zhao, Hengshuang and Shi, Jianping and Qi, Xiaojuan and Wang, Xiaogang and Jia, Jiaya}, @@ -10,32 +33,145 @@ } ``` +```bibtex +@article{wightman2021resnet, + title={Resnet strikes back: An improved training procedure in timm}, + author={Wightman, Ross and Touvron, Hugo and J{\'e}gou, Herv{\'e}}, + journal={arXiv preprint arXiv:2110.00476}, + year={2021} +} +``` + ## Results and models ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| PSPNet | R-50-D8 | 512x1024 | 40000 | 6.1 | 4.07 | 77.85 | 79.18 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338.log.json) | -| PSPNet | R-101-D8 | 512x1024 | 40000 | 9.6 | 2.68 | 78.34 | 79.74 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751.log.json) | -| PSPNet | R-50-D8 | 769x769 | 40000 | 6.9 | 1.76 | 78.26 | 79.88 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725-86638686.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725.log.json) | -| PSPNet | R-101-D8 | 769x769 | 40000 | 10.9 | 1.15 | 79.08 | 80.28 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753-61c6f5be.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753.log.json) | -| PSPNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.55 | 79.79 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131.log.json) | -| PSPNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.76 | 81.01 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211.log.json) | -| PSPNet | R-50-D8 | 769x769 | 80000 | - | - | 79.59 | 80.69 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121-5ccf03dd.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121.log.json) | -| PSPNet | R-101-D8 | 769x769 | 80000 | - | - | 79.77 | 81.06 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055-dba412fa.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------------- | ------------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PSPNet | R-50-D8 | 512x1024 | 40000 | 6.1 | 4.07 | 77.85 | 79.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338.log.json) | +| PSPNet | R-101-D8 | 512x1024 | 40000 | 9.6 | 2.68 | 78.34 | 79.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751.log.json) | +| PSPNet | R-50-D8 | 769x769 | 40000 | 6.9 | 1.76 | 78.26 | 79.88 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725-86638686.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725.log.json) | +| PSPNet | R-101-D8 | 769x769 | 40000 | 10.9 | 1.15 | 79.08 | 80.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753-61c6f5be.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753.log.json) | +| PSPNet | R-18-D8 | 512x1024 | 80000 | 1.7 | 15.71 | 74.87 | 76.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r18-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes/pspnet_r18-d8_512x1024_80k_cityscapes_20201225_021458-09ffa746.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes/pspnet_r18-d8_512x1024_80k_cityscapes-20201225_021458.log.json) | +| PSPNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.55 | 79.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131.log.json) | +| PSPNet | R-50b-D8 rsb | 512x1024 | 80000 | 6.2 | 3.82 | 78.47 | 79.45 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8-rsb_4xb2-adamw-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes_20220315_123238-588c30be.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes_20220315_123238.log.json) | +| PSPNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.76 | 81.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211.log.json) | +| PSPNet (FP16) | R-101-D8 | 512x1024 | 80000 | 5.34 | 8.77 | 79.46 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_fp16_512x1024_80k_cityscapes/pspnet_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230919-a0875e5c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_fp16_512x1024_80k_cityscapes/pspnet_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230919.log.json) | +| PSPNet | R-18-D8 | 769x769 | 80000 | 
1.9 | 6.20 | 75.90 | 77.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r18-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_769x769_80k_cityscapes/pspnet_r18-d8_769x769_80k_cityscapes_20201225_021458-3deefc62.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_769x769_80k_cityscapes/pspnet_r18-d8_769x769_80k_cityscapes-20201225_021458.log.json) | +| PSPNet | R-50-D8 | 769x769 | 80000 | - | - | 79.59 | 80.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121-5ccf03dd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121.log.json) | +| PSPNet | R-101-D8 | 769x769 | 80000 | - | - | 79.77 | 81.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055-dba412fa.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055.log.json) | +| PSPNet | R-18b-D8 | 512x1024 | 80000 | 1.5 | 16.28 | 74.23 | 75.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r18b-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes/pspnet_r18b-d8_512x1024_80k_cityscapes_20201226_063116-26928a60.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes/pspnet_r18b-d8_512x1024_80k_cityscapes-20201226_063116.log.json) | +| PSPNet | R-50b-D8 | 512x1024 | 80000 | 6.0 | 4.30 | 78.22 | 79.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50b-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes/pspnet_r50b-d8_512x1024_80k_cityscapes_20201225_094315-6344287a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes/pspnet_r50b-d8_512x1024_80k_cityscapes-20201225_094315.log.json) | +| PSPNet | R-101b-D8 | 512x1024 | 80000 | 9.5 | 2.76 | 79.69 | 80.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes-20201226_170012.log.json) | +| PSPNet | R-18b-D8 | 769x769 | 80000 | 1.7 | 6.41 | 74.92 | 76.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r18b-d8_4xb2-80k_cityscapes-769x769.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes/pspnet_r18b-d8_769x769_80k_cityscapes_20201226_080942-bf98d186.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes/pspnet_r18b-d8_769x769_80k_cityscapes-20201226_080942.log.json) | +| PSPNet | R-50b-D8 | 769x769 | 80000 | 6.8 | 1.88 | 78.50 | 79.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50b-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes/pspnet_r50b-d8_769x769_80k_cityscapes_20201225_094316-4c643cf6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes/pspnet_r50b-d8_769x769_80k_cityscapes-20201225_094316.log.json) | +| PSPNet | R-101b-D8 | 769x769 | 80000 | 10.8 | 1.17 | 78.87 | 80.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes/pspnet_r101b-d8_769x769_80k_cityscapes_20201226_171823-f0e7c293.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes/pspnet_r101b-d8_769x769_80k_cityscapes-20201226_171823.log.json) | +| PSPNet | R-50-D32 | 512x1024 | 80000 | 3.0 | 15.21 | 73.88 | 76.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50b-d32_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d32_512x1024_80k_cityscapes/pspnet_r50-d32_512x1024_80k_cityscapes_20220316_224840-9092b254.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d32_512x1024_80k_cityscapes/pspnet_r50-d32_512x1024_80k_cityscapes_20220316_224840.log.json) | +| PSPNet | R-50b-D32 rsb | 512x1024 | 80000 | 3.1 | 16.08 | 74.09 | 77.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d32_rsb_4xb2-adamw-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes_20220316_141229-dd9c9610.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes_20220316_141229.log.json) | +| PSPNet | R-50b-D32 | 512x1024 | 80000 | 2.9 | 15.41 | 72.61 | 75.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50b-d32_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d32_512x1024_80k_cityscapes/pspnet_r50b-d32_512x1024_80k_cityscapes_20220311_152152-23bcaf8c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d32_512x1024_80k_cityscapes/pspnet_r50b-d32_512x1024_80k_cityscapes_20220311_152152.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | 
-|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| PSPNet | R-50-D8 | 512x512 | 80000 | 8.5 | 23.53 | 41.13 | 41.94 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128-15a8b914.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128.log.json) | -| PSPNet | R-101-D8 | 512x512 | 80000 | 12 | 15.30 | 43.57 | 44.35 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423-b6e782f0.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423.log.json) | -| PSPNet | R-50-D8 | 512x512 | 160000 | - | - | 42.48 | 43.44 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358.log.json) | -| PSPNet | R-101-D8 | 512x512 | 160000 | - | - | 44.39 | 45.35 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-50-D8 | 512x512 | 80000 | 8.5 | 23.53 | 41.13 | 41.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128-15a8b914.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128.log.json) | +| PSPNet | R-101-D8 | 512x512 | 80000 | 12 | 15.30 | 43.57 | 44.35 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423-b6e782f0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423.log.json) | +| PSPNet | R-50-D8 | 512x512 | 160000 | - | - | 42.48 | 43.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358.log.json) | +| PSPNet | R-101-D8 | 512x512 | 160000 | - | - | 44.39 | 45.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650.log.json) | ### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|--------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| PSPNet | R-50-D8 | 512x512 | 20000 | 6.1 | 23.59 | 76.78 | 77.61 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958-ed5dfbd9.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958.log.json) | -| PSPNet | R-101-D8 | 512x512 | 20000 | 9.6 | 15.02 | 78.47 | 79.25 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003-4aef3c9a.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003.log.json) | -| PSPNet | R-50-D8 | 512x512 | 40000 | - | - | 77.29 | 78.48 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222-ae9c1b8c.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222.log.json) | -| PSPNet | R-101-D8 | 512x512 | 40000 | - | - | 78.52 | 79.57 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222-bc933b18.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-50-D8 | 512x512 | 20000 | 6.1 | 23.59 | 76.78 | 77.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958-ed5dfbd9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958.log.json) | +| PSPNet | R-101-D8 | 512x512 | 20000 | 9.6 | 15.02 | 78.47 | 79.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003-4aef3c9a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003.log.json) | +| PSPNet | R-50-D8 | 512x512 | 40000 | - | - | 77.29 | 78.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222-ae9c1b8c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222.log.json) | +| PSPNet | R-101-D8 | 512x512 | 40000 | - | - | 78.52 | 79.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222-bc933b18.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222.log.json) | + +### Pascal Context + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------ | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-101-D8 | 480x480 | 40000 | 8.8 | 9.68 | 46.60 | 47.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-40k_pascal-context-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context/pspnet_r101-d8_480x480_40k_pascal_context_20200911_211210-bf0f5d7c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context/pspnet_r101-d8_480x480_40k_pascal_context-20200911_211210.log.json) | +| PSPNet | R-101-D8 | 480x480 | 80000 | - | - | 46.03 | 47.15 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-80k_pascal-context-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context/pspnet_r101-d8_480x480_80k_pascal_context_20200911_190530-c86d6233.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context/pspnet_r101-d8_480x480_80k_pascal_context-20200911_190530.log.json) | + +### Pascal Context 59 + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-101-D8 | 480x480 | 40000 | - | - | 52.02 | 53.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-40k_pascal-context-59-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59/pspnet_r101-d8_480x480_40k_pascal_context_59_20210416_114524-86d44cd4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59/pspnet_r101-d8_480x480_40k_pascal_context_59-20210416_114524.log.json) | +| PSPNet | R-101-D8 | 480x480 | 80000 | - | - | 52.47 | 53.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-80k_pascal-context-59-480x480.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59/pspnet_r101-d8_480x480_80k_pascal_context_59_20210416_114418-fa6caaa2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59/pspnet_r101-d8_480x480_80k_pascal_context_59-20210416_114418.log.json) | + +### Dark Zurich and Nighttime Driving + +We support evaluation results on these two datasets using models above trained on Cityscapes training set. 
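+
+Such an evaluation pairs one of the dedicated Dark Zurich / Nighttime Driving test configs with a checkpoint trained on Cityscapes (both are linked in the table below), typically through `tools/test.py`. The snippet below is only a minimal sketch of loading the same config/checkpoint pair with the Python inference API; the image path is a placeholder, and the `.pth` file is assumed to have been downloaded from the model link in the table.
+
+```python
+from mmseg.apis import inference_model, init_model
+
+# Config and checkpoint are taken from the first Dark Zurich row of the table below.
+config_file = 'configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024_dark-zurich-1920x1080.py'
+checkpoint_file = 'pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'
+
+# Build the Cityscapes-trained PSPNet and run it on a single nighttime image.
+model = init_model(config_file, checkpoint_file, device='cuda:0')
+result = inference_model(model, 'demo/dark_zurich_example.png')  # placeholder image path
+```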
+ +| Method | Backbone | Training Dataset | Test Dataset | mIoU | config | evaluation checkpoint | +| ------ | --------- | ----------------------- | ------------------------- | ----- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PSPNet | R-50-D8 | Cityscapes Training set | Dark Zurich | 10.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024_dark-zurich-1920x1080.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338.log.json) | +| PSPNet | R-50-D8 | Cityscapes Training set | Nighttime Driving | 23.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024_night-driving-1920x1080.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338.log.json) | +| PSPNet | R-50-D8 | Cityscapes Training set | Cityscapes Validation set | 77.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338.log.json) | +| PSPNet | R-101-D8 | Cityscapes Training set | Dark Zurich | 10.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024_dark-zurich-1920x1080.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751.log.json) | +| PSPNet | R-101-D8 | Cityscapes Training set | Nighttime Driving | 20.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024_night-driving-1920x1080.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751.log.json) | +| PSPNet | 
R-101-D8 | Cityscapes Training set | Cityscapes Validation set | 78.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751.log.json) | +| PSPNet | R-101b-D8 | Cityscapes Training set | Dark Zurich | 15.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024_dark-zurich-1920x1080.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes-20201226_170012.log.json) | +| PSPNet | R-101b-D8 | Cityscapes Training set | Nighttime Driving | 22.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024_night-driving-1920x1080.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes-20201226_170012.log.json) | +| PSPNet | R-101b-D8 | Cityscapes Training set | Cityscapes Validation set | 79.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes-20201226_170012.log.json) | + +### COCO-Stuff 10k + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-50-D8 | 512x512 | 20000 | 9.6 | 20.5 | 35.69 | 36.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-20k_coco-stuff10k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k_20210820_203258-b88df27f.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k_20210820_203258.log.json) | +| PSPNet | R-101-D8 | 512x512 | 20000 | 13.2 | 11.1 | 37.26 | 38.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-20k_coco-stuff10k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k_20210820_232135-76aae482.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k_20210820_232135.log.json) | +| PSPNet | R-50-D8 | 512x512 | 40000 | - | - | 36.33 | 37.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-40k_coco-stuff10k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_030857-92e2902b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_030857.log.json) | +| PSPNet | R-101-D8 | 512x512 | 40000 | - | - | 37.76 | 38.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-40k_coco-stuff10k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_014022-831aec95.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_014022.log.json) | + +### COCO-Stuff 164k + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-50-D8 | 512x512 | 80000 | 9.6 | 20.5 | 38.80 | 39.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-80k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k_20210707_152034-0e41b2db.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k_20210707_152034.log.json) | +| PSPNet | R-101-D8 | 512x512 | 80000 | 13.2 | 11.1 | 40.34 | 40.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-80k_coco-stuff164k-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k_20210707_152034-7eb41789.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k_20210707_152034.log.json) | +| PSPNet | R-50-D8 | 512x512 | 160000 | - | - | 39.64 | 39.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-160k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k_20210707_152004-51276a57.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k_20210707_152004.log.json) | +| PSPNet | R-101-D8 | 512x512 | 160000 | - | - | 41.28 | 41.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-160k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k_20210707_152004-4af9621b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k_20210707_152004.log.json) | +| PSPNet | R-50-D8 | 512x512 | 320000 | - | - | 40.53 | 40.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-320k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k_20210707_152004-be9610cc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k_20210707_152004.log.json) | +| PSPNet | R-101-D8 | 512x512 | 320000 | - | - | 41.95 | 42.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-320k_coco-stuff164k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k_20210707_152004-72220c60.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k_20210707_152004.log.json) | + +### LoveDA + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PSPNet | R-18-D8 | 512x512 | 80000 | 1.45 | 26.87 | 48.62 | 47.57 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r18-d8_4xb4-80k_loveda-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x512_80k_loveda/pspnet_r18-d8_512x512_80k_loveda_20211105_052100-b97697f1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x512_80k_loveda/pspnet_r18-d8_512x512_80k_loveda_20211105_052100.log.json) | +| PSPNet | R-50-D8 | 512x512 | 80000 | 6.14 | 6.60 | 50.46 | 50.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-80k_loveda-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_loveda/pspnet_r50-d8_512x512_80k_loveda_20211104_155728-88610f9f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_loveda/pspnet_r50-d8_512x512_80k_loveda_20211104_155728.log.json) | +| PSPNet | R-101-D8 | 512x512 | 80000 | 9.61 | 4.58 | 51.86 | 51.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-80k_loveda-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_loveda/pspnet_r101-d8_512x512_80k_loveda_20211104_153212-1c06c6a8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_loveda/pspnet_r101-d8_512x512_80k_loveda_20211104_153212.log.json) | + +### Potsdam + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-18-D8 | 512x512 | 80000 | 1.50 | 85.12 | 77.09 | 78.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r18-d8_4xb4-80k_potsdam-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_512x512_80k_potsdam/pspnet_r18-d8_4x4_512x512_80k_potsdam_20211220_125612-7cd046e1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_512x512_80k_potsdam/pspnet_r18-d8_4x4_512x512_80k_potsdam_20211220_125612.log.json) | +| PSPNet | R-50-D8 | 512x512 | 80000 | 6.14 | 30.21 | 78.12 | 78.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-80k_potsdam-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_512x512_80k_potsdam/pspnet_r50-d8_4x4_512x512_80k_potsdam_20211219_043541-2dd5fe67.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_512x512_80k_potsdam/pspnet_r50-d8_4x4_512x512_80k_potsdam_20211219_043541.log.json) | +| PSPNet | R-101-D8 | 512x512 | 80000 | 9.61 | 19.40 | 78.62 | 79.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-80k_potsdam-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_4x4_512x512_80k_potsdam/pspnet_r101-d8_4x4_512x512_80k_potsdam_20211220_125612-aed036c4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_4x4_512x512_80k_potsdam/pspnet_r101-d8_4x4_512x512_80k_potsdam_20211220_125612.log.json) | + +### Vaihingen + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-18-D8 | 512x512 | 80000 | 1.45 | 85.06 | 71.46 | 73.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r18-d8_4xb4-80k_vaihingen-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_512x512_80k_vaihingen/pspnet_r18-d8_4x4_512x512_80k_vaihingen_20211228_160355-52a8a6f6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_512x512_80k_vaihingen/pspnet_r18-d8_4x4_512x512_80k_vaihingen_20211228_160355.log.json) | +| PSPNet | R-50-D8 | 512x512 | 80000 | 6.14 | 30.29 | 72.36 | 73.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-80k_vaihingen-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_512x512_80k_vaihingen/pspnet_r50-d8_4x4_512x512_80k_vaihingen_20211228_160355-382f8f5b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_512x512_80k_vaihingen/pspnet_r50-d8_4x4_512x512_80k_vaihingen_20211228_160355.log.json) | +| PSPNet | R-101-D8 | 512x512 | 80000 | 9.61 | 19.97 | 72.61 | 74.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r101-d8_4xb4-80k_vaihingen-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_4x4_512x512_80k_vaihingen/pspnet_r101-d8_4x4_512x512_80k_vaihingen_20211231_230806-8eba0a09.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_4x4_512x512_80k_vaihingen/pspnet_r101-d8_4x4_512x512_80k_vaihingen_20211231_230806.log.json) | + +### iSAID + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-18-D8 | 896x896 | 80000 | 4.52 | 26.91 | 60.22 | 61.25 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r18-d8_4xb4-80k_isaid-896x896.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_896x896_80k_isaid/pspnet_r18-d8_4x4_896x896_80k_isaid_20220110_180526-e84c0b6a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_896x896_80k_isaid/pspnet_r18-d8_4x4_896x896_80k_isaid_20220110_180526.log.json) | +| PSPNet | R-50-D8 | 896x896 | 80000 | 16.58 | 8.88 | 65.36 | 66.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/pspnet/pspnet_r50-d8_4xb4-80k_isaid-896x896.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid/pspnet_r50-d8_4x4_896x896_80k_isaid_20220110_180629-1f21dc32.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid/pspnet_r50-d8_4x4_896x896_80k_isaid_20220110_180629.log.json) | + +Note: + +- `FP16` means Mixed Precision (FP16) is adopted in training. +- `896x896` is the Crop Size of iSAID dataset, which is followed by the implementation of [PointFlow: Flowing Semantics Through Points for Aerial Image Segmentation](https://arxiv.org/pdf/2103.06564.pdf) +- `rsb` is short for 'Resnet strikes back'. +- The `b` in `R-50b` means ResNetV1b, which is a standard ResNet backbone. In MMSegmentation, default backbone is ResNetV1c, which usually performs better in semantic segmentation task. diff --git a/configs/pspnet/pspnet.yml b/configs/pspnet/pspnet.yml new file mode 100644 index 0000000000..7f811efd0f --- /dev/null +++ b/configs/pspnet/pspnet.yml @@ -0,0 +1,1077 @@ +Collections: +- Name: PSPNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + - Pascal Context + - Pascal Context 59 + - Dark Zurich and Nighttime Driving + - COCO-Stuff 10k + - COCO-Stuff 164k + - LoveDA + - Potsdam + - Vaihingen + - iSAID + Paper: + URL: https://arxiv.org/abs/1612.01105 + Title: Pyramid Scene Parsing Network + README: configs/pspnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/psp_head.py#L63 + Version: v0.17.0 + Converted From: + Code: https://github.com/hszhao/PSPNet +Models: +- Name: pspnet_r50-d8_4xb2-40k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 245.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.85 + mIoU(ms+flip): 79.18 + Config: configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth +- Name: pspnet_r101-d8_4xb2-40k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 373.13 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.34 + mIoU(ms+flip): 79.74 + Config: configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth +- Name: pspnet_r50-d8_4xb2-40k_cityscapes-769x769 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 568.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.26 + mIoU(ms+flip): 79.88 + Config: configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725-86638686.pth +- Name: pspnet_r101-d8_4xb2-40k_cityscapes-769x769 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 869.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.08 + mIoU(ms+flip): 80.28 + Config: configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753-61c6f5be.pth +- Name: pspnet_r18-d8_4xb2-80k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-18-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 63.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.87 + mIoU(ms+flip): 76.04 + Config: configs/pspnet/pspnet_r18-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes/pspnet_r18-d8_512x1024_80k_cityscapes_20201225_021458-09ffa746.pth +- Name: pspnet_r50-d8_4xb2-80k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.55 + mIoU(ms+flip): 79.79 + Config: configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth +- Name: pspnet_r50-d8-rsb_4xb2-adamw-80k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-50b-D8 rsb + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 261.78 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.47 + mIoU(ms+flip): 79.45 + Config: configs/pspnet/pspnet_r50-d8-rsb_4xb2-adamw-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes_20220315_123238-588c30be.pth +- Name: pspnet_r101-d8_4xb2-80k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - 
Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.76 + mIoU(ms+flip): 81.01 + Config: configs/pspnet/pspnet_r101-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth +- Name: pspnet_r101-d8_4xb2-amp-80k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 114.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: AMP + resolution: (512,1024) + Training Memory (GB): 5.34 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.46 + Config: configs/pspnet/pspnet_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_fp16_512x1024_80k_cityscapes/pspnet_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230919-a0875e5c.pth +- Name: pspnet_r18-d8_4xb2-80k_cityscapes-769x769 + In Collection: PSPNet + Metadata: + backbone: R-18-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 161.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 1.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.9 + mIoU(ms+flip): 77.86 + Config: configs/pspnet/pspnet_r18-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_769x769_80k_cityscapes/pspnet_r18-d8_769x769_80k_cityscapes_20201225_021458-3deefc62.pth +- Name: pspnet_r50-d8_4xb2-80k_cityscapes-769x769 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.59 + mIoU(ms+flip): 80.69 + Config: configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121-5ccf03dd.pth +- Name: pspnet_r101-d8_4xb2-80k_cityscapes-769x769 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.77 + mIoU(ms+flip): 81.06 + Config: configs/pspnet/pspnet_r101-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055-dba412fa.pth +- Name: pspnet_r18b-d8_4xb2-80k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-18b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 61.43 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.23 + mIoU(ms+flip): 75.79 + Config: configs/pspnet/pspnet_r18b-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes/pspnet_r18b-d8_512x1024_80k_cityscapes_20201226_063116-26928a60.pth +- Name: pspnet_r50b-d8_4xb2-80k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-50b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 232.56 
+ hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.22 + mIoU(ms+flip): 79.46 + Config: configs/pspnet/pspnet_r50b-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes/pspnet_r50b-d8_512x1024_80k_cityscapes_20201225_094315-6344287a.pth +- Name: pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-101b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 362.32 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.69 + mIoU(ms+flip): 80.79 + Config: configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth +- Name: pspnet_r18b-d8_4xb2-80k_cityscapes-769x769 + In Collection: PSPNet + Metadata: + backbone: R-18b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 156.01 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.92 + mIoU(ms+flip): 76.9 + Config: configs/pspnet/pspnet_r18b-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes/pspnet_r18b-d8_769x769_80k_cityscapes_20201226_080942-bf98d186.pth +- Name: pspnet_r50b-d8_4xb2-80k_cityscapes-769x769 + In Collection: PSPNet + Metadata: + backbone: R-50b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 531.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.5 + mIoU(ms+flip): 79.96 + Config: configs/pspnet/pspnet_r50b-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes/pspnet_r50b-d8_769x769_80k_cityscapes_20201225_094316-4c643cf6.pth +- Name: pspnet_r101b-d8_4xb2-80k_cityscapes-769x769 + In Collection: PSPNet + Metadata: + backbone: R-101b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 854.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.87 + mIoU(ms+flip): 80.04 + Config: configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes/pspnet_r101b-d8_769x769_80k_cityscapes_20201226_171823-f0e7c293.pth +- Name: pspnet_r50b-d32_4xb2-80k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 65.75 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: 
+ mIoU: 73.88 + mIoU(ms+flip): 76.85 + Config: configs/pspnet/pspnet_r50-d32_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d32_512x1024_80k_cityscapes/pspnet_r50-d32_512x1024_80k_cityscapes_20220316_224840-9092b254.pth +- Name: pspnet_r50-d32_rsb_4xb2-adamw-80k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-50b-D32 rsb + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 62.19 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.09 + mIoU(ms+flip): 77.18 + Config: configs/pspnet/pspnet_r50-d32_rsb_4xb2-adamw-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes_20220316_141229-dd9c9610.pth +- Name: pspnet_r50b-d32_4xb2-80k_cityscapes-512x1024 + In Collection: PSPNet + Metadata: + backbone: R-50b-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 64.89 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 2.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 72.61 + mIoU(ms+flip): 75.51 + Config: configs/pspnet/pspnet_r50b-d32_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d32_512x1024_80k_cityscapes/pspnet_r50b-d32_512x1024_80k_cityscapes_20220311_152152-23bcaf8c.pth +- Name: pspnet_r50-d8_4xb4-80k_ade20k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 42.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.13 + mIoU(ms+flip): 41.94 + Config: configs/pspnet/pspnet_r50-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128-15a8b914.pth +- Name: pspnet_r101-d8_4xb4-80k_ade20k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 65.36 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.57 + mIoU(ms+flip): 44.35 + Config: configs/pspnet/pspnet_r101-d8_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423-b6e782f0.pth +- Name: pspnet_r50-d8_4xb4-160k_ade20k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.48 + mIoU(ms+flip): 43.44 + Config: configs/pspnet/pspnet_r50-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth +- Name: pspnet_r101-d8_4xb4-160k_ade20k-512x512 + In Collection: PSPNet +
Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.39 + mIoU(ms+flip): 45.35 + Config: configs/pspnet/pspnet_r101-d8_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth +- Name: pspnet_r50-d8_4xb4-20k_voc12aug-512x512 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 42.39 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.78 + mIoU(ms+flip): 77.61 + Config: configs/pspnet/pspnet_r50-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958-ed5dfbd9.pth +- Name: pspnet_r101-d8_4xb4-20k_voc12aug-512x512 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 66.58 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.47 + mIoU(ms+flip): 79.25 + Config: configs/pspnet/pspnet_r101-d8_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003-4aef3c9a.pth +- Name: pspnet_r50-d8_4xb4-40k_voc12aug-512x512 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.29 + mIoU(ms+flip): 78.48 + Config: configs/pspnet/pspnet_r50-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222-ae9c1b8c.pth +- Name: pspnet_r101-d8_4xb4-40k_voc12aug-512x512 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.52 + mIoU(ms+flip): 79.57 + Config: configs/pspnet/pspnet_r101-d8_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222-bc933b18.pth +- Name: pspnet_r101-d8_4xb4-40k_pascal-context-480x480 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + inference time (ms/im): + - value: 103.31 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (480,480) + Training Memory (GB): 8.8 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 46.6 + mIoU(ms+flip): 47.78 + Config: configs/pspnet/pspnet_r101-d8_4xb4-40k_pascal-context-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context/pspnet_r101-d8_480x480_40k_pascal_context_20200911_211210-bf0f5d7c.pth +- Name: pspnet_r101-d8_4xb4-80k_pascal-context-480x480 + In Collection: PSPNet + Metadata: + 
backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 46.03 + mIoU(ms+flip): 47.15 + Config: configs/pspnet/pspnet_r101-d8_4xb4-80k_pascal-context-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context/pspnet_r101-d8_480x480_80k_pascal_context_20200911_190530-c86d6233.pth +- Name: pspnet_r101-d8_4xb4-40k_pascal-context-59-480x480 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 52.02 + mIoU(ms+flip): 53.54 + Config: configs/pspnet/pspnet_r101-d8_4xb4-40k_pascal-context-59-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59/pspnet_r101-d8_480x480_40k_pascal_context_59_20210416_114524-86d44cd4.pth +- Name: pspnet_r101-d8_4xb4-80k_pascal-context-59-480x480 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 52.47 + mIoU(ms+flip): 53.99 + Config: configs/pspnet/pspnet_r101-d8_4xb4-80k_pascal-context-59-480x480.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59/pspnet_r101-d8_480x480_80k_pascal_context_59_20210416_114418-fa6caaa2.pth +- Name: pspnet_r50-d8_4xb4-20k_coco-stuff10k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 48.78 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 35.69 + mIoU(ms+flip): 36.62 + Config: configs/pspnet/pspnet_r50-d8_4xb4-20k_coco-stuff10k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k_20210820_203258-b88df27f.pth +- Name: pspnet_r101-d8_4xb4-20k_coco-stuff10k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 90.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.2 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 37.26 + mIoU(ms+flip): 38.52 + Config: configs/pspnet/pspnet_r101-d8_4xb4-20k_coco-stuff10k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k_20210820_232135-76aae482.pth +- Name: pspnet_r50-d8_4xb4-40k_coco-stuff10k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 36.33 + mIoU(ms+flip): 37.24 + Config: configs/pspnet/pspnet_r50-d8_4xb4-40k_coco-stuff10k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_030857-92e2902b.pth +- Name: pspnet_r101-d8_4xb4-40k_coco-stuff10k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: 
(512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 37.76 + mIoU(ms+flip): 38.86 + Config: configs/pspnet/pspnet_r101-d8_4xb4-40k_coco-stuff10k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_014022-831aec95.pth +- Name: pspnet_r50-d8_4xb4-80k_coco-stuff164k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 48.78 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 38.8 + mIoU(ms+flip): 39.19 + Config: configs/pspnet/pspnet_r50-d8_4xb4-80k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k_20210707_152034-0e41b2db.pth +- Name: pspnet_r101-d8_4xb4-80k_coco-stuff164k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 90.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.2 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 40.34 + mIoU(ms+flip): 40.79 + Config: configs/pspnet/pspnet_r101-d8_4xb4-80k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k_20210707_152034-7eb41789.pth +- Name: pspnet_r50-d8_4xb4-160k_coco-stuff164k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 39.64 + mIoU(ms+flip): 39.97 + Config: configs/pspnet/pspnet_r50-d8_4xb4-160k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k_20210707_152004-51276a57.pth +- Name: pspnet_r101-d8_4xb4-160k_coco-stuff164k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 41.28 + mIoU(ms+flip): 41.66 + Config: configs/pspnet/pspnet_r101-d8_4xb4-160k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k_20210707_152004-4af9621b.pth +- Name: pspnet_r50-d8_4xb4-320k_coco-stuff164k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 320000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 40.53 + mIoU(ms+flip): 40.75 + Config: configs/pspnet/pspnet_r50-d8_4xb4-320k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k_20210707_152004-be9610cc.pth +- Name: pspnet_r101-d8_4xb4-320k_coco-stuff164k-512x512 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 
320000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 41.95 + mIoU(ms+flip): 42.42 + Config: configs/pspnet/pspnet_r101-d8_4xb4-320k_coco-stuff164k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k_20210707_152004-72220c60.pth +- Name: pspnet_r18-d8_4xb4-80k_loveda-512x512 + In Collection: PSPNet + Metadata: + backbone: R-18-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 37.22 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.45 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 48.62 + mIoU(ms+flip): 47.57 + Config: configs/pspnet/pspnet_r18-d8_4xb4-80k_loveda-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x512_80k_loveda/pspnet_r18-d8_512x512_80k_loveda_20211105_052100-b97697f1.pth +- Name: pspnet_r50-d8_4xb4-80k_loveda-512x512 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 151.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.14 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 50.46 + mIoU(ms+flip): 50.19 + Config: configs/pspnet/pspnet_r50-d8_4xb4-80k_loveda-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_loveda/pspnet_r50-d8_512x512_80k_loveda_20211104_155728-88610f9f.pth +- Name: pspnet_r101-d8_4xb4-80k_loveda-512x512 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 218.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.61 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 51.86 + mIoU(ms+flip): 51.34 + Config: configs/pspnet/pspnet_r101-d8_4xb4-80k_loveda-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_loveda/pspnet_r101-d8_512x512_80k_loveda_20211104_153212-1c06c6a8.pth +- Name: pspnet_r18-d8_4xb4-80k_potsdam-512x512 + In Collection: PSPNet + Metadata: + backbone: R-18-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 11.75 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.5 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 77.09 + mIoU(ms+flip): 78.3 + Config: configs/pspnet/pspnet_r18-d8_4xb4-80k_potsdam-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_512x512_80k_potsdam/pspnet_r18-d8_4x4_512x512_80k_potsdam_20211220_125612-7cd046e1.pth +- Name: pspnet_r50-d8_4xb4-80k_potsdam-512x512 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 33.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.14 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 78.12 + mIoU(ms+flip): 78.98 + Config: configs/pspnet/pspnet_r50-d8_4xb4-80k_potsdam-512x512.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_512x512_80k_potsdam/pspnet_r50-d8_4x4_512x512_80k_potsdam_20211219_043541-2dd5fe67.pth +- Name: pspnet_r101-d8_4xb4-80k_potsdam-512x512 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 51.55 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.61 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 78.62 + mIoU(ms+flip): 79.47 + Config: configs/pspnet/pspnet_r101-d8_4xb4-80k_potsdam-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_4x4_512x512_80k_potsdam/pspnet_r101-d8_4x4_512x512_80k_potsdam_20211220_125612-aed036c4.pth +- Name: pspnet_r18-d8_4xb4-80k_vaihingen-512x512 + In Collection: PSPNet + Metadata: + backbone: R-18-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 11.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.45 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 71.46 + mIoU(ms+flip): 73.36 + Config: configs/pspnet/pspnet_r18-d8_4xb4-80k_vaihingen-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_512x512_80k_vaihingen/pspnet_r18-d8_4x4_512x512_80k_vaihingen_20211228_160355-52a8a6f6.pth +- Name: pspnet_r50-d8_4xb4-80k_vaihingen-512x512 + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 33.01 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.14 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 72.36 + mIoU(ms+flip): 73.75 + Config: configs/pspnet/pspnet_r50-d8_4xb4-80k_vaihingen-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_512x512_80k_vaihingen/pspnet_r50-d8_4x4_512x512_80k_vaihingen_20211228_160355-382f8f5b.pth +- Name: pspnet_r101-d8_4xb4-80k_vaihingen-512x512 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 50.08 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.61 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 72.61 + mIoU(ms+flip): 74.18 + Config: configs/pspnet/pspnet_r101-d8_4xb4-80k_vaihingen-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_4x4_512x512_80k_vaihingen/pspnet_r101-d8_4x4_512x512_80k_vaihingen_20211231_230806-8eba0a09.pth +- Name: pspnet_r18-d8_4xb4-80k_isaid-896x896 + In Collection: PSPNet + Metadata: + backbone: R-18-D8 + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 37.16 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 4.52 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 60.22 + mIoU(ms+flip): 61.25 + Config: configs/pspnet/pspnet_r18-d8_4xb4-80k_isaid-896x896.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_896x896_80k_isaid/pspnet_r18-d8_4x4_896x896_80k_isaid_20220110_180526-e84c0b6a.pth +- Name: pspnet_r50-d8_4xb4-80k_isaid-896x896 + In Collection: PSPNet + Metadata: 
+ backbone: R-50-D8 + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 112.61 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 16.58 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 65.36 + mIoU(ms+flip): 66.48 + Config: configs/pspnet/pspnet_r50-d8_4xb4-80k_isaid-896x896.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid/pspnet_r50-d8_4x4_896x896_80k_isaid_20220110_180629-1f21dc32.pth diff --git a/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..f33d653b76 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024_dark-zurich-1920x1080.py b/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024_dark-zurich-1920x1080.py new file mode 100644 index 0000000000..5babaa8851 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024_dark-zurich-1920x1080.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb2-40k_cityscapes-512x1024_dark-zurich-1920x1080.py' # noqa +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024_night-driving-1920x1080.py b/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024_night-driving-1920x1080.py new file mode 100644 index 0000000000..a9480c52f8 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-512x1024_night-driving-1920x1080.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb2-40k_cityscapes-512x1024_night-driving-1920x1080.py' # noqa +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-769x769.py b/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..e05cff6d8e --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb2-80k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r101-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..6704cdd5d2 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb2-80k_cityscapes-769x769.py b/configs/pspnet/pspnet_r101-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..3733e69198 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..52f86b5e75 --- /dev/null +++ 
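Each metafile entry above pairs a Config path with a released Weights checkpoint, so any row can be run directly for inference. A minimal sketch, assuming the MMSegmentation 1.x helpers init_model/inference_model and reusing the R-50-D8 ADE20K 80k entry listed above; the demo image path is a placeholder:

# Hedged sketch: inference with one Config/Weights pair from the metafile above.
from mmseg.apis import inference_model, init_model

config = 'configs/pspnet/pspnet_r50-d8_4xb4-80k_ade20k-512x512.py'
checkpoint = ('https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/'
              'pspnet_r50-d8_512x512_80k_ade20k/'
              'pspnet_r50-d8_512x512_80k_ade20k_20200615_014128-15a8b914.pth')

model = init_model(config, checkpoint, device='cuda:0')   # use 'cpu' if no GPU is available
result = inference_model(model, 'demo/demo.png')           # placeholder image path; returns per-pixel predictions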
b/configs/pspnet/pspnet_r101-d8_4xb2-amp-80k_cityscapes-512x1024.py @@ -0,0 +1,6 @@ +_base_ = './pspnet_r101-d8_4xb2-80k_cityscapes-512x1024.py' +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005), + loss_scale=512.) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-160k_ade20k-512x512.py b/configs/pspnet/pspnet_r101-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..2231049b8a --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-160k_coco-stuff164k-512x512.py b/configs/pspnet/pspnet_r101-d8_4xb4-160k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..f5390f8c76 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-160k_coco-stuff164k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-160k_coco-stuff164k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-20k_coco-stuff10k-512x512.py b/configs/pspnet/pspnet_r101-d8_4xb4-20k_coco-stuff10k-512x512.py new file mode 100644 index 0000000000..84a986cd9d --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-20k_coco-stuff10k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-20k_coco-stuff10k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-20k_voc12aug-512x512.py b/configs/pspnet/pspnet_r101-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..71897ddc2d --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-320k_coco-stuff164k-512x512.py b/configs/pspnet/pspnet_r101-d8_4xb4-320k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..ebaea36da8 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-320k_coco-stuff164k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-320k_coco-stuff164k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-40k_coco-stuff10k-512x512.py b/configs/pspnet/pspnet_r101-d8_4xb4-40k_coco-stuff10k-512x512.py new file mode 100644 index 0000000000..2a55f53ee9 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-40k_coco-stuff10k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-40k_coco-stuff10k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-40k_pascal-context-480x480.py b/configs/pspnet/pspnet_r101-d8_4xb4-40k_pascal-context-480x480.py new file mode 100644 index 0000000000..205d00bac9 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-40k_pascal-context-480x480.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-40k_pascal-context-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-40k_pascal-context-59-480x480.py b/configs/pspnet/pspnet_r101-d8_4xb4-40k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..0d7c176073 --- /dev/null +++ 
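The -amp- variant above keeps the same SGD settings but swaps the optimizer wrapper for AmpOptimWrapper with loss_scale=512. In plain PyTorch terms that corresponds roughly to running the forward pass under autocast and backing it with a GradScaler seeded at that scale. The sketch below is only an illustration of that idea with a stand-in model, not the wrapper's actual implementation, and it needs a CUDA device to run:

# Hedged sketch of mixed-precision training with a fixed initial loss scale of 512.
import torch
import torch.nn as nn

model = nn.Linear(8, 1).cuda()                  # stand-in for the segmentor
inputs = torch.randn(4, 8, device='cuda')
target = torch.randn(4, 1, device='cuda')

# Same optimizer hyperparameters as the config; 512 mirrors loss_scale=512.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)
scaler = torch.cuda.amp.GradScaler(init_scale=512.0)

with torch.cuda.amp.autocast():                 # forward in mixed precision
    loss = nn.functional.mse_loss(model(inputs), target)
scaler.scale(loss).backward()                   # scaled backward to avoid FP16 underflow
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()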
b/configs/pspnet/pspnet_r101-d8_4xb4-40k_pascal-context-59-480x480.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-40k_pascal-context-59-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-40k_voc12aug-512x512.py b/configs/pspnet/pspnet_r101-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..0599f31f96 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-40k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-80k_ade20k-512x512.py b/configs/pspnet/pspnet_r101-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..f95560347a --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-80k_coco-stuff164k-512x512.py b/configs/pspnet/pspnet_r101-d8_4xb4-80k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..4a34f97485 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-80k_coco-stuff164k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-80k_coco-stuff164k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-80k_loveda-512x512.py b/configs/pspnet/pspnet_r101-d8_4xb4-80k_loveda-512x512.py new file mode 100644 index 0000000000..7076877980 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-80k_loveda-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-80k_loveda-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-80k_pascal-context-480x480.py b/configs/pspnet/pspnet_r101-d8_4xb4-80k_pascal-context-480x480.py new file mode 100644 index 0000000000..0ac40dc861 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-80k_pascal-context-480x480.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-80k_pascal-context-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-80k_pascal-context-59-480x480.py b/configs/pspnet/pspnet_r101-d8_4xb4-80k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..307188c783 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-80k_pascal-context-59-480x480.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-80k_pascal-context-59-480x480.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-80k_potsdam-512x512.py b/configs/pspnet/pspnet_r101-d8_4xb4-80k_potsdam-512x512.py new file mode 100644 index 0000000000..31ed2f2938 --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-80k_potsdam-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-80k_potsdam-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_4xb4-80k_vaihingen-512x512.py b/configs/pspnet/pspnet_r101-d8_4xb4-80k_vaihingen-512x512.py new file mode 100644 index 0000000000..ac33ed7cda --- /dev/null +++ b/configs/pspnet/pspnet_r101-d8_4xb4-80k_vaihingen-512x512.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb4-80k_vaihingen-512x512.py' +model 
= dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py b/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 38fee11bc2..0000000000 --- a/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py b/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 9931a07bc2..0000000000 --- a/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py b/configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index 6107b41544..0000000000 --- a/configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py b/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 2221b202d6..0000000000 --- a/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py b/configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index 15f578b600..0000000000 --- a/configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py b/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index fb7c3d55d5..0000000000 --- a/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_769x769_40k_cityscapes.py b/configs/pspnet/pspnet_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index c6e7e58508..0000000000 --- a/configs/pspnet/pspnet_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py b/configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 59b8c6dd5e..0000000000 --- a/configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..d2c0f69638 --- /dev/null +++ 
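The two-line R-101 configs above rely on recursive merging of _base_: only the keys they mention override the base file, everything else is inherited. A rough sketch of that merge rule, with assumed base values standing in for pspnet_r50-d8.py (hypothetical, for illustration only):

# Hedged sketch of the recursive dict merge behind the _base_ override pattern.
def merge(base, override):
    out = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], value)   # nested dicts merge key by key
        else:
            out[key] = value                    # scalars simply replace the base value
    return out

# Assumed base values; the child config only touches two keys.
base_model = dict(pretrained='open-mmlab://resnet50_v1c',
                  backbone=dict(type='ResNetV1c', depth=50))
child = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
print(merge(base_model, child))
# -> the backbone keeps its type but depth becomes 101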
b/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,4 @@ +_base_ = './pspnet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024_dark-zurich-1920x1080.py b/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024_dark-zurich-1920x1080.py new file mode 100644 index 0000000000..b181744149 --- /dev/null +++ b/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024_dark-zurich-1920x1080.py @@ -0,0 +1,4 @@ +_base_ = './pspnet_r50-d8_4xb2-80k_cityscapes-512x1024_dark-zurich-1920x1080.py' # noqa +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024_night-driving-1920x1080.py b/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024_night-driving-1920x1080.py new file mode 100644 index 0000000000..6a8994b4c8 --- /dev/null +++ b/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-512x1024_night-driving-1920x1080.py @@ -0,0 +1,4 @@ +_base_ = './pspnet_r50-d8_4xb2-80k_cityscapes-512x1024_night-driving-1920x1080.py' # noqa +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-769x769.py b/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..891bfd51ed --- /dev/null +++ b/configs/pspnet/pspnet_r101b-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,4 @@ +_base_ = './pspnet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/configs/pspnet/pspnet_r18-d8_4xb2-80k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r18-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..a4b342ef23 --- /dev/null +++ b/configs/pspnet/pspnet_r18-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/pspnet/pspnet_r18-d8_4xb2-80k_cityscapes-769x769.py b/configs/pspnet/pspnet_r18-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..0e7f3e90ac --- /dev/null +++ b/configs/pspnet/pspnet_r18-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/pspnet/pspnet_r18-d8_4xb4-80k_isaid-896x896.py b/configs/pspnet/pspnet_r18-d8_4xb4-80k_isaid-896x896.py new file mode 100644 index 0000000000..efce7a0e7d --- /dev/null +++ b/configs/pspnet/pspnet_r18-d8_4xb4-80k_isaid-896x896.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_4xb4-80k_isaid-896x896.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/pspnet/pspnet_r18-d8_4xb4-80k_loveda-512x512.py b/configs/pspnet/pspnet_r18-d8_4xb4-80k_loveda-512x512.py new file mode 100644 index 0000000000..80e2d20cbe --- 
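The R-18 variants above shrink decode_head.in_channels to 512 and auxiliary_head.in_channels to 256 because BasicBlock ResNets end their last two stages with far fewer channels than the Bottleneck ResNet-50/101 assumed by the base config. A quick check of those channel counts, using torchvision only for illustration:

# Hedged sketch: stage output channels of ResNet-18 vs ResNet-50.
import torchvision

r18 = torchvision.models.resnet18()
r50 = torchvision.models.resnet50()
# Last channel count of stages 4 and 3, i.e. the inputs to decode_head and auxiliary_head.
print(r18.layer4[-1].conv2.out_channels, r18.layer3[-1].conv2.out_channels)  # 512 256
print(r50.layer4[-1].conv3.out_channels, r50.layer3[-1].conv3.out_channels)  # 2048 1024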
/dev/null +++ b/configs/pspnet/pspnet_r18-d8_4xb4-80k_loveda-512x512.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_4xb4-80k_loveda-512x512.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/pspnet/pspnet_r18-d8_4xb4-80k_potsdam-512x512.py b/configs/pspnet/pspnet_r18-d8_4xb4-80k_potsdam-512x512.py new file mode 100644 index 0000000000..1ef0585e79 --- /dev/null +++ b/configs/pspnet/pspnet_r18-d8_4xb4-80k_potsdam-512x512.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_4xb4-80k_potsdam-512x512.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/pspnet/pspnet_r18-d8_4xb4-80k_vaihingen-512x512.py b/configs/pspnet/pspnet_r18-d8_4xb4-80k_vaihingen-512x512.py new file mode 100644 index 0000000000..51e66d2e51 --- /dev/null +++ b/configs/pspnet/pspnet_r18-d8_4xb4-80k_vaihingen-512x512.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_4xb4-80k_vaihingen-512x512.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/pspnet/pspnet_r18b-d8_4xb2-80k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r18b-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..2e356c5c5f --- /dev/null +++ b/configs/pspnet/pspnet_r18b-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/pspnet/pspnet_r18b-d8_4xb2-80k_cityscapes-769x769.py b/configs/pspnet/pspnet_r18b-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..831354d4ce --- /dev/null +++ b/configs/pspnet/pspnet_r18b-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/configs/pspnet/pspnet_r50-d32_4xb2-80k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r50-d32_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..5700b5b3b4 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d32_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict(dilations=(1, 1, 2, 4), strides=(1, 2, 2, 2))) diff --git a/configs/pspnet/pspnet_r50-d32_rsb_4xb2-adamw-80k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r50-d32_rsb_4xb2-adamw-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..1390329342 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d32_rsb_4xb2-adamw-80k_cityscapes-512x1024.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + 
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa +model = dict( + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='ResNet', + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint), + dilations=(1, 1, 2, 4), + strides=(1, 2, 2, 2))) + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0005, weight_decay=0.05), + clip_grad=dict(max_norm=1, norm_type=2)) +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=1000, + end=80000, + by_epoch=False, + milestones=[60000, 72000], + ) +] diff --git a/configs/pspnet/pspnet_r50-d8-rsb_4xb2-adamw-80k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r50-d8-rsb_4xb2-adamw-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..b83a0b447c --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8-rsb_4xb2-adamw-80k_cityscapes-512x1024.py @@ -0,0 +1,33 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa +model = dict( + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='ResNet', + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0005, weight_decay=0.05), + clip_grad=dict(max_norm=1, norm_type=2)) +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=1000, + end=80000, + by_epoch=False, + milestones=[60000, 72000], + ) +] diff --git a/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..a9dcb52b66 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024_dark-zurich-1920x1080.py b/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024_dark-zurich-1920x1080.py new file mode 100644 index 0000000000..1bf4a135c7 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024_dark-zurich-1920x1080.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(1920, 1080), keep_ratio=True), + # add loading annotation after ``Resize`` 
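The rsb configs above replace the usual schedule with a 1000-iteration linear warm-up followed by step decay at 60k and 72k iterations under AdamW. A standalone sketch of that LR shape (the 0.1 decay factor is an assumed default, since the config does not set gamma explicitly):

# Hedged sketch of the warm-up + multi-step schedule configured above.
def lr_at(it, base_lr=5e-4, warmup_iters=1000, start_factor=0.001,
          milestones=(60000, 72000), gamma=0.1):
    if it < warmup_iters:
        # linearly interpolate the multiplier from start_factor up to 1.0
        factor = start_factor + (1.0 - start_factor) * it / warmup_iters
    else:
        factor = gamma ** sum(it >= m for m in milestones)
    return base_lr * factor

for it in (0, 500, 1000, 59999, 60000, 72000, 79999):   # schedule_80k runs for 80k iterations
    print(it, lr_at(it))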
because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +test_dataloader = dict( + dataset=dict( + type='DarkZurichDataset', + data_root='data/dark_zurich/', + data_prefix=dict( + img_path='rgb_anon/val/night/GOPR0356', + seg_map_path='gt/val/night/GOPR0356'), + pipeline=test_pipeline)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024_night-driving-1920x1080.py b/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024_night-driving-1920x1080.py new file mode 100644 index 0000000000..b912589131 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024_night-driving-1920x1080.py @@ -0,0 +1,25 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] + +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(1920, 1080), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +test_dataloader = dict( + dataset=dict( + type='NightDrivingDataset', + data_root='data/NighttimeDrivingTest/', + data_prefix=dict( + img_path='leftImg8bit/test/night', + seg_map_path='gtCoarse_daytime_trainvaltest/test/night'), + pipeline=test_pipeline)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-769x769.py b/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..6baa31baed --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..6ea27de906 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-512x1024_dark-zurich-1920x1080.py b/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-512x1024_dark-zurich-1920x1080.py new file mode 100644 index 0000000000..200679ffdf --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-512x1024_dark-zurich-1920x1080.py @@ -0,0 +1,25 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] + +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', 
scale=(1920, 1080), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +test_dataloader = dict( + dataset=dict( + type='DarkZurichDataset', + data_root='data/dark_zurich/', + data_prefix=dict( + img_path='rgb_anon/val/night/GOPR0356', + seg_map_path='gt/val/night/GOPR0356'), + pipeline=test_pipeline)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-512x1024_night-driving-1920x1080.py b/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-512x1024_night-driving-1920x1080.py new file mode 100644 index 0000000000..517381375f --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-512x1024_night-driving-1920x1080.py @@ -0,0 +1,25 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] + +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(1920, 1080), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +test_dataloader = dict( + dataset=dict( + type='NightDrivingDataset', + data_root='data/NighttimeDrivingTest/', + data_prefix=dict( + img_path='leftImg8bit/test/night', + seg_map_path='gtCoarse_daytime_trainvaltest/test/night'), + pipeline=test_pipeline)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-769x769.py b/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..d43d30a0b6 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-160k_ade20k-512x512.py b/configs/pspnet/pspnet_r50-d8_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..3d9164f2e1 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-160k_coco-stuff164k-512x512.py b/configs/pspnet/pspnet_r50-d8_4xb4-160k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..6185c2efee --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-160k_coco-stuff164k-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + 
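The 769x769 configs above evaluate with test_cfg mode='slide': the image is tiled into 769x769 windows every 513 pixels and the per-window logits are stitched back together. A small sketch that approximates how those windows are laid out, with the last window clamped to the image border:

# Hedged sketch of the sliding-window tiling implied by crop_size=(769, 769), stride=(513, 513).
def slide_windows(h, w, crop=(769, 769), stride=(513, 513)):
    """Return (y1, x1, y2, x2) windows covering an h x w image."""
    ys = list(range(0, max(h - crop[0], 0) + 1, stride[0]))
    xs = list(range(0, max(w - crop[1], 0) + 1, stride[1]))
    if ys[-1] + crop[0] < h:
        ys.append(h - crop[0])      # add a final window flush with the bottom border
    if xs[-1] + crop[1] < w:
        xs.append(w - crop[1])      # add a final window flush with the right border
    return [(y, x, y + crop[0], x + crop[1]) for y in ys for x in xs]

# A Cityscapes-sized 1024x2048 input yields a 2 x 4 grid of overlapping crops.
print(slide_windows(1024, 2048))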
data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=171), + auxiliary_head=dict(num_classes=171)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-20k_coco-stuff10k-512x512.py b/configs/pspnet/pspnet_r50-d8_4xb4-20k_coco-stuff10k-512x512.py new file mode 100644 index 0000000000..8c1ba2ddf0 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-20k_coco-stuff10k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/coco-stuff10k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=171), + auxiliary_head=dict(num_classes=171)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-20k_voc12aug-512x512.py b/configs/pspnet/pspnet_r50-d8_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..0f60819313 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-320k_coco-stuff164k-512x512.py b/configs/pspnet/pspnet_r50-d8_4xb4-320k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..2a9ce4c4f1 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-320k_coco-stuff164k-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_320k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=171), + auxiliary_head=dict(num_classes=171)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-40k_coco-stuff10k-512x512.py b/configs/pspnet/pspnet_r50-d8_4xb4-40k_coco-stuff10k-512x512.py new file mode 100644 index 0000000000..fae57b0dbc --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-40k_coco-stuff10k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/coco-stuff10k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=171), + auxiliary_head=dict(num_classes=171)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-40k_pascal-context-480x480.py b/configs/pspnet/pspnet_r50-d8_4xb4-40k_pascal-context-480x480.py new file mode 100644 index 0000000000..08a214448c --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-40k_pascal-context-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = 
dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-40k_pascal-context-59-480x480.py b/configs/pspnet/pspnet_r50-d8_4xb4-40k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..b654495732 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-40k_pascal-context-59-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-40k_voc12aug-512x512.py b/configs/pspnet/pspnet_r50-d8_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..c4a4611530 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-80k_ade20k-512x512.py b/configs/pspnet/pspnet_r50-d8_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..bb12aed85c --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-80k_coco-stuff164k-512x512.py b/configs/pspnet/pspnet_r50-d8_4xb4-80k_coco-stuff164k-512x512.py new file mode 100644 index 0000000000..954a653456 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-80k_coco-stuff164k-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=171), + auxiliary_head=dict(num_classes=171)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-80k_isaid-896x896.py b/configs/pspnet/pspnet_r50-d8_4xb4-80k_isaid-896x896.py new file mode 100644 index 0000000000..63165b608e --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-80k_isaid-896x896.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/isaid.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (896, 896) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=16), + auxiliary_head=dict(num_classes=16)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-80k_loveda-512x512.py 
b/configs/pspnet/pspnet_r50-d8_4xb4-80k_loveda-512x512.py new file mode 100644 index 0000000000..920729d3be --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-80k_loveda-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/loveda.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=7), + auxiliary_head=dict(num_classes=7)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-80k_pascal-context-480x480.py b/configs/pspnet/pspnet_r50-d8_4xb4-80k_pascal-context-480x480.py new file mode 100644 index 0000000000..a7d82478ce --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-80k_pascal-context-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-80k_pascal-context-59-480x480.py b/configs/pspnet/pspnet_r50-d8_4xb4-80k_pascal-context-59-480x480.py new file mode 100644 index 0000000000..b7abc1bdd3 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-80k_pascal-context-59-480x480.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (480, 480) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-80k_potsdam-512x512.py b/configs/pspnet/pspnet_r50-d8_4xb4-80k_potsdam-512x512.py new file mode 100644 index 0000000000..afb3977ad2 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-80k_potsdam-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/potsdam.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=6), + auxiliary_head=dict(num_classes=6)) diff --git a/configs/pspnet/pspnet_r50-d8_4xb4-80k_vaihingen-512x512.py b/configs/pspnet/pspnet_r50-d8_4xb4-80k_vaihingen-512x512.py new file mode 100644 index 0000000000..35322d2df0 --- /dev/null +++ b/configs/pspnet/pspnet_r50-d8_4xb4-80k_vaihingen-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/vaihingen.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=6), + 
auxiliary_head=dict(num_classes=6)) diff --git a/configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py b/configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 5deb5872b0..0000000000 --- a/configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py b/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 4e9972849d..0000000000 --- a/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py b/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index c34b66aaf8..0000000000 --- a/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) -test_cfg = dict(mode='whole') diff --git a/configs/pspnet/pspnet_r50-d8_512x512_20k_voc12aug.py b/configs/pspnet/pspnet_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index cd88154d5e..0000000000 --- a/configs/pspnet/pspnet_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/pspnet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py b/configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py deleted file mode 100644 index f0c20c12f6..0000000000 --- a/configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/pspnet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/pspnet/pspnet_r50-d8_512x512_80k_ade20k.py b/configs/pspnet/pspnet_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 6922cc6d1f..0000000000 --- a/configs/pspnet/pspnet_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) -test_cfg = dict(mode='whole') diff --git a/configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py b/configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index e1026e0065..0000000000 --- a/configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/pspnet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - 
auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/pspnet/pspnet_r50-d8_769x769_80k_cityscapes.py b/configs/pspnet/pspnet_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index c1215c5c4a..0000000000 --- a/configs/pspnet/pspnet_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/pspnet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/pspnet/pspnet_r50b-d32_4xb2-80k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r50b-d32_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..64e55090ac --- /dev/null +++ b/configs/pspnet/pspnet_r50b-d32_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='torchvision://resnet50', + backbone=dict(type='ResNet', dilations=(1, 1, 2, 4), strides=(1, 2, 2, 2))) diff --git a/configs/pspnet/pspnet_r50b-d8_4xb2-80k_cityscapes-512x1024.py b/configs/pspnet/pspnet_r50b-d8_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..7dd64b332f --- /dev/null +++ b/configs/pspnet/pspnet_r50b-d8_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/configs/pspnet/pspnet_r50b-d8_4xb2-80k_cityscapes-769x769.py b/configs/pspnet/pspnet_r50b-d8_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..3875c092fe --- /dev/null +++ b/configs/pspnet/pspnet_r50b-d8_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/configs/resnest/README.md b/configs/resnest/README.md new file mode 100644 index 0000000000..7f07d147b7 --- /dev/null +++ b/configs/resnest/README.md @@ -0,0 +1,54 @@ +# ResNeSt + +[ResNeSt: Split-Attention Networks](https://arxiv.org/abs/2004.08955) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +It is well known that featuremap attention and multi-path representation are important for visual recognition. In this paper, we present a modularized architecture, which applies the channel-wise attention on different network branches to leverage their success in capturing cross-feature interactions and learning diverse representations. Our design results in a simple and unified computation block, which can be parameterized using only a few variables. Our model, named ResNeSt, outperforms EfficientNet in accuracy and latency trade-off on image classification. In addition, ResNeSt has achieved superior transfer learning results on several public benchmarks serving as the backbone, and has been adopted by the winning entries of COCO-LVIS challenge. The source code for complete system and pretrained models are publicly available. + + + +
+ +
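Every ResNeSt config added in this diff follows the same pattern: it inherits an existing ResNet-101 based config and only swaps in the ResNeSt-101 backbone. A minimal sketch of that override, mirroring `resnest_s101-d8_fcn_4xb2-80k_cityscapes-512x1024.py` added later in this diff:

```python
# Inherit a ResNet-101 based config and replace the backbone with ResNeSt-101.
# This mirrors the FCN Cityscapes config added later in this diff.
_base_ = '../fcn/fcn_r101-d8_4xb2-80k_cityscapes-512x1024.py'
model = dict(
    pretrained='open-mmlab://resnest101',
    backbone=dict(
        type='ResNeSt',
        stem_channels=128,
        radix=2,  # split-attention radix
        reduction_factor=4,
        avg_down_stride=True))
```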
+ +## Citation + +```bibtex +@article{zhang2020resnest, +title={ResNeSt: Split-Attention Networks}, +author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander}, +journal={arXiv preprint arXiv:2004.08955}, +year={2020} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | S-101-D8 | 512x1024 | 80000 | 11.4 | 2.39 | 77.56 | 78.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/resnest/resnest_s101-d8_fcn_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x1024_80k_cityscapes/fcn_s101-d8_512x1024_80k_cityscapes_20200807_140631-f8d155b3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x1024_80k_cityscapes/fcn_s101-d8_512x1024_80k_cityscapes-20200807_140631.log.json) | +| PSPNet | S-101-D8 | 512x1024 | 80000 | 11.8 | 2.52 | 78.57 | 79.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/resnest/resnest_s101-d8_pspnet_4xb2-80k_cityscapes512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x1024_80k_cityscapes/pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x1024_80k_cityscapes/pspnet_s101-d8_512x1024_80k_cityscapes-20200807_140631.log.json) | +| DeepLabV3 | S-101-D8 | 512x1024 | 80000 | 11.9 | 1.88 | 79.67 | 80.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/resnest/resnest_s101-d8_deeplabv3_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes/deeplabv3_s101-d8_512x1024_80k_cityscapes_20200807_144429-b73c4270.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes/deeplabv3_s101-d8_512x1024_80k_cityscapes-20200807_144429.log.json) | +| DeepLabV3+ | S-101-D8 | 512x1024 | 80000 | 13.2 | 2.36 | 79.62 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/resnest/resnest_s101-d8_deeplabv3plus_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes/deeplabv3plus_s101-d8_512x1024_80k_cityscapes_20200807_144429-1239eb43.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes/deeplabv3plus_s101-d8_512x1024_80k_cityscapes-20200807_144429.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | 
download | +| ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | S-101-D8 | 512x512 | 160000 | 14.2 | 12.86 | 45.62 | 46.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/resnest/resnest_s101-d8_fcn_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x512_160k_ade20k/fcn_s101-d8_512x512_160k_ade20k_20200807_145416-d3160329.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x512_160k_ade20k/fcn_s101-d8_512x512_160k_ade20k-20200807_145416.log.json) | +| PSPNet | S-101-D8 | 512x512 | 160000 | 14.2 | 13.02 | 45.44 | 46.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/resnest/resnest_s101-d8_pspnet_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x512_160k_ade20k/pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x512_160k_ade20k/pspnet_s101-d8_512x512_160k_ade20k-20200807_145416.log.json) | +| DeepLabV3 | S-101-D8 | 512x512 | 160000 | 14.6 | 9.28 | 45.71 | 46.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/resnest/resnest_s101-d8_deeplabv3_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x512_160k_ade20k/deeplabv3_s101-d8_512x512_160k_ade20k_20200807_144503-17ecabe5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x512_160k_ade20k/deeplabv3_s101-d8_512x512_160k_ade20k-20200807_144503.log.json) | +| DeepLabV3+ | S-101-D8 | 512x512 | 160000 | 16.2 | 11.96 | 46.47 | 47.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/resnest/resnest_s101-d8_deeplabv3plus_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k/deeplabv3plus_s101-d8_512x512_160k_ade20k_20200807_144503-27b26226.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k/deeplabv3plus_s101-d8_512x512_160k_ade20k-20200807_144503.log.json) | diff --git a/configs/resnest/resnest.yml b/configs/resnest/resnest.yml new file mode 100644 index 0000000000..ab897e3bd5 --- /dev/null +++ b/configs/resnest/resnest.yml @@ -0,0 +1,177 @@ +Models: +- Name: resnest_s101-d8_fcn_4xb2-80k_cityscapes-512x1024 + In Collection: FCN + Metadata: + backbone: S-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 418.41 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 11.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.56 + mIoU(ms+flip): 78.98 + Config: configs/resnest/resnest_s101-d8_fcn_4xb2-80k_cityscapes-512x1024.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x1024_80k_cityscapes/fcn_s101-d8_512x1024_80k_cityscapes_20200807_140631-f8d155b3.pth +- Name: resnest_s101-d8_pspnet_4xb2-80k_cityscapes512x1024 + In Collection: PSPNet + Metadata: + backbone: S-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 396.83 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 11.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.57 + mIoU(ms+flip): 79.19 + Config: configs/resnest/resnest_s101-d8_pspnet_4xb2-80k_cityscapes512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x1024_80k_cityscapes/pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth +- Name: resnest_s101-d8_deeplabv3_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3 + Metadata: + backbone: S-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 531.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 11.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.67 + mIoU(ms+flip): 80.51 + Config: configs/resnest/resnest_s101-d8_deeplabv3_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes/deeplabv3_s101-d8_512x1024_80k_cityscapes_20200807_144429-b73c4270.pth +- Name: resnest_s101-d8_deeplabv3plus_4xb2-80k_cityscapes-512x1024 + In Collection: DeepLabV3+ + Metadata: + backbone: S-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 423.73 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 13.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.62 + mIoU(ms+flip): 80.27 + Config: configs/resnest/resnest_s101-d8_deeplabv3plus_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes/deeplabv3plus_s101-d8_512x1024_80k_cityscapes_20200807_144429-1239eb43.pth +- Name: resnest_s101-d8_fcn_4xb4-160k_ade20k-512x512 + In Collection: FCN + Metadata: + backbone: S-101-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 77.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 14.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.62 + mIoU(ms+flip): 46.16 + Config: configs/resnest/resnest_s101-d8_fcn_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x512_160k_ade20k/fcn_s101-d8_512x512_160k_ade20k_20200807_145416-d3160329.pth +- Name: resnest_s101-d8_pspnet_4xb4-160k_ade20k-512x512 + In Collection: PSPNet + Metadata: + backbone: S-101-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 76.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 14.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.44 + mIoU(ms+flip): 46.28 + Config: configs/resnest/resnest_s101-d8_pspnet_4xb4-160k_ade20k-512x512.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x512_160k_ade20k/pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth +- Name: resnest_s101-d8_deeplabv3_4xb4-160k_ade20k-512x512 + In Collection: DeepLabV3 + Metadata: + backbone: S-101-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 107.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 14.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.71 + mIoU(ms+flip): 46.59 + Config: configs/resnest/resnest_s101-d8_deeplabv3_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x512_160k_ade20k/deeplabv3_s101-d8_512x512_160k_ade20k_20200807_144503-17ecabe5.pth +- Name: resnest_s101-d8_deeplabv3plus_4xb4-160k_ade20k-512x512 + In Collection: DeepLabV3+ + Metadata: + backbone: S-101-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 83.61 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 16.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.47 + mIoU(ms+flip): 47.27 + Config: configs/resnest/resnest_s101-d8_deeplabv3plus_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k/deeplabv3plus_s101-d8_512x512_160k_ade20k_20200807_144503-27b26226.pth diff --git a/configs/resnest/resnest_s101-d8_deeplabv3_4xb2-80k_cityscapes-512x1024.py b/configs/resnest/resnest_s101-d8_deeplabv3_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..7ece894b56 --- /dev/null +++ b/configs/resnest/resnest_s101-d8_deeplabv3_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = '../deeplabv3/deeplabv3_r101-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/configs/resnest/resnest_s101-d8_deeplabv3_4xb4-160k_ade20k-512x512.py b/configs/resnest/resnest_s101-d8_deeplabv3_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..c2852301fc --- /dev/null +++ b/configs/resnest/resnest_s101-d8_deeplabv3_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,9 @@ +_base_ = '../deeplabv3/deeplabv3_r101-d8_4xb4-160k_ade20k-512x512.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/configs/resnest/resnest_s101-d8_deeplabv3plus_4xb2-80k_cityscapes-512x1024.py b/configs/resnest/resnest_s101-d8_deeplabv3plus_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..5c43a9547d --- /dev/null +++ b/configs/resnest/resnest_s101-d8_deeplabv3plus_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024.py' # noqa +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/configs/resnest/resnest_s101-d8_deeplabv3plus_4xb4-160k_ade20k-512x512.py b/configs/resnest/resnest_s101-d8_deeplabv3plus_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..ce39d3709f --- /dev/null +++ 
b/configs/resnest/resnest_s101-d8_deeplabv3plus_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,9 @@ +_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_4xb4-160k_ade20k-512x512.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/configs/resnest/resnest_s101-d8_fcn_4xb2-80k_cityscapes-512x1024.py b/configs/resnest/resnest_s101-d8_fcn_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..fc333e4ff0 --- /dev/null +++ b/configs/resnest/resnest_s101-d8_fcn_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,9 @@ +_base_ = '../fcn/fcn_r101-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/configs/resnest/resnest_s101-d8_fcn_4xb4-160k_ade20k-512x512.py b/configs/resnest/resnest_s101-d8_fcn_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..af12733444 --- /dev/null +++ b/configs/resnest/resnest_s101-d8_fcn_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,9 @@ +_base_ = '../fcn/fcn_r101-d8_4xb4-160k_ade20k-512x512.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/configs/resnest/resnest_s101-d8_pspnet_4xb2-80k_cityscapes512x1024.py b/configs/resnest/resnest_s101-d8_pspnet_4xb2-80k_cityscapes512x1024.py new file mode 100644 index 0000000000..3aab524449 --- /dev/null +++ b/configs/resnest/resnest_s101-d8_pspnet_4xb2-80k_cityscapes512x1024.py @@ -0,0 +1,9 @@ +_base_ = '../pspnet/pspnet_r101-d8_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/configs/resnest/resnest_s101-d8_pspnet_4xb4-160k_ade20k-512x512.py b/configs/resnest/resnest_s101-d8_pspnet_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..66e6639c18 --- /dev/null +++ b/configs/resnest/resnest_s101-d8_pspnet_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,9 @@ +_base_ = '../pspnet/pspnet_r101-d8_4xb4-160k_ade20k-512x512.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/configs/segformer/README.md b/configs/segformer/README.md new file mode 100644 index 0000000000..be64099da3 --- /dev/null +++ b/configs/segformer/README.md @@ -0,0 +1,101 @@ +# SegFormer + +[SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +We present SegFormer, a simple, efficient yet powerful semantic segmentation framework which unifies Transformers with lightweight multilayer perception (MLP) decoders. SegFormer has two appealing features: 1) SegFormer comprises a novel hierarchically structured Transformer encoder which outputs multiscale features. It does not need positional encoding, thereby avoiding the interpolation of positional codes which leads to decreased performance when the testing resolution differs from training. 2) SegFormer avoids complex decoders. 
The proposed MLP decoder aggregates information from different layers, and thus combining both local attention and global attention to render powerful representations. We show that this simple and lightweight design is the key to efficient segmentation on Transformers. We scale our approach up to obtain a series of models from SegFormer-B0 to SegFormer-B5, reaching significantly better performance and efficiency than previous counterparts. For example, SegFormer-B4 achieves 50.3% mIoU on ADE20K with 64M parameters, being 5x smaller and 2.2% better than the previous best method. Our best model, SegFormer-B5, achieves 84.0% mIoU on Cityscapes validation set and shows excellent zero-shot robustness on Cityscapes-C. Code will be released at: [this http URL](https://github.com/NVlabs/SegFormer). + + + +
+ +
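The SegFormer variants below (B0 through B5) share one base config; the larger variants only override the backbone size, its pretrained checkpoint and the decode head input channels. A minimal sketch of such an override, mirroring `segformer_mit-b2_8xb2-160k_ade20k-512x512.py` added later in this diff:

```python
# MiT-B2 on ADE20K: inherit the B0 config and enlarge the backbone.
_base_ = ['./segformer_mit-b0_8xb2-160k_ade20k-512x512.py']
model = dict(
    pretrained='pretrain/mit_b2.pth',
    backbone=dict(
        embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 4, 6, 3]),
    decode_head=dict(in_channels=[64, 128, 320, 512]))
```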
+ +## Citation + +```bibtex +@article{xie2021segformer, + title={SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers}, + author={Xie, Enze and Wang, Wenhai and Yu, Zhiding and Anandkumar, Anima and Alvarez, Jose M and Luo, Ping}, + journal={arXiv preprint arXiv:2105.15203}, + year={2021} +} +``` + +## Usage + +To use other repositories' pre-trained models, it is necessary to convert keys. + +We provide a script [`mit2mmseg.py`](../../tools/model_converters/mit2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/NVlabs/SegFormer) to MMSegmentation style. + +```shell +python tools/model_converters/mit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Segformer | MIT-B0 | 512x512 | 160000 | 2.1 | 51.32 | 37.41 | 38.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b0_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_512x512_160k_ade20k/segformer_mit-b0_512x512_160k_ade20k_20210726_101530-8ffa8fda.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_512x512_160k_ade20k/segformer_mit-b0_512x512_160k_ade20k_20210726_101530.log.json) | +| Segformer | MIT-B1 | 512x512 | 160000 | 2.6 | 47.66 | 40.97 | 42.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b1_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_512x512_160k_ade20k/segformer_mit-b1_512x512_160k_ade20k_20210726_112106-d70e859d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_512x512_160k_ade20k/segformer_mit-b1_512x512_160k_ade20k_20210726_112106.log.json) | +| Segformer | MIT-B2 | 512x512 | 160000 | 3.6 | 30.88 | 45.58 | 47.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b2_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_512x512_160k_ade20k/segformer_mit-b2_512x512_160k_ade20k_20210726_112103-cbd414ac.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_512x512_160k_ade20k/segformer_mit-b2_512x512_160k_ade20k_20210726_112103.log.json) | +| Segformer | MIT-B3 | 512x512 | 160000 | 4.8 | 22.11 | 47.82 | 48.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b3_8xb2-160k_ade20k-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_512x512_160k_ade20k/segformer_mit-b3_512x512_160k_ade20k_20210726_081410-962b98d2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_512x512_160k_ade20k/segformer_mit-b3_512x512_160k_ade20k_20210726_081410.log.json) | +| Segformer | MIT-B4 | 512x512 | 160000 | 6.1 | 15.45 | 48.46 | 49.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b4_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_512x512_160k_ade20k/segformer_mit-b4_512x512_160k_ade20k_20210728_183055-7f509d7d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_512x512_160k_ade20k/segformer_mit-b4_512x512_160k_ade20k_20210728_183055.log.json) | +| Segformer | MIT-B5 | 512x512 | 160000 | 7.2 | 11.89 | 49.13 | 50.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b5_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_512x512_160k_ade20k/segformer_mit-b5_512x512_160k_ade20k_20210726_145235-94cedf59.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_512x512_160k_ade20k/segformer_mit-b5_512x512_160k_ade20k_20210726_145235.log.json) | +| Segformer | MIT-B5 | 640x640 | 160000 | 11.5 | 11.30 | 49.62 | 50.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b5_8xb2-160k_ade20k-640x640.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_640x640_160k_ade20k/segformer_mit-b5_640x640_160k_ade20k_20210801_121243-41d2845b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_640x640_160k_ade20k/segformer_mit-b5_640x640_160k_ade20k_20210801_121243.log.json) | + +Evaluation with AlignedResize: + +| Method | Backbone | Crop Size | Lr schd | mIoU | mIoU(ms+flip) | +| --------- | -------- | --------- | ------: | ----: | ------------- | +| Segformer | MIT-B0 | 512x512 | 160000 | 38.1 | 38.57 | +| Segformer | MIT-B1 | 512x512 | 160000 | 41.64 | 42.76 | +| Segformer | MIT-B2 | 512x512 | 160000 | 46.53 | 47.49 | +| Segformer | MIT-B3 | 512x512 | 160000 | 48.46 | 49.14 | +| Segformer | MIT-B4 | 512x512 | 160000 | 49.34 | 50.29 | +| Segformer | MIT-B5 | 512x512 | 160000 | 50.08 | 50.72 | +| Segformer | MIT-B5 | 640x640 | 160000 | 50.58 | 50.8 | + +We replace `AlignedResize` in the original implementation with `Resize + ResizeToMultiple`. If you want to test by +using `AlignedResize`, you can change the dataset pipeline like this: + +```python +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 512), keep_ratio=True), + # resize image to multiple of 32, improve SegFormer by 0.5-1.0 mIoU. + dict(type='ResizeToMultiple', size_divisor=32), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='PackSegInputs') +] +``` + +### Cityscapes + +The lower fps result is caused by the sliding window inference scheme (window size: 1024x1024). 
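The sliding-window behaviour comes from the model's `test_cfg`. A minimal sketch of the override used by the Cityscapes configs in this diff (1024x1024 windows with a 768-pixel stride):

```python
# Sliding-window inference, as set in
# segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py later in this diff.
model = dict(
    test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768)))
```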
+ +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Segformer | MIT-B0 | 1024x1024 | 160000 | 3.64 | 4.74 | 76.54 | 78.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes/segformer_mit-b0_8x1_1024x1024_160k_cityscapes_20211208_101857-e7f88502.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes/segformer_mit-b0_8x1_1024x1024_160k_cityscapes_20211208_101857.log.json) | +| Segformer | MIT-B1 | 1024x1024 | 160000 | 4.49 | 4.3 | 78.56 | 79.73 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b1_8xb1-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes/segformer_mit-b1_8x1_1024x1024_160k_cityscapes_20211208_064213-655c7b3f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes/segformer_mit-b1_8x1_1024x1024_160k_cityscapes_20211208_064213.log.json) | +| Segformer | MIT-B2 | 1024x1024 | 160000 | 7.42 | 3.36 | 81.08 | 82.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b2_8xb1-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes/segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205-6096669a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes/segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205.log.json) | +| Segformer | MIT-B3 | 1024x1024 | 160000 | 10.86 | 2.53 | 81.94 | 83.14 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b3_8xb1-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes/segformer_mit-b3_8x1_1024x1024_160k_cityscapes_20211206_224823-a8f8a177.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes/segformer_mit-b3_8x1_1024x1024_160k_cityscapes_20211206_224823.log.json) | +| Segformer | MIT-B4 | 1024x1024 | 160000 | 15.07 | 1.88 | 81.89 | 83.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b4_8xb1-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes/segformer_mit-b4_8x1_1024x1024_160k_cityscapes_20211207_080709-07f6c333.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes/segformer_mit-b4_8x1_1024x1024_160k_cityscapes_20211207_080709.log.json) | +| Segformer | MIT-B5 | 1024x1024 | 160000 | 18.00 | 1.39 | 82.25 | 83.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segformer/segformer_mit-b5_8xb1-160k_cityscapes-1024x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes/segformer_mit-b5_8x1_1024x1024_160k_cityscapes_20211206_072934-87a052ec.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes/segformer_mit-b5_8x1_1024x1024_160k_cityscapes_20211206_072934.log.json) | diff --git a/configs/segformer/segformer.yml b/configs/segformer/segformer.yml new file mode 100644 index 0000000000..4a3818e16e --- /dev/null +++ b/configs/segformer/segformer.yml @@ -0,0 +1,303 @@ +Collections: +- Name: Segformer + Metadata: + Training Data: + - ADE20K + - Cityscapes + Paper: + URL: https://arxiv.org/abs/2105.15203 + Title: 'SegFormer: Simple and Efficient Design for Semantic Segmentation with + Transformers' + README: configs/segformer/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/mit.py#L246 + Version: v0.17.0 + Converted From: + Code: https://github.com/NVlabs/SegFormer +Models: +- Name: segformer_mit-b0_8xb2-160k_ade20k-512x512 + In Collection: Segformer + Metadata: + backbone: MIT-B0 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 19.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 37.41 + mIoU(ms+flip): 38.34 + Config: configs/segformer/segformer_mit-b0_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_512x512_160k_ade20k/segformer_mit-b0_512x512_160k_ade20k_20210726_101530-8ffa8fda.pth +- Name: segformer_mit-b1_8xb2-160k_ade20k-512x512 + In Collection: Segformer + Metadata: + backbone: MIT-B1 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 20.98 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 40.97 + mIoU(ms+flip): 42.54 + Config: configs/segformer/segformer_mit-b1_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_512x512_160k_ade20k/segformer_mit-b1_512x512_160k_ade20k_20210726_112106-d70e859d.pth +- Name: segformer_mit-b2_8xb2-160k_ade20k-512x512 + In Collection: Segformer + Metadata: + backbone: MIT-B2 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 32.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 3.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.58 + mIoU(ms+flip): 47.03 + Config: configs/segformer/segformer_mit-b2_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_512x512_160k_ade20k/segformer_mit-b2_512x512_160k_ade20k_20210726_112103-cbd414ac.pth +- Name: segformer_mit-b3_8xb2-160k_ade20k-512x512 + In Collection: Segformer + Metadata: + 
backbone: MIT-B3 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 45.23 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.82 + mIoU(ms+flip): 48.81 + Config: configs/segformer/segformer_mit-b3_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_512x512_160k_ade20k/segformer_mit-b3_512x512_160k_ade20k_20210726_081410-962b98d2.pth +- Name: segformer_mit-b4_8xb2-160k_ade20k-512x512 + In Collection: Segformer + Metadata: + backbone: MIT-B4 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 64.72 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.46 + mIoU(ms+flip): 49.76 + Config: configs/segformer/segformer_mit-b4_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_512x512_160k_ade20k/segformer_mit-b4_512x512_160k_ade20k_20210728_183055-7f509d7d.pth +- Name: segformer_mit-b5_8xb2-160k_ade20k-512x512 + In Collection: Segformer + Metadata: + backbone: MIT-B5 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 84.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 49.13 + mIoU(ms+flip): 50.22 + Config: configs/segformer/segformer_mit-b5_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_512x512_160k_ade20k/segformer_mit-b5_512x512_160k_ade20k_20210726_145235-94cedf59.pth +- Name: segformer_mit-b5_8xb2-160k_ade20k-640x640 + In Collection: Segformer + Metadata: + backbone: MIT-B5 + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 88.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640,640) + Training Memory (GB): 11.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 49.62 + mIoU(ms+flip): 50.36 + Config: configs/segformer/segformer_mit-b5_8xb2-160k_ade20k-640x640.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_640x640_160k_ade20k/segformer_mit-b5_640x640_160k_ade20k_20210801_121243-41d2845b.pth +- Name: segformer_mit-b0_8xb1-160k_cityscapes-1024x1024 + In Collection: Segformer + Metadata: + backbone: MIT-B0 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 210.97 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 3.64 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.54 + mIoU(ms+flip): 78.22 + Config: configs/segformer/segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes/segformer_mit-b0_8x1_1024x1024_160k_cityscapes_20211208_101857-e7f88502.pth +- Name: segformer_mit-b1_8xb1-160k_cityscapes-1024x1024 + In Collection: Segformer + Metadata: + backbone: MIT-B1 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 232.56 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + 
resolution: (1024,1024) + Training Memory (GB): 4.49 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.56 + mIoU(ms+flip): 79.73 + Config: configs/segformer/segformer_mit-b1_8xb1-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes/segformer_mit-b1_8x1_1024x1024_160k_cityscapes_20211208_064213-655c7b3f.pth +- Name: segformer_mit-b2_8xb1-160k_cityscapes-1024x1024 + In Collection: Segformer + Metadata: + backbone: MIT-B2 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 297.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 7.42 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 81.08 + mIoU(ms+flip): 82.18 + Config: configs/segformer/segformer_mit-b2_8xb1-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes/segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205-6096669a.pth +- Name: segformer_mit-b3_8xb1-160k_cityscapes-1024x1024 + In Collection: Segformer + Metadata: + backbone: MIT-B3 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 395.26 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 10.86 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 81.94 + mIoU(ms+flip): 83.14 + Config: configs/segformer/segformer_mit-b3_8xb1-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes/segformer_mit-b3_8x1_1024x1024_160k_cityscapes_20211206_224823-a8f8a177.pth +- Name: segformer_mit-b4_8xb1-160k_cityscapes-1024x1024 + In Collection: Segformer + Metadata: + backbone: MIT-B4 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 531.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 15.07 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 81.89 + mIoU(ms+flip): 83.38 + Config: configs/segformer/segformer_mit-b4_8xb1-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes/segformer_mit-b4_8x1_1024x1024_160k_cityscapes_20211207_080709-07f6c333.pth +- Name: segformer_mit-b5_8xb1-160k_cityscapes-1024x1024 + In Collection: Segformer + Metadata: + backbone: MIT-B5 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 719.42 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 18.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 82.25 + mIoU(ms+flip): 83.48 + Config: configs/segformer/segformer_mit-b5_8xb1-160k_cityscapes-1024x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes/segformer_mit-b5_8x1_1024x1024_160k_cityscapes_20211206_072934-87a052ec.pth diff --git a/configs/segformer/segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py b/configs/segformer/segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py new file mode 100644 index 0000000000..9f6bd1e81d --- /dev/null +++ 
b/configs/segformer/segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py @@ -0,0 +1,41 @@ +_base_ = [ + '../_base_/models/segformer_mit-b0.py', + '../_base_/datasets/cityscapes_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (1024, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b0.pth')), + test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768))) + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01), + paramwise_cfg=dict( + custom_keys={ + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'head': dict(lr_mult=10.) + })) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + eta_min=0.0, + power=1.0, + begin=1500, + end=160000, + by_epoch=False, + ) +] + +train_dataloader = dict(batch_size=1, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/segformer/segformer_mit-b0_8xb2-160k_ade20k-512x512.py b/configs/segformer/segformer_mit-b0_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..607e2848ee --- /dev/null +++ b/configs/segformer/segformer_mit-b0_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/segformer_mit-b0.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='pretrain/mit_b0.pth', + decode_head=dict(num_classes=150)) + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01), + paramwise_cfg=dict( + custom_keys={ + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'head': dict(lr_mult=10.) 
+ })) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + eta_min=0.0, + power=1.0, + begin=1500, + end=160000, + by_epoch=False, + ) +] +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/segformer/segformer_mit-b1_8xb1-160k_cityscapes-1024x1024.py b/configs/segformer/segformer_mit-b1_8xb1-160k_cityscapes-1024x1024.py new file mode 100644 index 0000000000..d6977d4678 --- /dev/null +++ b/configs/segformer/segformer_mit-b1_8xb1-160k_cityscapes-1024x1024.py @@ -0,0 +1,7 @@ +_base_ = ['./segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py'] + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b1.pth'), + embed_dims=64), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/segformer/segformer_mit-b1_8xb2-160k_ade20k-512x512.py b/configs/segformer/segformer_mit-b1_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..3bedca9891 --- /dev/null +++ b/configs/segformer/segformer_mit-b1_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8xb2-160k_ade20k-512x512.py'] + +# model settings +model = dict( + pretrained='pretrain/mit_b1.pth', + backbone=dict( + embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[2, 2, 2, 2]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/segformer/segformer_mit-b2_8xb1-160k_cityscapes-1024x1024.py b/configs/segformer/segformer_mit-b2_8xb1-160k_cityscapes-1024x1024.py new file mode 100644 index 0000000000..94f5ad33a5 --- /dev/null +++ b/configs/segformer/segformer_mit-b2_8xb1-160k_cityscapes-1024x1024.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py'] + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b2.pth'), + embed_dims=64, + num_layers=[3, 4, 6, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/segformer/segformer_mit-b2_8xb2-160k_ade20k-512x512.py b/configs/segformer/segformer_mit-b2_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..2c3bb101d3 --- /dev/null +++ b/configs/segformer/segformer_mit-b2_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8xb2-160k_ade20k-512x512.py'] + +# model settings +model = dict( + pretrained='pretrain/mit_b2.pth', + backbone=dict( + embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 4, 6, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/segformer/segformer_mit-b3_8xb1-160k_cityscapes-1024x1024.py b/configs/segformer/segformer_mit-b3_8xb1-160k_cityscapes-1024x1024.py new file mode 100644 index 0000000000..87ec0a599d --- /dev/null +++ b/configs/segformer/segformer_mit-b3_8xb1-160k_cityscapes-1024x1024.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py'] + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b3.pth'), + embed_dims=64, + num_layers=[3, 4, 18, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/segformer/segformer_mit-b3_8xb2-160k_ade20k-512x512.py b/configs/segformer/segformer_mit-b3_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..31f5fc1c12 --- /dev/null +++ b/configs/segformer/segformer_mit-b3_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8xb2-160k_ade20k-512x512.py'] + +# model settings 
+model = dict( + pretrained='pretrain/mit_b3.pth', + backbone=dict( + embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 4, 18, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/segformer/segformer_mit-b4_8xb1-160k_cityscapes-1024x1024.py b/configs/segformer/segformer_mit-b4_8xb1-160k_cityscapes-1024x1024.py new file mode 100644 index 0000000000..e4f436b264 --- /dev/null +++ b/configs/segformer/segformer_mit-b4_8xb1-160k_cityscapes-1024x1024.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py'] + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b4.pth'), + embed_dims=64, + num_layers=[3, 8, 27, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/segformer/segformer_mit-b4_8xb2-160k_ade20k-512x512.py b/configs/segformer/segformer_mit-b4_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..0015e1623a --- /dev/null +++ b/configs/segformer/segformer_mit-b4_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8xb2-160k_ade20k-512x512.py'] + +# model settings +model = dict( + pretrained='pretrain/mit_b4.pth', + backbone=dict( + embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 8, 27, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/segformer/segformer_mit-b5_8xb1-160k_cityscapes-1024x1024.py b/configs/segformer/segformer_mit-b5_8xb1-160k_cityscapes-1024x1024.py new file mode 100644 index 0000000000..7fb2ea5b4e --- /dev/null +++ b/configs/segformer/segformer_mit-b5_8xb1-160k_cityscapes-1024x1024.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py'] + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b5.pth'), + embed_dims=64, + num_layers=[3, 6, 40, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/segformer/segformer_mit-b5_8xb2-160k_ade20k-512x512.py b/configs/segformer/segformer_mit-b5_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..09bb260223 --- /dev/null +++ b/configs/segformer/segformer_mit-b5_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8xb2-160k_ade20k-512x512.py'] + +# model settings +model = dict( + pretrained='pretrain/mit_b5.pth', + backbone=dict( + embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 6, 40, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/segformer/segformer_mit-b5_8xb2-160k_ade20k-640x640.py b/configs/segformer/segformer_mit-b5_8xb2-160k_ade20k-640x640.py new file mode 100644 index 0000000000..3bba3716ef --- /dev/null +++ b/configs/segformer/segformer_mit-b5_8xb2-160k_ade20k-640x640.py @@ -0,0 +1,37 @@ +_base_ = ['./segformer_mit-b0_8xb2-160k_ade20k-512x512.py'] + +# dataset settings +crop_size = (640, 640) +data_preprocessor = dict(size=crop_size) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict( + type='RandomResize', + scale=(2048, 640), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 640), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations', 
reduce_zero_label=True), + dict(type='PackSegInputs') +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=1, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# model settings +model = dict( + data_preprocessor=data_preprocessor, + pretrained='pretrain/mit_b5.pth', + backbone=dict( + embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 6, 40, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/segmenter/README.md b/configs/segmenter/README.md new file mode 100644 index 0000000000..984ef9f510 --- /dev/null +++ b/configs/segmenter/README.md @@ -0,0 +1,74 @@ +# Segmenter + +[Segmenter: Transformer for Semantic Segmentation](https://arxiv.org/abs/2105.05633) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Image segmentation is often ambiguous at the level of individual image patches and requires contextual information to reach label consensus. In this paper we introduce Segmenter, a transformer model for semantic segmentation. In contrast to convolution-based methods, our approach allows to model global context already at the first layer and throughout the network. We build on the recent Vision Transformer (ViT) and extend it to semantic segmentation. To do so, we rely on the output embeddings corresponding to image patches and obtain class labels from these embeddings with a point-wise linear decoder or a mask transformer decoder. We leverage models pre-trained for image classification and show that we can fine-tune them on moderate sized datasets available for semantic segmentation. The linear decoder allows to obtain excellent results already, but the performance can be further improved by a mask transformer generating class masks. We conduct an extensive ablation study to show the impact of the different parameters, in particular the performance is better for large models and small patch sizes. Segmenter attains excellent results for semantic segmentation. It outperforms the state of the art on both ADE20K and Pascal Context datasets and is competitive on Cityscapes. + + + +
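The configs added in this PR pair a ViT backbone with either a point-wise linear decoder (`FCNHead`) or the `SegmenterMaskTransformerHead` mask decoder. For orientation, below is a minimal inference sketch; it is not part of the shipped code, it assumes the MMSegmentation 1.x Python API (`mmseg.apis.init_model` / `inference_model`), and the checkpoint and image paths are placeholders.

```python
# Minimal sketch (assumptions noted above): single-image inference with one of
# the Segmenter configs added below. Real checkpoints are linked in the
# "Results and models" table at the end of this README.
from mmseg.apis import inference_model, init_model

config = 'configs/segmenter/segmenter_vit-t_mask_8xb1-160k_ade20k-512x512.py'
checkpoint = 'checkpoints/segmenter_vit-t_mask_ade20k.pth'  # placeholder path

model = init_model(config, checkpoint, device='cuda:0')
result = inference_model(model, 'demo/demo.png')  # returns a SegDataSample
print(result.pred_sem_seg.data.shape)  # (1, H, W) map of predicted class indices
```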
+ +```bibtex +@inproceedings{strudel2021segmenter, + title={Segmenter: Transformer for semantic segmentation}, + author={Strudel, Robin and Garcia, Ricardo and Laptev, Ivan and Schmid, Cordelia}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={7262--7272}, + year={2021} +} +``` + +## Usage + +We have provided pretrained models converted from [ViT-AugReg](https://github.com/rwightman/pytorch-image-models/blob/f55c22bebf9d8afc449d317a723231ef72e0d662/timm/models/vision_transformer.py#L54-L106). + +If you want to convert keys on your own to use the pre-trained ViT model from [Segmenter](https://github.com/rstrudel/segmenter), we also provide a script [`vitjax2mmseg.py`](../../tools/model_converters/vitjax2mmseg.py) in the tools directory to convert the key of models from [ViT-AugReg](https://github.com/rwightman/pytorch-image-models/blob/f55c22bebf9d8afc449d317a723231ef72e0d662/timm/models/vision_transformer.py#L54-L106) to MMSegmentation style. + +```shell +python tools/model_converters/vitjax2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +E.g. + +```shell +python tools/model_converters/vitjax2mmseg.py \ +Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz \ +pretrain/vit_tiny_p16_384.pth +``` + +This script convert model from `PRETRAIN_PATH` and store the converted model in `STORE_PATH`. + +In our default setting, pretrained models and their corresponding [ViT-AugReg](https://github.com/rwightman/pytorch-image-models/blob/f55c22bebf9d8afc449d317a723231ef72e0d662/timm/models/vision_transformer.py#L54-L106) models could be defined below: + +| pretrained models | original models | +| --------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| vit_tiny_p16_384.pth | ['vit_tiny_patch16_384'](https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz) | +| vit_small_p16_384.pth | ['vit_small_patch16_384'](https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz) | +| vit_base_p16_384.pth | ['vit_base_patch16_384'](https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz) | +| vit_large_p16_384.pth | ['vit_large_patch16_384'](https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz) | + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------------- | -------- | --------- | ------- | -------- | -------------- | ----- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Segmenter Mask | ViT-T_16 | 
512x512 | 160000 | 1.21 | 27.98 | 39.99 | 40.83 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segmenter/segmenter_vit-t_mask_8xb1-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k/segmenter_vit-t_mask_8x1_512x512_160k_ade20k_20220105_151706-ffcf7509.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k/segmenter_vit-t_mask_8x1_512x512_160k_ade20k_20220105_151706.log.json) | +| Segmenter Linear | ViT-S_16 | 512x512 | 160000 | 1.78 | 28.07 | 45.75 | 46.82 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segmenter/segmenter_vit-s_fcn_8xb1-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k/segmenter_vit-s_linear_8x1_512x512_160k_ade20k_20220105_151713-39658c46.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k/segmenter_vit-s_linear_8x1_512x512_160k_ade20k_20220105_151713.log.json) | +| Segmenter Mask | ViT-S_16 | 512x512 | 160000 | 2.03 | 24.80 | 46.19 | 47.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segmenter/segmenter_vit-s_mask_8xb1-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k/segmenter_vit-s_mask_8x1_512x512_160k_ade20k_20220105_151706-511bb103.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k/segmenter_vit-s_mask_8x1_512x512_160k_ade20k_20220105_151706.log.json) | +| Segmenter Mask | ViT-B_16 | 512x512 | 160000 | 4.20 | 13.20 | 49.60 | 51.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segmenter/segmenter_vit-b_mask_8xb1-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k/segmenter_vit-b_mask_8x1_512x512_160k_ade20k_20220105_151706-bc533b08.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k/segmenter_vit-b_mask_8x1_512x512_160k_ade20k_20220105_151706.log.json) | +| Segmenter Mask | ViT-L_16 | 640x640 | 160000 | 16.56 | 2.62 | 52.16 | 53.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/segmenter/segmenter_vit-l_mask_8xb1-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-l_mask_8x1_512x512_160k_ade20k/segmenter_vit-l_mask_8x1_512x512_160k_ade20k_20220105_162750-7ef345be.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-l_mask_8x1_512x512_160k_ade20k/segmenter_vit-l_mask_8x1_512x512_160k_ade20k_20220105_162750.log.json) | diff --git a/configs/segmenter/segmenter.yml b/configs/segmenter/segmenter.yml new file mode 100644 index 0000000000..1069f003b7 --- /dev/null +++ b/configs/segmenter/segmenter.yml @@ -0,0 +1,125 @@ +Collections: +- Name: Segmenter + Metadata: + Training Data: + - ADE20K + Paper: + URL: https://arxiv.org/abs/2105.05633 + Title: 'Segmenter: Transformer for Semantic Segmentation' + README: configs/segmenter/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.21.0/mmseg/models/decode_heads/segmenter_mask_head.py#L15 + Version: v0.21.0 + Converted From: + Code: 
https://github.com/rstrudel/segmenter +Models: +- Name: segmenter_vit-t_mask_8xb1-160k_ade20k-512x512 + In Collection: Segmenter + Metadata: + backbone: ViT-T_16 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 35.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.21 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 39.99 + mIoU(ms+flip): 40.83 + Config: configs/segmenter/segmenter_vit-t_mask_8xb1-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k/segmenter_vit-t_mask_8x1_512x512_160k_ade20k_20220105_151706-ffcf7509.pth +- Name: segmenter_vit-s_fcn_8xb1-160k_ade20k-512x512 + In Collection: Segmenter + Metadata: + backbone: ViT-S_16 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 35.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.78 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.75 + mIoU(ms+flip): 46.82 + Config: configs/segmenter/segmenter_vit-s_fcn_8xb1-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k/segmenter_vit-s_linear_8x1_512x512_160k_ade20k_20220105_151713-39658c46.pth +- Name: segmenter_vit-s_mask_8xb1-160k_ade20k-512x512 + In Collection: Segmenter + Metadata: + backbone: ViT-S_16 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 40.32 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.03 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.19 + mIoU(ms+flip): 47.85 + Config: configs/segmenter/segmenter_vit-s_mask_8xb1-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k/segmenter_vit-s_mask_8x1_512x512_160k_ade20k_20220105_151706-511bb103.pth +- Name: segmenter_vit-b_mask_8xb1-160k_ade20k-512x512 + In Collection: Segmenter + Metadata: + backbone: ViT-B_16 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 75.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 49.6 + mIoU(ms+flip): 51.07 + Config: configs/segmenter/segmenter_vit-b_mask_8xb1-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k/segmenter_vit-b_mask_8x1_512x512_160k_ade20k_20220105_151706-bc533b08.pth +- Name: segmenter_vit-l_mask_8xb1-160k_ade20k-512x512 + In Collection: Segmenter + Metadata: + backbone: ViT-L_16 + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 381.68 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640,640) + Training Memory (GB): 16.56 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 52.16 + mIoU(ms+flip): 53.65 + Config: configs/segmenter/segmenter_vit-l_mask_8xb1-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-l_mask_8x1_512x512_160k_ade20k/segmenter_vit-l_mask_8x1_512x512_160k_ade20k_20220105_162750-7ef345be.pth diff --git 
a/configs/segmenter/segmenter_vit-b_mask_8xb1-160k_ade20k-512x512.py b/configs/segmenter/segmenter_vit-b_mask_8xb1-160k_ade20k-512x512.py new file mode 100644 index 0000000000..a4bae50648 --- /dev/null +++ b/configs/segmenter/segmenter_vit-b_mask_8xb1-160k_ade20k-512x512.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/segmenter_vit-b16_mask.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) +optimizer = dict(lr=0.001, weight_decay=0.0) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +train_dataloader = dict( + # num_gpus: 8 -> batch_size: 8 + batch_size=1) +val_dataloader = dict(batch_size=1) diff --git a/configs/segmenter/segmenter_vit-l_mask_8xb1-160k_ade20k-512x512.py b/configs/segmenter/segmenter_vit-l_mask_8xb1-160k_ade20k-512x512.py new file mode 100644 index 0000000000..302acdecb6 --- /dev/null +++ b/configs/segmenter/segmenter_vit-l_mask_8xb1-160k_ade20k-512x512.py @@ -0,0 +1,32 @@ +_base_ = [ + '../_base_/models/segmenter_vit-b16_mask.py', + '../_base_/datasets/ade20k_640x640.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (640, 640) +data_preprocessor = dict(size=crop_size) +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_large_p16_384_20220308-d4efb41d.pth' # noqa + +model = dict( + data_preprocessor=data_preprocessor, + pretrained=checkpoint, + backbone=dict( + type='VisionTransformer', + img_size=(640, 640), + embed_dims=1024, + num_layers=24, + num_heads=16), + decode_head=dict( + type='SegmenterMaskTransformerHead', + in_channels=1024, + channels=1024, + num_heads=16, + embed_dims=1024), + test_cfg=dict(mode='slide', crop_size=(640, 640), stride=(608, 608))) + +optimizer = dict(lr=0.001, weight_decay=0.0) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +train_dataloader = dict( + # num_gpus: 8 -> batch_size: 8 + batch_size=1) +val_dataloader = dict(batch_size=1) diff --git a/configs/segmenter/segmenter_vit-s_fcn_8xb1-160k_ade20k-512x512.py b/configs/segmenter/segmenter_vit-s_fcn_8xb1-160k_ade20k-512x512.py new file mode 100644 index 0000000000..dc1e4c8985 --- /dev/null +++ b/configs/segmenter/segmenter_vit-s_fcn_8xb1-160k_ade20k-512x512.py @@ -0,0 +1,14 @@ +_base_ = './segmenter_vit-s_mask_8xb1-160k_ade20k-512x512.py' + +model = dict( + decode_head=dict( + _delete_=True, + type='FCNHead', + in_channels=384, + channels=384, + num_convs=0, + dropout_ratio=0.0, + concat_input=False, + num_classes=150, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) diff --git a/configs/segmenter/segmenter_vit-s_mask_8xb1-160k_ade20k-512x512.py b/configs/segmenter/segmenter_vit-s_mask_8xb1-160k_ade20k-512x512.py new file mode 100644 index 0000000000..b19fd41409 --- /dev/null +++ b/configs/segmenter/segmenter_vit-s_mask_8xb1-160k_ade20k-512x512.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/segmenter_vit-b16_mask.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_small_p16_384_20220308-410f6037.pth' # noqa + +backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) +model = dict( + data_preprocessor=data_preprocessor, + 
pretrained=checkpoint, + backbone=dict( + img_size=(512, 512), + embed_dims=384, + num_heads=6, + ), + decode_head=dict( + type='SegmenterMaskTransformerHead', + in_channels=384, + channels=384, + num_classes=150, + num_layers=2, + num_heads=6, + embed_dims=384, + dropout_ratio=0.0, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) + +optimizer = dict(lr=0.001, weight_decay=0.0) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +train_dataloader = dict( + # num_gpus: 8 -> batch_size: 8 + batch_size=1) +val_dataloader = dict(batch_size=1) diff --git a/configs/segmenter/segmenter_vit-t_mask_8xb1-160k_ade20k-512x512.py b/configs/segmenter/segmenter_vit-t_mask_8xb1-160k_ade20k-512x512.py new file mode 100644 index 0000000000..221a9f9a6d --- /dev/null +++ b/configs/segmenter/segmenter_vit-t_mask_8xb1-160k_ade20k-512x512.py @@ -0,0 +1,26 @@ +_base_ = [ + '../_base_/models/segmenter_vit-b16_mask.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_tiny_p16_384_20220308-cce8c795.pth' # noqa + +model = dict( + data_preprocessor=data_preprocessor, + pretrained=checkpoint, + backbone=dict(embed_dims=192, num_heads=3), + decode_head=dict( + type='SegmenterMaskTransformerHead', + in_channels=192, + channels=192, + num_heads=3, + embed_dims=192)) + +optimizer = dict(lr=0.001, weight_decay=0.0) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +train_dataloader = dict( + # num_gpus: 8 -> batch_size: 8 + batch_size=1) +val_dataloader = dict(batch_size=1) diff --git a/configs/sem_fpn/README.md b/configs/sem_fpn/README.md new file mode 100644 index 0000000000..fcef72d2a5 --- /dev/null +++ b/configs/sem_fpn/README.md @@ -0,0 +1,51 @@ +# Semantic FPN + +[Panoptic Feature Pyramid Networks](https://arxiv.org/abs/1901.02446) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The recently introduced panoptic segmentation task has renewed our community's interest in unifying the tasks of instance segmentation (for thing classes) and semantic segmentation (for stuff classes). However, current state-of-the-art methods for this joint task use separate and dissimilar networks for instance and semantic segmentation, without performing any shared computation. In this work, we aim to unify these methods at the architectural level, designing a single network for both tasks. Our approach is to endow Mask R-CNN, a popular instance segmentation method, with a semantic segmentation branch using a shared Feature Pyramid Network (FPN) backbone. Surprisingly, this simple baseline not only remains effective for instance segmentation, but also yields a lightweight, top-performing method for semantic segmentation. In this work, we perform a detailed study of this minimally extended version of Mask R-CNN with FPN, which we refer to as Panoptic FPN, and show it is a robust and accurate baseline for both tasks. Given its effectiveness and conceptual simplicity, we hope our method can serve as a strong baseline and aid future research in panoptic segmentation. + + + +
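The FPN configs added below follow the usual layout of this PR: `_base_` files for the model, dataset, runtime and schedule, plus a `data_preprocessor` override carrying the crop size. As a hedged illustration of how such a config is consumed, the sketch below launches training from Python through MMEngine's `Runner`; the `work_dir` is arbitrary, `register_all_modules` is assumed to be available from `mmseg.utils` in dev-1.x, and the reported results were obtained with 4-GPU distributed training (per the `4xb2`/`4xb4` naming) rather than a single-process run like this.

```python
# Illustrative sketch: drive one of the Semantic FPN configs with MMEngine.
from mmengine.config import Config
from mmengine.runner import Runner

from mmseg.utils import register_all_modules

register_all_modules()  # register mmseg models/datasets/transforms (dev-1.x helper)

cfg = Config.fromfile('configs/sem_fpn/fpn_r50_4xb2-80k_cityscapes-512x1024.py')
cfg.work_dir = './work_dirs/fpn_r50_4xb2-80k_cityscapes-512x1024'  # arbitrary

runner = Runner.from_cfg(cfg)  # builds model, dataloaders, optimizer and hooks
runner.train()
```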
+ +## Citation + +```bibtex +@inproceedings{kirillov2019panoptic, + title={Panoptic feature pyramid networks}, + author={Kirillov, Alexander and Girshick, Ross and He, Kaiming and Doll{\'a}r, Piotr}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={6399--6408}, + year={2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FPN | R-50 | 512x1024 | 80000 | 2.8 | 13.54 | 74.52 | 76.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/sem_fpn/fpn_r50_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x1024_80k_cityscapes/fpn_r50_512x1024_80k_cityscapes_20200717_021437-94018a0d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x1024_80k_cityscapes/fpn_r50_512x1024_80k_cityscapes-20200717_021437.log.json) | +| FPN | R-101 | 512x1024 | 80000 | 3.9 | 10.29 | 75.80 | 77.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/sem_fpn/fpn_r101_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x1024_80k_cityscapes/fpn_r101_512x1024_80k_cityscapes_20200717_012416-c5800d4c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x1024_80k_cityscapes/fpn_r101_512x1024_80k_cityscapes-20200717_012416.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FPN | R-50 | 512x512 | 160000 | 4.9 | 55.77 | 37.49 | 39.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/sem_fpn/fpn_r50_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x512_160k_ade20k/fpn_r50_512x512_160k_ade20k_20200718_131734-5b5a6ab9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x512_160k_ade20k/fpn_r50_512x512_160k_ade20k-20200718_131734.log.json) | +| FPN | R-101 | 512x512 | 160000 | 5.9 | 40.58 | 39.35 | 40.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/sem_fpn/fpn_r101_4xb4-160k_ade20k-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x512_160k_ade20k/fpn_r101_512x512_160k_ade20k_20200718_131734-306b5004.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x512_160k_ade20k/fpn_r101_512x512_160k_ade20k-20200718_131734.log.json) | diff --git a/configs/sem_fpn/fpn_r101_4xb2-80k_cityscapes-512x1024.py b/configs/sem_fpn/fpn_r101_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..1e9bcfbb59 --- /dev/null +++ b/configs/sem_fpn/fpn_r101_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './fpn_r50_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/sem_fpn/fpn_r101_4xb4-160k_ade20k-512x512.py b/configs/sem_fpn/fpn_r101_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..adad1a4f38 --- /dev/null +++ b/configs/sem_fpn/fpn_r101_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,6 @@ +_base_ = './fpn_r50_4xb4-160k_ade20k-512x512.py' +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/sem_fpn/fpn_r50_4xb2-80k_cityscapes-512x1024.py b/configs/sem_fpn/fpn_r50_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..bf71d388e9 --- /dev/null +++ b/configs/sem_fpn/fpn_r50_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/fpn_r50.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/sem_fpn/fpn_r50_4xb4-160k_ade20k-512x512.py b/configs/sem_fpn/fpn_r50_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..4e4bc57788 --- /dev/null +++ b/configs/sem_fpn/fpn_r50_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fpn_r50.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, decode_head=dict(num_classes=150)) diff --git a/configs/sem_fpn/sem_fpn.yml b/configs/sem_fpn/sem_fpn.yml new file mode 100644 index 0000000000..79ed0b81c4 --- /dev/null +++ b/configs/sem_fpn/sem_fpn.yml @@ -0,0 +1,104 @@ +Collections: +- Name: FPN + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://arxiv.org/abs/1901.02446 + Title: Panoptic Feature Pyramid Networks + README: configs/sem_fpn/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/fpn_head.py#L12 + Version: v0.17.0 + Converted From: + Code: https://github.com/facebookresearch/detectron2 +Models: +- Name: fpn_r50_4xb2-80k_cityscapes-512x1024 + In Collection: FPN + Metadata: + backbone: R-50 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 73.86 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 2.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.52 + mIoU(ms+flip): 76.08 + Config: configs/sem_fpn/fpn_r50_4xb2-80k_cityscapes-512x1024.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x1024_80k_cityscapes/fpn_r50_512x1024_80k_cityscapes_20200717_021437-94018a0d.pth +- Name: fpn_r101_4xb2-80k_cityscapes-512x1024 + In Collection: FPN + Metadata: + backbone: R-101 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 97.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.8 + mIoU(ms+flip): 77.4 + Config: configs/sem_fpn/fpn_r101_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x1024_80k_cityscapes/fpn_r101_512x1024_80k_cityscapes_20200717_012416-c5800d4c.pth +- Name: fpn_r50_4xb4-160k_ade20k-512x512 + In Collection: FPN + Metadata: + backbone: R-50 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 17.93 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.9 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 37.49 + mIoU(ms+flip): 39.09 + Config: configs/sem_fpn/fpn_r50_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x512_160k_ade20k/fpn_r50_512x512_160k_ade20k_20200718_131734-5b5a6ab9.pth +- Name: fpn_r101_4xb4-160k_ade20k-512x512 + In Collection: FPN + Metadata: + backbone: R-101 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 24.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.9 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 39.35 + mIoU(ms+flip): 40.72 + Config: configs/sem_fpn/fpn_r101_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x512_160k_ade20k/fpn_r101_512x512_160k_ade20k_20200718_131734-306b5004.pth diff --git a/configs/setr/README.md b/configs/setr/README.md new file mode 100644 index 0000000000..1aa3f245a7 --- /dev/null +++ b/configs/setr/README.md @@ -0,0 +1,74 @@ +# SETR + +[Rethinking Semantic Segmentation from a Sequence-to-Sequence Perspective with Transformers](https://arxiv.org/abs/2012.15840) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Most recent semantic segmentation methods adopt a fully-convolutional network (FCN) with an encoder-decoder architecture. The encoder progressively reduces the spatial resolution and learns more abstract/semantic visual concepts with larger receptive fields. Since context modeling is critical for segmentation, the latest efforts have been focused on increasing the receptive field, through either dilated/atrous convolutions or inserting attention modules. However, the encoder-decoder based FCN architecture remains unchanged. In this paper, we aim to provide an alternative perspective by treating semantic segmentation as a sequence-to-sequence prediction task. Specifically, we deploy a pure transformer (ie, without convolution and resolution reduction) to encode an image as a sequence of patches. With the global context modeled in every layer of the transformer, this encoder can be combined with a simple decoder to provide a powerful segmentation model, termed SEgmentation TRansformer (SETR). 
Extensive experiments show that SETR achieves new state of the art on ADE20K (50.28% mIoU), Pascal Context (55.83% mIoU) and competitive results on Cityscapes. Particularly, we achieve the first position in the highly competitive ADE20K test server leaderboard on the day of submission. + + + +
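The configs added below cover the decoder variants benchmarked in the tables: Naive and PUP use the `SETRUPHead`, MLA uses the multi-level aggregation head, and all of them share a ViT-Large backbone initialised from the converted checkpoint `pretrain/vit_large_p16.pth` described in the Usage section. The sketch below only illustrates that wiring: it loads one of the added configs and points the backbone's `init_cfg` at a checkpoint stored elsewhere; the alternative path and the dumped file name are hypothetical.

```python
# Illustrative sketch: redirect a SETR config to a ViT-Large checkpoint kept
# outside the default pretrain/ directory, mirroring the
# init_cfg=dict(type='Pretrained', ...) pattern used by the configs below.
from mmengine.config import Config

cfg = Config.fromfile('configs/setr/setr_vit-l_pup_8xb2-160k_ade20k-512x512.py')
cfg.model.backbone.init_cfg = dict(
    type='Pretrained',
    checkpoint='/data/weights/vit_large_p16.pth')  # hypothetical location

cfg.dump('setr_vit-l_pup_custom-pretrain.py')  # derived config, name is arbitrary
```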
+ +```None +This head has two version head. +``` + +## Citation + +```bibtex +@article{zheng2020rethinking, + title={Rethinking Semantic Segmentation from a Sequence-to-Sequence Perspective with Transformers}, + author={Zheng, Sixiao and Lu, Jiachen and Zhao, Hengshuang and Zhu, Xiatian and Luo, Zekun and Wang, Yabiao and Fu, Yanwei and Feng, Jianfeng and Xiang, Tao and Torr, Philip HS and others}, + journal={arXiv preprint arXiv:2012.15840}, + year={2020} +} +``` + +## Usage + +You can download the pretrain from [here](https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth). Then you can convert its keys with the script `vit2mmseg.py` in the tools directory. + +```shell +python tools/model_converters/vit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +E.g. + +```shell +python tools/model_converters/vit2mmseg.py \ +jx_vit_large_p16_384-b3be5167.pth pretrain/vit_large_p16.pth +``` + +This script convert the model from `PRETRAIN_PATH` and store the converted model in `STORE_PATH`. + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| SETR Naive | ViT-L | 512x512 | 16 | 160000 | 18.40 | 4.72 | 48.28 | 49.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/setr/setr_vit-l_naive_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_512x512_160k_b16_ade20k/setr_naive_512x512_160k_b16_ade20k_20210619_191258-061f24f5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_512x512_160k_b16_ade20k/setr_naive_512x512_160k_b16_ade20k_20210619_191258.log.json) | +| SETR PUP | ViT-L | 512x512 | 16 | 160000 | 19.54 | 4.50 | 48.24 | 49.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/setr/setr_vit-l_pup_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_512x512_160k_b16_ade20k/setr_pup_512x512_160k_b16_ade20k_20210619_191343-7e0ce826.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_512x512_160k_b16_ade20k/setr_pup_512x512_160k_b16_ade20k_20210619_191343.log.json) | +| SETR MLA | ViT-L | 512x512 | 8 | 160000 | 10.96 | - | 47.34 | 49.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/setr/setr_vit-l-mla_8xb1-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b8_ade20k/setr_mla_512x512_160k_b8_ade20k_20210619_191118-c6d21df0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b8_ade20k/setr_mla_512x512_160k_b8_ade20k_20210619_191118.log.json) | +| SETR MLA | ViT-L | 512x512 | 16 | 160000 | 17.30 | 5.25 | 47.39 | 49.37 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/setr/setr_vit-l_mla_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b16_ade20k/setr_mla_512x512_160k_b16_ade20k_20210619_191057-f9741de7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b16_ade20k/setr_mla_512x512_160k_b16_ade20k_20210619_191057.log.json) | + +### Cityscapes + +| Method | Backbone | Crop Size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | -------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| SETR Naive | ViT-L | 768x768 | 8 | 80000 | 24.06 | 0.39 | 78.10 | 80.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/setr/setr_vit-l_naive_8xb1-80k_cityscapes-768x768.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_vit-large_8x1_768x768_80k_cityscapes/setr_naive_vit-large_8x1_768x768_80k_cityscapes_20211123_000505-20728e80.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_vit-large_8x1_768x768_80k_cityscapes/setr_naive_vit-large_8x1_768x768_80k_cityscapes_20211123_000505.log.json) | +| SETR PUP | ViT-L | 768x768 | 8 | 80000 | 27.96 | 0.37 | 79.21 | 81.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/setr/setr_vit-l_pup_8xb1-80k_cityscapes-768x768.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_vit-large_8x1_768x768_80k_cityscapes/setr_pup_vit-large_8x1_768x768_80k_cityscapes_20211122_155115-f6f37b8f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_vit-large_8x1_768x768_80k_cityscapes/setr_pup_vit-large_8x1_768x768_80k_cityscapes_20211122_155115.log.json) | +| SETR MLA | ViT-L | 768x768 | 8 | 80000 | 24.10 | 0.41 | 77.00 | 79.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/setr/setr_vit-l_mla_8xb1-80k_cityscapes-768x768.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_vit-large_8x1_768x768_80k_cityscapes/setr_mla_vit-large_8x1_768x768_80k_cityscapes_20211119_101003-7f8dccbe.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_vit-large_8x1_768x768_80k_cityscapes/setr_mla_vit-large_8x1_768x768_80k_cityscapes_20211119_101003.log.json) | diff --git a/configs/setr/setr.yml b/configs/setr/setr.yml new file mode 100644 index 0000000000..6a9987089d --- /dev/null +++ b/configs/setr/setr.yml @@ -0,0 +1,164 @@ +Collections: +- Name: SETR + Metadata: + Training Data: + - ADE20K + - Cityscapes + Paper: + URL: https://arxiv.org/abs/2012.15840 + Title: Rethinking Semantic Segmentation from a Sequence-to-Sequence Perspective + with Transformers + README: configs/setr/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/setr_up_head.py#L11 + Version: v0.17.0 + Converted From: + Code: 
https://github.com/fudan-zvg/SETR +Models: +- Name: setr_vit-l_naive_8xb2-160k_ade20k-512x512 + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 211.86 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 18.4 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.28 + mIoU(ms+flip): 49.56 + Config: configs/setr/setr_vit-l_naive_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_512x512_160k_b16_ade20k/setr_naive_512x512_160k_b16_ade20k_20210619_191258-061f24f5.pth +- Name: setr_vit-l_pup_8xb2-160k_ade20k-512x512 + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 222.22 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 19.54 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.24 + mIoU(ms+flip): 49.99 + Config: configs/setr/setr_vit-l_pup_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_512x512_160k_b16_ade20k/setr_pup_512x512_160k_b16_ade20k_20210619_191343-7e0ce826.pth +- Name: setr_vit-l-mla_8xb1-160k_ade20k-512x512 + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (512,512) + lr schd: 160000 + Training Memory (GB): 10.96 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.34 + mIoU(ms+flip): 49.05 + Config: configs/setr/setr_vit-l-mla_8xb1-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b8_ade20k/setr_mla_512x512_160k_b8_ade20k_20210619_191118-c6d21df0.pth +- Name: setr_vit-l_mla_8xb2-160k_ade20k-512x512 + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 190.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 17.3 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.39 + mIoU(ms+flip): 49.37 + Config: configs/setr/setr_vit-l_mla_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b16_ade20k/setr_mla_512x512_160k_b16_ade20k_20210619_191057-f9741de7.pth +- Name: setr_vit-l_naive_8xb1-80k_cityscapes-768x768 + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (768,768) + lr schd: 80000 + inference time (ms/im): + - value: 2564.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (768,768) + Training Memory (GB): 24.06 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.1 + mIoU(ms+flip): 80.22 + Config: configs/setr/setr_vit-l_naive_8xb1-80k_cityscapes-768x768.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_vit-large_8x1_768x768_80k_cityscapes/setr_naive_vit-large_8x1_768x768_80k_cityscapes_20211123_000505-20728e80.pth +- Name: setr_vit-l_pup_8xb1-80k_cityscapes-768x768 + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (768,768) + lr schd: 80000 + inference time (ms/im): + - value: 2702.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (768,768) + Training Memory (GB): 27.96 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + 
mIoU: 79.21 + mIoU(ms+flip): 81.02 + Config: configs/setr/setr_vit-l_pup_8xb1-80k_cityscapes-768x768.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_vit-large_8x1_768x768_80k_cityscapes/setr_pup_vit-large_8x1_768x768_80k_cityscapes_20211122_155115-f6f37b8f.pth +- Name: setr_vit-l_mla_8xb1-80k_cityscapes-768x768 + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (768,768) + lr schd: 80000 + inference time (ms/im): + - value: 2439.02 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (768,768) + Training Memory (GB): 24.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.0 + mIoU(ms+flip): 79.59 + Config: configs/setr/setr_vit-l_mla_8xb1-80k_cityscapes-768x768.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_vit-large_8x1_768x768_80k_cityscapes/setr_mla_vit-large_8x1_768x768_80k_cityscapes_20211119_101003-7f8dccbe.pth diff --git a/configs/setr/setr_vit-l-mla_8xb1-160k_ade20k-512x512.py b/configs/setr/setr_vit-l-mla_8xb1-160k_ade20k-512x512.py new file mode 100644 index 0000000000..1c6e2845f9 --- /dev/null +++ b/configs/setr/setr_vit-l-mla_8xb1-160k_ade20k-512x512.py @@ -0,0 +1,90 @@ +_base_ = [ + '../_base_/models/setr_mla.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + img_size=(512, 512), + drop_rate=0., + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), + decode_head=dict(num_classes=150), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=0, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=1, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=2, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=3, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + ], + test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)), +) + +optimizer = dict(lr=0.001, weight_decay=0.0) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=optimizer, + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})) +# num_gpus: 8 -> batch_size: 8 +train_dataloader = dict(batch_size=1) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/setr/setr_vit-l_mla_8xb1-80k_cityscapes-768x768.py 
b/configs/setr/setr_vit-l_mla_8xb1-80k_cityscapes-768x768.py new file mode 100644 index 0000000000..026557f505 --- /dev/null +++ b/configs/setr/setr_vit-l_mla_8xb1-80k_cityscapes-768x768.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/setr_mla.py', '../_base_/datasets/cityscapes_768x768.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (768, 768) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + drop_rate=0, + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), + test_cfg=dict(mode='slide', crop_size=(768, 768), stride=(512, 512))) + +optimizer = dict(lr=0.002, weight_decay=0.0) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=optimizer, + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})) +train_dataloader = dict(batch_size=1) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/setr/setr_vit-l_mla_8xb2-160k_ade20k-512x512.py b/configs/setr/setr_vit-l_mla_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..4d3fb7d4e1 --- /dev/null +++ b/configs/setr/setr_vit-l_mla_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,6 @@ +_base_ = ['./setr_vit-l-mla_8xb1-160k_ade20k-512x512.py'] + +# num_gpus: 8 -> batch_size: 16 +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/setr/setr_vit-l_naive_8xb1-80k_cityscapes-768x768.py b/configs/setr/setr_vit-l_naive_8xb1-80k_cityscapes-768x768.py new file mode 100644 index 0000000000..db49317301 --- /dev/null +++ b/configs/setr/setr_vit-l_naive_8xb1-80k_cityscapes-768x768.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/setr_naive.py', + '../_base_/datasets/cityscapes_768x768.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (768, 768) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + drop_rate=0., + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), + test_cfg=dict(mode='slide', crop_size=(768, 768), stride=(512, 512))) + +optimizer = dict(weight_decay=0.0) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=optimizer, + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})) +train_dataloader = dict(batch_size=1) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/setr/setr_vit-l_naive_8xb2-160k_ade20k-512x512.py b/configs/setr/setr_vit-l_naive_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..109996c1b6 --- /dev/null +++ b/configs/setr/setr_vit-l_naive_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,72 @@ +_base_ = [ + '../_base_/models/setr_naive.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + img_size=(512, 512), + drop_rate=0., + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), + decode_head=dict(num_classes=150), + auxiliary_head=[ + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=0, + num_classes=150, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=2, + kernel_size=1, + 
align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=1, + num_classes=150, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=2, + kernel_size=1, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=2, + num_classes=150, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=2, + kernel_size=1, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)) + ], + test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)), +) + +optimizer = dict(lr=0.01, weight_decay=0.0) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=optimizer, + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})) +# num_gpus: 8 -> batch_size: 16 +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/setr/setr_vit-l_pup_8xb1-80k_cityscapes-768x768.py b/configs/setr/setr_vit-l_pup_8xb1-80k_cityscapes-768x768.py new file mode 100644 index 0000000000..999ab18038 --- /dev/null +++ b/configs/setr/setr_vit-l_pup_8xb1-80k_cityscapes-768x768.py @@ -0,0 +1,70 @@ +_base_ = [ + '../_base_/models/setr_pup.py', '../_base_/datasets/cityscapes_768x768.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (768, 768) +data_preprocessor = dict(size=crop_size) +norm_cfg = dict(type='SyncBN', requires_grad=True) +crop_size = (768, 768) +model = dict( + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + drop_rate=0., + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), + auxiliary_head=[ + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=0, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=2, + up_scale=4, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=1, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=2, + up_scale=4, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=2, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=2, + up_scale=4, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)) + ], + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(512, 512))) + +optimizer = dict(weight_decay=0.0) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=optimizer, + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})) + +train_dataloader = dict(batch_size=1) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/setr/setr_vit-l_pup_8xb2-160k_ade20k-512x512.py b/configs/setr/setr_vit-l_pup_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..e9bfb2201d --- /dev/null +++ b/configs/setr/setr_vit-l_pup_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,72 @@ +_base_ = [ + '../_base_/models/setr_pup.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', 
'../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + img_size=(512, 512), + drop_rate=0., + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), + decode_head=dict(num_classes=150), + auxiliary_head=[ + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=0, + num_classes=150, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=2, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=1, + num_classes=150, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=2, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=2, + num_classes=150, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=2, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + ], + test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)), +) + +optimizer = dict(lr=0.001, weight_decay=0.0) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=optimizer, + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})) +# num_gpus: 8 -> batch_size: 16 +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/stdc/README.md b/configs/stdc/README.md new file mode 100644 index 0000000000..639e6b6986 --- /dev/null +++ b/configs/stdc/README.md @@ -0,0 +1,73 @@ +# STDC + +[Rethinking BiSeNet For Real-time Semantic Segmentation](https://arxiv.org/abs/2104.13188) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +BiSeNet has been proved to be a popular two-stream network for real-time segmentation. However, its principle of adding an extra path to encode spatial information is time-consuming, and the backbones borrowed from pretrained tasks, e.g., image classification, may be inefficient for image segmentation due to the deficiency of task-specific design. To handle these problems, we propose a novel and efficient structure named Short-Term Dense Concatenate network (STDC network) by removing structure redundancy. Specifically, we gradually reduce the dimension of feature maps and use the aggregation of them for image representation, which forms the basic module of STDC network. In the decoder, we propose a Detail Aggregation module by integrating the learning of spatial information into low-level layers in single-stream manner. Finally, the low-level features and deep features are fused to predict the final segmentation results. Extensive experiments on Cityscapes and CamVid dataset demonstrate the effectiveness of our method by achieving promising trade-off between segmentation accuracy and inference speed. On Cityscapes, we achieve 71.9% mIoU on the test set with a speed of 250.4 FPS on NVIDIA GTX 1080Ti, which is 45.2% faster than the latest methods, and achieve 76.8% mIoU with 97.0 FPS while inferring on higher resolution images. + + + +
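One practical detail of the STDC configs added below: the STDC network itself is nested inside the wrapper backbone's `backbone_cfg`, so pretrained weights are attached one level deeper than in most other configs. As a hedged illustration, the from-scratch config can be switched to an ImageNet-pretrained backbone as follows; the local checkpoint path is the output of the conversion command shown later in the Usage section and is not shipped with this PR.

```python
# Illustrative override config (not a file added by this PR): start from the
# scratch STDC1 config and initialise the nested STDC backbone from a locally
# converted ImageNet checkpoint produced by tools/model_converters/stdc2mmseg.py.
_base_ = './stdc1_4xb12-80k_cityscapes-512x1024.py'

model = dict(
    backbone=dict(          # wrapper backbone
        backbone_cfg=dict(  # the STDC network inside it
            init_cfg=dict(
                type='Pretrained', checkpoint='./pretrained/stdc1.pth'))))
```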
+ +## Citation + +```bibtex +@inproceedings{fan2021rethinking, + title={Rethinking BiSeNet For Real-time Semantic Segmentation}, + author={Fan, Mingyuan and Lai, Shenqi and Huang, Junshi and Wei, Xiaoming and Chai, Zhenhua and Luo, Junfeng and Wei, Xiaolin}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={9716--9725}, + year={2021} +} +``` + +## Usage +
+We provide [ImageNet Pretrained STDCNet Weights](https://drive.google.com/drive/folders/1wROFwRt8qWHD4jSo8Zu1gp1d6oYJ3ns1) converted from the [official repo](https://github.com/MichaelFan01/STDC-Seg). +
+If you want to convert keys on your own to use official repositories' pre-trained models, we also provide a script [`stdc2mmseg.py`](../../tools/model_converters/stdc2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/MichaelFan01/STDC-Seg) to MMSegmentation style. + +```shell +python tools/model_converters/stdc2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} ${STDC_TYPE} +``` + +E.g. + +```shell +python tools/model_converters/stdc2mmseg.py ./STDCNet813M_73.91.tar ./pretrained/stdc1.pth STDC1 + +python tools/model_converters/stdc2mmseg.py ./STDCNet1446_76.47.tar ./pretrained/stdc2.pth STDC2 +``` +
+This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +## Results and models + +### Cityscapes +
+| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
+| -------------------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------ | -------- |
+| STDC 1 (No Pretrain) | STDC1 | 512x1024 | 80000 | 7.15 | 23.06 | 71.82 | 73.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/stdc/stdc1_4xb12-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_512x1024_80k_cityscapes/stdc1_512x1024_80k_cityscapes_20220224_073048-74e6920a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_512x1024_80k_cityscapes/stdc1_512x1024_80k_cityscapes_20220224_073048.log.json) |
+| STDC 1 | STDC1 | 512x1024 | 80000 | - | - | 74.94 | 76.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/stdc/stdc1_in1k-pre_4xb12-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes/stdc1_in1k-pre_512x1024_80k_cityscapes_20220224_141648-3d4c2981.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes/stdc1_in1k-pre_512x1024_80k_cityscapes_20220224_141648.log.json) |
+| STDC 2 (No Pretrain) | STDC2 | 512x1024 | 80000 | 8.27 | 23.71 | 73.15 | 76.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/stdc/stdc2_4xb12-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_512x1024_80k_cityscapes/stdc2_512x1024_80k_cityscapes_20220222_132015-fb1e3a1a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_512x1024_80k_cityscapes/stdc2_512x1024_80k_cityscapes_20220222_132015.log.json) |
+| STDC 2 | STDC2 | 512x1024 | 80000 | - | - | 76.67 | 78.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/stdc/stdc2_in1k-pre_4xb12-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes/stdc2_in1k-pre_512x1024_80k_cityscapes_20220224_073048-1f8f0f6c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes/stdc2_in1k-pre_512x1024_80k_cityscapes_20220224_073048.log.json) |
+ +Note: +
+- For STDC on the Cityscapes dataset, the default setting is 4 GPUs with 12 samples per GPU in training.
+- `No Pretrain` means the model is trained from scratch.
+- The FPS is for reference only. Our measurement environment also differs from the paper setting, which uses TensorRT and input sizes of `512x1024` and `768x1536`, i.e., 50% and 75% of our input size, respectively.
+- The parameter `fusion_kernel` in `STDCHead` is not learnable. In the official repo, `find_unused_parameters=True` is set [here](https://github.com/MichaelFan01/STDC-Seg/blob/59ff37fbd693b99972c76fcefe97caa14aeb619f/train.py#L220); you can verify this by printing the model parameters of the original repo. diff --git a/configs/stdc/stdc.yml b/configs/stdc/stdc.yml new file mode 100644 index 0000000000..22fb37eeba --- /dev/null +++ b/configs/stdc/stdc.yml @@ -0,0 +1,87 @@ +Collections: +- Name: STDC + Metadata: + Training Data: + - Cityscapes + Paper: + URL: https://arxiv.org/abs/2104.13188 + Title: Rethinking BiSeNet For Real-time Semantic Segmentation + README: configs/stdc/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.20.0/mmseg/models/backbones/stdc.py#L394 + Version: v0.20.0 + Converted From: + Code: https://github.com/MichaelFan01/STDC-Seg +Models: +- Name: stdc1_4xb12-80k_cityscapes-512x1024 + In Collection: STDC + Metadata: + backbone: STDC1 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 43.37 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.15 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 71.82 + mIoU(ms+flip): 73.89 + Config: configs/stdc/stdc1_4xb12-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_512x1024_80k_cityscapes/stdc1_512x1024_80k_cityscapes_20220224_073048-74e6920a.pth +- Name: stdc1_in1k-pre_4xb12-80k_cityscapes-512x1024 + In Collection: STDC + Metadata: + backbone: STDC1 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.94 + mIoU(ms+flip): 76.97 + Config: configs/stdc/stdc1_in1k-pre_4xb12-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes/stdc1_in1k-pre_512x1024_80k_cityscapes_20220224_141648-3d4c2981.pth +- Name: stdc2_4xb12-80k_cityscapes-512x1024 + In Collection: STDC + Metadata: + backbone: STDC2 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 42.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.27 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.15 + mIoU(ms+flip): 76.13 + Config: 
configs/stdc/stdc2_4xb12-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_512x1024_80k_cityscapes/stdc2_512x1024_80k_cityscapes_20220222_132015-fb1e3a1a.pth +- Name: stdc2_in1k-pre_4xb12-80k_cityscapes-512x1024 + In Collection: STDC + Metadata: + backbone: STDC2 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.67 + mIoU(ms+flip): 78.67 + Config: configs/stdc/stdc2_in1k-pre_4xb12-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes/stdc2_in1k-pre_512x1024_80k_cityscapes_20220224_073048-1f8f0f6c.pth diff --git a/configs/stdc/stdc1_4xb12-80k_cityscapes-512x1024.py b/configs/stdc/stdc1_4xb12-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..20aec3d5bf --- /dev/null +++ b/configs/stdc/stdc1_4xb12-80k_cityscapes-512x1024.py @@ -0,0 +1,21 @@ +_base_ = [ + '../_base_/models/stdc.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) +param_scheduler = [ + dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000), + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=1000, + end=80000, + by_epoch=False, + ) +] +train_dataloader = dict(batch_size=12, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/stdc/stdc1_in1k-pre_4xb12-80k_cityscapes-512x1024.py b/configs/stdc/stdc1_in1k-pre_4xb12-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..15e807f9ed --- /dev/null +++ b/configs/stdc/stdc1_in1k-pre_4xb12-80k_cityscapes-512x1024.py @@ -0,0 +1,6 @@ +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/stdc/stdc1_20220308-5368626c.pth' # noqa +_base_ = './stdc1_4xb12-80k_cityscapes-512x1024.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))) diff --git a/configs/stdc/stdc2_4xb12-80k_cityscapes-512x1024.py b/configs/stdc/stdc2_4xb12-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..5657351698 --- /dev/null +++ b/configs/stdc/stdc2_4xb12-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './stdc1_4xb12-80k_cityscapes-512x1024.py' +model = dict(backbone=dict(backbone_cfg=dict(stdc_type='STDCNet2'))) diff --git a/configs/stdc/stdc2_in1k-pre_4xb12-80k_cityscapes-512x1024.py b/configs/stdc/stdc2_in1k-pre_4xb12-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..05a202b74c --- /dev/null +++ b/configs/stdc/stdc2_in1k-pre_4xb12-80k_cityscapes-512x1024.py @@ -0,0 +1,6 @@ +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/stdc/stdc2_20220308-7dbd9127.pth' # noqa +_base_ = './stdc2_4xb12-80k_cityscapes-512x1024.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))) diff --git a/configs/swin/README.md b/configs/swin/README.md new file mode 100644 index 0000000000..4ab20e80b8 --- /dev/null +++ b/configs/swin/README.md @@ -0,0 +1,76 @@ +# Swin Transformer + +[Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +This paper presents a new vision Transformer, called Swin Transformer, 
that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with Shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. The code and models are publicly available at [this https URL](https://github.com/microsoft/Swin-Transformer). + + + +
+ +
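
The key mechanism described above is that self-attention is computed only inside non-overlapping local windows, and consecutive blocks shift the window grid so information can cross window borders. The snippet below is a minimal, self-contained illustration of that window partitioning; the feature shape, window size and shift are arbitrary choices for the example, and this is not the `SwinTransformer` implementation used by the configs in this folder.

```python
import torch


def window_partition(x, window_size):
    """Split a (B, H, W, C) feature map into (num_windows*B, ws*ws, C) token groups."""
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size * window_size, C)


x = torch.randn(1, 56, 56, 96)  # e.g. a stage-1 feature map of Swin-T

# Block i: attention is restricted to regular 7x7 windows.
regular_windows = window_partition(x, window_size=7)        # (64, 49, 96)

# Block i+1: cyclically shift the map by half a window before partitioning,
# so the new windows straddle the previous window borders.
shifted = torch.roll(x, shifts=(-3, -3), dims=(1, 2))
shifted_windows = window_partition(shifted, window_size=7)  # (64, 49, 96)
```

Because attention never leaves a fixed-size window, the cost grows with the number of windows, i.e. linearly with image size, which is what makes the backbone practical for 512x512 and larger segmentation inputs.
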
+ +## Citation + +```bibtex +@article{liu2021Swin, + title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows}, + author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, + journal={arXiv preprint arXiv:2103.14030}, + year={2021} +} +``` + +## Usage +
+We have provided pretrained models converted from the [official repo](https://github.com/microsoft/Swin-Transformer). +
+If you want to convert keys on your own to use official repositories' pre-trained models, we also provide a script [`swin2mmseg.py`](../../tools/model_converters/swin2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/SwinTransformer/Swin-Transformer-Semantic-Segmentation) to MMSegmentation style. + +```shell +python tools/model_converters/swin2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +E.g. + +```shell +python tools/model_converters/swin2mmseg.py https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth pretrain/swin_base_patch4_window7_224.pth +``` +
+This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. +
+In our default setting, the pretrained models and their corresponding [original models](https://github.com/microsoft/Swin-Transformer) are listed below: +
+| pretrained models | original models |
+| ---------------------------------------------- | --------------- |
+| pretrain/swin_tiny_patch4_window7_224.pth | [swin_tiny_patch4_window7_224.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth) |
+| pretrain/swin_small_patch4_window7_224.pth | [swin_small_patch4_window7_224.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth) |
+| pretrain/swin_base_patch4_window7_224.pth | [swin_base_patch4_window7_224.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth) |
+| pretrain/swin_base_patch4_window7_224_22k.pth | [swin_base_patch4_window7_224_22k.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth) |
+| pretrain/swin_base_patch4_window12_384.pth | [swin_base_patch4_window12_384.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth) |
+| pretrain/swin_base_patch4_window12_384_22k.pth | [swin_base_patch4_window12_384_22k.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth) |
+ +## Results and models + +### ADE20K +
+| Method | Backbone | Crop Size | pretrain | pretrain img size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | -------- | --------- | ------------ | ----------------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | ------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UPerNet | Swin-T | 512x512 | ImageNet-1K | 224x224 | 16 | 160000 | 5.02 | 21.06 | 44.41 | 45.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/swin-tiny-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542.log.json) | +| UPerNet | Swin-S | 512x512 | ImageNet-1K | 224x224 | 16 | 160000 | 6.17 | 14.72 | 47.72 | 49.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/swin-small-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015.log.json) | +| UPerNet | Swin-B | 512x512 | ImageNet-1K | 224x224 | 16 | 160000 | 7.61 | 12.65 | 47.99 | 49.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/swin-base-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192340-593b0e13.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192340.log.json) | +| UPerNet | Swin-B | 512x512 | ImageNet-22K | 224x224 | 16 | 160000 | - | - | 50.13 | 51.9 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/swin-base-patch4-window7-in22k-pre_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K_20210526_211650-762e2178.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K_20210526_211650.log.json) | +| UPerNet | Swin-B | 512x512 | ImageNet-1K | 384x384 | 16 | 160000 | 8.52 | 12.10 | 48.35 | 49.65 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/swin-base-patch4-window12-in1k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K_20210531_132020-05b22ea4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K_20210531_132020.log.json) | +| UPerNet | Swin-B | 512x512 | ImageNet-22K | 384x384 | 16 | 160000 | - | - | 50.76 | 52.4 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/swin-base-patch4-window12-in22k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459.log.json) | diff --git a/configs/swin/swin-base-patch4-window12-in1k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py b/configs/swin/swin-base-patch4-window12-in1k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..11cea36703 --- /dev/null +++ b/configs/swin/swin-base-patch4-window12-in1k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,14 @@ +_base_ = [ + 'swin-tiny-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py' +] +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window12_384_20220317-55b0104a.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file), + pretrain_img_size=384, + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12), + decode_head=dict(in_channels=[128, 256, 512, 1024], num_classes=150), + auxiliary_head=dict(in_channels=512, num_classes=150)) diff --git a/configs/swin/swin-base-patch4-window12-in22k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py b/configs/swin/swin-base-patch4-window12-in22k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..5c1171646e --- /dev/null +++ b/configs/swin/swin-base-patch4-window12-in22k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,7 @@ +_base_ = [ + './swin-base-patch4-window12-in1k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py' # noqa +] +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window12_384_22k_20220317-e5c09f74.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file))) diff --git a/configs/swin/swin-base-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py b/configs/swin/swin-base-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..73bf6166ef --- /dev/null +++ b/configs/swin/swin-base-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,12 @@ +_base_ = [ + './swin-tiny-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py' +] +checkpoint_file = 
'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window7_224_20220317-e9b98025.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file), + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32]), + decode_head=dict(in_channels=[128, 256, 512, 1024], num_classes=150), + auxiliary_head=dict(in_channels=512, num_classes=150)) diff --git a/configs/swin/swin-base-patch4-window7-in22k-pre_upernet_8xb2-160k_ade20k-512x512.py b/configs/swin/swin-base-patch4-window7-in22k-pre_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..96148cd71d --- /dev/null +++ b/configs/swin/swin-base-patch4-window7-in22k-pre_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,7 @@ +_base_ = [ + './swin-base-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py' +] +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window7_224_22k_20220317-4f79f7c0.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file))) diff --git a/configs/swin/swin-large-patch4-window12-in22k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py b/configs/swin/swin-large-patch4-window12-in22k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..a0a654e026 --- /dev/null +++ b/configs/swin/swin-large-patch4-window12-in22k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + 'swin-large-patch4-window7-in22k-pre_upernet_' + '8xb2-160k_ade20k-512x512.py' +] +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window12_384_22k_20220412-6580f57d.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file), + pretrain_img_size=384, + window_size=12)) diff --git a/configs/swin/swin-large-patch4-window7-in22k-pre_upernet_8xb2-160k_ade20k-512x512.py b/configs/swin/swin-large-patch4-window7-in22k-pre_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..c93cdfeaae --- /dev/null +++ b/configs/swin/swin-large-patch4-window7-in22k-pre_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,15 @@ +_base_ = [ + 'swin-tiny-patch4-window7-in1k-pre_upernet_8xb2-160k_' + 'ade20k-512x512.py' +] +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window7_224_22k_20220412-aeecf2aa.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file), + pretrain_img_size=224, + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=7), + decode_head=dict(in_channels=[192, 384, 768, 1536], num_classes=150), + auxiliary_head=dict(in_channels=768, num_classes=150)) diff --git a/configs/swin/swin-small-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py b/configs/swin/swin-small-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..19863dfc82 --- /dev/null +++ b/configs/swin/swin-small-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + './swin-tiny-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py' +] +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_small_patch4_window7_224_20220317-7ba6d6dd.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file), + depths=[2, 2, 18, 2]), + 
decode_head=dict(in_channels=[96, 192, 384, 768], num_classes=150), + auxiliary_head=dict(in_channels=384, num_classes=150)) diff --git a/configs/swin/swin-tiny-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py b/configs/swin/swin-tiny-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..f61a276277 --- /dev/null +++ b/configs/swin/swin-tiny-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,52 @@ +_base_ = [ + '../_base_/models/upernet_swin.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_tiny_patch4_window7_224_20220317-1cdeb081.pth' # noqa +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file), + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + use_abs_pos_embed=False, + drop_path_rate=0.3, + patch_norm=True), + decode_head=dict(in_channels=[96, 192, 384, 768], num_classes=150), + auxiliary_head=dict(in_channels=384, num_classes=150)) + +# AdamW optimizer, no weight decay for position embedding & layer norm +# in backbone +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01), + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + eta_min=0.0, + power=1.0, + begin=1500, + end=160000, + by_epoch=False, + ) +] + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/swin/swin.yml b/configs/swin/swin.yml new file mode 100644 index 0000000000..783d839c8d --- /dev/null +++ b/configs/swin/swin.yml @@ -0,0 +1,117 @@ +Models: +- Name: swin-tiny-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: Swin-T + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 47.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.02 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.41 + mIoU(ms+flip): 45.79 + Config: configs/swin/swin-tiny-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth +- Name: swin-small-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: Swin-S + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 67.93 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.17 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.72 + mIoU(ms+flip): 49.24 + Config: configs/swin/swin-small-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py 
+ Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth +- Name: swin-base-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: Swin-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 79.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.61 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.99 + mIoU(ms+flip): 49.57 + Config: configs/swin/swin-base-patch4-window7-in1k-pre_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192340-593b0e13.pth +- Name: swin-base-patch4-window7-in22k-pre_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: Swin-B + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 50.13 + mIoU(ms+flip): 51.9 + Config: configs/swin/swin-base-patch4-window7-in22k-pre_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K_20210526_211650-762e2178.pth +- Name: swin-base-patch4-window12-in1k-384x384-pre_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: Swin-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 82.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.52 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.35 + mIoU(ms+flip): 49.65 + Config: configs/swin/swin-base-patch4-window12-in1k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K_20210531_132020-05b22ea4.pth +- Name: swin-base-patch4-window12-in22k-384x384-pre_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: Swin-B + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 50.76 + mIoU(ms+flip): 52.4 + Config: configs/swin/swin-base-patch4-window12-in22k-384x384-pre_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth diff --git a/configs/twins/README.md b/configs/twins/README.md new file mode 100644 index 0000000000..3e741802e6 --- /dev/null +++ b/configs/twins/README.md @@ -0,0 +1,76 @@ +# Twins + +[Twins: Revisiting the Design of Spatial Attention in Vision Transformers](https://arxiv.org/pdf/2104.13840.pdf) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Very recently, a variety of vision transformer architectures for dense prediction tasks have been 
proposed and they show that the design of spatial attention is critical to their success in these tasks. In this work, we revisit the design of the spatial attention and demonstrate that a carefully-devised yet simple spatial attention mechanism performs favourably against the state-of-the-art schemes. As a result, we propose two vision transformer architectures, namely, Twins-PCPVT and Twins-SVT. Our proposed architectures are highly-efficient and easy to implement, only involving matrix multiplications that are highly optimized in modern deep learning frameworks. More importantly, the proposed architectures achieve excellent performance on a wide range of visual tasks, including image level classification as well as dense detection and segmentation. The simplicity and strong performance suggest that our proposed architectures may serve as stronger backbones for many vision tasks. Our code is released at [this https URL](https://github.com/Meituan-AutoML/Twins). + + + +
+ +
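
In Twins-SVT, the "carefully-devised yet simple spatial attention" mentioned above alternates locally-grouped attention inside small windows with a global attention whose keys and values come from a sub-sampled version of the feature map, so everything stays plain matrix multiplication. Below is a rough, single-head, projection-free sketch of the global sub-sampled attention step only; it is illustrative and not the `SVT` backbone code in MMSegmentation, and the stride value is an arbitrary choice.

```python
import torch
import torch.nn.functional as F


def global_subsampled_attention(x, stride=8):
    """Queries from every position; keys/values from a strided sub-grid.

    x: (B, H, W, C) feature map. Single-head sketch without projections.
    """
    B, H, W, C = x.shape
    q = x.reshape(B, H * W, C)                           # one query per pixel
    kv = x[:, ::stride, ::stride, :].reshape(B, -1, C)   # sub-sampled keys/values
    attn = F.softmax(q @ kv.transpose(1, 2) / C ** 0.5, dim=-1)
    return (attn @ kv).reshape(B, H, W, C)


out = global_subsampled_attention(torch.randn(2, 64, 64, 96))
print(out.shape)  # torch.Size([2, 64, 64, 96])
```

The attention matrix here is (H·W) x (H·W/stride²) rather than (H·W) x (H·W), which is what keeps the cost manageable at segmentation resolutions.
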
+ +## Citation + +```bibtex +@article{chu2021twins, + title={Twins: Revisiting spatial attention design in vision transformers}, + author={Chu, Xiangxiang and Tian, Zhi and Wang, Yuqing and Zhang, Bo and Ren, Haibing and Wei, Xiaolin and Xia, Huaxia and Shen, Chunhua}, + journal={arXiv preprint arXiv:2104.13840}, + year={2021} +} +``` + +## Usage +
+We have provided pretrained models converted from the [official repo](https://github.com/Meituan-AutoML/Twins). +
+If you want to convert keys on your own to use official repositories' pre-trained models, we also provide a script [`twins2mmseg.py`](../../tools/model_converters/twins2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/Meituan-AutoML/Twins) to MMSegmentation style. + +```shell +python tools/model_converters/twins2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} ${MODEL_TYPE} +``` +
+This script converts a `pcpvt` or `svt` pretrained model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +For example, + +```shell +python tools/model_converters/twins2mmseg.py ./alt_gvt_base.pth ./pretrained/alt_gvt_base.pth svt +``` + +## Results and models + +### ADE20K +
+| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
+| ------------------- | -------- | --------- | ------- | -------- | -------------- | ----- | ------------- | ------ | -------- |
+| Twins-FPN | PCPVT-S | 512x512 | 80000 | 6.60 | 27.15 | 43.26 | 44.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/twins/twins_pcpvt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_204132-41acd132.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_204132.log.json) |
+| Twins-UPerNet | PCPVT-S | 512x512 | 160000 | 9.67 | 14.24 | 46.04 | 46.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/twins/twins_pcpvt-s_uperhead_8xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k_20211201_233537-8e99c07a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k_20211201_233537.log.json) |
+| Twins-FPN | PCPVT-B | 512x512 | 80000 | 8.41 | 19.67 | 45.66 | 46.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/twins/twins_pcpvt-b_fpn_fpnhead_8xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141019-d396db72.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141019.log.json) | +| Twins-UPerNet (8x2) | PCPVT-B | 512x512 | 160000 | 6.46 | 12.04 | 47.91 | 48.64 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/twins/twins_pcpvt-b_uperhead_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k_20211130_141020-02094ea5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k_20211130_141020.log.json) | +| Twins-FPN | PCPVT-L | 512x512 | 80000 | 10.78 | 14.32 | 45.94 | 46.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/twins/twins_pcpvt-l_fpn_fpnhead_8xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_105226-bc6d61dc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_105226.log.json) | +| Twins-UPerNet (8x2) | PCPVT-L | 512x512 | 160000 | 7.82 | 10.70 | 49.35 | 50.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/twins/twins_pcpvt-l_uperhead_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k_20211201_075053-c6095c07.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k_20211201_075053.log.json) | +| Twins-FPN | SVT-S | 512x512 | 80000 | 5.80 | 29.79 | 44.47 | 45.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/twins/twins_svt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141006-0a0d3317.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141006.log.json) | +| Twins-UPerNet (8x2) | SVT-S | 512x512 | 160000 | 4.93 | 15.09 | 46.08 | 46.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/twins/twins_svt-s_uperhead_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k/twins_svt-s_uperhead_8x2_512x512_160k_ade20k_20211130_141005-e48a2d94.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k/twins_svt-s_uperhead_8x2_512x512_160k_ade20k_20211130_141005.log.json) | +| Twins-FPN | SVT-B | 512x512 | 80000 | 8.75 | 21.10 | 46.77 | 47.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/twins/twins_svt-b_fpn_fpnhead_8xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_113849-88b2907c.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_113849.log.json) |
+| Twins-UPerNet (8x2) | SVT-B | 512x512 | 160000 | 6.77 | 12.66 | 48.04 | 48.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/twins/twins_svt-b_uperhead_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k/twins_svt-b_uperhead_8x2_512x512_160k_ade20k_20211202_040826-0943a1f1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k/twins_svt-b_uperhead_8x2_512x512_160k_ade20k_20211202_040826.log.json) |
+| Twins-FPN | SVT-L | 512x512 | 80000 | 11.20 | 17.80 | 46.55 | 47.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/twins/twins_svt-l_fpn_fpnhead_8xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141005-1d59bee2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141005.log.json) |
+| Twins-UPerNet (8x2) | SVT-L | 512x512 | 160000 | 8.41 | 10.73 | 49.65 | 50.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/twins/twins_svt-l_uperhead_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k/twins_svt-l_uperhead_8x2_512x512_160k_ade20k_20211130_141005-3e2cae61.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k/twins_svt-l_uperhead_8x2_512x512_160k_ade20k_20211130_141005.log.json) |
+ +Note: +
+- `8x2` means 8 GPUs with 2 samples per GPU in training. The default setting of Twins on ADE20K is 8 GPUs with 4 samples per GPU in training.
+- `UPerNet` and `FPN` refer to the decode heads used on top of the corresponding Twins backbones, namely `UPerHead` and `FPNHead`, respectively. Models in the [official repo](https://github.com/Meituan-AutoML/Twins) all use `UPerHead`. 
diff --git a/configs/twins/twins.yml b/configs/twins/twins.yml new file mode 100644 index 0000000000..48d25c682f --- /dev/null +++ b/configs/twins/twins.yml @@ -0,0 +1,265 @@ +Models: +- Name: twins_pcpvt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512 + In Collection: FPN + Metadata: + backbone: PCPVT-S + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 36.83 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.26 + mIoU(ms+flip): 44.11 + Config: configs/twins/twins_pcpvt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_204132-41acd132.pth +- Name: twins_pcpvt-s_uperhead_8xb4-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: PCPVT-S + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 70.22 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.67 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.04 + mIoU(ms+flip): 46.92 + Config: configs/twins/twins_pcpvt-s_uperhead_8xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k_20211201_233537-8e99c07a.pth +- Name: twins_pcpvt-b_fpn_fpnhead_8xb4-80k_ade20k-512x512 + In Collection: FPN + Metadata: + backbone: PCPVT-B + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 50.84 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.41 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.66 + mIoU(ms+flip): 46.48 + Config: configs/twins/twins_pcpvt-b_fpn_fpnhead_8xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141019-d396db72.pth +- Name: twins_pcpvt-b_uperhead_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: PCPVT-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 83.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.46 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.91 + mIoU(ms+flip): 48.64 + Config: configs/twins/twins_pcpvt-b_uperhead_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k_20211130_141020-02094ea5.pth +- Name: twins_pcpvt-l_fpn_fpnhead_8xb4-80k_ade20k-512x512 + In Collection: FPN + Metadata: + backbone: PCPVT-L + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 69.83 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.78 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.94 + mIoU(ms+flip): 46.7 + Config: configs/twins/twins_pcpvt-l_fpn_fpnhead_8xb4-80k_ade20k-512x512.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_105226-bc6d61dc.pth +- Name: twins_pcpvt-l_uperhead_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: PCPVT-L + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 93.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.82 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 49.35 + mIoU(ms+flip): 50.08 + Config: configs/twins/twins_pcpvt-l_uperhead_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k_20211201_075053-c6095c07.pth +- Name: twins_svt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512 + In Collection: FPN + Metadata: + backbone: SVT-S + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 33.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.47 + mIoU(ms+flip): 45.42 + Config: configs/twins/twins_svt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141006-0a0d3317.pth +- Name: twins_svt-s_uperhead_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: SVT-S + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 66.27 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.93 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.08 + mIoU(ms+flip): 46.96 + Config: configs/twins/twins_svt-s_uperhead_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k/twins_svt-s_uperhead_8x2_512x512_160k_ade20k_20211130_141005-e48a2d94.pth +- Name: twins_svt-b_fpn_fpnhead_8xb4-80k_ade20k-512x512 + In Collection: FPN + Metadata: + backbone: SVT-B + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.39 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.75 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.77 + mIoU(ms+flip): 47.47 + Config: configs/twins/twins_svt-b_fpn_fpnhead_8xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_113849-88b2907c.pth +- Name: twins_svt-b_uperhead_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: SVT-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 78.99 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.77 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.04 + mIoU(ms+flip): 48.87 + Config: configs/twins/twins_svt-b_uperhead_8xb2-160k_ade20k-512x512.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k/twins_svt-b_uperhead_8x2_512x512_160k_ade20k_20211202_040826-0943a1f1.pth +- Name: twins_svt-l_fpn_fpnhead_8xb4-80k_ade20k-512x512 + In Collection: FPN + Metadata: + backbone: SVT-L + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 56.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 11.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.55 + mIoU(ms+flip): 47.74 + Config: configs/twins/twins_svt-l_fpn_fpnhead_8xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141005-1d59bee2.pth +- Name: twins_pcpvt-l_uperhead_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: SVT-L + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 93.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.41 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 49.65 + mIoU(ms+flip): 50.63 + Config: configs/twins/twins_pcpvt-l_uperhead_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k/twins_svt-l_uperhead_8x2_512x512_160k_ade20k_20211130_141005-3e2cae61.pth diff --git a/configs/twins/twins_pcpvt-b_fpn_fpnhead_8xb4-80k_ade20k-512x512.py b/configs/twins/twins_pcpvt-b_fpn_fpnhead_8xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..4739ad4b0a --- /dev/null +++ b/configs/twins/twins_pcpvt-b_fpn_fpnhead_8xb4-80k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = ['./twins_pcpvt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_base_20220308-0621964c.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + depths=[3, 4, 18, 3]), ) diff --git a/configs/twins/twins_pcpvt-b_uperhead_8xb2-160k_ade20k-512x512.py b/configs/twins/twins_pcpvt-b_uperhead_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..ba9748547d --- /dev/null +++ b/configs/twins/twins_pcpvt-b_uperhead_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,13 @@ +_base_ = ['./twins_pcpvt-s_uperhead_8xb4-160k_ade20k-512x512.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_base_20220308-0621964c.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + depths=[3, 4, 18, 3], + drop_path_rate=0.3)) + +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/twins/twins_pcpvt-l_fpn_fpnhead_8xb4-80k_ade20k-512x512.py b/configs/twins/twins_pcpvt-l_fpn_fpnhead_8xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..bff7c41946 --- /dev/null +++ b/configs/twins/twins_pcpvt-l_fpn_fpnhead_8xb4-80k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = ['./twins_pcpvt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_large_20220308-37579dc6.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + depths=[3, 8, 27, 3])) diff --git 
a/configs/twins/twins_pcpvt-l_uperhead_8xb2-160k_ade20k-512x512.py b/configs/twins/twins_pcpvt-l_uperhead_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..666ff5b69c --- /dev/null +++ b/configs/twins/twins_pcpvt-l_uperhead_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,13 @@ +_base_ = ['./twins_pcpvt-s_uperhead_8xb4-160k_ade20k-512x512.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_large_20220308-37579dc6.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + depths=[3, 8, 27, 3], + drop_path_rate=0.3)) + +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/twins/twins_pcpvt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py b/configs/twins/twins_pcpvt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..3b480b9f99 --- /dev/null +++ b/configs/twins/twins_pcpvt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/twins_pcpvt-s_fpn.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001), + clip_grad=None) diff --git a/configs/twins/twins_pcpvt-s_uperhead_8xb4-160k_ade20k-512x512.py b/configs/twins/twins_pcpvt-s_uperhead_8xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..387cf60228 --- /dev/null +++ b/configs/twins/twins_pcpvt-s_uperhead_8xb4-160k_ade20k-512x512.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/twins_pcpvt-s_upernet.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01), + paramwise_cfg=dict(custom_keys={ + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) 
+ })) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + eta_min=0.0, + power=1.0, + begin=1500, + end=160000, + by_epoch=False, + ) +] diff --git a/configs/twins/twins_svt-b_fpn_fpnhead_8xb4-80k_ade20k-512x512.py b/configs/twins/twins_svt-b_fpn_fpnhead_8xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..5e9fa00f88 --- /dev/null +++ b/configs/twins/twins_svt-b_fpn_fpnhead_8xb4-80k_ade20k-512x512.py @@ -0,0 +1,12 @@ +_base_ = ['./twins_svt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_base_20220308-1b7eb711.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + embed_dims=[96, 192, 384, 768], + num_heads=[3, 6, 12, 24], + depths=[2, 2, 18, 2]), + neck=dict(in_channels=[96, 192, 384, 768]), +) diff --git a/configs/twins/twins_svt-b_uperhead_8xb2-160k_ade20k-512x512.py b/configs/twins/twins_svt-b_uperhead_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..6ce2361f5f --- /dev/null +++ b/configs/twins/twins_svt-b_uperhead_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,12 @@ +_base_ = ['./twins_svt-s_uperhead_8xb2-160k_ade20k-512x512.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_base_20220308-1b7eb711.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + embed_dims=[96, 192, 384, 768], + num_heads=[3, 6, 12, 24], + depths=[2, 2, 18, 2]), + decode_head=dict(in_channels=[96, 192, 384, 768]), + auxiliary_head=dict(in_channels=384)) diff --git a/configs/twins/twins_svt-l_fpn_fpnhead_8xb4-80k_ade20k-512x512.py b/configs/twins/twins_svt-l_fpn_fpnhead_8xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..b7e5f9cdb8 --- /dev/null +++ b/configs/twins/twins_svt-l_fpn_fpnhead_8xb4-80k_ade20k-512x512.py @@ -0,0 +1,13 @@ +_base_ = ['./twins_svt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_large_20220308-fb5936f3.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + embed_dims=[128, 256, 512, 1024], + num_heads=[4, 8, 16, 32], + depths=[2, 2, 18, 2], + drop_path_rate=0.3), + neck=dict(in_channels=[128, 256, 512, 1024]), +) diff --git a/configs/twins/twins_svt-l_uperhead_8xb2-160k_ade20k-512x512.py b/configs/twins/twins_svt-l_uperhead_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..69c69df3b5 --- /dev/null +++ b/configs/twins/twins_svt-l_uperhead_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,13 @@ +_base_ = ['./twins_svt-s_uperhead_8xb2-160k_ade20k-512x512.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_large_20220308-fb5936f3.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + embed_dims=[128, 256, 512, 1024], + num_heads=[4, 8, 16, 32], + depths=[2, 2, 18, 2], + drop_path_rate=0.3), + decode_head=dict(in_channels=[128, 256, 512, 1024]), + auxiliary_head=dict(in_channels=512)) diff --git a/configs/twins/twins_svt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py b/configs/twins/twins_svt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..c1aad83bc1 --- /dev/null +++ b/configs/twins/twins_svt-s_fpn_fpnhead_8xb4-80k_ade20k-512x512.py @@ -0,0 +1,28 @@ +_base_ = [ + 
'../_base_/models/twins_pcpvt-s_fpn.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_small_20220308-7e1c3695.pth' # noqa + +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + type='SVT', + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + embed_dims=[64, 128, 256, 512], + num_heads=[2, 4, 8, 16], + mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 10, 4], + windiow_sizes=[7, 7, 7, 7], + norm_after_stage=True), + neck=dict(in_channels=[64, 128, 256, 512], out_channels=256, num_outs=4), + decode_head=dict(num_classes=150), +) + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001), + clip_grad=None) diff --git a/configs/twins/twins_svt-s_uperhead_8xb2-160k_ade20k-512x512.py b/configs/twins/twins_svt-s_uperhead_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..3846795509 --- /dev/null +++ b/configs/twins/twins_svt-s_uperhead_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,49 @@ +_base_ = [ + '../_base_/models/twins_pcpvt-s_upernet.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_small_20220308-7e1c3695.pth' # noqa + +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + type='SVT', + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + embed_dims=[64, 128, 256, 512], + num_heads=[2, 4, 8, 16], + mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 10, 4], + windiow_sizes=[7, 7, 7, 7], + norm_after_stage=True), + decode_head=dict(in_channels=[64, 128, 256, 512]), + auxiliary_head=dict(in_channels=256)) + +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01), + paramwise_cfg=dict(custom_keys={ + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + eta_min=0.0, + power=1.0, + begin=1500, + end=160000, + by_epoch=False, + ) +] + +train_dataloader = dict(batch_size=2, num_workers=2) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/unet/README.md b/configs/unet/README.md new file mode 100644 index 0000000000..f3dc261c22 --- /dev/null +++ b/configs/unet/README.md @@ -0,0 +1,92 @@ +# UNet + +[U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +There is large consent that successful training of deep networks requires many thousand annotated training samples. In this paper, we present a network and training strategy that relies on the strong use of data augmentation to use the available annotated samples more efficiently. The architecture consists of a contracting path to capture context and a symmetric expanding path that enables precise localization. 
We show that such a network can be trained end-to-end from very few images and outperforms the prior best method (a sliding-window convolutional network) on the ISBI challenge for segmentation of neuronal structures in electron microscopic stacks. Using the same network trained on transmitted light microscopy images (phase contrast and DIC) we won the ISBI cell tracking challenge 2015 in these categories by a large margin. Moreover, the network is fast. Segmentation of a 512x512 image takes less than a second on a recent GPU. The full implementation (based on Caffe) and the trained networks are available at [this http URL](https://lmb.informatik.uni-freiburg.de/people/ronneber/u-net/). + + + +
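The contracting/expanding structure described in the abstract is the core of every `UNet-S5-D16` model configured below. As a rough illustration only — a minimal sketch, not the MMSegmentation `UNet` backbone, whose stage count, channel widths, and normalization differ — the encoder halves the resolution while the decoder upsamples and concatenates the matching encoder feature map as a skip connection:

```python
# Minimal, illustrative U-Net-style network (NOT the mmseg implementation).
# Depth and channel sizes here are placeholders chosen for brevity.
import torch
import torch.nn as nn

def conv_block(in_ch, out_ch):
    return nn.Sequential(
        nn.Conv2d(in_ch, out_ch, 3, padding=1), nn.ReLU(inplace=True),
        nn.Conv2d(out_ch, out_ch, 3, padding=1), nn.ReLU(inplace=True))

class TinyUNet(nn.Module):
    def __init__(self, in_ch=3, num_classes=2):
        super().__init__()
        self.enc1 = conv_block(in_ch, 64)
        self.enc2 = conv_block(64, 128)
        self.pool = nn.MaxPool2d(2)
        self.up = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.dec1 = conv_block(128, 64)  # 128 = 64 (skip) + 64 (upsampled)
        self.head = nn.Conv2d(64, num_classes, 1)

    def forward(self, x):
        e1 = self.enc1(x)              # contracting path, full resolution
        e2 = self.enc2(self.pool(e1))  # contracting path, half resolution
        d1 = self.up(e2)               # expanding path
        d1 = self.dec1(torch.cat([d1, e1], dim=1))  # skip connection
        return self.head(d1)           # per-pixel class logits

logits = TinyUNet()(torch.randn(1, 3, 64, 64))  # -> shape (1, 2, 64, 64)
```

The skip connection is what gives the expanding path access to high-resolution encoder features, which is how the network recovers the precise localization mentioned in the abstract.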
+ +## Citation + +```bibtex +@inproceedings{ronneberger2015u, + title={U-net: Convolutional networks for biomedical image segmentation}, + author={Ronneberger, Olaf and Fischer, Philipp and Brox, Thomas}, + booktitle={International Conference on Medical image computing and computer-assisted intervention}, + pages={234--241}, + year={2015}, + organization={Springer} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Loss | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | ----------- | ------------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UNet + FCN | UNet-S5-D16 | Cross Entropy | 512x1024 | 160000 | 17.91 | 3.05 | 69.10 | 71.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_fcn_4xb4-160k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes_20211210_145204-6860854e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes_20211210_145204.log.json) | + +### DRIVE + +| Method | Backbone | Loss | Image Size | Crop Size | Stride | Lr schd | Mem (GB) | Inf time (fps) | mDice | Dice | config | download | +| ---------------- | ----------- | -------------------- | ---------- | --------- | -----: | ------- | -------- | -------------: | ----: | ----: | ---------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UNet + FCN | UNet-S5-D16 | Cross Entropy | 584x565 | 64x64 | 42x42 | 40000 | 0.680 | - | 88.38 | 78.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_fcn_4xb4-40k_drive-64x64.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_64x64_40k_drive/fcn_unet_s5-d16_64x64_40k_drive_20201223_191051-5daf6d3b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/unet_s5-d16_64x64_40k_drive/unet_s5-d16_64x64_40k_drive-20201223_191051.log.json) | +| UNet + FCN | UNet-S5-D16 | Cross Entropy + Dice | 584x565 | 64x64 | 42x42 | 40000 | 0.582 | - | 88.71 | 79.32 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201820-785de5c2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201820.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy | 584x565 | 64x64 | 42x42 | 40000 | 0.599 | - | 88.35 | 78.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_pspnet_4xb4-40k_drive-64x64.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_64x64_40k_drive/pspnet_unet_s5-d16_64x64_40k_drive_20201227_181818-aac73387.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_64x64_40k_drive/pspnet_unet_s5-d16_64x64_40k_drive-20201227_181818.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy + Dice | 584x565 | 64x64 | 42x42 | 40000 | 0.585 | - | 88.76 | 79.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201821-22b3e3ba.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201821.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy | 584x565 | 64x64 | 42x42 | 40000 | 0.596 | - | 88.38 | 78.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_drive-64x64.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_64x64_40k_drive/deeplabv3_unet_s5-d16_64x64_40k_drive_20201226_094047-0671ff20.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_64x64_40k_drive/deeplabv3_unet_s5-d16_64x64_40k_drive-20201226_094047.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy + Dice | 584x565 | 64x64 | 42x42 | 40000 | 0.582 | - | 88.84 | 79.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201825-6bf0efd7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201825.log.json) | + +### STARE + +| Method | Backbone | Loss | Image Size | Crop Size | Stride | Lr schd | Mem (GB) | Inf time (fps) | mDice | Dice | config | download | +| ---------------- | ----------- | -------------------- | ---------- | --------- | -----: | ------- | -------- | -------------: | ----: | ----: | ------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| UNet + FCN | UNet-S5-D16 | Cross Entropy | 605x700 | 128x128 | 85x85 | 40000 | 0.968 | - | 89.78 | 81.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_fcn_4xb4-40k_stare-128x128.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_128x128_40k_stare/fcn_unet_s5-d16_128x128_40k_stare_20201223_191051-7d77e78b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/unet_s5-d16_128x128_40k_stare/unet_s5-d16_128x128_40k_stare-20201223_191051.log.json) | +| UNet + FCN | UNet-S5-D16 | Cross Entropy + Dice | 605x700 | 128x128 | 85x85 | 40000 | 0.986 | - | 90.65 | 82.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201821-f75705a9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201821.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy | 605x700 | 128x128 | 85x85 | 40000 | 0.982 | - | 89.89 | 81.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_pspnet_4xb4-40k_stare-128x128.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_128x128_40k_stare/pspnet_unet_s5-d16_128x128_40k_stare_20201227_181818-3c2923c4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_128x128_40k_stare/pspnet_unet_s5-d16_128x128_40k_stare-20201227_181818.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy + Dice | 605x700 | 128x128 | 85x85 | 40000 | 1.028 | - | 90.72 | 82.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201823-f1063ef7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201823.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy | 605x700 | 128x128 | 85x85 | 40000 | 0.999 | - | 89.73 | 80.93 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_stare-128x128.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_128x128_40k_stare/deeplabv3_unet_s5-d16_128x128_40k_stare_20201226_094047-93dcb93c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_128x128_40k_stare/deeplabv3_unet_s5-d16_128x128_40k_stare-20201226_094047.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy + Dice | 605x700 | 128x128 | 85x85 | 40000 | 1.010 | - | 90.65 | 
82.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201825-21db614c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201825.log.json) | + +### CHASE_DB1 + +| Method | Backbone | Loss | Image Size | Crop Size | Stride | Lr schd | Mem (GB) | Inf time (fps) | mDice | Dice | config | download | +| ---------------- | ----------- | -------------------- | ---------- | --------- | -----: | ------- | -------- | -------------: | ----: | ----: | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UNet + FCN | UNet-S5-D16 | Cross Entropy | 960x999 | 128x128 | 85x85 | 40000 | 0.968 | - | 89.46 | 80.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_fcn_4xb4-40k_chase-db1-128x128.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_128x128_40k_chase_db1/fcn_unet_s5-d16_128x128_40k_chase_db1_20201223_191051-11543527.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/unet_s5-d16_128x128_40k_chase_db1/unet_s5-d16_128x128_40k_chase_db1-20201223_191051.log.json) | +| UNet + FCN | UNet-S5-D16 | Cross Entropy + Dice | 960x999 | 128x128 | 85x85 | 40000 | 0.986 | - | 89.52 | 80.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201821-1c4eb7cf.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201821.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy | 960x999 | 128x128 | 85x85 | 40000 | 0.982 | - | 89.52 | 80.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_pspnet_4xb4-40k_chase-db1-128x128.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_128x128_40k_chase_db1/pspnet_unet_s5-d16_128x128_40k_chase_db1_20201227_181818-68d4e609.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_128x128_40k_chase_db1/pspnet_unet_s5-d16_128x128_40k_chase_db1-20201227_181818.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy + Dice | 960x999 | 128x128 | 85x85 | 40000 | 1.028 | - | 89.45 | 80.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201823-c0802c4d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201823.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy | 960x999 | 128x128 | 85x85 | 40000 | 0.999 | - | 89.57 | 80.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet_s5-d16_deeplabv3_4xb4-40k_chase-db1-128x128.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_128x128_40k_chase_db1/deeplabv3_unet_s5-d16_128x128_40k_chase_db1_20201226_094047-4c5aefa3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_128x128_40k_chase_db1/deeplabv3_unet_s5-d16_128x128_40k_chase_db1-20201226_094047.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy + Dice | 960x999 | 128x128 | 85x85 | 40000 | 1.010 | - | 89.49 | 80.37 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201825-4ef29df5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201825.log.json) | + +### HRF + +| Method | Backbone | Loss | Image Size | Crop Size | Stride | Lr schd | Mem (GB) | Inf time (fps) | mDice | Dice | config | download | +| ---------------- | ----------- | -------------------- | ---------- | --------- | ------: | ------- | -------- | -------------: | ----: | ----: | ---------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UNet + FCN | UNet-S5-D16 | Cross Entropy | 2336x3504 | 256x256 | 170x170 | 40000 | 2.525 | - | 88.92 | 79.45 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_fcn_4xb4-40k_hrf-256x256.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_256x256_40k_hrf/fcn_unet_s5-d16_256x256_40k_hrf_20201223_173724-d89cf1ed.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/unet_s5-d16_256x256_40k_hrf/unet_s5-d16_256x256_40k_hrf-20201223_173724.log.json) | +| UNet + FCN | UNet-S5-D16 | Cross Entropy + Dice | 2336x3504 | 256x256 | 170x170 | 40000 | 2.623 | - | 89.64 | 80.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_201821-c314da8a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_201821.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy | 2336x3504 | 256x256 | 170x170 | 40000 | 2.588 | - | 89.24 | 80.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_pspnet_4xb4-40k_hrf-256x256.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_256x256_40k_hrf/pspnet_unet_s5-d16_256x256_40k_hrf_20201227_181818-fdb7e29b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_256x256_40k_hrf/pspnet_unet_s5-d16_256x256_40k_hrf-20201227_181818.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy + Dice | 2336x3504 | 256x256 | 170x170 | 40000 | 2.798 | - | 89.69 | 80.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_201823-53d492fa.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_201823.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy | 2336x3504 | 256x256 | 170x170 | 40000 | 2.604 | - | 89.32 | 80.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_hrf-256x256.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_256x256_40k_hrf/deeplabv3_unet_s5-d16_256x256_40k_hrf_20201226_094047-3a1fdf85.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_256x256_40k_hrf/deeplabv3_unet_s5-d16_256x256_40k_hrf-20201226_094047.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy + Dice | 2336x3504 | 256x256 | 170x170 | 40000 | 2.607 | - | 89.56 | 80.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_202032-59daf7a4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_202032.log.json) | + +Note: + +- In `DRIVE`, `STARE`, `CHASE_DB1`, and `HRF` dataset, `mDice` is mean dice of background and vessel, while `Dice` is dice metric of vessel(foreground) only. 
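To make the note above concrete, the following is a small illustrative sketch — not the MMSegmentation evaluator — of how `mDice` and `Dice` relate for a binary vessel task: a per-class Dice score is computed for background (class 0) and vessel (class 1), `mDice` averages the two, and `Dice` reports the vessel (foreground) class alone.

```python
# Illustrative Dice computation on hard label maps (toy example, assumed shapes).
import numpy as np

def dice_per_class(pred, gt, num_classes=2, eps=1e-6):
    """Return the Dice score of each class for integer label maps."""
    scores = []
    for c in range(num_classes):
        p, g = (pred == c), (gt == c)
        inter = np.logical_and(p, g).sum()
        scores.append((2 * inter + eps) / (p.sum() + g.sum() + eps))
    return scores

pred = np.array([[0, 1, 1], [0, 0, 1]])  # predicted vessel mask (toy data)
gt   = np.array([[0, 1, 0], [0, 1, 1]])  # ground-truth vessel mask (toy data)
bg_dice, fg_dice = dice_per_class(pred, gt)
print(f'mDice = {(bg_dice + fg_dice) / 2:.4f}, Dice (vessel only) = {fg_dice:.4f}')
```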
diff --git a/configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_drive-64x64.py b/configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_drive-64x64.py new file mode 100644 index 0000000000..e4af542bfa --- /dev/null +++ b/configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_drive-64x64.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/deeplabv3_unet_s5-d16.py', '../_base_/datasets/drive.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (64, 64) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + test_cfg=dict(crop_size=(64, 64), stride=(42, 42))) diff --git a/configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_hrf-256x256.py b/configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_hrf-256x256.py new file mode 100644 index 0000000000..b45405fe35 --- /dev/null +++ b/configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_hrf-256x256.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/deeplabv3_unet_s5-d16.py', '../_base_/datasets/hrf.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (256, 256) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + test_cfg=dict(crop_size=(256, 256), stride=(170, 170))) diff --git a/configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_stare-128x128.py b/configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_stare-128x128.py new file mode 100644 index 0000000000..554caca96f --- /dev/null +++ b/configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_stare-128x128.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/deeplabv3_unet_s5-d16.py', '../_base_/datasets/stare.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (128, 128) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + test_cfg=dict(crop_size=(128, 128), stride=(85, 85))) diff --git a/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py b/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py new file mode 100644 index 0000000000..4f30bba9a7 --- /dev/null +++ b/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py @@ -0,0 +1,6 @@ +_base_ = './unet_s5-d16_deeplabv3_4xb4-40k_chase-db1-128x128.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py b/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py new file mode 100644 index 0000000000..823fc6dc51 --- /dev/null +++ b/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py @@ -0,0 +1,6 @@ +_base_ = './unet-s5-d16_deeplabv3_4xb4-40k_drive-64x64.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py b/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py new file mode 100644 index 0000000000..174eaf8d93 --- /dev/null +++ b/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py @@ -0,0 +1,6 @@ +_base_ = './unet-s5-d16_deeplabv3_4xb4-40k_hrf-256x256.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + 
dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py b/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py new file mode 100644 index 0000000000..35972bea93 --- /dev/null +++ b/configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py @@ -0,0 +1,6 @@ +_base_ = './unet-s5-d16_deeplabv3_4xb4-40k_stare-128x128.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/configs/unet/unet-s5-d16_fcn_4xb4-160k_cityscapes-512x1024.py b/configs/unet/unet-s5-d16_fcn_4xb4-160k_cityscapes-512x1024.py new file mode 100644 index 0000000000..c2e995dd21 --- /dev/null +++ b/configs/unet/unet-s5-d16_fcn_4xb4-160k_cityscapes-512x1024.py @@ -0,0 +1,16 @@ +_base_ = [ + '../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=19), + auxiliary_head=dict(num_classes=19), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) +train_dataloader = dict(batch_size=4, num_workers=4) +val_dataloader = dict(batch_size=1, num_workers=4) +test_dataloader = val_dataloader diff --git a/configs/unet/unet-s5-d16_fcn_4xb4-40k_chase-db1-128x128.py b/configs/unet/unet-s5-d16_fcn_4xb4-40k_chase-db1-128x128.py new file mode 100644 index 0000000000..bfc2109e22 --- /dev/null +++ b/configs/unet/unet-s5-d16_fcn_4xb4-40k_chase-db1-128x128.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/chase_db1.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (128, 128) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + test_cfg=dict(crop_size=(128, 128), stride=(85, 85))) diff --git a/configs/unet/unet-s5-d16_fcn_4xb4-40k_drive-64x64.py b/configs/unet/unet-s5-d16_fcn_4xb4-40k_drive-64x64.py new file mode 100644 index 0000000000..10a45d1f7f --- /dev/null +++ b/configs/unet/unet-s5-d16_fcn_4xb4-40k_drive-64x64.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/drive.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (64, 64) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + test_cfg=dict(crop_size=(64, 64), stride=(42, 42))) diff --git a/configs/unet/unet-s5-d16_fcn_4xb4-40k_hrf-256x256.py b/configs/unet/unet-s5-d16_fcn_4xb4-40k_hrf-256x256.py new file mode 100644 index 0000000000..7de57f2c2f --- /dev/null +++ b/configs/unet/unet-s5-d16_fcn_4xb4-40k_hrf-256x256.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/hrf.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (256, 256) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + test_cfg=dict(crop_size=(256, 256), stride=(170, 170))) diff --git a/configs/unet/unet-s5-d16_fcn_4xb4-40k_stare-128x128.py b/configs/unet/unet-s5-d16_fcn_4xb4-40k_stare-128x128.py new file mode 100644 index 0000000000..8eeef77628 --- /dev/null +++ 
b/configs/unet/unet-s5-d16_fcn_4xb4-40k_stare-128x128.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/stare.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (128, 128) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + test_cfg=dict(crop_size=(128, 128), stride=(85, 85))) diff --git a/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py b/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py new file mode 100644 index 0000000000..5a26ccbf96 --- /dev/null +++ b/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py @@ -0,0 +1,6 @@ +_base_ = './unet-s5-d16_fcn_4xb4-40k_chase-db1-128x128.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py b/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py new file mode 100644 index 0000000000..c3b1488ad5 --- /dev/null +++ b/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py @@ -0,0 +1,6 @@ +_base_ = './unet-s5-d16_fcn_4xb4-40k_drive-64x64.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py b/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py new file mode 100644 index 0000000000..dd3a6afc02 --- /dev/null +++ b/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py @@ -0,0 +1,6 @@ +_base_ = './unet-s5-d16_fcn_4xb4-40k_hrf-256x256.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py b/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py new file mode 100644 index 0000000000..c8fecf34e9 --- /dev/null +++ b/configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py @@ -0,0 +1,6 @@ +_base_ = './unet-s5-d16_fcn_4xb4-40k_stare-128x128.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/configs/unet/unet-s5-d16_pspnet_4xb4-40k_chase-db1-128x128.py b/configs/unet/unet-s5-d16_pspnet_4xb4-40k_chase-db1-128x128.py new file mode 100644 index 0000000000..ca6e5132fa --- /dev/null +++ b/configs/unet/unet-s5-d16_pspnet_4xb4-40k_chase-db1-128x128.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_unet_s5-d16.py', + '../_base_/datasets/chase_db1.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (128, 128) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + test_cfg=dict(crop_size=(128, 128), stride=(85, 85))) diff --git a/configs/unet/unet-s5-d16_pspnet_4xb4-40k_drive-64x64.py b/configs/unet/unet-s5-d16_pspnet_4xb4-40k_drive-64x64.py new file mode 100644 index 0000000000..503b90136d --- /dev/null +++ b/configs/unet/unet-s5-d16_pspnet_4xb4-40k_drive-64x64.py @@ -0,0 +1,9 @@ +_base_ = 
[ + '../_base_/models/pspnet_unet_s5-d16.py', '../_base_/datasets/drive.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (64, 64) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + test_cfg=dict(crop_size=(64, 64), stride=(42, 42))) diff --git a/configs/unet/unet-s5-d16_pspnet_4xb4-40k_hrf-256x256.py b/configs/unet/unet-s5-d16_pspnet_4xb4-40k_hrf-256x256.py new file mode 100644 index 0000000000..245365ca8d --- /dev/null +++ b/configs/unet/unet-s5-d16_pspnet_4xb4-40k_hrf-256x256.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/pspnet_unet_s5-d16.py', '../_base_/datasets/hrf.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (256, 256) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + test_cfg=dict(crop_size=(256, 256), stride=(170, 170))) diff --git a/configs/unet/unet-s5-d16_pspnet_4xb4-40k_stare-128x128.py b/configs/unet/unet-s5-d16_pspnet_4xb4-40k_stare-128x128.py new file mode 100644 index 0000000000..c1eeeb96f8 --- /dev/null +++ b/configs/unet/unet-s5-d16_pspnet_4xb4-40k_stare-128x128.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/pspnet_unet_s5-d16.py', '../_base_/datasets/stare.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (128, 128) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + test_cfg=dict(crop_size=(128, 128), stride=(85, 85))) diff --git a/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py b/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py new file mode 100644 index 0000000000..69a4bbaf82 --- /dev/null +++ b/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py @@ -0,0 +1,6 @@ +_base_ = './unet-s5-d16_pspnet_4xb4-40k_chase-db1-128x128.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py b/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py new file mode 100644 index 0000000000..1abbd53d8c --- /dev/null +++ b/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py @@ -0,0 +1,6 @@ +_base_ = './unet-s5-d16_pspnet_4xb4-40k_drive-64x64.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py b/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py new file mode 100644 index 0000000000..b3256d759b --- /dev/null +++ b/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py @@ -0,0 +1,6 @@ +_base_ = './unet-s5-d16_pspnet_4xb4-40k_hrf-256x256.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py b/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py new file mode 100644 index 0000000000..82aa3da616 --- /dev/null +++ 
b/configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py @@ -0,0 +1,6 @@ +_base_ = './unet-s5-d16_pspnet_4xb4-40k_stare-128x128.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/configs/unet/unet.yml b/configs/unet/unet.yml new file mode 100644 index 0000000000..4a01ce33e2 --- /dev/null +++ b/configs/unet/unet.yml @@ -0,0 +1,377 @@ +Collections: +- Name: UNet + Metadata: + Training Data: + - Cityscapes + - DRIVE + - STARE + - CHASE_DB1 + - HRF + Paper: + URL: https://arxiv.org/abs/1505.04597 + Title: 'U-Net: Convolutional Networks for Biomedical Image Segmentation' + README: configs/unet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/unet.py#L225 + Version: v0.17.0 + Converted From: + Code: http://lmb.informatik.uni-freiburg.de/people/ronneber/u-net +Models: +- Name: unet-s5-d16_fcn_4xb4-160k_cityscapes-512x1024 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (512,1024) + lr schd: 160000 + inference time (ms/im): + - value: 327.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 17.91 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 69.1 + mIoU(ms+flip): 71.05 + Config: configs/unet/unet-s5-d16_fcn_4xb4-160k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes_20211210_145204-6860854e.pth +- Name: unet-s5-d16_fcn_4xb4-40k_drive-64x64 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (64,64) + lr schd: 40000 + Training Memory (GB): 0.68 + Results: + - Task: Semantic Segmentation + Dataset: DRIVE + Metrics: + Dice: 78.67 + Config: configs/unet/unet-s5-d16_fcn_4xb4-40k_drive-64x64.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_64x64_40k_drive/fcn_unet_s5-d16_64x64_40k_drive_20201223_191051-5daf6d3b.pth +- Name: unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_drive-64x64 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (64,64) + lr schd: 40000 + Training Memory (GB): 0.582 + Results: + - Task: Semantic Segmentation + Dataset: DRIVE + Metrics: + Dice: 79.32 + Config: configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201820-785de5c2.pth +- Name: unet-s5-d16_pspnet_4xb4-40k_drive-64x64 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (64,64) + lr schd: 40000 + Training Memory (GB): 0.599 + Results: + - Task: Semantic Segmentation + Dataset: DRIVE + Metrics: + Dice: 78.62 + Config: configs/unet/unet-s5-d16_pspnet_4xb4-40k_drive-64x64.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_64x64_40k_drive/pspnet_unet_s5-d16_64x64_40k_drive_20201227_181818-aac73387.pth +- Name: unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_drive-64x64 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (64,64) + lr schd: 40000 + Training Memory (GB): 0.585 + Results: + - Task: Semantic Segmentation + Dataset: DRIVE + Metrics: + Dice: 79.42 + Config: 
configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201821-22b3e3ba.pth +- Name: unet-s5-d16_deeplabv3_4xb4-40k_drive-64x64 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (64,64) + lr schd: 40000 + Training Memory (GB): 0.596 + Results: + - Task: Semantic Segmentation + Dataset: DRIVE + Metrics: + Dice: 78.69 + Config: configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_drive-64x64.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_64x64_40k_drive/deeplabv3_unet_s5-d16_64x64_40k_drive_20201226_094047-0671ff20.pth +- Name: unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_drive-64x64 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (64,64) + lr schd: 40000 + Training Memory (GB): 0.582 + Results: + - Task: Semantic Segmentation + Dataset: DRIVE + Metrics: + Dice: 79.56 + Config: configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_drive-64x64.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201825-6bf0efd7.pth +- Name: unet-s5-d16_fcn_4xb4-40k_stare-128x128 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.968 + Results: + - Task: Semantic Segmentation + Dataset: STARE + Metrics: + Dice: 81.02 + Config: configs/unet/unet-s5-d16_fcn_4xb4-40k_stare-128x128.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_128x128_40k_stare/fcn_unet_s5-d16_128x128_40k_stare_20201223_191051-7d77e78b.pth +- Name: unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_stare-128x128 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.986 + Results: + - Task: Semantic Segmentation + Dataset: STARE + Metrics: + Dice: 82.7 + Config: configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201821-f75705a9.pth +- Name: unet-s5-d16_pspnet_4xb4-40k_stare-128x128 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.982 + Results: + - Task: Semantic Segmentation + Dataset: STARE + Metrics: + Dice: 81.22 + Config: configs/unet/unet-s5-d16_pspnet_4xb4-40k_stare-128x128.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_128x128_40k_stare/pspnet_unet_s5-d16_128x128_40k_stare_20201227_181818-3c2923c4.pth +- Name: unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_stare-128x128 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 1.028 + Results: + - Task: Semantic Segmentation + Dataset: STARE + Metrics: + Dice: 82.84 + Config: configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201823-f1063ef7.pth +- Name: unet-s5-d16_deeplabv3_4xb4-40k_stare-128x128 + In Collection: UNet + Metadata: + backbone: 
UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.999 + Results: + - Task: Semantic Segmentation + Dataset: STARE + Metrics: + Dice: 80.93 + Config: configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_stare-128x128.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_128x128_40k_stare/deeplabv3_unet_s5-d16_128x128_40k_stare_20201226_094047-93dcb93c.pth +- Name: unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_stare-128x128 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 1.01 + Results: + - Task: Semantic Segmentation + Dataset: STARE + Metrics: + Dice: 82.71 + Config: configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_stare-128x128.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201825-21db614c.pth +- Name: unet-s5-d16_fcn_4xb4-40k_chase-db1-128x128 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.968 + Results: + - Task: Semantic Segmentation + Dataset: CHASE_DB1 + Metrics: + Dice: 80.24 + Config: configs/unet/unet-s5-d16_fcn_4xb4-40k_chase-db1-128x128.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_128x128_40k_chase_db1/fcn_unet_s5-d16_128x128_40k_chase_db1_20201223_191051-11543527.pth +- Name: unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.986 + Results: + - Task: Semantic Segmentation + Dataset: CHASE_DB1 + Metrics: + Dice: 80.4 + Config: configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201821-1c4eb7cf.pth +- Name: unet-s5-d16_pspnet_4xb4-40k_chase-db1-128x128 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.982 + Results: + - Task: Semantic Segmentation + Dataset: CHASE_DB1 + Metrics: + Dice: 80.36 + Config: configs/unet/unet-s5-d16_pspnet_4xb4-40k_chase-db1-128x128.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_128x128_40k_chase_db1/pspnet_unet_s5-d16_128x128_40k_chase_db1_20201227_181818-68d4e609.pth +- Name: unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 1.028 + Results: + - Task: Semantic Segmentation + Dataset: CHASE_DB1 + Metrics: + Dice: 80.28 + Config: configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201823-c0802c4d.pth +- Name: unet_s5-d16_deeplabv3_4xb4-40k_chase-db1-128x128 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.999 + Results: + - Task: Semantic Segmentation + Dataset: CHASE_DB1 + Metrics: + Dice: 80.47 + Config: configs/unet/unet_s5-d16_deeplabv3_4xb4-40k_chase-db1-128x128.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_128x128_40k_chase_db1/deeplabv3_unet_s5-d16_128x128_40k_chase_db1_20201226_094047-4c5aefa3.pth +- Name: unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 1.01 + Results: + - Task: Semantic Segmentation + Dataset: CHASE_DB1 + Metrics: + Dice: 80.37 + Config: configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_chase-db1-128x128.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201825-4ef29df5.pth +- Name: unet-s5-d16_fcn_4xb4-40k_hrf-256x256 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (256,256) + lr schd: 40000 + Training Memory (GB): 2.525 + Results: + - Task: Semantic Segmentation + Dataset: HRF + Metrics: + Dice: 79.45 + Config: configs/unet/unet-s5-d16_fcn_4xb4-40k_hrf-256x256.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_256x256_40k_hrf/fcn_unet_s5-d16_256x256_40k_hrf_20201223_173724-d89cf1ed.pth +- Name: unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (256,256) + lr schd: 40000 + Training Memory (GB): 2.623 + Results: + - Task: Semantic Segmentation + Dataset: HRF + Metrics: + Dice: 80.87 + Config: configs/unet/unet-s5-d16_fcn_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_201821-c314da8a.pth +- Name: unet-s5-d16_pspnet_4xb4-40k_hrf-256x256 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (256,256) + lr schd: 40000 + Training Memory (GB): 2.588 + Results: + - Task: Semantic Segmentation + Dataset: HRF + Metrics: + Dice: 80.07 + Config: configs/unet/unet-s5-d16_pspnet_4xb4-40k_hrf-256x256.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_256x256_40k_hrf/pspnet_unet_s5-d16_256x256_40k_hrf_20201227_181818-fdb7e29b.pth +- Name: unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (256,256) + lr schd: 40000 + Training Memory (GB): 2.798 + Results: + - Task: Semantic Segmentation + Dataset: HRF + Metrics: + Dice: 80.96 + Config: configs/unet/unet-s5-d16_pspnet_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_201823-53d492fa.pth +- Name: unet-s5-d16_deeplabv3_4xb4-40k_hrf-256x256 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (256,256) + lr schd: 40000 + Training Memory (GB): 2.604 + Results: + - Task: Semantic Segmentation + Dataset: HRF + Metrics: + Dice: 80.21 + Config: configs/unet/unet-s5-d16_deeplabv3_4xb4-40k_hrf-256x256.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_256x256_40k_hrf/deeplabv3_unet_s5-d16_256x256_40k_hrf_20201226_094047-3a1fdf85.pth +- Name: unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (256,256) + lr schd: 40000 + Training Memory (GB): 2.607 + Results: + 
- Task: Semantic Segmentation + Dataset: HRF + Metrics: + Dice: 80.71 + Config: configs/unet/unet-s5-d16_deeplabv3_4xb4-ce-1.0-dice-3.0-40k_hrf-256x256.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_202032-59daf7a4.pth diff --git a/configs/unet/unet_s5-d16_deeplabv3_4xb4-40k_chase-db1-128x128.py b/configs/unet/unet_s5-d16_deeplabv3_4xb4-40k_chase-db1-128x128.py new file mode 100644 index 0000000000..82494f3092 --- /dev/null +++ b/configs/unet/unet_s5-d16_deeplabv3_4xb4-40k_chase-db1-128x128.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3_unet_s5-d16.py', + '../_base_/datasets/chase_db1.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (128, 128) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + test_cfg=dict(crop_size=(128, 128), stride=(85, 85))) diff --git a/configs/upernet/README.md b/configs/upernet/README.md index 88a64d848d..e4a5ee4381 100644 --- a/configs/upernet/README.md +++ b/configs/upernet/README.md @@ -1,7 +1,30 @@ -# Unified Perceptual Parsing for Scene Understanding +# UPerNet + +[Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/pdf/1807.10221.pdf) ## Introduction -``` + + + +Official Repo + +Code Snippet + +## Abstract + + + +Humans recognize the visual world at multiple levels: we effortlessly categorize scenes and detect objects inside, while also identifying the textures and surfaces of the objects along with their different compositional parts. In this paper, we study a new task called Unified Perceptual Parsing, which requires the machine vision systems to recognize as many visual concepts as possible from a given image. A multi-task framework called UPerNet and a training strategy are developed to learn from heterogeneous image annotations. We benchmark our framework on Unified Perceptual Parsing and show that it is able to effectively segment a wide range of concepts from images. The trained networks are further applied to discover visual knowledge in natural scenes. Models are available at [this https URL](https://github.com/CSAILVision/unifiedparsing). + + + +
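In MMSegmentation, the UPerNet models listed in the tables below follow the usual encoder-decoder layout: a ResNet backbone exposes all four stage outputs and `UPerHead` fuses them, with an auxiliary FCN head on the third stage. The snippet below is a hedged, illustrative sketch of that wiring; the field values are assumptions chosen for illustration and are not copied from the actual `_base_` model files.

```python
# Illustrative sketch of how a UPerNet model config is composed in mmseg
# (values are assumptions, not the exact contents of upernet_r50 base files).
model = dict(
    type='EncoderDecoder',
    backbone=dict(
        type='ResNetV1c', depth=50,
        out_indices=(0, 1, 2, 3)),           # expose all four ResNet stages
    decode_head=dict(
        type='UPerHead',
        in_channels=[256, 512, 1024, 2048],  # ResNet-50 stage widths
        in_index=[0, 1, 2, 3],
        channels=512,
        num_classes=19),                     # e.g. Cityscapes
    auxiliary_head=dict(
        type='FCNHead', in_channels=1024, in_index=2,
        channels=256, num_classes=19))
```

`UPerHead` combines a pyramid pooling module on the deepest stage with an FPN-style top-down fusion of the shallower stages, which is how a single head segments concepts at multiple levels of granularity.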
+ +## Citation + +```bibtex @inproceedings{xiao2018unified, title={Unified perceptual parsing for scene understanding}, author={Xiao, Tete and Liu, Yingcheng and Zhou, Bolei and Jiang, Yuning and Sun, Jian}, @@ -14,29 +37,32 @@ ## Results and models ### Cityscapes -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|---------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| UPerNet | R-50 | 512x1024 | 40000 | 6.4 | 4.25 | 77.10 | 78.37 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_40k_cityscapes/upernet_r50_512x1024_40k_cityscapes_20200605_094827-aa54cb54.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_40k_cityscapes/upernet_r50_512x1024_40k_cityscapes_20200605_094827.log.json) | -| UPerNet | R-101 | 512x1024 | 40000 | 7.4 | 3.79 | 78.69 | 80.11 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_40k_cityscapes/upernet_r101_512x1024_40k_cityscapes_20200605_094933-ebce3b10.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_40k_cityscapes/upernet_r101_512x1024_40k_cityscapes_20200605_094933.log.json) | -| UPerNet | R-50 | 769x769 | 40000 | 7.2 | 1.76 | 77.98 | 79.70 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_40k_cityscapes/upernet_r50_769x769_40k_cityscapes_20200530_033048-92d21539.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_40k_cityscapes/upernet_r50_769x769_40k_cityscapes_20200530_033048.log.json) | -| UPerNet | R-101 | 769x769 | 40000 | 8.4 | 1.56 | 79.03 | 80.77 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_40k_cityscapes/upernet_r101_769x769_40k_cityscapes_20200530_040819-83c95d01.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_40k_cityscapes/upernet_r101_769x769_40k_cityscapes_20200530_040819.log.json) | -| UPerNet | R-50 | 512x1024 | 80000 | - | - | 78.19 | 79.19 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_80k_cityscapes/upernet_r50_512x1024_80k_cityscapes_20200607_052207-848beca8.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_80k_cityscapes/upernet_r50_512x1024_80k_cityscapes_20200607_052207.log.json) | -| UPerNet | R-101 | 512x1024 | 80000 | - | - | 79.40 | 80.46 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_80k_cityscapes/upernet_r101_512x1024_80k_cityscapes_20200607_002403-f05f2345.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_80k_cityscapes/upernet_r101_512x1024_80k_cityscapes_20200607_002403.log.json) | -| UPerNet | R-50 | 769x769 | 80000 | - | - | 79.39 | 80.92 | 
[model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_80k_cityscapes/upernet_r50_769x769_80k_cityscapes_20200607_005107-82ae7d15.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_80k_cityscapes/upernet_r50_769x769_80k_cityscapes_20200607_005107.log.json) | -| UPerNet | R-101 | 769x769 | 80000 | - | - | 80.10 | 81.49 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_80k_cityscapes/upernet_r101_769x769_80k_cityscapes_20200607_001014-082fc334.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_80k_cityscapes/upernet_r101_769x769_80k_cityscapes_20200607_001014.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UPerNet | R-50 | 512x1024 | 40000 | 6.4 | 4.25 | 77.10 | 78.37 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r50_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_40k_cityscapes/upernet_r50_512x1024_40k_cityscapes_20200605_094827-aa54cb54.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_40k_cityscapes/upernet_r50_512x1024_40k_cityscapes_20200605_094827.log.json) | +| UPerNet | R-101 | 512x1024 | 40000 | 7.4 | 3.79 | 78.69 | 80.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r101_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_40k_cityscapes/upernet_r101_512x1024_40k_cityscapes_20200605_094933-ebce3b10.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_40k_cityscapes/upernet_r101_512x1024_40k_cityscapes_20200605_094933.log.json) | +| UPerNet | R-50 | 769x769 | 40000 | 7.2 | 1.76 | 77.98 | 79.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r50_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_40k_cityscapes/upernet_r50_769x769_40k_cityscapes_20200530_033048-92d21539.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_40k_cityscapes/upernet_r50_769x769_40k_cityscapes_20200530_033048.log.json) | +| UPerNet | R-101 | 769x769 | 40000 | 8.4 | 1.56 | 79.03 | 80.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r101_4xb2-40k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_40k_cityscapes/upernet_r101_769x769_40k_cityscapes_20200530_040819-83c95d01.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_40k_cityscapes/upernet_r101_769x769_40k_cityscapes_20200530_040819.log.json) | +| UPerNet | R-50 | 512x1024 | 80000 | - | - | 78.19 | 79.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r50_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_80k_cityscapes/upernet_r50_512x1024_80k_cityscapes_20200607_052207-848beca8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_80k_cityscapes/upernet_r50_512x1024_80k_cityscapes_20200607_052207.log.json) | +| UPerNet | R-101 | 512x1024 | 80000 | - | - | 79.40 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r101_4xb2-80k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_80k_cityscapes/upernet_r101_512x1024_80k_cityscapes_20200607_002403-f05f2345.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_80k_cityscapes/upernet_r101_512x1024_80k_cityscapes_20200607_002403.log.json) | +| UPerNet | R-50 | 769x769 | 80000 | - | - | 79.39 | 80.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r50_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_80k_cityscapes/upernet_r50_769x769_80k_cityscapes_20200607_005107-82ae7d15.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_80k_cityscapes/upernet_r50_769x769_80k_cityscapes_20200607_005107.log.json) | +| UPerNet | R-101 | 769x769 | 80000 | - | - | 80.10 | 81.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r101_4xb2-80k_cityscapes-769x769.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_80k_cityscapes/upernet_r101_769x769_80k_cityscapes_20200607_001014-082fc334.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_80k_cityscapes/upernet_r101_769x769_80k_cityscapes_20200607_001014.log.json) | ### ADE20K -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|---------|----------|-----------|--------:|----------|----------------|------:|--------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| UPerNet | R-50 | 512x512 | 80000 | 8.1 | 23.40 | 40.70 | 41.81 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_80k_ade20k/upernet_r50_512x512_80k_ade20k_20200614_144127-ecc8377b.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_80k_ade20k/upernet_r50_512x512_80k_ade20k_20200614_144127.log.json) | -| UPerNet | R-101 | 512x512 | 80000 | 9.1 | 20.34 | 42.91 | 43.96 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_80k_ade20k/upernet_r101_512x512_80k_ade20k_20200614_185117-32e4db94.pth) | 
[log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_80k_ade20k/upernet_r101_512x512_80k_ade20k_20200614_185117.log.json) | -| UPerNet | R-50 | 512x512 | 160000 | - | - | 42.05 | 42.78 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_160k_ade20k/upernet_r50_512x512_160k_ade20k_20200615_184328-8534de8d.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_160k_ade20k/upernet_r50_512x512_160k_ade20k_20200615_184328.log.json) | -| UPerNet | R-101 | 512x512 | 160000 | - | - | 43.82 | 44.85 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_160k_ade20k/upernet_r101_512x512_160k_ade20k_20200615_161951-91b32684.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_160k_ade20k/upernet_r101_512x512_160k_ade20k_20200615_161951.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UPerNet | R-50 | 512x512 | 80000 | 8.1 | 23.40 | 40.70 | 41.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r50_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_80k_ade20k/upernet_r50_512x512_80k_ade20k_20200614_144127-ecc8377b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_80k_ade20k/upernet_r50_512x512_80k_ade20k_20200614_144127.log.json) | +| UPerNet | R-101 | 512x512 | 80000 | 9.1 | 20.34 | 42.91 | 43.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r101_4xb4-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_80k_ade20k/upernet_r101_512x512_80k_ade20k_20200614_185117-32e4db94.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_80k_ade20k/upernet_r101_512x512_80k_ade20k_20200614_185117.log.json) | +| UPerNet | R-50 | 512x512 | 160000 | - | - | 42.05 | 42.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r50_4xb4-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_160k_ade20k/upernet_r50_512x512_160k_ade20k_20200615_184328-8534de8d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_160k_ade20k/upernet_r50_512x512_160k_ade20k_20200615_184328.log.json) | +| UPerNet | R-101 | 512x512 | 160000 | - | - | 43.82 | 44.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r101_4xb4-160k_ade20k-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_160k_ade20k/upernet_r101_512x512_160k_ade20k_20200615_161951-91b32684.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_160k_ade20k/upernet_r101_512x512_160k_ade20k_20200615_161951.log.json) | ### Pascal VOC 2012 + Aug -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download | -|---------|----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| UPerNet | R-50 | 512x512 | 20000 | 6.4 | 23.17 | 74.82 | 76.35 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_20k_voc12aug/upernet_r50_512x512_20k_voc12aug_20200617_165330-5b5890a7.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_20k_voc12aug/upernet_r50_512x512_20k_voc12aug_20200617_165330.log.json) | -| UPerNet | R-101 | 512x512 | 20000 | 7.5 | 19.98 | 77.10 | 78.29 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_20k_voc12aug/upernet_r101_512x512_20k_voc12aug_20200617_165629-f14e7f27.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_20k_voc12aug/upernet_r101_512x512_20k_voc12aug_20200617_165629.log.json) | -| UPerNet | R-50 | 512x512 | 40000 | - | - | 75.92 | 77.44 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_40k_voc12aug/upernet_r50_512x512_40k_voc12aug_20200613_162257-ca9bcc6b.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_40k_voc12aug/upernet_r50_512x512_40k_voc12aug_20200613_162257.log.json) | -| UPerNet | R-101 | 512x512 | 40000 | - | - | 77.43 | 78.56 | [model](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_40k_voc12aug/upernet_r101_512x512_40k_voc12aug_20200613_163549-e26476ac.pth) | [log](https://openmmlab.oss-accelerate.aliyuncs.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_40k_voc12aug/upernet_r101_512x512_40k_voc12aug_20200613_163549.log.json) | + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UPerNet | R-50 | 512x512 | 20000 | 6.4 | 23.17 | 74.82 | 76.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r50_4xb4-20k_voc12aug-512x512.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_20k_voc12aug/upernet_r50_512x512_20k_voc12aug_20200617_165330-5b5890a7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_20k_voc12aug/upernet_r50_512x512_20k_voc12aug_20200617_165330.log.json) | +| UPerNet | R-101 | 512x512 | 20000 | 7.5 | 19.98 | 77.10 | 78.29 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r101_4xb4-20k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_20k_voc12aug/upernet_r101_512x512_20k_voc12aug_20200617_165629-f14e7f27.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_20k_voc12aug/upernet_r101_512x512_20k_voc12aug_20200617_165629.log.json) | +| UPerNet | R-50 | 512x512 | 40000 | - | - | 75.92 | 77.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r50_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_40k_voc12aug/upernet_r50_512x512_40k_voc12aug_20200613_162257-ca9bcc6b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_40k_voc12aug/upernet_r50_512x512_40k_voc12aug_20200613_162257.log.json) | +| UPerNet | R-101 | 512x512 | 40000 | - | - | 77.43 | 78.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/upernet/upernet_r101_4xb4-40k_voc12aug-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_40k_voc12aug/upernet_r101_512x512_40k_voc12aug_20200613_163549-e26476ac.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_40k_voc12aug/upernet_r101_512x512_40k_voc12aug_20200613_163549.log.json) | diff --git a/configs/upernet/upernet.yml b/configs/upernet/upernet.yml new file mode 100644 index 0000000000..6892fcf06a --- /dev/null +++ b/configs/upernet/upernet.yml @@ -0,0 +1,305 @@ +Collections: +- Name: UPerNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/pdf/1807.10221.pdf + Title: Unified Perceptual Parsing for Scene Understanding + README: configs/upernet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/uper_head.py#L13 + Version: v0.17.0 + Converted From: + Code: https://github.com/CSAILVision/unifiedparsing +Models: +- Name: upernet_r50_4xb2-40k_cityscapes-512x1024 + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 235.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.1 + mIoU(ms+flip): 78.37 + Config: configs/upernet/upernet_r50_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_40k_cityscapes/upernet_r50_512x1024_40k_cityscapes_20200605_094827-aa54cb54.pth +- Name: upernet_r101_4xb2-40k_cityscapes-512x1024 + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 263.85 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.4 + Results: + - Task: Semantic 
Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.69 + mIoU(ms+flip): 80.11 + Config: configs/upernet/upernet_r101_4xb2-40k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_40k_cityscapes/upernet_r101_512x1024_40k_cityscapes_20200605_094933-ebce3b10.pth +- Name: upernet_r50_4xb2-40k_cityscapes-769x769 + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 568.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 7.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.98 + mIoU(ms+flip): 79.7 + Config: configs/upernet/upernet_r50_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_40k_cityscapes/upernet_r50_769x769_40k_cityscapes_20200530_033048-92d21539.pth +- Name: upernet_r101_4xb2-40k_cityscapes-769x769 + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 641.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.03 + mIoU(ms+flip): 80.77 + Config: configs/upernet/upernet_r101_4xb2-40k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_40k_cityscapes/upernet_r101_769x769_40k_cityscapes_20200530_040819-83c95d01.pth +- Name: upernet_r50_4xb2-80k_cityscapes-512x1024 + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.19 + mIoU(ms+flip): 79.19 + Config: configs/upernet/upernet_r50_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_80k_cityscapes/upernet_r50_512x1024_80k_cityscapes_20200607_052207-848beca8.pth +- Name: upernet_r101_4xb2-80k_cityscapes-512x1024 + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.4 + mIoU(ms+flip): 80.46 + Config: configs/upernet/upernet_r101_4xb2-80k_cityscapes-512x1024.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_80k_cityscapes/upernet_r101_512x1024_80k_cityscapes_20200607_002403-f05f2345.pth +- Name: upernet_r50_4xb2-80k_cityscapes-769x769 + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.39 + mIoU(ms+flip): 80.92 + Config: configs/upernet/upernet_r50_4xb2-80k_cityscapes-769x769.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_80k_cityscapes/upernet_r50_769x769_80k_cityscapes_20200607_005107-82ae7d15.pth +- Name: upernet_r101_4xb2-80k_cityscapes-769x769 + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.1 + mIoU(ms+flip): 81.49 + Config: configs/upernet/upernet_r101_4xb2-80k_cityscapes-769x769.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_80k_cityscapes/upernet_r101_769x769_80k_cityscapes_20200607_001014-082fc334.pth +- Name: upernet_r50_4xb4-80k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 42.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 40.7 + mIoU(ms+flip): 41.81 + Config: configs/upernet/upernet_r50_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_80k_ade20k/upernet_r50_512x512_80k_ade20k_20200614_144127-ecc8377b.pth +- Name: upernet_r101_4xb4-80k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 49.16 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.91 + mIoU(ms+flip): 43.96 + Config: configs/upernet/upernet_r101_4xb4-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_80k_ade20k/upernet_r101_512x512_80k_ade20k_20200614_185117-32e4db94.pth +- Name: upernet_r50_4xb4-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.05 + mIoU(ms+flip): 42.78 + Config: configs/upernet/upernet_r50_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_160k_ade20k/upernet_r50_512x512_160k_ade20k_20200615_184328-8534de8d.pth +- Name: upernet_r101_4xb4-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.82 + mIoU(ms+flip): 44.85 + Config: configs/upernet/upernet_r101_4xb4-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_160k_ade20k/upernet_r101_512x512_160k_ade20k_20200615_161951-91b32684.pth +- Name: upernet_r50_4xb4-20k_voc12aug-512x512 + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 43.16 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.4 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 74.82 + mIoU(ms+flip): 76.35 + Config: configs/upernet/upernet_r50_4xb4-20k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_20k_voc12aug/upernet_r50_512x512_20k_voc12aug_20200617_165330-5b5890a7.pth +- Name: upernet_r101_4xb4-20k_voc12aug-512x512 + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 50.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.5 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.1 + mIoU(ms+flip): 78.29 + Config: configs/upernet/upernet_r101_4xb4-20k_voc12aug-512x512.py + 
Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_20k_voc12aug/upernet_r101_512x512_20k_voc12aug_20200617_165629-f14e7f27.pth +- Name: upernet_r50_4xb4-40k_voc12aug-512x512 + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 75.92 + mIoU(ms+flip): 77.44 + Config: configs/upernet/upernet_r50_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_40k_voc12aug/upernet_r50_512x512_40k_voc12aug_20200613_162257-ca9bcc6b.pth +- Name: upernet_r101_4xb4-40k_voc12aug-512x512 + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.43 + mIoU(ms+flip): 78.56 + Config: configs/upernet/upernet_r101_4xb4-40k_voc12aug-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_40k_voc12aug/upernet_r101_512x512_40k_voc12aug_20200613_163549-e26476ac.pth diff --git a/configs/upernet/upernet_r101_4xb2-40k_cityscapes-512x1024.py b/configs/upernet/upernet_r101_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..8f5f6aecfe --- /dev/null +++ b/configs/upernet/upernet_r101_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_4xb2-40k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_4xb2-40k_cityscapes-769x769.py b/configs/upernet/upernet_r101_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..28b5d3e968 --- /dev/null +++ b/configs/upernet/upernet_r101_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_4xb2-40k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_4xb2-80k_cityscapes-512x1024.py b/configs/upernet/upernet_r101_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..cafd8a2091 --- /dev/null +++ b/configs/upernet/upernet_r101_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_4xb2-80k_cityscapes-512x1024.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_4xb2-80k_cityscapes-769x769.py b/configs/upernet/upernet_r101_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..e17572054f --- /dev/null +++ b/configs/upernet/upernet_r101_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_4xb2-80k_cityscapes-769x769.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_4xb4-160k_ade20k-512x512.py b/configs/upernet/upernet_r101_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..7a6152774c --- /dev/null +++ b/configs/upernet/upernet_r101_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_4xb4-160k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_4xb4-20k_voc12aug-512x512.py b/configs/upernet/upernet_r101_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..be8f0848df --- /dev/null +++ b/configs/upernet/upernet_r101_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = 
'./upernet_r50_4xb4-20k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_4xb4-40k_voc12aug-512x512.py b/configs/upernet/upernet_r101_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..db1d976498 --- /dev/null +++ b/configs/upernet/upernet_r101_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_4xb4-40k_voc12aug-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_4xb4-80k_ade20k-512x512.py b/configs/upernet/upernet_r101_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..84549a421d --- /dev/null +++ b/configs/upernet/upernet_r101_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_4xb4-80k_ade20k-512x512.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_512x1024_40k_cityscapes.py b/configs/upernet/upernet_r101_512x1024_40k_cityscapes.py deleted file mode 100644 index b90b597d83..0000000000 --- a/configs/upernet/upernet_r101_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './upernet_r50_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_512x1024_80k_cityscapes.py b/configs/upernet/upernet_r101_512x1024_80k_cityscapes.py deleted file mode 100644 index 420ca2e428..0000000000 --- a/configs/upernet/upernet_r101_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './upernet_r50_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_512x512_160k_ade20k.py b/configs/upernet/upernet_r101_512x512_160k_ade20k.py deleted file mode 100644 index 146f13eb79..0000000000 --- a/configs/upernet/upernet_r101_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './upernet_r50_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_512x512_20k_voc12aug.py b/configs/upernet/upernet_r101_512x512_20k_voc12aug.py deleted file mode 100644 index 56345d1806..0000000000 --- a/configs/upernet/upernet_r101_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './upernet_r50_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_512x512_40k_voc12aug.py b/configs/upernet/upernet_r101_512x512_40k_voc12aug.py deleted file mode 100644 index 0669b741b9..0000000000 --- a/configs/upernet/upernet_r101_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './upernet_r50_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_512x512_80k_ade20k.py b/configs/upernet/upernet_r101_512x512_80k_ade20k.py deleted file mode 100644 index abfb9c5d9f..0000000000 --- a/configs/upernet/upernet_r101_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './upernet_r50_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_769x769_40k_cityscapes.py b/configs/upernet/upernet_r101_769x769_40k_cityscapes.py deleted file mode 100644 index e5f3a3fae1..0000000000 --- 
a/configs/upernet/upernet_r101_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './upernet_r50_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r101_769x769_80k_cityscapes.py b/configs/upernet/upernet_r101_769x769_80k_cityscapes.py deleted file mode 100644 index a709165657..0000000000 --- a/configs/upernet/upernet_r101_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './upernet_r50_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/configs/upernet/upernet_r18_4xb2-40k_cityscapes-512x1024.py b/configs/upernet/upernet_r18_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..dbff0e75a1 --- /dev/null +++ b/configs/upernet/upernet_r18_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,6 @@ +_base_ = './upernet_r50_4xb2-40k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict(in_channels=[64, 128, 256, 512]), + auxiliary_head=dict(in_channels=256)) diff --git a/configs/upernet/upernet_r18_4xb2-80k_cityscapes-512x1024.py b/configs/upernet/upernet_r18_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..dee6349f64 --- /dev/null +++ b/configs/upernet/upernet_r18_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,6 @@ +_base_ = './upernet_r50_4xb2-80k_cityscapes-512x1024.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict(in_channels=[64, 128, 256, 512]), + auxiliary_head=dict(in_channels=256)) diff --git a/configs/upernet/upernet_r18_4xb4-160k_ade20k-512x512.py b/configs/upernet/upernet_r18_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..9ac6c35527 --- /dev/null +++ b/configs/upernet/upernet_r18_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict(in_channels=[64, 128, 256, 512], num_classes=150), + auxiliary_head=dict(in_channels=256, num_classes=150)) diff --git a/configs/upernet/upernet_r18_4xb4-20k_voc12aug-512x512.py b/configs/upernet/upernet_r18_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..5cae4f5435 --- /dev/null +++ b/configs/upernet/upernet_r18_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict(in_channels=[64, 128, 256, 512], num_classes=21), + auxiliary_head=dict(in_channels=256, num_classes=21)) diff --git a/configs/upernet/upernet_r18_4xb4-40k_voc12aug-512x512.py b/configs/upernet/upernet_r18_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..652ded7516 --- /dev/null +++ b/configs/upernet/upernet_r18_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict(in_channels=[64, 128, 256, 512], num_classes=21), + 
auxiliary_head=dict(in_channels=256, num_classes=21)) diff --git a/configs/upernet/upernet_r18_4xb4-80k_ade20k-512x512.py b/configs/upernet/upernet_r18_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..1a7956d71f --- /dev/null +++ b/configs/upernet/upernet_r18_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict(in_channels=[64, 128, 256, 512], num_classes=150), + auxiliary_head=dict(in_channels=256, num_classes=150)) diff --git a/configs/upernet/upernet_r50_4xb2-40k_cityscapes-512x1024.py b/configs/upernet/upernet_r50_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..4751fc1102 --- /dev/null +++ b/configs/upernet/upernet_r50_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/upernet/upernet_r50_4xb2-40k_cityscapes-769x769.py b/configs/upernet/upernet_r50_4xb2-40k_cityscapes-769x769.py new file mode 100644 index 0000000000..6f05b6c364 --- /dev/null +++ b/configs/upernet/upernet_r50_4xb2-40k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/upernet/upernet_r50_4xb2-80k_cityscapes-512x1024.py b/configs/upernet/upernet_r50_4xb2-80k_cityscapes-512x1024.py new file mode 100644 index 0000000000..f3488c6108 --- /dev/null +++ b/configs/upernet/upernet_r50_4xb2-80k_cityscapes-512x1024.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) diff --git a/configs/upernet/upernet_r50_4xb2-80k_cityscapes-769x769.py b/configs/upernet/upernet_r50_4xb2-80k_cityscapes-769x769.py new file mode 100644 index 0000000000..6a8f48ec51 --- /dev/null +++ b/configs/upernet/upernet_r50_4xb2-80k_cityscapes-769x769.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (769, 769) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/configs/upernet/upernet_r50_4xb4-160k_ade20k-512x512.py b/configs/upernet/upernet_r50_4xb4-160k_ade20k-512x512.py new file mode 100644 index 0000000000..5d15b2abd9 --- /dev/null +++ b/configs/upernet/upernet_r50_4xb4-160k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + 
'../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/upernet/upernet_r50_4xb4-20k_voc12aug-512x512.py b/configs/upernet/upernet_r50_4xb4-20k_voc12aug-512x512.py new file mode 100644 index 0000000000..9e96b4eac0 --- /dev/null +++ b/configs/upernet/upernet_r50_4xb4-20k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/upernet/upernet_r50_4xb4-40k_voc12aug-512x512.py b/configs/upernet/upernet_r50_4xb4-40k_voc12aug-512x512.py new file mode 100644 index 0000000000..cada949620 --- /dev/null +++ b/configs/upernet/upernet_r50_4xb4-40k_voc12aug-512x512.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=21), + auxiliary_head=dict(num_classes=21)) diff --git a/configs/upernet/upernet_r50_4xb4-80k_ade20k-512x512.py b/configs/upernet/upernet_r50_4xb4-80k_ade20k-512x512.py new file mode 100644 index 0000000000..322d5d8c84 --- /dev/null +++ b/configs/upernet/upernet_r50_4xb4-80k_ade20k-512x512.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/configs/upernet/upernet_r50_512x1024_40k_cityscapes.py b/configs/upernet/upernet_r50_512x1024_40k_cityscapes.py deleted file mode 100644 index d621e89ce6..0000000000 --- a/configs/upernet/upernet_r50_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/configs/upernet/upernet_r50_512x1024_80k_cityscapes.py b/configs/upernet/upernet_r50_512x1024_80k_cityscapes.py deleted file mode 100644 index 95fffcc76c..0000000000 --- a/configs/upernet/upernet_r50_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/configs/upernet/upernet_r50_512x512_160k_ade20k.py b/configs/upernet/upernet_r50_512x512_160k_ade20k.py deleted file mode 100644 index f259165fca..0000000000 --- a/configs/upernet/upernet_r50_512x512_160k_ade20k.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - 
decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) -test_cfg = dict(mode='whole') diff --git a/configs/upernet/upernet_r50_512x512_20k_voc12aug.py b/configs/upernet/upernet_r50_512x512_20k_voc12aug.py deleted file mode 100644 index 95f5c09567..0000000000 --- a/configs/upernet/upernet_r50_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/upernet_r50.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/upernet/upernet_r50_512x512_40k_voc12aug.py b/configs/upernet/upernet_r50_512x512_40k_voc12aug.py deleted file mode 100644 index 9621fd1f5c..0000000000 --- a/configs/upernet/upernet_r50_512x512_40k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/upernet_r50.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/configs/upernet/upernet_r50_512x512_80k_ade20k.py b/configs/upernet/upernet_r50_512x512_80k_ade20k.py deleted file mode 100644 index ce5d71f56d..0000000000 --- a/configs/upernet/upernet_r50_512x512_80k_ade20k.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) -test_cfg = dict(mode='whole') diff --git a/configs/upernet/upernet_r50_769x769_40k_cityscapes.py b/configs/upernet/upernet_r50_769x769_40k_cityscapes.py deleted file mode 100644 index 590ab61b76..0000000000 --- a/configs/upernet/upernet_r50_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/upernet_r50.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/upernet/upernet_r50_769x769_80k_cityscapes.py b/configs/upernet/upernet_r50_769x769_80k_cityscapes.py deleted file mode 100644 index b3a6107581..0000000000 --- a/configs/upernet/upernet_r50_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/upernet_r50.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True)) -test_cfg = dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) diff --git a/configs/vit/README.md b/configs/vit/README.md new file mode 100644 index 0000000000..b7f242549d --- /dev/null +++ b/configs/vit/README.md @@ -0,0 +1,70 @@ +# Vision Transformer + +[An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/pdf/2010.11929.pdf) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. 
In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train. + + + +
+ +
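The abstract above describes ViT's core idea: the image is cut into fixed-size patches, each patch is linearly embedded into a token, and the resulting sequence is processed by a plain transformer encoder. As a rough illustrative sketch only (this is not code from this repository; the class name, sizes, and layer counts below are assumptions chosen for clarity), the patch-tokenization step can look like this:

```python
# Illustrative sketch of ViT-style patch tokenization; NOT MMSegmentation code.
import torch
import torch.nn as nn


class PatchEmbed(nn.Module):
    """Split an image into non-overlapping 16x16 patches and linearly
    project each patch to an embedding vector (a strided conv does both)."""

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        self.num_patches = (img_size // patch_size) ** 2
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        x = self.proj(x)                     # (B, embed_dim, H/16, W/16)
        return x.flatten(2).transpose(1, 2)  # (B, num_patches, embed_dim)


embed = PatchEmbed()
encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model=768, nhead=12, batch_first=True),
    num_layers=2)  # ViT-B stacks 12 such layers; 2 keeps the sketch cheap

img = torch.randn(1, 3, 224, 224)
tokens = embed(img)                                 # (1, 196, 768)
pos_embed = torch.zeros(1, embed.num_patches, 768)  # learned in practice
out = encoder(tokens + pos_embed)                   # per-patch features
print(out.shape)                                    # torch.Size([1, 196, 768])
```

A segmentation decode head such as UPerNet then consumes these per-patch features, optionally after a multi-level neck (the "MLN" variants in the table below).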
+ +## Citation + +```bibtex +@article{dosoViTskiy2020, + title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, + author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil}, + journal={arXiv preprint arXiv:2010.11929}, + year={2020} +} +``` + +## Usage + +To use other repositories' pre-trained models, it is necessary to convert the keys. + +We provide a script [`vit2mmseg.py`](../../tools/model_converters/vit2mmseg.py) in the tools directory to convert the keys of models from [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py) to MMSegmentation style. + +```shell +python tools/model_converters/vit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +E.g. + +```shell +python tools/model_converters/vit2mmseg.py https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth pretrain/jx_vit_base_p16_224-80ecf9dd.pth +``` + +This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | ----------------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UPerNet | ViT-B + MLN | 512x512 | 80000 | 9.20 | 6.94 | 47.71 | 49.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/vit/vit_vit-b16_mln_upernet_8xb2-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_mln_512x512_80k_ade20k/upernet_vit-b16_mln_512x512_80k_ade20k_20210624_130547-0403cee1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_mln_512x512_80k_ade20k/20210624_130547.log.json) | +| UPerNet | ViT-B + MLN | 512x512 | 160000 | 9.20 | 7.58 | 46.75 | 48.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/vit/vit_vit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_mln_512x512_160k_ade20k/upernet_vit-b16_mln_512x512_160k_ade20k_20210624_130547-852fa768.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_mln_512x512_160k_ade20k/20210623_192432.log.json) | +| UPerNet | ViT-B + LN + MLN | 512x512 | 160000 | 9.21 | 6.82 | 47.73 | 49.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/vit/vit_vit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k/upernet_vit-b16_ln_mln_512x512_160k_ade20k_20210621_172828-f444c077.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k/20210621_172828.log.json) | +| UPerNet | DeiT-S | 512x512 
| 80000 | 4.68 | 29.85 | 42.96 | 43.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/vit/vit_deit-s16_upernet_8xb2-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_512x512_80k_ade20k/upernet_deit-s16_512x512_80k_ade20k_20210624_095228-afc93ec2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_512x512_80k_ade20k/20210624_095228.log.json) | +| UPerNet | DeiT-S | 512x512 | 160000 | 4.68 | 29.19 | 42.87 | 43.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/vit/vit_deit-s16_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_512x512_160k_ade20k/upernet_deit-s16_512x512_160k_ade20k_20210621_160903-5110d916.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_512x512_160k_ade20k/20210621_160903.log.json) | +| UPerNet | DeiT-S + MLN | 512x512 | 160000 | 5.69 | 11.18 | 43.82 | 45.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/vit/vit_deit-s16_mln_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_mln_512x512_160k_ade20k/upernet_deit-s16_mln_512x512_160k_ade20k_20210621_161021-fb9a5dfb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_mln_512x512_160k_ade20k/20210621_161021.log.json) | +| UPerNet | DeiT-S + LN + MLN | 512x512 | 160000 | 5.69 | 12.39 | 43.52 | 45.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/vit/vit_deit-s16-ln_mln_upernet_512x512_160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k/upernet_deit-s16_ln_mln_512x512_160k_ade20k_20210621_161021-c0cd652f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k/20210621_161021.log.json) | +| UPerNet | DeiT-B | 512x512 | 80000 | 7.75 | 9.69 | 45.24 | 46.73 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/vit/vit_deit-b16_upernet_8xb2-80k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_512x512_80k_ade20k/upernet_deit-b16_512x512_80k_ade20k_20210624_130529-1e090789.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_512x512_80k_ade20k/20210624_130529.log.json) | +| UPerNet | DeiT-B | 512x512 | 160000 | 7.75 | 10.39 | 45.36 | 47.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/vit/vit_deit-b16_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_512x512_160k_ade20k/upernet_deit-b16_512x512_160k_ade20k_20210621_180100-828705d7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_512x512_160k_ade20k/20210621_180100.log.json) | +| UPerNet | DeiT-B + MLN | 512x512 | 160000 | 9.21 | 7.78 | 45.46 | 47.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/vit/vit_deit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_mln_512x512_160k_ade20k/upernet_deit-b16_mln_512x512_160k_ade20k_20210621_191949-4e1450f3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_mln_512x512_160k_ade20k/20210621_191949.log.json) | +| UPerNet | 
DeiT-B + LN + MLN | 512x512 | 160000 | 9.21 | 7.75 | 45.37 | 47.23 | [config](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/vit/vit_deit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_ln_mln_512x512_160k_ade20k/upernet_deit-b16_ln_mln_512x512_160k_ade20k_20210623_153535-8a959c14.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_ln_mln_512x512_160k_ade20k/20210623_153535.log.json) | diff --git a/configs/vit/vit.yml b/configs/vit/vit.yml new file mode 100644 index 0000000000..613d866ac4 --- /dev/null +++ b/configs/vit/vit.yml @@ -0,0 +1,243 @@ +Models: +- Name: vit_vit-b16_mln_upernet_8xb2-80k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: ViT-B + MLN + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 144.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.71 + mIoU(ms+flip): 49.51 + Config: configs/vit/vit_vit-b16_mln_upernet_8xb2-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_mln_512x512_80k_ade20k/upernet_vit-b16_mln_512x512_80k_ade20k_20210624_130547-0403cee1.pth +- Name: vit_vit-b16_mln_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: ViT-B + MLN + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 131.93 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.75 + mIoU(ms+flip): 48.46 + Config: configs/vit/vit_vit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_mln_512x512_160k_ade20k/upernet_vit-b16_mln_512x512_160k_ade20k_20210624_130547-852fa768.pth +- Name: vit_vit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: ViT-B + LN + MLN + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 146.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.21 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.73 + mIoU(ms+flip): 49.95 + Config: configs/vit/vit_vit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k/upernet_vit-b16_ln_mln_512x512_160k_ade20k_20210621_172828-f444c077.pth +- Name: vit_deit-s16_upernet_8xb2-80k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: DeiT-S + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 33.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.68 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.96 + mIoU(ms+flip): 43.79 + Config: configs/vit/vit_deit-s16_upernet_8xb2-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_512x512_80k_ade20k/upernet_deit-s16_512x512_80k_ade20k_20210624_095228-afc93ec2.pth +- Name: vit_deit-s16_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: DeiT-S + crop size: (512,512) + lr schd: 160000 
+ inference time (ms/im): + - value: 34.26 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.68 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.87 + mIoU(ms+flip): 43.79 + Config: configs/vit/vit_deit-s16_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_512x512_160k_ade20k/upernet_deit-s16_512x512_160k_ade20k_20210621_160903-5110d916.pth +- Name: vit_deit-s16_mln_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: DeiT-S + MLN + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 89.45 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.69 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.82 + mIoU(ms+flip): 45.07 + Config: configs/vit/vit_deit-s16_mln_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_mln_512x512_160k_ade20k/upernet_deit-s16_mln_512x512_160k_ade20k_20210621_161021-fb9a5dfb.pth +- Name: vit_deit-s16-ln_mln_upernet_512x512_160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: DeiT-S + LN + MLN + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 80.71 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.69 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.52 + mIoU(ms+flip): 45.01 + Config: configs/vit/vit_deit-s16-ln_mln_upernet_512x512_160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k/upernet_deit-s16_ln_mln_512x512_160k_ade20k_20210621_161021-c0cd652f.pth +- Name: vit_deit-b16_upernet_8xb2-80k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: DeiT-B + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 103.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.75 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.24 + mIoU(ms+flip): 46.73 + Config: configs/vit/vit_deit-b16_upernet_8xb2-80k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_512x512_80k_ade20k/upernet_deit-b16_512x512_80k_ade20k_20210624_130529-1e090789.pth +- Name: vit_deit-b16_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: DeiT-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 96.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.75 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.36 + mIoU(ms+flip): 47.16 + Config: configs/vit/vit_deit-b16_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_512x512_160k_ade20k/upernet_deit-b16_512x512_160k_ade20k_20210621_180100-828705d7.pth +- Name: vit_deit-b16_mln_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: DeiT-B + MLN + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 128.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.21 + Results: 
+ - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.46 + mIoU(ms+flip): 47.16 + Config: configs/vit/vit_deit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_mln_512x512_160k_ade20k/upernet_deit-b16_mln_512x512_160k_ade20k_20210621_191949-4e1450f3.pth +- Name: vit_deit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512 + In Collection: UPerNet + Metadata: + backbone: DeiT-B + LN + MLN + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 129.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.21 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.37 + mIoU(ms+flip): 47.23 + Config: configs/vit/vit_deit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_ln_mln_512x512_160k_ade20k/upernet_deit-b16_ln_mln_512x512_160k_ade20k_20210623_153535-8a959c14.pth diff --git a/configs/vit/vit_deit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512.py b/configs/vit/vit_deit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..39d1c54faf --- /dev/null +++ b/configs/vit/vit_deit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,5 @@ +_base_ = './vit_vit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py' + +model = dict( + pretrained='pretrain/deit_base_patch16_224-b5f2ef4d.pth', + backbone=dict(drop_path_rate=0.1, final_norm=True)) diff --git a/configs/vit/vit_deit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py b/configs/vit/vit_deit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..706673f6b1 --- /dev/null +++ b/configs/vit/vit_deit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,6 @@ +_base_ = './vit_vit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py' + +model = dict( + pretrained='pretrain/deit_base_patch16_224-b5f2ef4d.pth', + backbone=dict(drop_path_rate=0.1), +) diff --git a/configs/vit/vit_deit-b16_upernet_8xb2-160k_ade20k-512x512.py b/configs/vit/vit_deit-b16_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..23a23582d7 --- /dev/null +++ b/configs/vit/vit_deit-b16_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,6 @@ +_base_ = './vit_vit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py' + +model = dict( + pretrained='pretrain/deit_base_patch16_224-b5f2ef4d.pth', + backbone=dict(drop_path_rate=0.1), + neck=None) diff --git a/configs/vit/vit_deit-b16_upernet_8xb2-80k_ade20k-512x512.py b/configs/vit/vit_deit-b16_upernet_8xb2-80k_ade20k-512x512.py new file mode 100644 index 0000000000..4c8bc939ee --- /dev/null +++ b/configs/vit/vit_deit-b16_upernet_8xb2-80k_ade20k-512x512.py @@ -0,0 +1,6 @@ +_base_ = './vit_vit-b16_mln_upernet_8xb2-80k_ade20k-512x512.py' + +model = dict( + pretrained='pretrain/deit_base_patch16_224-b5f2ef4d.pth', + backbone=dict(drop_path_rate=0.1), + neck=None) diff --git a/configs/vit/vit_deit-s16-ln_mln_upernet_512x512_160k_ade20k-512x512.py b/configs/vit/vit_deit-s16-ln_mln_upernet_512x512_160k_ade20k-512x512.py new file mode 100644 index 0000000000..8e626fe0de --- /dev/null +++ b/configs/vit/vit_deit-s16-ln_mln_upernet_512x512_160k_ade20k-512x512.py @@ -0,0 +1,9 @@ +_base_ = './vit_vit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py' + +model = dict( + pretrained='pretrain/deit_small_patch16_224-cd65a155.pth', + backbone=dict( + num_heads=6, embed_dims=384, drop_path_rate=0.1, final_norm=True), + 
decode_head=dict(num_classes=150, in_channels=[384, 384, 384, 384]), + neck=dict(in_channels=[384, 384, 384, 384], out_channels=384), + auxiliary_head=dict(num_classes=150, in_channels=384)) diff --git a/configs/vit/vit_deit-s16_mln_upernet_8xb2-160k_ade20k-512x512.py b/configs/vit/vit_deit-s16_mln_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..9a69a892b3 --- /dev/null +++ b/configs/vit/vit_deit-s16_mln_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = './vit_vit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py' + +model = dict( + pretrained='pretrain/deit_small_patch16_224-cd65a155.pth', + backbone=dict(num_heads=6, embed_dims=384, drop_path_rate=0.1), + decode_head=dict(num_classes=150, in_channels=[384, 384, 384, 384]), + neck=dict(in_channels=[384, 384, 384, 384], out_channels=384), + auxiliary_head=dict(num_classes=150, in_channels=384)) diff --git a/configs/vit/vit_deit-s16_upernet_8xb2-160k_ade20k-512x512.py b/configs/vit/vit_deit-s16_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..9ef699d5d5 --- /dev/null +++ b/configs/vit/vit_deit-s16_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = './vit_vit-b16_mln_upernet_8xb2-80k_ade20k-512x512.py' + +model = dict( + pretrained='pretrain/deit_small_patch16_224-cd65a155.pth', + backbone=dict(num_heads=6, embed_dims=384, drop_path_rate=0.1), + decode_head=dict(num_classes=150, in_channels=[384, 384, 384, 384]), + neck=None, + auxiliary_head=dict(num_classes=150, in_channels=384)) diff --git a/configs/vit/vit_deit-s16_upernet_8xb2-80k_ade20k-512x512.py b/configs/vit/vit_deit-s16_upernet_8xb2-80k_ade20k-512x512.py new file mode 100644 index 0000000000..9ef699d5d5 --- /dev/null +++ b/configs/vit/vit_deit-s16_upernet_8xb2-80k_ade20k-512x512.py @@ -0,0 +1,8 @@ +_base_ = './vit_vit-b16_mln_upernet_8xb2-80k_ade20k-512x512.py' + +model = dict( + pretrained='pretrain/deit_small_patch16_224-cd65a155.pth', + backbone=dict(num_heads=6, embed_dims=384, drop_path_rate=0.1), + decode_head=dict(num_classes=150, in_channels=[384, 384, 384, 384]), + neck=None, + auxiliary_head=dict(num_classes=150, in_channels=384)) diff --git a/configs/vit/vit_vit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512.py b/configs/vit/vit_vit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..2dd81b48bb --- /dev/null +++ b/configs/vit/vit_vit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,45 @@ +_base_ = [ + '../_base_/models/upernet_vit-b16_ln_mln.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='pretrain/vit_base_patch16_224.pth', + backbone=dict(drop_path_rate=0.1, final_norm=True), + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) + +# AdamW optimizer, no weight decay for position embedding & layer norm +# in backbone +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01), + paramwise_cfg=dict( + custom_keys={ + 'pos_embed': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) 
+ })) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + eta_min=0.0, + power=1.0, + begin=1500, + end=160000, + by_epoch=False, + ) +] + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/vit/vit_vit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py b/configs/vit/vit_vit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py new file mode 100644 index 0000000000..1a7ec16c92 --- /dev/null +++ b/configs/vit/vit_vit-b16_mln_upernet_8xb2-160k_ade20k-512x512.py @@ -0,0 +1,44 @@ +_base_ = [ + '../_base_/models/upernet_vit-b16_ln_mln.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='pretrain/vit_base_patch16_224.pth', + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) + +# AdamW optimizer, no weight decay for position embedding & layer norm +# in backbone +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01), + paramwise_cfg=dict( + custom_keys={ + 'pos_embed': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + eta_min=0.0, + power=1.0, + begin=1500, + end=160000, + by_epoch=False, + ) +] + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/configs/vit/vit_vit-b16_mln_upernet_8xb2-80k_ade20k-512x512.py b/configs/vit/vit_vit-b16_mln_upernet_8xb2-80k_ade20k-512x512.py new file mode 100644 index 0000000000..ef7345057c --- /dev/null +++ b/configs/vit/vit_vit-b16_mln_upernet_8xb2-80k_ade20k-512x512.py @@ -0,0 +1,44 @@ +_base_ = [ + '../_base_/models/upernet_vit-b16_ln_mln.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +crop_size = (512, 512) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, + pretrained='pretrain/vit_base_patch16_224.pth', + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) + +# AdamW optimizer, no weight decay for position embedding & layer norm +# in backbone +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01), + paramwise_cfg=dict( + custom_keys={ + 'pos_embed': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) 
+ })) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + eta_min=0.0, + power=1.0, + begin=1500, + end=80000, + by_epoch=False, + ) +] + +# By default, models are trained on 8 GPUs with 2 images per GPU +train_dataloader = dict(batch_size=2) +val_dataloader = dict(batch_size=1) +test_dataloader = val_dataloader diff --git a/demo/MMSegmentation_Tutorial.ipynb b/demo/MMSegmentation_Tutorial.ipynb new file mode 100644 index 0000000000..89d6e52613 --- /dev/null +++ b/demo/MMSegmentation_Tutorial.ipynb @@ -0,0 +1,559 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "view-in-github" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FVmnaxFJvsb8" + }, + "source": [ + "# MMSegmentation Tutorial\n", + "Welcome to MMSegmentation! \n", + "\n", + "In this tutorial, we demo\n", + "* How to do inference with MMSeg trained weight\n", + "* How to train on your own dataset and visualize the results. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QS8YHrEhbpas" + }, + "source": [ + "## Install MMSegmentation\n", + "This step may take several minutes. \n", + "\n", + "We use PyTorch 1.12 and CUDA 11.3 for this tutorial. You may install other versions by change the version number in pip install command. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "UWyLrLYaNEaL", + "outputId": "32a47fe3-f10d-47a1-f6b9-b7c235abdab1" + }, + "outputs": [], + "source": [ + "# Check nvcc version\n", + "!nvcc -V\n", + "# Check GCC version\n", + "!gcc --version" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Ki3WUBjKbutg", + "outputId": "14bd14b0-4d8c-4fa9-e3f9-da35c0efc0d5" + }, + "outputs": [], + "source": [ + "# Install PyTorch\n", + "!conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cudatoolkit=11.3 -c pytorch\n", + "# Install mim\n", + "!pip install -U openmim\n", + "# Install mmengine\n", + "!mim install mmengine\n", + "# Install MMCV\n", + "!mim install 'mmcv >= 2.0.0rc1'\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "nR-hHRvbNJJZ", + "outputId": "10c3b131-d4db-458c-fc10-b94b1c6ed546" + }, + "outputs": [], + "source": [ + "!rm -rf mmsegmentation\n", + "!git clone -b dev-1.x https://github.com/open-mmlab/mmsegmentation.git \n", + "%cd mmsegmentation\n", + "!pip install -e ." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "mAE_h7XhPT7d", + "outputId": "83bf0f8e-fc69-40b1-f9fe-0025724a217c" + }, + "outputs": [], + "source": [ + "# Check Pytorch installation\n", + "import torch, torchvision\n", + "print(torch.__version__, torch.cuda.is_available())\n", + "\n", + "# Check MMSegmentation installation\n", + "import mmseg\n", + "print(mmseg.__version__)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ta51clKX4cwM" + }, + "source": [ + "## Finetune a semantic segmentation model on a new dataset\n", + "\n", + "To finetune on a customized dataset, the following steps are necessary. \n", + "1. Add a new dataset class. \n", + "2. Create a config file accordingly. \n", + "3. Perform training and evaluation. 
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AcZg6x_K5Zs3" + }, + "source": [ + "### Add a new dataset\n", + "\n", + "Datasets in MMSegmentation require image and semantic segmentation maps to be placed in folders with the same prefix. To support a new dataset, we may need to modify the original file structure. \n", + "\n", + "In this tutorial, we give an example of converting the dataset. You may refer to [docs](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/tutorials/customize_datasets.md#customize-datasets-by-reorganizing-data) for details about dataset reorganization. \n", + "\n", + "We use [Stanford Background Dataset](http://dags.stanford.edu/projects/scenedataset.html) as an example. The dataset contains 715 images chosen from existing public datasets [LabelMe](http://labelme.csail.mit.edu), [MSRC](http://research.microsoft.com/en-us/projects/objectclassrecognition), [PASCAL VOC](http://pascallin.ecs.soton.ac.uk/challenges/VOC) and [Geometric Context](http://www.cs.illinois.edu/homes/dhoiem/). Images from these datasets are mainly outdoor scenes, each containing approximately 320-by-240 pixels. \n", + "In this tutorial, we use the region annotations as labels. There are 8 classes in total, i.e. sky, tree, road, grass, water, building, mountain, and foreground object. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "TFIt7MHq5Wls", + "outputId": "74a126e4-c8a4-4d2f-a910-b58b71843a23" + }, + "outputs": [], + "source": [ + "# download and unzip\n", + "!wget http://dags.stanford.edu/data/iccv09Data.tar.gz -O stanford_background.tar.gz\n", + "!tar xf stanford_background.tar.gz" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 377 + }, + "id": "78LIci7F9WWI", + "outputId": "c432ddac-5a50-47b1-daac-5a26b07afea2" + }, + "outputs": [], + "source": [ + "# Let's take a look at the dataset\n", + "import mmcv\n", + "import mmengine\n", + "import matplotlib.pyplot as plt\n", + "\n", + "\n", + "img = mmcv.imread('iccv09Data/images/6000124.jpg')\n", + "plt.figure(figsize=(8, 6))\n", + "plt.imshow(mmcv.bgr2rgb(img))\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "L5mNQuc2GsVE" + }, + "source": [ + "We need to convert the annotation into semantic map format as an image." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WnGZfribFHCx" + }, + "outputs": [], + "source": [ + "# define dataset root and directory for images and annotations\n", + "data_root = 'iccv09Data'\n", + "img_dir = 'images'\n", + "ann_dir = 'labels'\n", + "# define class and palette for better visualization\n", + "classes = ('sky', 'tree', 'road', 'grass', 'water', 'bldg', 'mntn', 'fg obj')\n", + "palette = [[128, 128, 128], [129, 127, 38], [120, 69, 125], [53, 125, 34], \n", + " [0, 11, 123], [118, 20, 12], [122, 81, 25], [241, 134, 51]]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WnGZfribFHCx" + }, + "outputs": [], + "source": [ + "import os.path as osp\n", + "import numpy as np\n", + "from PIL import Image\n", + "\n", + "# convert dataset annotation to semantic segmentation map\n", + "for file in mmengine.scandir(osp.join(data_root, ann_dir), suffix='.regions.txt'):\n", + " seg_map = np.loadtxt(osp.join(data_root, ann_dir, file)).astype(np.uint8)\n", + " seg_img = Image.fromarray(seg_map).convert('P')\n", + " seg_img.putpalette(np.array(palette, dtype=np.uint8))\n", + " seg_img.save(osp.join(data_root, ann_dir, file.replace('.regions.txt', \n", + " '.png')))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 377 + }, + "id": "5MCSS9ABfSks", + "outputId": "92b9bafc-589e-48fc-c9e9-476f125d6522" + }, + "outputs": [], + "source": [ + "# Let's take a look at the segmentation map we got\n", + "import matplotlib.patches as mpatches\n", + "img = Image.open('iccv09Data/labels/6000124.png')\n", + "plt.figure(figsize=(8, 6))\n", + "im = plt.imshow(np.array(img.convert('RGB')))\n", + "\n", + "# create a patch (proxy artist) for every color \n", + "patches = [mpatches.Patch(color=np.array(palette[i])/255., \n", + " label=classes[i]) for i in range(8)]\n", + "# put those patched as legend-handles into the legend\n", + "plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., \n", + " fontsize='large')\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WbeLYCp2k5hl" + }, + "outputs": [], + "source": [ + "# split train/val set randomly\n", + "split_dir = 'splits'\n", + "mmengine.mkdir_or_exist(osp.join(data_root, split_dir))\n", + "filename_list = [osp.splitext(filename)[0] for filename in mmengine.scandir(\n", + " osp.join(data_root, ann_dir), suffix='.png')]\n", + "with open(osp.join(data_root, split_dir, 'train.txt'), 'w') as f:\n", + " # select first 4/5 as train set\n", + " train_length = int(len(filename_list)*4/5)\n", + " f.writelines(line + '\\n' for line in filename_list[:train_length])\n", + "with open(osp.join(data_root, split_dir, 'val.txt'), 'w') as f:\n", + " # select last 1/5 as train set\n", + " f.writelines(line + '\\n' for line in filename_list[train_length:])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HchvmGYB_rrO" + }, + "source": [ + "After downloading the data, we need to implement `load_annotations` function in the new dataset class `StanfordBackgroundDataset`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "LbsWOw62_o-X" + }, + "outputs": [], + "source": [ + "from mmseg.registry import DATASETS\n", + "from mmseg.datasets import BaseSegDataset\n", + "\n", + "\n", + "@DATASETS.register_module()\n", + "class StanfordBackgroundDataset(BaseSegDataset):\n", + " METAINFO = dict(classes = classes, palette = palette)\n", + " def __init__(self, **kwargs):\n", + " super().__init__(img_suffix='.jpg', seg_map_suffix='.png', **kwargs)\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yUVtmn3Iq3WA" + }, + "source": [ + "### Create a config file\n", + "In the next step, we need to modify the config for the training. To accelerate the process, we finetune the model from trained weights." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Download config and checkpoint files\n", + "!mim download mmsegmentation --config pspnet_r50-d8_4xb2-40k_cityscapes-512x1024 --dest ." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Wwnj9tRzqX_A" + }, + "outputs": [], + "source": [ + "from mmengine import Config\n", + "cfg = Config.fromfile('configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py')\n", + "print(f'Config:\\n{cfg.pretty_text}')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1y2oV5w97jQo" + }, + "source": [ + "Since the given config is used to train PSPNet on the cityscapes dataset, we need to modify it accordingly for our new dataset. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "eyKnYC1Z7iCV", + "outputId": "6195217b-187f-4675-994b-ba90d8bb3078" + }, + "outputs": [], + "source": [ + "# Since we use only one GPU, BN is used instead of SyncBN\n", + "cfg.norm_cfg = dict(type='BN', requires_grad=True)\n", + "cfg.crop_size = (256, 256)\n", + "cfg.model.data_preprocessor.size = cfg.crop_size\n", + "cfg.model.backbone.norm_cfg = cfg.norm_cfg\n", + "cfg.model.decode_head.norm_cfg = cfg.norm_cfg\n", + "cfg.model.auxiliary_head.norm_cfg = cfg.norm_cfg\n", + "# modify num classes of the model in decode/auxiliary head\n", + "cfg.model.decode_head.num_classes = 8\n", + "cfg.model.auxiliary_head.num_classes = 8\n", + "\n", + "# Modify dataset type and path\n", + "cfg.dataset_type = 'StanfordBackgroundDataset'\n", + "cfg.data_root = data_root\n", + "\n", + "cfg.train_dataloader.batch_size = 8\n", + "\n", + "cfg.train_pipeline = [\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='LoadAnnotations'),\n", + " dict(type='RandomResize', scale=(320, 240), ratio_range=(0.5, 2.0), keep_ratio=True),\n", + " dict(type='RandomCrop', crop_size=cfg.crop_size, cat_max_ratio=0.75),\n", + " dict(type='RandomFlip', prob=0.5),\n", + " dict(type='PackSegInputs')\n", + "]\n", + "\n", + "cfg.test_pipeline = [\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='Resize', scale=(320, 240), keep_ratio=True),\n", + " # add loading annotation after ``Resize`` because ground truth\n", + " # does not need to do resize data transform\n", + " dict(type='LoadAnnotations'),\n", + " dict(type='PackSegInputs')\n", + "]\n", + "\n", + "\n", + "cfg.train_dataloader.dataset.type = cfg.dataset_type\n", + "cfg.train_dataloader.dataset.data_root = cfg.data_root\n", + "cfg.train_dataloader.dataset.data_prefix = dict(img_path=img_dir, seg_map_path=ann_dir)\n", + "cfg.train_dataloader.dataset.pipeline = 
cfg.train_pipeline\n", + "cfg.train_dataloader.dataset.ann_file = 'splits/train.txt'\n", + "\n", + "cfg.val_dataloader.dataset.type = cfg.dataset_type\n", + "cfg.val_dataloader.dataset.data_root = cfg.data_root\n", + "cfg.val_dataloader.dataset.data_prefix = dict(img_path=img_dir, seg_map_path=ann_dir)\n", + "cfg.val_dataloader.dataset.pipeline = cfg.test_pipeline\n", + "cfg.val_dataloader.dataset.ann_file = 'splits/val.txt'\n", + "\n", + "cfg.test_dataloader = cfg.val_dataloader\n", + "\n", + "\n", + "# Load the pretrained weights\n", + "cfg.load_from = 'pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'\n", + "\n", + "# Set up working dir to save files and logs.\n", + "cfg.work_dir = './work_dirs/tutorial'\n", + "\n", + "cfg.train_cfg.max_iters = 200\n", + "cfg.train_cfg.val_interval = 200\n", + "cfg.default_hooks.logger.interval = 10\n", + "cfg.default_hooks.checkpoint.interval = 200\n", + "\n", + "# Set seed to facilitate reproducing the result\n", + "cfg['randomness'] = dict(seed=0)\n", + "\n", + "# Let's have a look at the final config used for training\n", + "print(f'Config:\\n{cfg.pretty_text}')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QWuH14LYF2gQ" + }, + "source": [ + "### Train and Evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "jYKoSfdMF12B", + "outputId": "422219ca-d7a5-4890-f09f-88c959942e64" + }, + "outputs": [], + "source": [ + "from mmengine.runner import Runner\n", + "from mmseg.utils import register_all_modules\n", + "\n", + "# register all modules in mmseg into the registries\n", + "# do not init the default scope here because it will be init in the runner\n", + "register_all_modules(init_default_scope=False)\n", + "runner = Runner.from_cfg(cfg)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# start training\n", + "runner.train()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DEkWOP-NMbc_" + }, + "source": [ + "Inference with trained model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 645 + }, + "id": "ekG__UfaH_OU", + "outputId": "1437419c-869a-4902-df86-d4f6f8b2597a" + }, + "outputs": [], + "source": [ + "from mmseg.apis import init_model, inference_model, show_result_pyplot\n", + "\n", + "# Init the model from the config and the checkpoint\n", + "checkpoint_path = './work_dirs/tutorial/iter_200.pth'\n", + "model = init_model(cfg, checkpoint_path, 'cuda:0')\n", + "\n", + "img = mmcv.imread('iccv09Data/images/6000124.jpg')\n", + "result = inference_model(model, img)\n", + "plt.figure(figsize=(8, 6))\n", + "vis_result = show_result_pyplot(model, img, result)\n", + "plt.imshow(mmcv.bgr2rgb(vis_result))\n" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "include_colab_link": true, + "name": "MMSegmentation Tutorial.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3.8.5 ('tensorflow')", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + }, + "pycharm": { + "stem_cell": { + "cell_type": "raw", + "metadata": { + 
"collapsed": false + }, + "source": [] + } + }, + "vscode": { + "interpreter": { + "hash": "20d4b83e0c8b3730b580c42434163d64f4b735d580303a8fade7c849d4d29eba" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demo/image_demo.py b/demo/image_demo.py index 183f23871b..fe11b7693a 100644 --- a/demo/image_demo.py +++ b/demo/image_demo.py @@ -1,7 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. from argparse import ArgumentParser -from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot -from mmseg.core.evaluation import get_palette +from mmengine.model import revert_sync_batchnorm + +from mmseg.apis import inference_model, init_model, show_result_pyplot +from mmseg.utils import register_all_modules def main(): @@ -9,20 +12,36 @@ def main(): parser.add_argument('img', help='Image file') parser.add_argument('config', help='Config file') parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('--out-file', default=None, help='Path to output file') parser.add_argument( '--device', default='cuda:0', help='Device used for inference') parser.add_argument( - '--palette', - default='cityscapes', - help='Color palette used for segmentation map') + '--opacity', + type=float, + default=0.5, + help='Opacity of painted segmentation map. In (0, 1] range.') + parser.add_argument( + '--title', default='result', help='The image identifier.') args = parser.parse_args() + register_all_modules() + # build the model from a config file and a checkpoint file - model = init_segmentor(args.config, args.checkpoint, device=args.device) + model = init_model(args.config, args.checkpoint, device=args.device) + if args.device == 'cpu': + model = revert_sync_batchnorm(model) # test a single image - result = inference_segmentor(model, args.img) + result = inference_model(model, args.img) # show the results - show_result_pyplot(model, args.img, result, get_palette(args.palette)) + show_result_pyplot( + model, + args.img, + result, + title=args.title, + opacity=args.opacity, + draw_gt=False, + show=False if args.out_file is not None else True, + out_file=args.out_file) if __name__ == '__main__': diff --git a/demo/inference_demo.ipynb b/demo/inference_demo.ipynb index e47d964e3c..f05a947483 100644 --- a/demo/inference_demo.ipynb +++ b/demo/inference_demo.ipynb @@ -2,36 +2,17 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "mkdir: cannot create directory ‘../checkpoints’: File exists\n", - "--2020-07-07 08:54:25-- https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth\n", - "Resolving open-mmlab.s3.ap-northeast-2.amazonaws.com (open-mmlab.s3.ap-northeast-2.amazonaws.com)... 52.219.58.55\n", - "Connecting to open-mmlab.s3.ap-northeast-2.amazonaws.com (open-mmlab.s3.ap-northeast-2.amazonaws.com)|52.219.58.55|:443... connected.\n", - "HTTP request sent, awaiting response... 
200 OK\n", - "Length: 196205945 (187M) [application/x-www-form-urlencoded]\n", - "Saving to: ‘../checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth.1’\n", - "\n", - "pspnet_r50-d8_512x1 100%[===================>] 187.12M 16.5MB/s in 13s \n", - "\n", - "2020-07-07 08:54:38 (14.8 MB/s) - ‘../checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth.1’ saved [196205945/196205945]\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "!mkdir ../checkpoints\n", - "!wget https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth -P ../checkpoints" + "!wget https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth -P ../checkpoints" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "pycharm": { "is_executing": true @@ -39,13 +20,18 @@ }, "outputs": [], "source": [ - "from mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot\n", - "from mmseg.core.evaluation import get_palette" + "import torch\n", + "import mmcv\n", + "import matplotlib.pyplot as plt\n", + "from mmengine.model.utils import revert_sync_batchnorm\n", + "from mmseg.apis import init_model, inference_model, show_result_pyplot\n", + "from mmseg.utils import register_all_modules\n", + "register_all_modules()" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "pycharm": { "is_executing": true @@ -59,54 +45,36 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# build the model from a config file and a checkpoint file\n", - "model = init_segmentor(config_file, checkpoint_file, device='cuda:0')" + "model = init_model(config_file, checkpoint_file, device='cuda:0')" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# test a single image\n", "img = 'demo.png'\n", - "result = inference_segmentor(model, img)" + "if not torch.cuda.is_available():\n", + " model = revert_sync_batchnorm(model)\n", + "result = inference_model(model, img)" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/mnt/v-liubin/code/mmsegmentation/mmseg/models/segmentors/base.py:265: UserWarning: show==False and out_file is not specified, only result image will be returned\n", - " warnings.warn('show==False and out_file is not specified, only '\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA20AAAHFCAYAAABhIhFgAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOy9W49sS3Lf94vMtVZVdffe+1zmzI0URVKiLJkWDBiGZfhBMiDJ8ovhV9sfQE/+AH7yB5EBPRqG/SLAgAwZfrJhywRhQTKoi6nRcIYzHJ7bPvvS3VW1Lpnhh4jMlVXdvc8h54x4JFUAe3d31Vq5cuUlIv5xS1FVLnShC13oQhe60IUudKELXehC30wKf9IduNCFLnShC13oQhe60IUudKELPU0X0HahC13oQhe60IUudKELXehC32C6gLYLXehCF7rQhS50oQtd6EIX+gbTBbRd6EIXutCFLnShC13oQhe60DeYLqDtQhe60IUudKELXehCF7rQhb7BdAFtF7rQhS50oQtd6EIXutCFLvQNpl8IaBOR/1RE/j8R+YGI/De/iGdc6EIXutCFLnShC13oQhe60L8JJF/3OW0iEoHfBf468FPgt4H/UlX/ydf6oAtd6EIXutCFLnShC13oQhf6N4B+EZ62/wD4gar+UFUn4H8A/vNfwHMudKELXehCF7rQhS50oQtd6F976n4Bbf4S8JPm758Cf+ldN2y2G91dXdHFp7sTY0BEmOf55HNFQUFVyZrJOaNZ7V/rRRRAAREEkCDEGIkxIiKPP1QECQFVZZkXcs7elBJi8EZXCiKIiD3mqRcRARE0Z+Z58U5BCAHrxnpnEAEBQShvUn4rr6aqqGYQIYiQFVJKAKQle/vr3aULD7slSBA0QxAf2TMnrCpkyvfekCoSbG7E+5NzrsNd3qPMhXpDWvrjbdjzbORsPsrz/UoJhBBQzagqomsXCIKcjXh77/qZPVfVR0LWUQlhtV+UngrSrCFB0XWtqPdXhK7rfD3U0azPW+bF77SGFQgx2FoJAgrLsvg82t0xBltr3r74i3Z9t/bTr00pkVLycV+vF7H1jfh68Hka+g6R0Aye36dqbyxCCPh7ln9n45gzS0rklE9euczAOrunc5LVBiB2HcPQ0+2eIbFjeMe+//koE3UmS4++wz51ujv+9aSvN56itKloGtktL31ulWmefV0UBrWuh8dJTvaifRTYbAbnJzAvC5vNhkP/3YZ3Cee32d5WUjJe3XW21h/yuyoMzu4+//5Pnr6eQJinGvny93QW9M7PT8SsfF19/jrosZ39VTv3dXODVV64pCSQkTQxTyMg9MPAPE+klDkeDpWvg8vnZiJUV/1GkJX/NnJYfe+FEBrebN9JMBmUc0ZEGDYD8zQ/OnnqMkJEiF0EIC2JECMxBFJaTvQREaHveyQEqtArdD6s51P0Lmb85NQpKSVyzibLyniFImdX/QRMDom/U+ECdWx9nE/ZjFR5n13fyinZeIRgcvzRDp/pJGijI1lbpg4KXdc7vxJUM8uykJO6itRutNLjd63tP8rabdt7atAf4xPyJd+fX/sYv33quV/Wl6c+excPLzpZ9nlfcYN93cqhx/Rk10mz67e+psq6UCDGSAjxUX65dtf19frc9Xktvfri9eeq+tFjTfyitKUvJRH5m8DfBLi6vuKv/Cd/lfdu3gO0godCXSc8e3ZNP0R+9vHH9fPCuDRl37CJaZ65vz9w3O8Zx4W0ZDSn8kyQwDD0XN/suLm5Yrft6fotnCjdBtRUIt12gyq8/OQlh8M9OWciiefPbpCuByAliBH6rqeLEQmFgwUkONBTUAkQInGzYb+fePnJJ6ac58T1rqff7hBVcGY3xEDXd2XAbKJ9srOa8jzNM9M403WRYeiYcub1qzvmaeF4f0BIaE6oUpX4IAGJAdQArgBD37GoAawhCkGVLCtQQ2FWWJISo9CFQOgiOWWGoaff9IQQmKeF6f4ORJiTEkOgj7YhUs5kVdKSMCgjdDEYqFElJ2dWaveFrgNVA58ixAARtXfXjCA2RtsBkYA6CEEzQaIzx2QAHkgpMy+JOZlg6aIgOdP1kc1mKCuzMs4qGBUIgTkpXTDQmnNmnBL0Gz788AXPb65AOnu+j9u0LLz67HMD2imxAHMOPLu5ZrvrGYaelJRXn37GMi8cFyUI3DzbcdwfyctCFwOx61AJfPTdj7i+vrb3AjQrb9++5e71W5Z54niciTEQ+8g4JYYh8uy9DwghcDzcMY8jsd/y7W9/i6urLV0QpOvICuNxZFkSMUS2uw1dF9yQUIRZB7rY2kuJu9s9b97ecjgcieJrvI8EFVKGUIBbsHnWnE3Jp+Ojjz7iV379T/PBn//LvPfBR+ziOvY8+O2PR6rKPL/lO/KScfNL7PPm3dcDC5mIEH7up//LoafEpDY/v0xFfTegenitizsDRzmxvP4pv/nybzOmkWWa+fjjn3H39h7NxpPTkr5UkQ9BXNiZ5hm7yK/++p8hxsiyzHz++Uu+98u/zA9+9b+ta7IIzKJkwSqA53nm9vYNb29vGbZbXjx7wXYYvP0TdOh9e9i5VuRnbQxVrGszKUSx70/H6bSNcxXrMfWn/aztT6u0n/fzsc/aHqyGIG3uMcPXQ1p7Ye1Sr1sNOKdtnQCHJ/tilKoyfdpSozNVIPhUM2UO2vu+jB6OdwaElBZC6B4AIchu1JJqEC3vv177tHKcc3JDwcPvbDxXo+TzsCe+/jF/8JMfk4h89L3v8vs/+F1+8IMf8fmnnzGNE2hiGDr6YaAIlpwS87yQcyKEQOwiAdzgZtxrnBdyVq6f3bDbDhwPB0SVeV44zInrZ9fsdlvubu+RvuPXf+3X+OSnP2GZJ3DgpQAOLsbjCF3Hiw8/IGrm7ds7uu3A9e6K6f6OeVlIKsxzot8MfOd732W7M56bpslkfXhESdcMzXhpTid/Pxy3dl1nlICQePPmlvu7e+Z5JsaOfrNhs+kJoaPve4bNYAAO5XCYAWWcRhPtIsTYUQyKMUb6PhAEJEYQQVPieJw4Ho/M88Th7pYQIy+e3zD0PYgQKngr7xlO3nfOICSWlDgeRsZpIsTIdrvl/Q+/zfP332MIwv7+ltdv3pKmRBwGgpjOI3m28XK+VaFl2Rf5lOOLBAd9Dd47WetPrePHuJX9DATbQXUucnN/bt65pYcGh4dga32WljGMtgdD1Xsf46Tej8q7AjzG3ySgCuM0Mk0zOS+m0y6LjW0Mph9DBWRRFRUhp8Q0jhz2e8bjwpLh+vkVu+srQlKmeUKi8OzZc3bbG0IMrvdwCroBXUam/QHUMMkp3l+v+x//+7/z44cvYfSLAG1/APyp5u9f9s9OSFX/FvC3AD748AMNUtCt4o4jnxepSkKZjHWdBEJQsootFVX6fuDmJjD0kcM4MR9Hljk5GhZQGIaO6+stV9c7YtfzcNECZMZxZr+/J4RAWmZymjDYwAlajrHsmlS9GNb/dfFotZacCmURaaSRMbAnyZmVoAQRUjXAFM8iiHsUcspUq54EEA
PD4szFfo+20bNbxiWQsqJdIBd+IAY4MxigBJAIwRRy8bko9sOq49dRtY2V0dqWBEGTWbyyZEIwJSGEQD90pKSkZUFSInYR7aL10YVoUNBW2VIgBkK/QViYp4mcMkGMhWS3KGrd2kqQYHNRx7AwN2dOwbxd1Vrpc7W+k02bCuRlYcmZGJINtYTKmEQCmhOCCwdZQfzqWgtIEoIUQK51SWZnWrkoEQ52SyNV0Q2CRAO6V1dbYjwyHifevvqC3c0zdtfXSBCm48TnH3/Ms2+9z3vPn9P7nG22G8I8Mx1HpiUQ48aYGY3n1ZezhMDuekfKsGRlGQ8GZJeMhg7I1aqc6xYIEMuWSGz7juvlNVG+y6xlrdr7dwi5WLvqOjIRbZ+d+/BOyUZe2eVbuj6whOCWh6dJgIiQ/N74rwhwg1NwJs3Prw7HGqF39t7noM5mqZkHafa9CDH2iBtWiif2yxTsnLUaugQxPqMZMCB30wckzdVTGyiWf4/vL4DB11zfdbx48T7DsOHzL77g4/0nvHj+jJubG4JEui460Drz2Jd3bvSJshVPAEVzbTp7t3b0VnVnnZ3HwJk2v1tfWgDUzIt/Xr5/CO6M35j35HE50gKxU5C1Ar28btp63en4PJzQP2pu/DnwWj01D68ta7nwowcqoX94+rkBMOfczec2LjH2j/dLFZGzSa7tlXFfPOIhPvV676BW0fTdpDBOC/v7e37vRz/i5WefMU8TaKbvonmtHEgnVTdsZkKI1da8rg8Y04LmzNXNNdc3O5aUiTGiOVuUR/WomfKfkyDSVWVZ6oZmZcAW0oIQkQDD0DP0A1EKwCv6m78iAjmjlUc8wk8fUa5VHprMQghI7MwQlNK6Vproia6LbLcbV7zNIygSCLLOUTg3CIggDeiQErETVjmvKSHdmZp8uolXOXxCZTwCipBVgOTP9NFyHlTkWp6OvDqOjId7lnm2NZ0TyTpnup08WOiNTtkCm1bfbLotuJWplRqPrfeHFNTaliYqqfAZbRhj4VEW+LN6fs/HSMT0wjIOKhYtFqSDYMZ88zanE11y7fM5+LMXfNKAFIwXmL5p4DJIRMXfSRUJDtKBeUkcj0emo+lSy5JM/sRIcJ0LIGbbE0EVlYwQHhifTqP+DIeIv3vxlLdev3fRLwK0/TbwGyLyaxhY+y+A/+pdNwybgW9/9BHH/eSLIDeWt3UhqCrJJ98WvVZhKp15kSRlQt8TBLq+Z+x7E2IK8zKxzJmb6w273ZbQ9TblAloWpEBROnNeePnZS/KyoGqIO3Qm9FNWg2+qvpnEUXNZnMXi0EoaA1dlr8QgLGkpcIdHN04Fs4KKNEzRv8ZBRTArG8kYZS7exeB98YEKIVQFX0IgpbxaSzFwWbeFrn2lgF5rFDBQlEVOLYth3V6rEJCqxIsohMCm6ziOs7mqcybEYj0XYufANyXI5vmQYLwmN2NUvDkKaFY6CQzb58R+5rC/tfnyFyuCI5hsW0MTADz0o4ZlipJyBm0+yxlVnwOKAIU8JZKWkFAlEAwIFsYszR708EUf0EbBfhi+UnmZg2yD1dFn28e10csKGI/Rwihjf42EwPEwsX/7BvSKrhs4piPjkkifvSSnxLc+/MgELzDEgG42HI8JyUd217tV2GpmjcwUui6aQpAX7nJimUeTB8uMR32ereuMJEGJTMcj98c915//iJdvXxE++nMEDNReba5AleX2c7j7mL7rid/7Tfq+Y5zMovW8230JY1Myma3u6bsbxndc2ZIgRPQEuP2rA92MWjF8qpL/0ZTqp9t3XuGbx34ERA3yFi8Fmh4IzuLoii7QU+OiyinX0Ktz4TVc3bAkU2BUwsoD1IF8yiuUrFZ4Ybu74qNvRd7e3fEHn37C7vaW73/0bUQ2zhfXXXgu4wtYk+ivs6yg4bHr1/F5x9i5DGtDsU/et4wrp6Aq50zr6jsf19UjROX7qzft8RX8ZaD+3Hv51b18Dyk3XrZ6b/tZwxbPvZZwqoo+9sSH3VAg8XTKfvnuoRIpYipRSjMhdITQAjNFSOuieIROrz+lVsHNeTHF1AdA8sQf/uQnfPbpF0zH0cJ7+45hs4YZ5pwtTSNZeFYXTfZmFIKZa1POLFNid7Xj6npHWhZ224H7eSIndVBiiqmEM4+gajUrlJCvAjLace66aCkBISCaT2BoI5FdCQ51j2V1mV0mfVkswqkVZI3huoQ4DkPHZrPjcLhnTKkRoQWUG7ALXUd0kBLO5MOJp1xcHonpE1KvCUwZtuuV9piUfA37+DzYfw5+s+GCEnJajSvFMJ/X0bEHrvvscNyzLJN77k1PSW48qLrtktawJ13TNVqvmYhWXfZJIGZhX9a2GjctRnpVyESCrGuhjIU6mCnvrw56RUrEwilXWcH6Y+CK2m91HRIRJHZ0AiqRnJZmFnhwv8MelLLTxdfPw1e29VzCUS0FxfE2khLBwVgWSPPMeH/geLTokZwSKa2hrNAA0KrrBbQB4Ke6nlFA8Vc6eZsyMlLH5N30tYM2VV1E5L8G/h5mivrbqvqP33VPkEDf9xyZmnZsEZgHxt2WQFC3NtidINmVaLOYhBhMge/MOhVDrNYzkWtUMzF2BA9tlOIueUQcaEpkDRzuj+uHU8dmm5nSwpA9vtsXbq890TgGq1uioE8FLcxyFcz2Vosp5I/tscL4iiXZEo7AgahIYCihPwAaWOYM3qcgggYbyxgDsYK2WC0mKUn7OArQMpXIvXi+/VKC2K/DVRayCgYE/bnF8lIWYcxKWvkWXd/RS2Q5HgwALUroLSQPsVjxebKnZm1Uzgb05KYPZqnIpKR0/ZabZ5F5umM8zrBkZ6blHSvUtXUmZf1YOEEU34CtJVCkUT5WYZfTwus3B2KM7HYDUSEE34bSQKzWwkZh/+KeOb9KGjVDTAgHDNxnYF4yKblMi0LUVTiAeV+XJbM/zuy2HdfXV8QIx8PC/vaerpuYjxNKZpkjOb0kp8xH3/oWw2DvvokW6nEc75kPmee7awPNcrpDRIS+Czy73kJO3L5dLBTO/f2hKsXRlqxiRoLsFq60cDwekOOBuz/8PUQCz54/Z+w3oMo4HUjzxNXumv74mtGV8c8+/4zv/Xt/nfeu3ieEJ9iXQs4z8zwxDIPpaF+BfFdW4JYcJn/ToJs+8ftT17y7LX3n3+UzXf9YVbNs6zyTyT7IoRuqBHpKsW/sVifPqECh8jLziB/mkUkVXQ4srDmywY1QtU1vNJT9pCYHPnjxnC4ov/vjHxLzzEff/iU2fVcNHVlN7Be9qvYDkFQ76DzmVGdzLtf8vSpMxeO4KlTe1/y4UbK2UT1bLv/E7inv2gKpFeSdhj2ee+rA5GgBdfnBdY9TO4ft7097Bp9uVyiGN63v/y4P29p++X4BngZMpxQp3qmmV2c/Cz0EcYW3pDQRa/g2ID+fD36N5OiApcr1aRx5+flLpv3BUjBiOAFsmpR5WshpIUiwaBkxA2H0tRskkNJssmOIzKrc7J4Ro6VgKB7donoCjhUY82xrLgQzUGZFHWu4pk5gIeuChC2aFtLQ0VV9xo0Rmlx2F
eFM1YXsXcI6L/06rpoTEuIKbHMiDBu2uyu2Q09WZV4sHymEdc0LLtcFYghoEJZGbJsibQzJ+E4ElhNQFxx0iCElurCukOBAJcjZqssKse5sTqCFtN7pAisSQmnYDeoqhBgRFfIyETzMsoToBhTpohljwQ23RcU/1a/KZ1pCh5AT/uCL72yj5TNDiI1DfGAeKWBzhUfliet7N3yv8m8H9A7ICv+0rphUCRIsOqOsI0/dEXwpnRmfss+D+JoUWmBolFKqhu7izTJDS1mTzitFCJLJ0cyzyzyx3x8YDxPzNFXjeT63JElkhU7S8GAhakSjcE6miwdynGE++7IYGt9piF7pF5LTpqp/F/i7X/X643jkDz/+mJvdjSkIuorBat335EEJiuZg4TTSTOiJsFVC7EwJdk3dPCy2HEMItllcE7WFFBA5dddnhXm0+Oe0JM+nmJmnzP2bW6btaLHQbinIcebZ82uieOihFpuweabOY7ULMKJYp0+SJwSkFI2QuqgrkPANGYIQckZKGIAkt7ZYnp0ECy0U3PoUA5T394UdYzJMmdfNKQhRXZy5NqMi1mZVsIq1rhRNEUTdvxKkYuucc93TReEqOWl1s2JCJ3Shuqo1JcxFXhdWGThvy/LUooLkZHH3ofM5jmx27xG6I9PhnjzOJ/wqa7BYadSZW6jPyEoViiVUKAQhqvV98XEKDlLTNPL5pyPXL254dn3N0Ft8tGrjZVRFiwsKCmRbO9R4jX32SRgDVTNR1nUMGRIkcSYXVk/vlJTxcCDnnqvdls32ipyPHA8TyzSZB9HHMGfh7RevWJaFDz/8FrtNT4zKZugQrjmMB+7ynmfXV816XUlCYNhs2KXMtMDd3S1RDbjFoSOGzoVhrMJvmhKqmR///k/phs9QlHm2sIO+sxzSYqQYD/dcDQNXu4EQO2K0HIlP/sHfI/7Kb/D8l/5dpN9yTpuY+VBfsvQDIW5Zlq+uZvmqtjWCupqo3+g8ty+J/HwUiD32+XmI5FMArlDlSXFBsu35LtpoIRGRfApwGv3iHKQEN5xI6NyYovXfkkGmI3/h7n/ifzn8VfoY6YIJQctNDeteku60uJQq+2nP8ThyM9zw4598yn7OfPTBc3abKzPgucehBOGUHJI1h6pY8/F9qrbvWAGWrZDi7Vvfs/1lBVUm41ZQ1+pS2ljqC/9RD+nR2s76e5mL8GBMz8FUSsuTIPHBPH/Fzx77vHoInZ+Vvq/h1l/dS2ftld++qrpSJ+68Jf93DvweesfKOBlgO5/Qd3sxYR2D0/E2D5tqJoSOnH0+xDyPaUkVlAybnlg9bJaLtszJZE4M5ilw+340q6CpENlrAqiy6XtShEOaTUZkS0UQzRT/l6g9bxMNIOqs1bJa3k4Fus3gAGqg8/ZDtpEoYWMWCmmyOxNMAaHaO6p8q1685hlSDPP+Hn1vgC3GjrQsvHz9mvv7A1ebHtPV1MZQ1YF0IAQvEOEuryAdFl1jhpkgQsklym5ADeAFS2zs+pBRLSDYlXEfu9L3lDMJM8Jq84a236TxyHj+mXrchqzaeohC3PQ2XiWlIitL0Qucf5LV5rqksPjzypjS/LauzbBe99g281tE7V+Stq2zix5Z46sasCosp+GZa95d8WQWvugZ0WiIhBjoPEw5CGgwnTT4eOcHj1YP0aw9OXv/YhAxXli2heJG+dDX/lqobAYyaZ652x85Ho6kxSMVTu31/tOcHZ0DaanGflsjqfQtaU27aV286cssUzS5pO+gP7FCJCekLV4vnbbByDkQJQF9vVBCmyBcgrojeIhIiBHUQJg6akeaZ9SiI2XSiwJt4KrkiJWKlWuVPIMzeUkc95lpnCnxqDFG5s2GzXZgGyNSEEtxYyO2EuIqICR0QFd7UL1oTlkzeLiGKQqNcBapK6tUSCtovu9gcoZfkmOT3xMlmHUjCISI5Ozb3K7NyT1rQQ0U2MMpoYugRFk366rIN/3GvUSyKg/1klws5IJohGCueEQIXaDr+2IzYi5WP3Hhos7w3XOTRSz3yN9tGY9MxwO7Z88ZNhtUe0LY0O8COY7k/b0BQYFAtry2MrOeI6D+jNwAs+xFFQpjDoIpkmIewZyVvCRuv3jNeJh57z1L/i4Mq3qptGUwYoVpMItPyVbLeqpUmQfOx5qM+rrqAiwpF1ltAtw3vqoyHyfuU+bqemOJ6mX6vCXLS7sipZm7t/csS+ajjz7gercjdB1DFwg6eDXWbAnZNiK0MCGEwO5qx5IDeZkZj3tSzoQEQx/NQFKspyhdVwSUhfJmlN49nLok0uFI7DsrVnOcOSwW7tv1GJiUwDRN/P6P/jm/KpGb7/8m0u9o6aPNkdd3B+70ijxu2ac/OptzUwkJJRVh8CcM3M6ZufI0YHsqR+38+8eeUbzDj13R5lOpr7UVi5z35vFWnlb+qdzfDB3ZQKHnjP7uP/tn/Oj2e+Shhxi46j0fOXjBIt8Pfd9ZroK/fcoT+8MdLFYZ9fd/+JpPP9nx4Yv36fue0G/IYnErQ7AqbuVdSzRCFztidHd9iGaI0AXbldAFA38KaJvzARZ9Iadh56Vd21sr6FRWUFPCjqCm9FY+v+aQnAPvwivWMFFqy2asKukFpsiuRtKi3J6oRY08Ksq2NJ+fg5PyJGn+le9Ds+oey097as39celhW6e865RKbtNj++VcMX4q7NJC6nPVTaAd/5yTz2sk54VM5jjOLKXKc04EUWIfa1GEnK0CcUqJGGWNlCkai/9XinCZcVHZDgOqwqvPXjFszCMWMENfbNenG3BhVbytSFkB/db33XZLHAbPoTOjRUqZw+HIMk3EEH0nUGVo+aMYOUIxVjS6dlaTw1VPEAOsV7sdIfbc3t/x5ouX3N7tif2AbnoPv/cInIr+OkTMsB4CVecraPF0NgRp51BtnFWELqgV0gr2DiGsqSVVKkjhU22aTkFC5bviFYcasqiCklH1ypMCEqNF9iRlScmAc9EX3L2X5pklJSIlDDw02Zrtujxfp+uOCq4v1dVbDCuCm5vkbE8LNc+hWqLW9qWEpp5sl3YH27VF8ksB1yLuPexNby6FpUIJiVUWzYzTRJ5mNy600Nif5NFQq37djIJqXU+qjYEonVdHDaR54XB7z3GcSEuyeZL2Gqohoes6YtfRb7cMWzdyFEeHFobmoFkBL8BXwf1jnjQ5nS97p4eXtfTNAG0Cd4c9OcHzZ9d1Uz9QUpSqeUqzicydLNXCFGKw+O1w7jj1+1qXhi9WzbICLVYPy7AdyHPPPLmnRmvg8kkSeloSi1dKe/beM55dXzXevCKQM2RbnDEUpKqoJtRLNRbLwPrOuQrZdQwiHhuJOsOJ0RaWeRF7ukHRZcHqbVg1SPPMrVUBxa3V1W7onM6sCGshgfLw1shRXr1uOh/bFEsun/VH1S0f2UI21xE2QGRKvc1VKEcwYApFFFOai9k7BzHZ6qEbrXZgVTCVaZxZli8Ytls2VzdstjtEOrpth3QDevuGtD9g4airtc3GtrXUrMU0ggRyqSaJjVv0vmiwTd5FYV5mjvd3vJwnbl5cs7u+Iqf0
QGmzsbT7AmtJcrMI+ZC7hSarWu5PwCpreahqoijta5Wi1v4rQF4WxvtM6Ae6GMiiBK8co9kS2q+f39CNI8f9kY8/ecn7336f969uiAIxdnSRdR17L2MQkkbMamlVPZ9dD0i64q0mDocD8zQhAbabLcn3Z0lkzjmZh3KB2HdI79bZJBBNGQgi1Qq3ZEXnBc0HY5wBOlU+++RHyIvvc/3ero7ti26mY+YwHXmz+T7yxwBsLQVXRZLt3m9UnltRDd7laXNodfL3V3mDp+RGC9hMGVJCgLSANgxCBB4TUqu1vTwnYjGIZ6FsDcNLKUEXefb9v8x/lL/DF8uG+9QTdeZbm8nAf85M88KsgZdjzzwZyokCvzTMTAH66MpZFsb5lvDmjq4zz56QyJpJIRBCR/JKsCWywditkJcZukAMnYcRuZFKhKGPnioiaCdEDUTNxNbAVQBZiIvcBVsAACAASURBVB5+5mF8At0wuNLWKBoi9KFjiEKW4FWO13GNPrt9tFLTwY9G+XgcGCXSZSWJ1XwzILDmC1Ul3XlIdG23VY5Owq3qxJg3okaJ1HzwFfTVXOCmLTnjgUU4F1b+VQBbu3be9VnT/NdAZczsr6/iJDwvBGNH1oBqQCSwLAtv396xfPY50zhSxq8fTDEEB2wpsyTzjIUuunfZlFspVaBR83T73Ids8zKNE8t0RNhgAUE2UCqn75NzYnavX+ul0AqKhLjZVq/BkhKx79gfjszTRH++zUUpHq2i+yMNO9BMSTHQM14Uuo5h2BJCx9vbt3zx8gvub++Y54VOI1yVVAJ5ZHJdmsua+S6exG5GzTJ3az67yVAlEiy3273pmRKavKAysBZrc8jX8jaROif+gqy7yHLhJVju+5IWUjLgFcMp6Cp1BkwPDGg2vWaeZ6Z5ZOgiXQyUgM9yq0iwnP+TNIC1L0HxFJM1xy5Qtu0KiQoMrakfTTSU8edVwzg/Qmfdg0X7WPuQshC6SAyxAjUP2aprvdDxeGR/2DOOE0PXs4ni41va1Qftf5k0a8HoSSs1mqrIxKITi0eHrYwldh3d0BO7aEbBGE6wXaCtcgm4J46GH1a5eTJN2ixlxyuPgbuGvhmgDXOxz6EUzzBQpNoACNRzkoqqYhuCBhSZt60kOa+gBmyZSYxo8QVnkBp76gAjF8Sf6Toh5cjVVY+mrZX+XOaKrO1MLN8l6iGEITDPE+PxSBcj/bBh6NdFLMUfDXY0QH366l9UCt4u9xT7fm6muzAiV8oUBxkO+CTQ98KikRiVJB7H4EqD/fPzRWRlHGV8s2ot4ME6lCfbtG5sogWXiBCcYa6Iu9ksUja6F/ionsOS1yU1Vh+x7/PJdiz99s6cJ/VWUtKijPsD03Fkud5x9eI5gYGu3/D8/Y+Im9fcv7m3q7OSY1FgSj5e6ZeQckb9vBQkOJOnWuFCwKpcihBDzzIvBt5u71jmpXpnsjPLtsNtiKTg4Z6uFGdntOV+y7GjMj3L4QnkvNr8LLTA4sTLyM1LphcXyIX5+TiO93sg8+y9F4QQ2N8fePPJJ/Bd5f3tTV3T7RyCWSItv8R75mOxu9paGfhlZpom8jwzS6AbLM8piwkWCZmYk1kLk7DZ9uSUmZaJ49FmfQiWlxYkkscFFVhcqRGUOW2YXr3h+M9/i1/9i3+N3eYGEWEIiWU68KP7Hd9+/u4y/19GZQzLTk11Jk6F7ddJj+mDjymfevbTfn+3NrkG8xRF/XQPydnPk7Yf1VS9Elfy3IWgxHja2mlOFLRyzSj51vJCTpUXZY9aMx4VCPz7f+6K35n+LNtpR8lLeN7NfH848ofjhvtk3rXva6xnVaoqy3xgs7zhV577mZtLYjweQMyDNi3JK9PNLCkzzs5r1fnrMXl4kikg0zKBTiiweP6PaIYFjvNCyokQO5MZebQ9m5WjK4/qY7FoIHbFWh9QMX4tsOavSb/KgAL+mOvkCOYti8VjInieXqTz0YzSIRIJ243xHM3EENh0wpLNMNJFIQyD83G4myOv8xVsBsK80HWRRQJZhJ0Dxzan0HIIy+8ZNLrCR630WUJO1X8+VnSkXSePflf+02YP6Prd6X54sgUsr6lzmQlPe8+a9r60v8FBylPFSIz/pjRxuN/zyQ9/j+39Szp34PZdR991ZqBUrADCZCF1IUZPu/B9olr3R1EeLSVLXE54DrWC6IRqT8Ll9ql2b4qrZkTT6RmrxeNSQITrGsuSuH97y5wywxCJ52OnoYL9gLAQIAtrjRbnO95+q1T3nelLX7z6mC9evuWwHzmOE8u8sIuDr7NS6r/oguU9OiRMJznMOWf6rq/K+lOFborWpRrp+1X30iysDTYGCV9GJSIAYi1E0ipOxRCeNaAsFXO4r9v1KvF0HvEiTbafFxHmeeTu/hbJif76ijbeo3ADVYWkls5Synk3IZIZq8g8l76zqswnhjUHFbUCes39L0CpzFNTLKZ53fN9m739buit4qIm8HD0FqylnElL5vb2Lct4ZJxni9a5Cmxikzu2Mj3r/4m18jQUNITWuOVvUAv14eNoBucQBZKF3BY5ZB43n/Ng9SCs3oLn3DVG7BWsKtXN24zvGojihmmXwCc5d+W9lAb8P07fGNAW26pLxbrpv6r44YmhhI2UfIfs3rTi3SkHHSqaLO/N2siUajkAImvYYYkBtmIQ3obY9uy7yLOrHZDpgjJNkWlarJok9kh1q1HOhrSHGHm229B1Hd0DORBtsTfueur0xZNPitXU/nZwimAhcsWKtS5MLQLT8+j6boME8/zlcaxjEdxLZB6uVYlX1XUPtMYM/1M9niV0PQmhwxZYF09t/dL8X6gk0atvhvPcKJ8Um8tQIKy6Q9Nz3rRYr0ByhX8WslTXeDOq5oLk7s0dh/2R5x+8oO+vkBC4vn6ffrjmcPua+Tg62LEcxIy58lX9sEQb8dXLKsV7q5V5BXHPmyrSd0QPAWVZmDPEPlKhVShu/eLCZ50HV6hQs9zgpdCrgqMFWJvQCpLZxMBRLPw1IuRgIaddNCC1jDOkkk8ZmsILSlI43Fl54Zv3X3B1c8X+9p6XP/mY/K0P+ODFewb6oZ6ZV2YhpURXPKhQPQTb7Zb0LPPmzRsP+5ms0lnfeZnqcvSC5e4cF0UOE5vtQAiRIS+ukKs/Z/FCRDa+5XBLXUYLU719w6f/+H/jl//i3yAOV2SU28M90g1snlSevjpJ85uY2P2XwjTfxbaL4eD0s3cz+vN2z69/l8K7hurq6RdCtc6TkkcrrCMWhNU73gjVItxFaK53Y8mDDti+6YLthU/SC0r9GQEOMvAv5mvLf+gcpKD1fEtVO85C9X1+5hX2tM/ollrAIDRFPApfD9IWDbL33srCd4YDKS+gtg7naWKp4dOwTKNXm7PS5JoVHVctUlVJmhiXIwUxpGQVARVlXLJXADRlPWta+W/TH8kzWTqCZI+icp7jnGYNZfS5k0jW9bzSQCBKRuNAqUpr8klAE0EnOsmE2JOwMzk3oaPrA3Pc8mn6gDgMDJ1wNViVxdgP9F4BNIfe+4HnHrmcqpzwdC18VSrDEJr7whN
tnK/p9VOA7uxIEx9bLWN8Si3IPO2znniYnwajiqqdrXbc3/N7//Qfcv97/4Dvfvs7xK0Zl2IXTwDbPJthrBQQKznsgqkzIsXav4KIqtgWywDr5+WonBBKcKWthQIsiomw6pAhVGVcdEEzzHnh9vaOJWWGIRBbj6IbvNutmytAbHKtSlGIcuZYNPOCLgvznHj99gteffEF8zjZETSzhT0OvR3xpFVXWgfaQOW8gqrapeB6z2NzYhTRqneQLYe290cU3NqCtayZlKm5hBbhVdZFAaKncw+uI/i5b5Z+ES3UVToDBqpITgRV5iVx2O+5vbtlmWaur7Y+t+s5gutaNn1AwQtDFkTj10lmpnj4v9w48TiVXasnf9U/aGWKXdfF3r2HnXkx44a2YqnmzDJN7Pd77o5HlnG0iLV5IQO73VVjLM7NE9sN6FxOqdeuRoBMqe8Alo5SDOcGTk0fi1J4IWsFY8mVPyO2RiWY/jT0HV3oSDnVuRbBwubFKhE8quMGM1yEFNZ9Ia7tFmbZGDmfom8MaCuuy0e+ocSlLsWx5cJwfxzpHAH3/UA5X0UkErpkVv3C3NxDI51aiEJVP8uG02bTFWZmVS2fX18z9j3jOHLYHxgPYodbptXNXxZj30c7v8StllASFv15mszbV+G3C1ptF3x5z5J8GivjyFpghE12WRvFCiYidF1H0oWgVk0zh4C65dly8E6ZWhGlFiREFfq1O1rGSdCc6lko7RyVPjTyY91YYu9SLftna7JsBhCzUgprQZOS9xWEoMESucXCJuNZO+rztoZ12AVpXnj16Uu2Vweubm5gs6HrejbXL0j5DbpMgK7lmsWAZsmRkSqJpT4jeC6bHdxewhqyWc3qLi7Mxt4jdJ0VWlBrqYBTKQy1WtItdLQvB1zjAsHnyax/lttXMq6CiNUgjYEQpZZkzimD5/GJGHhKXjraFoAwTwt3r95wdX2NZmWaJj7/xA78fvH8GSknA8ohkAkcl0QvZuE3IBhJaSaXfbrMRBHmnNBFmQ578mLV+qZloSvxdBIIcWQ/wjRviu3LjBjuXZASmtnw63lZkK5D0sI4L3z2+i3Pf/z/8N73/20Syhf3C8P2g8cZ589B0YWRzfiaPbT+/8enczZ9HuraXnN+7VcBbGuO2xPPO9mURcF+uMFOwisVasBJNiEcmjWMh+XlypepeSghNBMq5SiO0D6qUuFQPx0/JGsmNmC8ZVPrfasy0/Jg/wQp/DSsWrgVjiq5fKbcrPvTPh/p+P1l58/1PJa+KHTGInSrPI8TL+J8krdmY+yjWsOMMs/ibAbJlEg52/gkOxg5eXj6nJR8XOqcTDobyEsLc/aiKckKZuWszCkzLzMpZVLyswqakH8L9U5Mal5SdCnDgBL84Fk/wkQWEGEUk5lZE4nAVfwECT05JLSzc7TC9pqr3YZhs+Fl/JCuHxhiIBK5UysgoZSc5VPw80DH/RJ6UNBNVk9eLe5Svmu+L3QOrgqAO7+ufCbuuWp0KzeQtiAC58WPATdTuOd55sf/5Lf42T/6+wxxYzKkGkVwLJPNw6YmBzovlFHSPXK20uJ9OSNNpIb1tWNZ2F9KAoPx1RKeWXdpVSDU1qmseVrFuFxC0+Zp5G6ayVNiO3TrwDWh/Harpxao5ceFlCzvro5pCe3siH3PdtNzOIwsSyIfDty/ect8HBkPI/O8IEHY7LbcvHiPLsbGYyvrGlLjPSd1KtrRL/p3CVsrvXElv/I1CSuI1+x82FI4rB03GrpR2OavrBEzBK1GqYbHYXzHxn6yitCq5kHtBmJngCJp5njYc393x/FwRDSxG/yg8FrIrj0HzPY2qOestXy1vMda8+B0Wdo4rJVnnfs9CRoaWNboK83XTdPB6hUEq1VQikOpmjFrniYOhyPH/R3jNLPMi/1bLH9TCaScUNaaDiYf1F/86aSAtUjTCt4q5Cw8WGJd+xoCEiMhZ6/m6dFruUSQrLl7MUSCF1hrrZBVA86Bp2zF4sb1amwyAboOW1Ub361NfGNA22l5dU+NLKWbAdRzrNwcO6dsSDebBavrmnKxxTwSHJzJurBL2uXastaN0C6E4gFWNdf1ZrCQxi4Gun7muD8wT77A8gqWNpuNeSVONpC/AELOC+O4ePnWBNXDx4NrRdRCGdrEYVE/X1GrZToEOWEQMUBO5jmKQUjBD3csVoPClULJSzjdzMXytlbHccUqOMMPTdldtNYfKODB5lPdO7cWokDzyTzXipPSCkdqzmBZvCswLd9Zb7OIV1gs0y1+/sZq2V6ZOhzv9yzjyOb6iqvrGwSxs/qWuYLqAp5KbH+xmNcdpVrXoYiFbanXGy+erJUBUnekqnop2kgXxatLekW1s0pEZS6aopGg5jBTpXpNy44XH28JAQl2lEPf2wGVYZrMW1ksqmK5g5sYmCY7Jy8GYR4X3s5vTSFIC5qUt1+8ZpkWpnmE5N5ShCV5MZqqshszCs06Uk+6zz6pR6wIhObM7AaOjK8pVYhW0OTq6hlK5+V5ZNWCzgSJGXEycV6IsWN/+4pnL3+Xw/yMP7zvuPnw5wuNfIoKl0j+7uthJD8/FcFSWjwHaXry+0PBWjyYwmpJf4y7PLjvfGwfa7sqhKfvasV4IoFmv/qitG0jtRoovkYKK24LFLVWfYB8IpSVRMc/vP8LDEN5bunXw/epoOCJ0jHlliDnSgysxpM2h6u0e84ni0W99SIKt3nDbT5ffy2EWGdyZCSKoJ23IWvoeO2PNgkBmvnlfiKKgb8SeohmknuhcaU5pWz5oMn/+bulnEh5Bs2MSzL+58+ak4WJ5pQY52S8YLbOLLowLgt5nplSAp0QlLc+WhL3vInZwqH6j9ntrthsOq6ePSf070G34VW6ck/P46X723ktdRAqBC/KcTOa7YSsyvL6s4COmq/cAKoTW1DTbhGP+kj75Yt13TWFxXh8PZb3GaeFP/jh7/Cz//e3ONze0z0rOsl6UU6ZZVns/aN4lVRqkZusypxS9bypi6VSVc+MyKVAlUfsaF4Bndo8F0/DCijMc7Qo1VRhhYCM54+jVSBGM33vc1e2r3jxlTqODiSxnO3FwYaNvc9ojAybDdvN1uXsHgXikmvlxhADIVk4Wt8P9OX81jo/4XT4vJx/620B6nEgti7MY28BLScraZ2oB2tiBTSmX1k1ZHu/bNX/2zV3Jq7K2bdZzRNTwNPQ94RusPxWEss4cnt3x3g4ojnRd5HtMNB3PX3XOR8Fq3J+tnZ8BJqVeIoGHvymZ588JnHa/bnyrYx4Pm/T2klXXH8NYmfnOWDLOTOPe8Zx5u5uzzLPzPPMMieWZWFZkldQzZRwilPnSj7ZyzZHcvLzpPBdKWZYVAk3spSiOIU3FA+2BtPLyMlDQ0t+YUktwXWs4HJMauSstpOuWMh868JvhuZE3kob9ilP8o+WvjGgLXi+kKpYiFgtg+/gCVcmVKml/jWgJCvUUAdqBV8B9YpYsC66bKHnTbhku/gLH1uPWSsKb0fXeSx/sLKfyzQxz+bSV4XNdmCzGXyBrDlABoBKWBwc7g/c39+T54Xiss
6hna1msps2ihKhhtoMv1TFAQcMDm7xU9sBg/6nBU1wy1xoBFcQa0+97ahKllDPoev6jnm05NQCxEr1KmP6ptBTAKdILaQRPVcthlAP1T21yvvAi6la5bBwlbQuZk0e6od/VqyDJqz6zhTEMM/uCX24A+Z5YX7zluP9nn678zFZ47hNUK+hHDUkIuOHjJpHrlphEAvNksJUQhncat0r7aZ54TgmpiEjEXabDfg5gidMvlEX14hyC4kSEmhX0VwxAoUgVsAj9O65jMQYzEIuK6ouDC2IsNn0zOPsc2gFQHJz+qPmzHI82Nt4+ToBhmLxVeh88Qy+9hZ1ZtVUBCteiZAyoetrAE1E6bKFOEdZzSk5JWYHv0GEac5mVa6KvQG+lBbm0KHjxOG4Z8ozr29f84H0xOHXTooff11kwdgrrJlR/MTHd9af+6qkzT8e+Wm/fwWQ1fK0szZPrmsWXhtlUJLQW8/aY88pe7coK6HdR80bVSDn89eazVBqmfuiqNdwQD/Soj2I2wxWUt+rKOLnY1/uKLJT4KR41PnbPOWZfSx/sRbaKEooLeBYFQd1HluOMbDr1nDMV8vDIyvattAyTuvxCUcd6FwBLu8Q/SVVTD4IgoZyTtLpyJTUgiBUr0EQywXv/J2eh4nn0Q55JhfFNzMuE4f7e5Z55Hg/cjzuuR0X5mn0XNZEHBPKyH13z5sewqcvGa63vHj2Hu8//5C3+YZZdvQxUqsjN5NSgbuuc1cAWwvGfHJOi2cU3ikP5zP4Z+UMsRKW3hbeKIpcGXuador8XKNawsmaa3S2E1JV5nnk0x/+U372j/4v9vcHckoc9vfs7++4utqZZpJXD2kMka4anR06ZAufjV1ks9uYZ7sYQnzus2Ll9invSg2fLSt1tZWsOVfrgKqX0acq2lnNMxIxw3Rpq7SXl4wZxu04F6oxsvGcq4XjBzLdYDlNXdeR0sTr2z3j/p7NMJA7K0a22QxV47GCZWsJqKIfnunJJ9Tq7RoaY2zRl4LJzpQ8cqIYSnwMNUHXr8q9Acs13z1K4WpywgtqPpiUESohir5+KblRHSH2pjMtM/f3t+zv9yyTFRwZhp5+sEq2BSisRqLT4z3KbHIyK85nKx4QHw7nurV40GNtrWvhtK0WqK17JGZlaa3L/s4lFFr1yDwK9/f3HA8HK6wyzV68r3jYkjlA1O4seaFt+O46t65bVX6qdUzK9VVeaGOcq5EcRXPzPeygLZef2PFOyd+3HKkk4lFM5dis5JhNVwfKA9SlFria/UiHNgusCE1TK4vxrWGAT9A3BLTZmVHr79SSq6pYOGHvkEtBl2RDriuAUc10IbpCWBRnE4x2ynwpkporiIt10zebC0DFFV0BD0GwwQ700jug6Eh9b4BGYVwWttEry1S2hgFLBStaYjlAyzhxeHvPMo0GZLoetgP9dqCn48RjWDdDyXvLtXzrg0P/fMEoQBoZuo7NbsNCZsoLUrm2eYhMuc/V8qPQMB5nZnktXxxDoOsD6GIKWC6LemWGli8HmoUQ7biErIp4XltuGFs7/9U6JmbxkGJCdIWlXEczs0ENSKVkhWMk2Pk1eFWgZS7llE8GCRSW2azJXRfpYzlsMTB51dE+BiQt1ldXcEqeSNOV0pyXBm7X0/q8mhosIJoYD3uWac/8/MZCEh0MroreKaA1MBiNOWSIIZNzrCEx6uA6xKJciws1s1ZaZT8/aqH0JwSEjmEr5MlCTgtoN+VGG03f/MKqll+RPYwg2FapCkwqB/emYiAoSpGzySrlfQh9UIKABjjs9/TDjtjZelp8GCOJpJmYO3JK9t4pgWaWtEDf8epN4Ob6DR9/8gnzvPBrL/40w/YZ193VWr2Pr4eKn2DOiSiBoy50RLqyfut1f9R2jR4ofXw5MGv/PveuFXEOpyCt/F3XHWuo7nl7xXunFah42yJ0URilMhZvd+28CTRxYKX1vtNZOe1XSquyAEIg8dde/H3+j8N//ACEtmzwEbWi9uWcE6xvvf71FHBTihxpxsU7Ia0mfP4MWcesBXLnQO9JKvq0n3Oiqozac8wrTxQRJFFn68HKK6Ko5BBVoGKN1xGIq7f/FTteZ+c9NZc8kENGNg72vqX8Sn9Hn4/c399zuL1nf3/Pm7s7xvFImo5MUyDGzDLu2b++ZXP1is31jm9/+CHbeMXP0vtmBKOrRU3KbJysYVkBx/kcLSlX0FrkyFOKaFvd8imrfaECBh9MSVXaz0faQ3+lWZO6sMwTn/74d/nhb/893n7xhnkcTU7GYhRxj0CyggyhVBFEDYRbx5nnhRgiz5/fEGNkOh7sTChfhIIZ70LSakQBO56mr0zJC8j43NvZZTbY0ce3rOuS+xwQNt2aRyaynnsmAurrQxHEqzhHWTP1S85OCIL0A5utmbnmaeT17R37N2/oNzuGHpPdbhTX1DEva9GLImsrMKo9aubXdaQKpk8myMFk1vV7sFSMCtQzaECC6XXFyFO9vv7u6qDN7i8gcpXf52spxsCSlRCVhIcNBuVwvOfN2zvmw54Q4GrTs9lsiH0kiBlecTCxtt2Cs2aiT5aqrnpA5T+PrdezXLHmu0IV8InUsMjWkJVCu1uLgLe9k5bM/d1bjuNkx1vMC8sysczZQsKTpWrkE6OcRxWx5vKvcoMK2GpExMnZeBY+XIDaaohpdU6/zvd2EHOaSOwszFsN2pZ07GLcsccFRCJRxc/IXgHbiTqePdIsylpQrxrnK0P295LqqSvb8V30DQFtnMSBtYuyJhLmTDd0LKrs93t2wwDiBUHE47y7shWVIqIr2q+Ta9YTaujl+UZb7wV8w9hDyvllOLDKeWXqz7qdV/Dyw7ZVLZlxXWUGgzxmN6dMStazvIzklNAAV1dX9H1PDNEYdl4FVSkVkiv3OptggYiVf9VsgjVI4P333+e+v+ewP/pIrIplbn63g7rVgRVetAJfTDY2Xd+Tp8XaKHpaPGWi9Q9fpF20Dq2xxhnVeHLtyVlzrOEV6zqgKsRZTCpmByHF/Z40k5Zk4RSbgdhFunlmcq/bqixZP3JWy+moQiczLxZitP3gBtGJ5Ec9FAuMumX6RNirktLiIStUkOLLB6GJgxcrqkBK3L16wzxNa4XIBhS2v0v1QmeyLmh2a2e2AA0RQVOqMf0KJvxiXIWyhEa4GqOKXQcamTw2v85fAdYezirqawLPoFMLlZKm3+mszxnoRKzypq0uC9OdFqTkYvgC1iAwZy9xPZGlh5KvIJnC9jKZeYG0T4h6cRc3riR5y09/8ns+xj2f/Z3/jps/+x/yG3/mz/Ptb/0S0cOLvk7g1ktkxoTOp4dXfHB9w3W3e1Lxf4pOlY7z7x5n4S2geqwdaADaCVh4pK1isXwAY9a/z4EbDl6CBCQMwOwed7GQoUYRKHkg9n5ih9/ie70kpldFwM6KLPHpNRcY6J3ft+qBrWUfO304v+2150rekyDnEWqFchG0qxZ0Cj/b8W6tv+vXD5/3tLVb1h/NJTW/o34vheU+uf5E1vO4zkPI1n6tBa0UCCHW8CITCi4vPNf20/y+9f0Kbp4t/
[base64-encoded PNG image data (embedded image payload in the diff)]
W91wHbv2171/DFRf5TsCt3X/5zD9nqwLsM+O+vOdy7lbOckQokrEUYHr4Fn/65d/jj//0M7z7wxc4nycQAfvdiBj32O2iJs3iiGR1IM/TCcfjI86no6ZNl1xTzpeqebf445IxzwuSBfj3PSLrSy2dYq4WHksUuJEJr4bqswAor2Vyzwa/Dg2U9BY3MSAnAohTQl7V7IIIspUAoNCATMM8VHlO8RPWHS4hlaP8PiKATBDUWnLr2PMm8JhiC85HpfJul7lgYGCeZ0zThLRksGRE1qRm7sLuLxZ/Rwe6qh22qJu91+iklC2t//rkVmU7rrcV/ZCC8+mEk5WOmc5aPzcVQbC4NgdWbAloYH0IXMxFkmsBbZ9vsbUjBztwq1rXj42Q10CT81mTKbrx0RXhMCNXxS7BeHZJFSxLffY17tEA+2Vf9PpewctE6n1ihJwIECKEuDPXUqDWN17tUVT+72NsskB/ulQhMy1JSyksE1LSsIvIjHEcMIS4Kl0E8jj45kEjuDLXvi7tW2wNNu4FNezUuhqCDZUYzJq5PETW74URBj3HAYy05Co3afgOVHnMNp82p644KkVphvBY5yCEgGGMWse3oCay8xp0xZL2RG5gjVzBvTUCbdr3A7RZ6wk7oQGsKpp0GrI9fYqHrNoljkowSlLpmckLKa8JKooH2wKleK0wYFVfol97IlDoxUztYwChpt0xwka9L7Y9g6GeeLeWYElJC+uJAQYm7Mexpi4OIajVoVoEWiC05Kxp6alP1ABjKPo554RlNiIeFFS4UC1AS6EP01ia9gkG3ohIwYVrCou6XcYQsHABDxHpfEZekrnahQ5PEDxPP7EAxAgMiycSLOLMpR1+kYI0a0Dqcn5CHEaMd69qLRki0kDjYQ8O6jpZTFuzM2Ls24S6tbwQnsVjjPQAFlJXWM86WWPXqnXSQK1YMhLQinCr+wqs0LYReLfqCZoWzjoW9gcMgRDHqHFgFuR7DeALADBr2RsW22NifUHl2zoU05Ya/+szZa5n2pwvqSUJqXEZKAg5K0E3Qi5Emrq6bvtO6LT9j0pkAbfiEoCBgCEE28Pc4dFcQSNHRl40YNetfpqDVYl2tqB7MYBNgTHNM+K466ynqPXStOua+wzie64AKDhPGefzjDHu6hiYgDEwjtMCogHLUpDSCa9fH/Dw7Vvs719jSQt++Q//BR+9fo3P/5v/HjQ0d9wOx7d56dZvy2Jl9f0aqPV/++f+0VtWfY22XAMnPci6dk0P+giXroRbYLV99vb96523fkYfn+bjKbico/5Z197TP8PveY7dbcn7S9p1y9rzT7kF9F5y70v79Odst8CY7z3p3smVw6wBWd+viz1t9DSLpqdPX/0Sj79/xG9+/wXk+ITT8QHzdMbucI9hfw9GwW43ouSkZXSg0boimhRqOp5wfPeAh3cPcFVCKRrr60K189Q0zapYFEut3QYHsAlea1ZuQExQoAKeYZh6nUl/q9nzz7kIRmpK0H6pKhWuz1lbxQBgiAG7IVa3SPH4OKCmofcEEX4q3G0LgnWtWqAqMmu/sTlLpMrLMA54dTjg8d1brfaSpWW9BJBKQZ4XnJek6ehjxGgeQCpI29y7Yt0JPTNyKqgpFl2mc4XXonKMZyP2/bJKotHtwR7GNRAD5DTh7TdvcZoTprOueckZHAOG3QEhhma9orZbPbsnw+Pv+7cSvPZfKdnyE6zdDbtpvdFuU6XODwcAYVn06mJ8LjBjsUQ6FUAyN5fFK28W8Wvbe+uUc7un31OtAHsASMMlcg3ostn3x1X+38Dadj60FeQ54fR0wrIsCATsYwDvBsRAFr5jzzdZs/a4Z6ZdqIPGXLqc0nWvm+ppSUg0IwswDKSKHvOGSouCNWZNekcGkgVapzgtCV7sm6i5QxJpCJCIYJkWc3MO5gEEaDhShghD8gSiiDAE7Pe7arzIOaMULeRelpbILZiFzeWwluf7dvtegbZ+BRTNM2oApIMSEwhrnaRckESwLFkJs2XBYSEgFkjRNOOBWpUKqhkR/J81cb2UHMyNLJf1hrKLV/VmrDUh6FJUcAsTLFDSNWgOluKgFjaPS/NsUu7mKKVYhkmYhrE7qIbG3Nri9NFDhlzIZpvLOlRmyKLByBoTFGwcpCFg5Mkt1iKmEjaFtVpvQkEwc0YpCTXDDgSgANjGBdwyopa80geBSkHJwJxPmM8nUBhw/9FrZeRey8bipYZAqmEUTU6z6h/RhaDKTCAD985YvNC3uO+CyFrb4WAfaFi92yMVk0Fj3iyGu1t3qXOt2cEW0LBDHEYwFU1Ksom/c2KGItVHnUmQpGl/e4EW3qftVrb6H/2jBTBNLlVi4Sn2AUbmTsUMtWRmEa2z4oy1i6rYVwAAIABJREFUFESL7/P4coJYILy5CoSg36cEigzmqPuegmo0a5C7neksVZjxszowI4dGHHWAbEBuHezuY8mS1RIGT2qk6jKNVcmYz2cs+z3AQA5ksa3AXQw450WLjgbG27cP+IvPP8ebb95hGAdMpyf8+r/83/jkP/x3WuKDNdbDBdlbovQWjFwDJrfb81DkpeL7c9f1FrMtoNre3wt8twDkLeCF7vst2Or70P/+HANb7f3N8/r3bsHin7u9xAL15wBr/5LNx6COcG3ut3tAuuuB6+ANaN4Bxc6oFMHT+RFvv/gZfv6//0/49o/fYLdM2H/6OcIYMYw79RLJCcMwYtztMJ3OWIrgeD5iSTNiAQgZWQrOs1rQAgEpJ+V53JLUC8TioRMggjBoNrwinh3SLSc6qNEsPQSz5tfPbmnqIUOFXzXmyXmDXqAz6X+rt4LObTHezYSqNPbZOx6PePPN17X8TO9B0yvgWjzV+hopBYEISwEKdwrwKrSTeRFRdaUUIhwOd/jB53dYpjNODw+YSkIMWjC7kIALgYtgWRaMzObGFlZhIaKdhJ8yTQii/wukArdbUqSXO4bQzr/IVZdpvU+zgRZQrY8lpZPHhJALUFLSGnJJuWbggGEY1AvGhKGq4HYXReZaC7XYvNQyApYunAg1gyMzq6uidMCrJzYmj7WmX6o7bWvbZHjRjA8ZJg+aT67YehJktd7+Yuk8cYDmvtevvVs1AaCIrotb5mrylkgYBrPymXzVvaZ7x/Xxdculz2bgMFpCkWhz3XldbRPJ9ICtj1f3PRFc2KrMQbr3AUsu4CD4H/+HL/HwOKiHT9I4Vs9QHgIhJUEIGTxEcJCacCREjbfjVssdIkAcg3msqQJZ90ZGySPGnbo5gvS8GMAAhQFhHDSuv2juhMzQWnzB0ioQmUxOFQMIMa7AifU+ef7nf71WY34uTqysPlbTLLfkIICARG0nuQiQtbg0srIgd4lTFzUj2bpD0DQHsnqbgOBFn8n9XGtX6Mbmba34nncB1GmL2P0iVVtHRGZGVQ1W4FgDQzezBEcbOWfNmMnRgEPXD9GEI1LWYorYRvEr2cYfLNCHmMFihFHUsqaH2uYVnmDC+6a1bjQ+zQR/Cw7lwCi5okSArJgjqbuAd1djqSICk6X6v0xnIDnh6dtvcCTC7tVrBGYUO2
nZx1sKWFizhUqHXDYSoogBAXM3UHCqSdh7Bl01q6Z17YPa/TATecygvqgPQvbabm5OF5trATCfzjifFtynghAJwzDo3Ph+dkkCOvfsDLdr1aLUrafOJzWNsH93Q2KvxIJZgZS/um5WHWwRwWKxFn2bL553+cU4DBjGAAGjSAbQgofJBIjC6tbJ5PDHBcYMkfbO5hIkkLQg5+Q9rusEcXteUxaIFAxpRpJXIHJfcgVcrg1flgQmYB8CUlaVZwwBX775WhMCQTODklmje6bi89YLsltAswYSLS1zHznzoRaUl4G+9/++vW4L3m69qwdjvUDv12/BGV5wLXV/97/f6uu1598CgM+1567ZWkK37V8LkL3PWrfdP7esZ8+1HrBt2/V1WPfJz2CBCr5zmvF4esLbP/waX/zTL/D1z/8XcMrY7+/A4yvMWXCQUul+IUYqQDqfMZ2OePv2HabjI6yKDna7vRZCPh9RckLKGfM0Y9zvwdGACATLnJBmA2yWrKzRVzJFnI05BOzuDphPU+WlNY5GB1lHWkqLYScisCVvKmiui1v3Ob2WUTwBQZUFjKfaS0KMGHf7mp6+vnsjqDObxwwarW/AEqsyAx4OIYKW0t/4lwDY7wYQRTwdjzg+PmApBY0jAsHKKREThlHjtHve0xK0WScc3JqCjSRXkLsCKZ3AToBpldFcStHJNDxoLb0QkeYFKSeVA+1w1xijccBoiVQmMQf5GCAlI6WMGByuW+08UZdYklBj8rV11E9KDUmQep+Bv22GzA8gA1X+6b6LpLJcILXyFtH3evK4Js1L+4ekhbXQ+oliWaKl5+e+25gsvtFyIwwD7u/vcbfbqSwtsgp32bZt3Nzl+BSgK6Dp+QjVu1YW1yvv8O/a3kLdYyZ0VkygeycgxgH7/YDj0ZQuTChJ93wI6sqYkhbzpjlj2EUwCpbF9nG47EeaF0jKGgcpBnKJQbGNRYExoUiEOpx1e4hUcc5QXACvDAY3MpFf2e55pn1vQBsATPI1dvI5mO/rYvZ7o8+QmJdU/bcLBeQiyMsCDgViAckhRgRuIMAzRzrwIwdsbs0DuvmSatlyACWWPt3jgKSz4xCpP6xr6Z5rDjr9shp3Z0Q/BLJYOYKLIA7yqACpZKS0QGIEh2SWPnOZsx3e0r3n1Xvr4aGOCNTI4bLytVZtnqW1bRK9EubgG62zbtbgaQJxqgylCVgKVtX8Lxbsrc8LHDGIgqmUitXB0Pu8CGrOGTKfsAjUPdT2BTMQzcQspMRbtWWpcSfrh2ozTAvqB56oxm41n2W9o98rxabKA8ttm9QRUi/5idEUA63eDbfYpZTx9PadprcdB8QYVrtGLMSzqpFgDPhKjaO6vqAuSc9W7NW9H42ptug1HVtgrXFUfdMrVyFjpB4PuD0o+uw+XrASW4amgra55ZryuRfR102HqsHyFcBLWd3BxMjQWo0xRP3N/N/VbTSYC0NLapIsSyZZ8hCCBwYzOACn8wIpC+7v7xGC1oCjoGszzZNqenkACODHP4E+/6n1V6pWeb0WLuatARzVeb+9hv9ft35lZfP99m9vWwvdJnrm4tPaqew2ULjVBC2OeHPsVn271uf3AZJbsWn/GuDsOavd1u3wFojrr3vpfvJntHV8edyebFa4SEEqCW/f/BG///n/gT99+RXOX/4cNGfswBg/eo1xHFFSwdPpjKesSYBQiiVh0GQlKJqdsBRTnpKm6M5LRspF64CmpKCpA2FpSVimyQBbK2D79PSktdo+/bjbIIQYAyRllQ8gqMyh/teUk1JD2C+EzLbfOuscoElJ/AIDhEIqvwiAOMTKS/a7PT797DN88+YNSpqrMOjPrt47xliZCV7n113A+tgiKQQOTpNRZZkiWis1Z8HT6YzjwxNOpxNYVIEbmCyWWp8dghUKj6FapFZjr+Cq/VDsfJfKV6kKtgRUvlwsC3CvaWQOuoONpw/jAAFwPs/IKemTS+ctQyrDhcCIMSLHjBwzEjTMRMQSxlB/XpzvBRDlVYIsn69iLvwOJr2uWOXz0kSnnkGtvZ6uA5Ltvmk7zaU+qWUTinmIiHuaoAO2QM2iWPmNuKxnCmd/OgGesEWKprw/7A+4u7/HfndAGMa6l1ye9LFWY8mmv5ef+8GZQrhTljTxWjSTN9zKtDlP9W+3Krsct3kX+eMEQLA0+ppJ1E1oceAa6tIKY2u30pwQAoGr/CVISdTqCU3zP44DYBnks+QqBzkt0DJWBTl7zJy7Xbc1IGa1cnNBEndx1XdURRl5fO316fT2vQJtc3mLzDOGcld9DnSdmp86uemnA3CaojZr7Yqg2noV7qhu2SytpK1ulWAH3gOSrROdTClmKfHJLLRmgR5x0m8493Kj7hB7P/Xwox0Ga+wgAnqouKYRVvCmY9fTQ0TYDVGrxs8zhhAQd1ovou+HAziuaoo1oyWgcYEMR2io8U3aM6h2SWqpaIZASqqudJZpBO7i5mmC3QXDD6ozFCbrRwjgrK4iImrVBBiRGRwEIlEPQkr6LAJSB5iYNH7A0/S7P7DGPEVwGNRVZJ6B5Qw4yEMTTEQ88Uj9oqXldWLj7+xW3ZN6KGHvfuvXXJpgmbqvI2kMQQlWYiBnTKcJC3eunN1zBEBhLSrpmSwJxdLOhj6zs4J0qz9SGb1lMuVunjzQ2c8SeayjZWNS14kujtTN/hdNO8rcGHIDbsZ8fN/rTnI4BnidFauj6NptnziBPYvqoamlPQgFBUBaMsRC03zERQpyn6Y4oGnElwQJ6spQGFrkuxvXNGXcHTJmAUYrYbHkjBgi5pSQTyeUeQGevgQMtPXLdattAdv28/ZvWVGq68+6/msjXzds9DebT0VPBq81Qnd+Nn14Hxjq773Vttdv393LSGVzfd8f6X67Bupe0m6Bt3+J9tzeuOWCeA2s3frt1ve3xlZspVwd8b55c5tDKgnT7/4Bb775A371s5/h4cvfQ1LG4W6HYXfA6XwGDyOGccRSjljmM86nM05Px+o9Id5PF8qrwAbs9jvkpFa0xWqDKt/T3bDMC9I8o+SsNY/c0lQKTscTTscjPv3kYzgv1QQkjCCWBdYFVBdSZeMq7/0T1EQdOk/6uS9l4LuRiJA1LRpEBOdpQZ4TdruhPay3PFVivZ7jjrXUVdK6oy4LudLKC4FvYsNIXQg9duv09Ijz40O1QnGM5jkjmtiNTcFncUVeAsgVdDWBhLWUS1PmGv13ul7Is1saqKnzdoXGmQsmm/J9mRc8HU+YzhOGGDBaivQmM9k6+VpyQPSsg97Hjs+v3kkMojUlIQoVWFYwJMp3gNKAIuhShyoOIC7XDzUWsV/Qxh0ycv3OZa3z8QyBuvcyM8ZeDnb6RNrnq4DGZTrVetjrBBwiPv3hDzFEBaXjuKuJvHJekJYFuWaX3D60G+yN1se5kVmormWY1v5fgra20cUjbC7fK+06ldOlAn5mPUdpWurSX4u9K5YkhIMpcU1+iZEwDITdftT4twKklJBCWiWVK0tWt0nztmKWGrOnCQPXRqIMrTdXoLK+D6NQUUsrcDkXm/a9AW0q6ANcq
0krW2YiTdbQEQH0i6xoACDzexa1zMQ4dPWZNNjSgUnVXpBaLkTTOplWwRkEmXaFEGYT/Bhw1773CSgucFcFkvonNP6TUyUkxa73g6gWIxVPsxQM7rpmQx6GQecla3ZNkLoA6MblGsTa3DicULuQ3KF5A4ZkRG5F3OyAV3cAgQVyanxfjIwU2Nww3dXOfNYtZolYNR5eaLJ3oyHSA52zZtFxlxqHeWpm9jgonchiACCXNndkjK+miBWNdQTU2krjPSQVyHQGlhnIpQNrVO3vfQp4bA94xy2doft+bAKytIVHJUn1NzJwQ8bwfY1SNvegXilAqIZQtyCRNNcctvkswuuuClArb9Z1NMhERjixaVXAUKbChFrXzJ9SE5bYPgBQAbp+1nnz10Znlu56a2uloJjMbcaObke8+7GklPDweMK//5u/1aGJRpDp8SzIaYbgrsZfqra5sxCYggTE4I9foSzSnQtluAoj1VIcWTM7HY8zcBiw2+3AIMzzjHGIKAI8LSfkXBAsHfQ2LkFn8rYAfq09d00vqL0EOvTAq2/vE7pv3YfN99vnbPnprfeo6qxXCLVx3brnGijcbvVr77x+XTvvcqUP+pkuvtM7Xwb1eoBzCxx9V2vdd7n/ljXsqqD8zO5aqzvb81bPF8FpOuNPv/h7fPuz/wtPj18hLxn7IWASAQ8jCAWBBFwyMjGeThNKWlCs3I3G+8rqmd0L9Z9qBWixUwXK2nJSIKeJsZQmlKLJS2qcNrlKlyCWGIWYzGWRTIiXlVWrzxjs7+y9LcgUq9rnLgZOxIQ0aFIlERzPM1Ip2N8dsN/vkOe1k3mVcUwxTWapE/++/uv7AVawe2sJQe2zu4mhMLgk5cuSUeYCYsYYGYDSslJUKZmz1qJyzw0x/iowxV7tAtWSLxAT0CnDXccJhIDcpU1Bx1ubXAcKRt8JCBHjbsA8LTgejzgdn3A6Tygp49X9HcYYwCFUntmvD3PQmrYhaNkWzypdmY17zzjvgTJaKgADS5rx+PgOQZbKK7Wos7l7buZYyuZEXixB+6Iv+UDkYSnr/c6mJDUMgDRpFkyOjN1hD46sbnr1Zf3bTY4h1O/99e6FxAaWMgpSDtjtIgbLXrgsGo6QpxPO5zOIgCFGE714865bHKk7F76+/ZyR0ckqY0ndE6twA7HEIy48dK+rawlgSRnzkjAvGcO4R9hbrLk/Wy5FuWvN5UVvITCGXUQcRxQJIEqgIWCMmoayFM3+PheraSgLMgJiZHz51Sv86pcfKRA0Och9hcjlXrfIeTZ3eU8gW9e+N6ANdVB95zXzIBUlBA4MnNACqCAOAMIQMI4BIaorVCFCrwqhYJkTTGiusio6MAXVSmjhTO2NCtiE5KJNtwe3R0b7uN3g0D5GI8QCIESzqikwdZO1WgHaHERSYs/BzfSwYEYNooQIct1wZCmFjYjDiltn1UI4a+7Zt6a+1557CmNNx6+jU1pMteC8FyEnBJCENv8VTLNWsQ9NAA+VwEIPZ9EF4BiwGwc8nTKOpzNCjIijEVsIKKqbWk5igMCyZXZxTspAjBGLoCyzklfWWIV5UQYS44B4eAXaFWA4oZzPyLMGNBfRwqIlC8LQ1pJhJQrqzNkrjYkFaXGSG2XsSuCobtLi39taGEELBhrc7E6mkizOMWEgzSNkob8FzmBqFqjKClfMvduCm+9W8QjBnutMGh3Qsj9a4LklzKFL0l2FFD+nhAriRVzI97tsvB1hF7iRXZnXu2/e4vSjM17dH+wNxYAWzG0qg0OEx7L0x46h8WcMAoYBc04YuOLITuxBDUoXKCMuuSClBSEM2A1AwQIaP8bD4yN+98cv8OOf/LfYDeOLwdS2XXOn7CjIld++23te0q6N4dq7NrzzJmi61q5Z527fd+XM3biyp2cbPXZ37zWot/6Lrvzmp6pRzvXzLq2jvcvnGmT578+5P16OSbo5lot9sb5nTQO293yotfCy/+2/Aeot0D+/iCClBYcf/DUO6RHTwz1SSvjm6zf4+u0JJKqAHMcBUhLy4zukaQJANalFLlRBAWBAwfjt1j2rcjJS9yVmYD7PKGmp7knKEjT+O5B6JTgfa+tkCYvMusImIrgFbwvYvF91NvwarNfNG/scFa1BmnPG3at73L+6B3JqsXPdu6rlCFAXMhFIlo3Hh/WfFG9UrkIaO9x4AePu9SvsxgHvvv4WGZrNkYyXxBAgBAMCUhXVLkSrowO5tq0J3E7XobRTLI15n/Iql86CZcpl31Emviqo4oBAhJwXraeZM96+PeP4+IDT6Yx5mpHSAuIB9691Xd113pWvVIU55ScBgkxQJTsMaJsy05XCRBrDRFnlRAZDlow/fvFbfPbZx9iNLgyYx5bkain013nyLbciPWslqZYu44sVWPnqqTeRqxV8/xYpkCSQlEGDJ1Tx8a4lui119XAet5v7O4cw4G4fTO7JWKYJS0pIacEyTZhzxmHcIToYX2GKnvL7/m/8HB3Aq1ZjoO6b/u8eUPUyAIDVXjLxDiWra++8JJzOs9YMnNX69fqjAfeWhZotdn2/F8yzJhF5aav0Q0wmpAyw731ASNd8WTKWBZq8BAUxEJgHQDQesQ+PIxFLYseqBOgExmZvs1l9D8r8foA2Ut/udN5BBg3w9NUUP/i+0BDsxhFhvENYDJV7LJVoAWchQeACKowhSCOq2WuRdIxTur8FANQtAKQFDau7Y2iWN88gWLXskYFUKmG78Lvq/6rvVpSd7Qc1Aesf7l8tbpZBB9icaHQuojXhhb3JLRw9AykFq3RVbV90hMQARalpJpUIFzAg6qVObH7P0L71QZRE1MAauX+umY6doJEWoXbw5xY098tO54w4RnDwjJQBJXkEllTfZAeWTiaUoelYHh+PKEWLgMdhAMeInBNSMrAx7rVg9zwjLbPVxikokTU7pQgocNXkwTS1PlZN1ELIBHARJBjAqpoA1PVTq67ND2tNN69VJ0LVQuXjdw2u7xEQIZj//hADmK22iRF0B4ECJWhqUHZrZ9t7vuuqa+5qr9lVld5LX+VM94HTGT8P9Ze29oDqJUBaQkE8MyUsdgFiGST9LnNlZtV0duHzdb0///TTLjur7TViBNaznlIx1wa0NMawrFPZ1izoE7lkFBngNQ91LFx1DsZJFUCL4DQtYGTc3+/BYEzTCYfDDr/69S9x9+N/wg9/8u+eZ9I32ksscXTl32vg6p/bmrhwu63FgMvPzz//EuBce/6HPrsXEfzzddfLlzPr0oGQ5nSG+vflk9eW3a0160OsYtt5MvFz1f/+mmuukXT1rpc3X6GC59csX+kTM+PV/WvEOCJO/wb0+oDz+YSnpxOGcFbloYyIXn+MGXh4AgG1lEySrAlHOqFQgCYLiKzmmgOr10kIWKZZ02iHgCGoVSFnjY1m0oQTzTOgCfrOx5V3KfjQ39ZgrCrPRGOy++/rbFGjkeQ5Zb0GLGt8FgdVVLp3Cjq6058Ef7+6a27WoQeJXRxxcaDVPWfY73AfRxyPj5pluYi6xCGixsCbRY/YlLjOI4xvqQWIagkmMf6om0Wq
TGXhXyvrhodBlwIEat4kFLSkUYgBcdiDpOB0zCh5wXyc8PD2Hc7TCfNkSaHG0ZLNRKyoYFFhuMpFTssdfTv3s8QVLjN4qx4z/fSCVOaptMBOXI3Z930JrKCFy3LS1qT9QPBaf+2b9TiWecbD4yOYA/ZjhO/WJWn84243ABjqOy5jzEw+rVk1ba1YQEKAtJq+TATkBfOiWUHn6YxlmTTZmAjCOMJjst6XzVC7IdXaWrUIdY6u3ePyEXT/OWCT9W7PprxeckZOBdM0Y05ZaxxbuSxi1vwVofOow0X+nhe3UgTLnLEQAGjMWwxkhhIbVtHYNy01QgBHxDGiyIAvv7zf6nlM+YKaO8EHSYB5EmoI0kvEie8FaCsL4/Tla+R5RBkCMKIKjddqqHEA7sY9dvcHLNMMAZmpWbPDcClgCfU4uXFj7X/c3P1WTI4ApwICXZytFWXVbqDifvJ7gcuvDgZUHAhWIu2E3wR2uiKKVOFUnEB0faD2myb0EPcKWPVnvTuo3qqMrBszEdgSQQgFCHsBU65l6dzdzAlyfbbAAobJBCxqbokNbfmgavFCdgZnhCdfaDpLHXuLc5JK5ABBSQlTSpiZEYYBMUbE/aEGpQPAuNshjiOWecZyPNa51Trv5iNPqNnv++76tJXqUmnM1bEWUDVcxYLE/d8eIPXPY4sPbILAep1KyUgFONwdFASJA2TvwsUmh1PcGkNgi+yWpVqnxvecEV/ebBrymDtCtUATESI2dNkYpmJY2/FWf02g6fedyKtrUx+UZ8TWmCQT40c/+pG6lcIBlrk+EEyLniBFGUzg0NWX6QVLtSeXnBFgWcOgwH8pOobm5qpJB5yVn6cFu/0AkGDkgLwsyE9nTG++AH7y7y6Zb52G50AKbf5uvd2eyi2oElzGq72vbcHN9p3YfH7fM17+3sunXlqnrgilL3r2ZTM7zQrU9GLTc2N8H0C7fd/1az/0e29qLdyu1uUzbgE3bdfsbref0/7+sL5efaYIpGiSoMenR0zLAq1HRMhLPdkgaGa/nBYMgRHv9ureaAmYLldPR0XQORrGAeNuByLSIspJY3lD1Ni0UjSjsGMiwErSUPfcTqiutTg3E7H1TAimDF0J5g5ygPXuFnG/8fo9m8fBMI4oy1xT4V+edFqBs75VS88GVBA8PF0HomVwCt68/ROmadGSRdVzw85JZ90jA2xq9ZSqgCvQQsw1cMX4umemBIAszSXfwZNPsUYzU50F4ohht8MQdT1SmnA6nyBJ+0eecEPUuybEiN1uh2EcLzJqi/PfztKlXikMsmQb3gctRdTRQDKe6+h99eAOeULBQzOINXlJ+nV7hkC6Al7lWuqeYZ9FcHp4xJMIOAz47PNPDZ6pMtbXcv2OzX4B1FW1o6i+F/xcioi5vRa8e3jAsszIy2KhF4zdMGCIXuxZroznkkqs2rXxX/muKZy7+em+LwKknLGkgnnWuns5FZsLWMK+YAWuNaszsUa7i3B9JxMQgomhH0TOBGlJ1ULHDBBnCw/S87Ys5qLNASHqvpuniN9+8RGG0f0qFKpXCqBC10quJ6CG+NQYkmfa9wK0SWYsT3tUKxT1m9quMTZUBJjngmArEDwGjjSrlPowBwQm80uzzQCAgtVa659rwK0J26XVovLr3N3x1qKnAgkalOgp8IX6vrd/FWM117iegjSrlSZOIRMsi1TvtTovIPW11tJizspaB73fLVAbraYMWWyV+9OSZccMESTZCJtagpSfiA1f3TQYZFrDNpEN8HlclAnam0kQuGbTugwnjmS4T7UlQQMIQZkq+KnMVbiro+JuR9rPDKi21i1Q2YSIaQKfz4jjiN3dHUKMWofGizeaxrGIlktYkmokYwWiNg/GpHuZyh0u6vJ0kjHJep2LSAW9QGOECVgRsTpbNiYRda+Z04w8T9jfDTgc7lW7WgSxKQbrPLtfd9Vu2nM0EUkDZRrv1gM0D6bTaxwg+z5i0SyUAtR6I6G0QWucYQEhaLkC8T3Z3gcDc4Q+jkV/Dx5gz4wvfvc7/NVP/8p+zagxI3DrYq5bP1vspLsSEVvhUCeQuWiNPI/LYEIg62evoSNCNnlLoJk+52XCq7s9AkVQLshTwrZtXcqutfdZnFzY7mkGXXz+cJDTb8vrgOf2b00s8f9uQcFlf14q8PfP9SetLZG3wcT6rbSaq/XzfQx+3+35u9Xv0r3zQ1wNn7OOXWtrjvX+5z73nAbeLp9/zToIuO37/c09U/xZIoJcMuY//RxP3/wegURL0hjNLrlo7ayoJVmkwJIkAbvDXms0mmX+quxbvyTEcdSMgiKaAj4p33VQlosKXEqA1a0tsgtRqHS5B2/VxR6otbrW7ph6XakgrUs6gka7iwASYC7+aEwXzvOo1nhECM1drwrXbX7VOHLptePn0eOUfCEcIyYRoBQ8vn2Hp4d3gNVvg/GinNWy6bwv54JgBa4hgiwak6yp5HWuCqlMo1HFAFlpnQpemVBSAcLa3X1Fd0QQwoBxv8cwqHL0+PiE89MTpjlhv99jHLQ+7TBElFzAIWhduBgrsG5Z3nxx2onU2r4ag+SL7Ym2xAGatJg2v79vree2JlW+yPXSKhfZglTXP2C1b3wGXAZwDgFI5y2jV2Zzn40jmozRr7ugZp9Ch7OMAAAgAElEQVQm9Ay/Ubnt2S1FrDSB8rElZUzLhDIvyJIwcsQuRsSoCVw4Bng93mat66f6Gf7VGQxaWs1+LtcWQClFvZZIj4ha07LxW4tPzbnKeswMHqImqyO27JqElDLyPGMYF0jYa1I7UotjLoIhqFJ6mtdj+ZBWCiB5AUoChXH1W4ythMT/+r/9Zf1+7XkBwxUK4zI0JMdDcNh+F7pY9ov2vQBtICAOWsuJKMJoWz3wpaIdVOFcU/2KuhpgxLgbsbimJQRk6t27gD4FestACJDjONM0rcofGm1YrhQ01J/bYaGs9ct6FlmNSNtV6BZGnKhvpDMGLFkKgNA7jkHT5gu6NPw6Po3x8YyMze9bRPkD0EACQSxQ1wkhIKJ2j1q8nPRdZIRd7xdNLBLsaWwDpGAxRCbWkLpTSM4ABwvGRj1MILRSAkYUA2uSiqo9IUJmrnFlPhgSNSUnZ7LiyVw0zkrrcbT6fA7gchLktGA6PiHGiPHuDnHcVbO+B3RDgNO5oOwC9p++Qk4LsCSU0tK9woSwKrRst4egvj8Yc9OsVMr4ywaoaPdpIzJRZwFShiQlI+WEt9/MeHh3xhCDxpgUz2pp4quNify5vh494KSLHqzmuf83BEISL0RNKEEL2ZJtZvKMjaQZMikEyxqmxdlLKZrVE6VmHmtxAJ0wWXSz6jbS2n0pLxjF3EIg0AprRYu2pgTJ0qzP1f0j1OsFASOAE2vhepmKZnMjTeTSB/bbrrZDY1XUBJCsNEcDiLXgZilFXTc357vTKeMlTVafBa7bLmZRdndcolDLa2zZ9ZUVvABpAlxl9dd6Sbjubljqb+te94D1paBmCyi8T73Y3v++jVlb39ufQbF9Qpt+Xl55q22tWSTmPknvB19b8L4FYZeW1nZ9uvJsTwT
yIWDRn7ehJu8d+YXQt3l3faKodSWXjKUsWM4zztMZ07dvkM4nrXeYFiRLNHJ8elJQlRm7oAL/YbcHiDFPS0v9DlRAVDtUh908UpZ5wjIvkFxa8gnnKVktbBrPXUBxwHjYIy+pjdAEZlQgsBLVlRZ1fCcIdG08G2UH6lZWNwJKMSWYCxmdC59daHRP4x1uKj88NgMKMjJgrvLKdcbQ3IS8/qnKSyrFLNOCwGqV9Ix3Hr+Wc6tN6oWUHQ8VQOusIgOkcT1sAC7UJGnrXU0FmszM2KMUARW1ilC1nunc5ZzxdDzj+PiI43nCfDpDiDGMo1pJoM+KwwDJCZGDKXNJE7Nck6s6wMRECK6oJBU8xORBgsWvUwNTJKtN1j2z8SYiufTUAlXZok6GXbPO8Oj7xE6gabhXYqXJCP5MQUsp729z6x+h1cNd7xu3cLaOKlhLOJ4mTNMZUhYECtiNI4Y4IjoYBtV92ca+3t86lJ5q9+Nr/+rW14Q+1VMOZWXt8+RAKRetAZsL0rxgXtTtETYXHCOGMVreh3ZmUs44zRPKotG1xKGGzkBC7XdKggTCfh8AFkzn/J2BG0AAD5A8AxRAHDAMnqEdOJ8jjk+DVvPavEOYqjVNw0aURuXu0T6/7wu5+H6ANmi6bWKtrSXeLVLiAVhQqxgahQqwrw47hHHEN7+7A0BgLtXlkF08kZbpibqUwkABKHYnvwcG9nISqx7fC2KAqdK6W8wET4QawNQ/qxeOjVk4oKnnoHIKfz11gYyCyjYJWkU9G2CoZ7mJZrmoJSXEgDwn5KIlK5jXTNwFGxfuGZaIwuYkEpk2w4AcrMi2tIgnBRVk4pEYXTYrGVBj69zk7VkTW2ICFcCZAVgdFPVucGe1fo6VsAiaiyPq/FmaYxAOhx12uwHzvGBZsjKonrgCWJYFy9u3atoeBmVIHrRsSlABkIUQxjtgUMCWlwlYFluoy/gRQTuwngJfTARs8oJnckQV0rbuAt2wu4cLYgiAFEQm5FLU5ShsM4J2STVERWoQVVBXze+r/cvY0gr/2eMeuEvnH4oVULcvyOqcuFAvZLX4bD5IVHNbvMSG2FHpa98AStAss6vOE+E3v/41/sN//K+RRdfYM2AFm4OVtls3MnLJVjpDH54jg2I0y7qvHSGwWu9qpk538xEFwcEKEzkhfTxOAD/g/DQh5YQhxLqml3PXucO+oKn1IuFe3mBYHjGdTyi54DEH/CndQeIdPn51j9eHV/Ci3FuW2a/bVhSUzXV05R5va3DUA47boOd97pCXvbruxteLgz1YWJd2v3zi9r3XANv72hZoydXvL+ftUpRZX39DLN+85XrjK3dtQdRLAN16Hbf7dTvi2+9WYKCFs5+ejng8HzEyA2nC6eFblOWMmYD54R3yMuN8Oqm7XSCMQTM4ZiFMSfPDESXzzLgxD7ZpPUujZIt9c6WYZ2izekmAlz2hym+CgZ7+Dc7T94cDjsej8XAAm7UDgMW/8yy/pnhrMm53HskBv/FYM9lXeGbue93QUGmFCauuZKurUzasoBQUCAIRZgAsajVLSa1BEerREqMmOHO3zlJEa+ExWaFtS+9vwBLMiKTyFrMWJvY4HC0po32ECaGVD4vBGGrySKH1OSkAKC94evgWD09nTOcJ86SJw/Z3d/XaDJhLbdH4es/46woyy/atjiBUAU9v3dM1YZv/oLW2rgFz3VgX66ifreeSUTYp9Ylue7G12K5OqOt6VlV6HQEWf3cX114V6r5uFa2iA2x+tz25V3jA1qQswDJhCIz9/g4hqmeJ1hJjvA8kNCDXv2t7jWdQ9bFJ/VdMOe81eCVnzCnjPM3IOev/k2UchWXVNGt0iFqHVbK6XGeLZ1M+rtb1ENWFNoYBlEVlc6K63jkXzLNgGBm7XcA85yo3f0jTeMEESAbHiGjFtT1Xwn/6T3+pIJVIZXTDEGQ5IYRUzqAs3fkWBPMq0vPzfjr+vQFtul+7NEid9opYa4RURwGx/ww77O8P2B8OePz6VJ8DoJnSnVQ2CVS1RjBtfq0Rptf2wAr2uaYJr9Tg0mUBxF3wkyiyQrd5twPuMyvBiUjTMqnGxbpcXDBHmx82gSc742BY7j7ACUrZiC1tSPAATv+6EhIL4tJuB9SoE2piQsoFCdlqF7rAvB6j16Bw3W4RWlkGm2uFBRbXUgpORKmuvb/bmdiK8RJMs9bcRTgOGCNj2I1Y5gWzudDk1FvKYNbIDJnVz5sG2xOlACWhZK0RI4PFGnLAsLvDMKqbXZ7OKGnjJifd3qm0dBsHsRnE5s/ic24/FGq2B48VGwIjglGyMlt1k4Tui8DqDsOtIHY9N6t3d/3ZtGyghnQ71Ayqer1ZC41hepyioFlTfYy+/kyWwEBMMDICX0oBe7rvvluk4eAXxeql1VKhwJpuC67FNWu5bUavZacdCaq5hcUsMtn6S01U4u+mrh9rhqXM8nQ+q0BkGmC9tK3g1r6hJ3ul9lm1XrAZZMby9ku8PT0ChRAYmFLAnAh5JLy6O1QS0PH81bJu347u+v7z9ruXtcvN2+yKdPFd37Yqo5e0l7pZ9td+qEUKaBYlfc61UdpvV9T81962vZe+Y7/0WZfjep9FsyVVuT6DHzKvV/skgjQn5Le/Q3z6Vr0d5ic8fvlbHE9ngAUDM+bpjLTMABgcB43pkVJd9jK51Z0vx9PLutIJqsSqmCRCiGzGLDHrkcc5m9xmwI1jAOZ0oQEHCJ9/9jmWtKy+VRfvpoPtaaSTr2aNRQNbIGgMt9SsJAoomisjiwURELp43cs9RwaMXI/UW8k5iAqf8P5RFfyjATRXDFZxilXRVVJGCEGtHkQWRtOEfbfUFfI46xqcAlfIqvWQ4DXhdNgaMOa8zkupeB8U5Arm84TpdMY0LyAi7Hc73B12dd3UeYdXgndnpgQ8w6gLa5v10a3TrGSBA4YQUSuBit3nfcba+rIWnNnAoSle62asL7JnbgRu6j8IthnFqXsaqPdUAMRyM/QEWkFs6PIZNPm4p0BiRc3EYg6JAA4Rhzu1agemmtnZQd5VJvGCtn339iHF3BNzKpizJhFZ5gU5Lche5qgbm+cyqLw5JcznGbkkS0anHjVjCAhDUMA2DBgCg2NAGAOYgkWQkWZIpaRKlJKRZy1jcTgE5Awsy+2skppkpBuVCCDJlPw7lRdRkLJa1mOkphAnwy0EPbie6RRqGMkU4JZHEGpcpp+f9+G27wdokxYQGqIKQi6GNSHbBD6dEcQYsKSMr/9QgGnE3SvG48MZZEUsa9Com+bQwIq+8jprrgYZgiYh4Qa82lMctGltETV5olNP6zX9gVg9wX17OQBY9Hqn4iKqQawHQplQFXSpEWgXKp2oAFTroQXLVAUsNgasJTzvFPlblLkEm3txVYTLrGSuCSh4WhLSeQGD6uFxxtZKm5vAbsK5CuAK5IqbyTU6rs0OmXaUmxuBEJm7SUYRUXc0aUHXpcgq5NADjymoVjAOQBgG5JSxLDOWOWFZ1Px+7XAonzDr7jLj+O4thv0Bu8MBIp5ZjBHCgGEPTE9Hi1FTBh8tfsE1kK
UUY1bcaaOaMBBsPX37MKmVtGci1b3QmL7zqiiEHJTZugtpyRnn4wkSBgz7HZaU6qbu6bM3KWZdxdrS53vCtake3F09eZlq3IafIuniIOpk2m9JOoWIPcXfViAt81kt5L3drP6Sbq1gLrUlIcYAkY1blY/Bz8NuRMnF4jXcatwR2P49/Zvdomp7WTh23WkMt0jBeZqQUkKRBuaZB7y6u69ZMK8J276yzIzDbsTHr3+Iw+6ALBG/OR3wY1EXMA5DFRS3LPMaUOg/O9Wq64W1Re2SGvr3tPn72hsu23OWnZe03j3Rn3fLQrT97n3v21rC/HM/P32rwqlf2yk1pPsOMNdR2vZT//td5qFxqpePa/3ef15br4H9KwI6fgl+91vg6cFckTLmecY3X/2/1L1NrC1Zlh70rbV3RJx773svsyozK6uc1VS52o0lN9hqM2isBgvJIzBSDyxZYgTIkif23J556ikSEpIHyHjCzwwjwcAgIVp2Iyg3YMoN3dXVld31kz+VnS/fuz/nRMTeezFYa+29I865772qtqUiUi/vvedE7Ni/a61v/X4KCgFvPXuC+7s7dd0OZCnUCSmpMoaCAQJX0D3eAQAeC03mNlyMVmvyDEmpJo8SEavTat4X5lbFu0NewTC5haJ9W9O4A/U9/aVslpsFhGyehJUumnWtWoKqKAnjuU5T3C0fdfN5FkagAZ6+n7lkc/RRulkTrlXaS7XfOatC0kGcxuoyRKts17GRdkTxGDT5WBHSGDwHdOJj7Oh+D142NWydJqKCSbZOURwwTrlm/R6GETEEcGj8Qt02A6pDryk+DdWYHNQFwNiRa9jOk3T1cKjfUK0WX+ut/26U3flmXRjs2rjATzfgrUdBe9rRvdnmue+LACikQGaaRpN5I7Z5ELo+V9BtT1N7JzNhGIbat0vWHJVNGhh+zOLj+4CYTCbxz+19orFp65qwrKulxV+RU6rhOzBlfIzRYjsZgdgscQn5tKKUUmVx9pppkTCGATEGnQsLowkE9egKKkVRNRCYD1gBCjO4qDs3scZwxquAnFtJAPeMAoC8c+sIQQu+q6wmECmW6IjhLqDixbFly++4yxtqW1JXxs9eF/P6/x9LG6mzXUYz75IEyzTHZsp0sVZMEB7wpfBL+Pj0h5CjZqiKAWbCpxbQ12XA0X0lhuTVJUCt3lQPSo/liJUQOiXwjUlgBVylfqgp/3fX5shK94E5R28Pt00EEbxux4YsdDTHAadbJ2IAUrKCmCDEAZizCxlioMemgkwYMlCmwNJLJqg7YJOXt2KTGChLa8GaBIGhhNZdII24VcZEmikMxgBdqAlQ5kEoyDnVOmm+cUUIwowYuG5q8vT4gFkDnfEpU8lQi6TyS41zKFFTPedcwCIYeQKHCOCEdfH4BgeJ/VIQND1uQV61LMB8d4vxasLh5gmGGJGz7kkxaVgEWLNmFwtsGTfFYgCMAexBkR/ekBKyWVKre2G9TzQdsyEuM6AhOCgR3w1tT+WcscyafOXqasQwjShFLVQc9E53ewUETGIxAFT7ti+c7QzQrWXkX/R9dVToQktnpfIkAaVIswCb+8rp/gGCghiizueGE1Adp4ZwuIDUCOyyrBiGATArc0Uo7LF0jFQKxvGAeUkg1roqPdcTF6NU8wDJflhs8A4qSQXAIpp4oUis1kWAsKYF//yf/R/46P/8H1Ey4er6Bt/8ha/im3/h1xGvrzHxaMlR+nd3vSDCOE24PlzjMB5wKhGhXG/ic/fiR7+n+p8dKdtce8ca2n1+Joi4gLn7/Jy9SNee/l/p+mVwtW9jf8cl6+WbWoiaU3JLXHN+TwNavXub98utKZefdgFWZ8bfU+PoNme90dQ9AKILP/vvevts/723dQ6o6aeeK+Dc0rjpJNqLe1fkvMxI8wlFBPO84P7+Di9fPMf93RFX1wc8//wLzdg6ThjGSS1qAFJW2uuFo/3Y1VfKrt+EKuC7JW2Ig5b1KKjx7YMl0xBhdde288qg6nnjgpHrI93lvkBj4UtWZUtVrNb5aTuJ0OLFW59hKfG3Y6lrz6Sx4B2VdZFgs6qCWnrHZQ8iWMgDIKXgdDwBAkSLi3HeIh3J8j6FwecBHkBeLQIkVMvErKUlxgistVu1OHhGgHp7BLRt7YrjQpYN0evBStvLgtyEfJ8X0RqzwzDoTaxp/4W4JlxrMdy6ZsxdWZbuFGxkKl/L8wWwSdGENO0+O7XVhR6Vn1EITRasyyLoXRKl9kGqfFMsZABQuZG6HAebLU2bH3Wx6t4kgeQV81JQEBBCafurf76THzeADcCZZc9lpougwAFue8R53aWQjVK0DppYhlYRYC0F66oK8WVJWJcFJXeWYLemmeLG31FKQTqeUFKp86tAjMBDxDgFDGzlIYIZCLglSiEbq9h4yeottjnR2FYRgReOoqxhWDCacjiESvBdCePDDqy0I2dBSjrmZOFHgMrLLiJEXrFgrCIIsxYHL7537WznTaZDqOtzKcDQqxEev34uQFvVlklGtjgROROqUA+ja69GHvHs+hnmNOG0JgUQzkcvEU5vyyNAO+LmmV1aqjt7klEPth7cuuVqA2R+q4W6VejSjrqQWzM9XbBStM71QkLrNNn4xYg5B+1bTlYHDYKSszJGIxjTFLEcW29rPCAa0dFX+g7V2iC1BpndXIlwLmrdgpVWYFQtI9n89ASFzcyth8E2so3D67ysa0bJwBi6zGGVQJtrLFCBmYPN3DGgIh4F2bRIRITj7T3m06zZqqJZuUQzUhE1lwh/JotUzaBqXRqxLVIwP5ywnGZM04ir6ysIR00ZIWJmcgAkePLkSg9yWpBSNiKszKR3kfRtmp1J+N8IICR4YLIL7GzasAItHOoL05Nhgmb+Kqwa2NP9Ax4eTnV+ITsxVHQ/kQk2yO7K2AgNUgaNsRPGpbrHkDFFF5DqGdioPf195JNptxTrL9Tldl7UBdN8gbJQB4iAd997H/f3d3i4v4cDqXVNQBZcX1+rssM3OlCZFJFa2hrD0jEMUf3bm6bfT50xeVipnxqDouOaDhOOyxGneUEIATHEOtYoCXff+y3I/IDjKUOWB5zeeYK77/0mhg/+LPjtr2AapjOB+zIoIHx4vDqjEf7U3kqG3d+XPtvJ4J2A1b/3Ul9ef8nmpzG1R+7t+7LvU8E5GPlZrr4fbiVjI1b78bjlTJ/YCX6vfEcDSA7YLlkInbB1LOHsJ3U/qX/Wrks9+mndHF93v5Lr83vOLJwlIS8POB6P+PzFc9ze3mG5e4nj/T1AgnmeISiIHBEhGMepzZBobLCW/Li81p4cQcTKzzChlIzDdAAHRjB3pLRmAwJsxZ6VL8YYLIadLKGQ1THrxuPKNAoR67wgrcli+EiFwG6TEjS+nXcEVwTVZY32kr4tpNNXD7HQOWa0iMHumaJWCrGY9QZENCbXrQw5Z5SsdLhUeooK3ACn35Zmnkwh6EAZHvNpyjxpvShFE5KJAGzGF18DCweu8hiboOvKRB2gYJPQjXSOCul7iFTZG6JaKTa1Qnsg5ePvZNyaDKQHW/1PG3fLNEmQw
BWgeSKMZu5AlV+05jGZfNOnzrekI3VSt1erQdoSYGy3dJMT61890SPzEEIADxGBI9KaEMuCgQsKWU29vkXaDtzVgI3e7s6wdHO5ZyYV/W7nsfdaceVwzgXLuuI4rxZjtpgrp7SyBNDxsFunQqjNSilWMzchrbkqRzgwYhgQI2OIEUMMiDFY6QGfP7W6EZwfQxUcJiOWbjMo0G8zlnIdLAAtQSJJwKweAgRBCNLJR3qV5OBSP00lVqVtP2FEhF/7tR/hf//2v4L7u5s6f8myXDqcEY6AaJZrtOkFTJl+5mZ74fq5AG3SC+G2uZg8SBLQYTUiEDng2dNr5DIh3T9otpas6L+IZoWrJm1Uu5Ra2DYpF7E7XE2gbn1rf1eNFhrx0wQbZuK0zd8UIMoYVJb0g1FPKYBggi9VwOLdIIL6uhYFUT2zca0J7CC7x6xmqHLCsLXsXJj11iJhSwSVOsFfUw8EUY0faM862yETiCxDlVmvNC+J1vwIQWvnFbO0iFhbQt08e/YniwLqGS052CRLJuGfN4AgkAoYybJoHu8fcIIWcB+GWLP9eJtMSqx7//w4aOCyFKmmeiLCEDUV8XKaEcKCkgVpzUjJiqUnIAnhMB6AcQKnhHWZkURjJtitt90KKKNwwRE1gYcziWJgr+ziICshriO3swNziYgMlFyzo7q1TEEMtaybu70i+1+6tRSiWmSVRMAF5t6gRSLZYiUYVkQcpC6WRSxLmahPI1CZJwfGxIRMrUKVykaX9i9151HnMqcF85Jwfa1pwlyIkH3Num7Rcwn1XIl08ZSdJaHeLh1vg+D+7han289x//wTCN7V/V2A65AhP/ldpNM97h9WlDTjlAMebj/H3Yu38HT8PsI7X0OzAPlZPRPdAAAv0vDGAOpVJ/0x0NdTpP3n2+fbPO77fA65LvXtklvj+bu2gKXv2WPvPm+v/8b3UR/Z607Z+znp2wfOgctFILNjrv6eVwGjyzvaaKHRrrPzR6+a3cfec7kPj7//p4N/5fgF7j76XXz88Uf4/PYLxJQ18dOSkNcVKWtijPHJoHEgwVyiRekbh4DjskJrOjrfNQHIUr4LNHW/8m3VoH3w9a8D0IzPOakK0euEBvY4LW3D3f0ArvUbRbaCERGh5AX3L1+C7FkxoAOhVlbH+r2Zyyp02Wkm489V66iCWa0ttjmEYspeVEAgAJYl4fjwoNYoo7saLSGa7jxGtUz5+5hAxQAoOtmFmhAYmC3konNL9/U23hMI1b2ySkwu41dRqjJc9ZawJFtkfNJB+GZj2fiKTyJRjWEqqaCQJRpx7wNRi9Yll1kHUE15369FZxXqvmCYglyAxu20Ha40RqxZqqWaHPCquZIrmOvXvfbL1nq/r7rewSkbWQIVVUwGDOOgsYbkvh9mXYzmxUPqIkp0Tm/6tj2u3T9rfXHvtdLfDreitaQp53RVRJBzRkpagHv2NPzLijVl5GqV0p6HISCaRUwtjha6kdQYU3Kp5xhEmGJAiANiYIRxwBA8BCBYaBhVl1nUuekXCFWe8f2xDWXq18jXrd0bWMCWITWXiLu7bwBScDh8DOYHk68tOQoBCcHowRlw0IsDfuVXPsVv/MafrLIYjDa05G/GCw1ASy5VyeD75o8N2ojoPwfw7wP4VET+NfvsywD+awDfBPAhgL8qIs9J3/afAPj3ADwA+I9E5Lde9w7A3KYMrfdXUwI0NltEE3PMy4qX9/dV8AoxgsytDVAaJcLV3CkC1bqJB2q2ySeCJnKo6qptHxxtC0sFlpVIBpP8k/azpkWwDSQETRjilkDrjMaaUYe6mzBPOinwjQ8DRc0drgM91o9oPrdEapY9W0sf1pkUR0ociNrGlwIRMxurn6jSanu+4p7u0Augrnyd25yUYrHLVFO9EzMoZ2VItoFrHSRrpxjw6pOP6Kt6J0wnv74Q7afWkWtuaAK1RFKMZmlrY5cNAdBfGRoLgKB7RZNm5o3fs3Y0V5ACqF92SStkiMrMecDhekAaV8g6Iy1r13Opa19jCCFAR2B1jjoA0f1CtqY7Rx3tCRkQjQERqjlcszLZUlS7hBAgQQWaXgvt7bvLILG5xkA1rcUzIZFZAY1os5WbABFCVocitdjqqnlwP7o921wOsYlx8bErAw8VWHEr+gZmKDA9zsjHB+QpYBhHkBXghDThNRfFit6Gj22TYAgdgCN7B5rArnSEQHnFi09/gPV/+0fI4YAkmlvuG3/iK3jvyQE5Z0hOJigVfPrJJ7i9u8dXfnHC019ccTW18qriexPV4ab25SfLWPfu/nqdJeoxKEXdT9l9/hiI2Vigdm3q/tz2+1Ibl4Db5bv7Wdi36UC3d8PE5ve9e/FFKCI4q0H/ptce2PVuyXtXy0vvPTvPHUg7B7bWnqDSqMe63a/V3gXzsavtv9dfG6WOKP24e/mAkjLeOlzjxRdf4PRwQlpWPe/MCNMIHgYMw6hKPOPvApi7FCP1iQBMkAlRXczXNVlqc30uGN2WXLTIfRFEA30q4LknwlYNQkSacRIdeyebNSlIJy0urEk6kgItnxVp/N7LlPTd1XvMukC0eacLwgTUmF2nfTWLLrQkTH1S0TssbLZ+ViUV0lACikGTPFSZRU9HHa/3oQMTBM3A6rKD81pVsFkqfMsJkKHlc7LY98WsdU4fBSgoWFed13GIrUyQ75edjNHTHrKQDKrWS5sBn7fuXykqO1HflmX4bbF22mbjWrCxA803U9PQwxKoCTd+LyaDMHHNxuwvZKdeRA04bCjU9lS+Suh2xV+2EJ5oLr2usCHWemlBJXl9P0NBKQEFlmSspxodUKwuhps+OTNtf/qZ8rWqmZbtuzVlzKcZp2XGshSktNZcC37WgGKJ0BR4hxA1o3QRrOtiVk2YfCQqi0yDATsNf9E4PV27ENUbplpeqUfde+EAACAASURBVIXE1BXdgZoaTtWBasB59ZayuezmSnsmQRwiYhzw4x/9Odze/wmEyDjMP0HgW7z91m91B6Xt+3171lWViy2ZCqw/YSOp2j2BasxclX0ugv3L15tY2v4+gP8UwD/oPvvbAP4nEfm7RPS37e+/BeDfBfBL9u9XAfxn9vM1Vydw1wXrhXibEav1IQCODzNuX97i9vkLRCZwZMRxwOHqGtM4QUA4WSV1PRAaEAhRgsmgFtPmhxtQQmCSSNvHJuR1nF7MDFUPc/a+ub5E3QuLD8cAm1FwlJRBZEHEIsZqLaU5qSk/y96y0mIABMbIq0bRtRDq30uylUwubgXef6q5WQua0MzWJ7GiahLYKaR3yeisFUpmMqIC08SRMiVqCScIqAczsMaCocv+BxdkSGOhmKEJO30cVBPyIDiDsnljP+jU+qg0jkAcQJFV+9YdDtdyO/MEqz81mZbIzfs5GSCyRCmlAhDTDhbdAfNdAqQgTlfK/sxUHw9PkMotZF18Adp+6061ZhFKKnxVhtOOhvN+ou267nQNbe3Jp6O5AJ5OCzIIa8o4XI2qOXU8BO+P7ktOQBli9Q1XsMOIuYHLbBoxFp2XlQmDWxQ75UjfpzbiTgtoP4g1y+P8
cMRgtfR8itQ1VycgEFuQcQZLwXw8YTyMQKENM2Ly8ei7c8mN4PsZ9rMKMgVHgwhkkg4bkMvrCZ/94Ls4nrT8g3DA+vHvYPnGt7As9yiWjU7TDQPCCfn2U5QXPwK+8i00EUvbzyXXmm9SMrYpnc+vc/fKLTjqv5eLu6Jva3v5dtw/tdmmO1i1B0d78HCp/5fvfRz8nVkkxUGHoIop1PXpgnWsOqR136l7k7e97df+s/MYOBMOLCPpfggbOnNhRs766GewgrQt0+9jz3aP2H3SKbfO98el536ay+cq5wyOAYfDhHWZEeKAEAgIjEIagzUdJsSgdZZcK1qyYM0FgbfFmQGlndEE5j7xRckeX0bKg2zOma10Ckgz5oaAnLMZ86m5S5LHULfNUWOASPlCCKxh6VZyRPK2zLgrmOrf4uCbtrRsJ3TpsG3PsJVEFi1b4smj+vdo2n2y50qVicRkhFIEa844nRas84xxjEBRcdUBrnueaAFf5xfN+iKeBcveTEXqfSANQwi2j0AqmAYTvwQu1Gv8UikFIUZkCCJ54R9tLKUCTKhjNBLqb22osdJm+2MD9lDH3nhYk57ZkkpJ6b2OsKHnvrd8LRyU6rx0rpj2X04rioyAyT4wC6wHydSO+ZqZi9927f33Rkm1KU22kVKCJsow9zkIKATjAdQsTUQAicoQIXSKgUYVpFo2qL3b5nTDR3aPElCzTHo7YkaP+Tjj5e2DJjIjBSJxDNXQkVMyRYQKYyUnzEkLmruqXF1gI6YwmBVOgZ3KdGZRNMX6Boh1BeqEWp1aX9cGoHuLYn3g4vr7Z+MIjNdXGIeIcVDPoADC8uEHJm8GLOv7KPkdLAvhK+/+0zp5Yn0TNz5Y+8xkMqKtc84qJ1O0dbAyRtqJ6lUIKC5QYNuFG73mei1oE5H/hYi+ufv41wH8O/b7fwHgf4aCtl8H8A9E3/y/EtHbRPQ1EfnoVe8IQ8DN0xukZVHNORqiFXFh2TdfgZSAtK6AZFxfjTgcDhjGEfFwwP06Y11X3Nw8weHJE5RlQU7rGUMtEARQK9Le7fdziYU2G1t/Fni5RspZCSY6jRtRxUyV/YppggOBhTU70lk9MmNUUrpjSdZnbO6q/aFtC4yCQmbBs0POwUBeJ/jw2Thh2i9ts8CZPzBYnNEqgstZUl1oR+sPMygn25Tq8+3LUOo8djPUrQOLZopUkZiq4KwFKglEuVptPCaSs1XiiRFMQeOYmEDFg1TPXS4A0kNg37mrYrLA2Bg63+iSLfhdZ4aJkT1lMCJiZDAKci443t4Cd3c4XF8reLNsS1IEKauGNKFAxoCRlY1sxGBBS73VH/Kur4JWu4q4ATL4o84DKxozYccImuSM9Zhxun9Q/3Pu011shXRmjV9guJYNyB4nZ/dGGOGHIIogBbc/ksUYkLkwyzbLpL+jSIulg7omDVWa0M88saq0zQOXBqhkjEQ4PZwQmJGKZpEj00jHEJBtSoMJasuSUKAZPqcnN1jnI7gbeQ8lfOeUIshZkNeEsio4K5Tx/PmKu/vfw7KsVZPp9afieFArQMkqjHZuHO6GchgZXz8c8WLp3/444HrMAtcDOF/FLX3xSdsDsfa9u0Q7bD1//rwfLubKRXjy6uvcQrR9vl8ND10su/urUL4TsPtrX7vNQchF10hrx93u92Cs7ASiKhSjc3/dAag30aT2fdg8azPQi2zU3e97dLv+2zb9/XuguhecHrsIlr2QLCFADMAKjOOAI5nCjpqr/jAMtbakCoUrSlox5xnMhFQSotHXEBhDYcwpa3wQs1rICLU4c8oJY3QXd6ryqScnECZT8LlQpeCjJkWgLUjazE+RJlxJ2ymuCe8VR0Q4W9NNLJMRX1cGlk5B6opLYq4//bkwTJiub4B1Vndze2MBUFLBaX5AyVqvahxiVdj6lvWEId7HUhmBXkyElTyGTaxQUEEAV96Q7WEhtY6KZaISEaRSMC9rzWZ8dXONZ09vsCwLljWheklYxspC3MCUQD0x6oS3n1U86xK8bBQeHNBMHd3OJ9Xo9tavnsMTAKSMZT1hCF3MGQSQUJX2JB2N6xbZ+xUIVVNclXu1P6+6toIlEVlxc+OdHdCuViYThIgAjgHMg7rFBta6oZUKeGd7gNj/3I/H/nRlqoWplFIAS88/p1yTiYC14LmXDpCi7sppTepuLH1YkylROGCYBgyRMcRBLWoxmhxlrsIdmeGutMP+XAqa1a2nTLodG1DeyjdWGkCyyg8jI8QJ4zRgGrUfCK0PLEBaFoR4AperTj5kpHyDzAMgWYErFXDoXDZV1ITvuPs7wT/+jfdBnBEwINj5ckMEACBrTeLSD6gf8x6EXrh+1pi29zsg9jGA9+33DwD8oLvvh/bZGWgjor8O4K8DwPXNNb7ywddQ1oS3v/Y+jl88bA6vPVHBAEHrItwcJozThEUIp5e3+NI44Us3T5GWBXAtexxRWN0myCQ2Z/oA9NB1BLVqxXrkj+7o+SayeCltglRj40zC7nPBUs82o3CpbTihFssOIjXZjzNQ7xCAja5BPxMDLO5r7QwKkrUvVejvwZEKU+w5hTu3TKV9xeKVlBhVzRcECW4LU+vWWYIBak4JTGKBoGSMicCw9LDdIXMi1W9Rgse32Ua32Eam0oFF/cX/JhOONAGrAzzb/D3NBEyI1n6JaXCr777XDqlAWvsSIneN6Prqo0YgiBBI3fgciLvG6nR3DxyPOBxGTNOkdTpKQc5AseyTVzfXKEUBQJkXAGvddALa7E9fZzJrajFiWrXr5Gtg6+3rLgKNHWxZ0QIThhBRcqoaxXoy6isbw3ZRlHIryAqb58Kt0HbMBcnKQIhrKosG+ortVWGuZ7yIWRw7t1pIMU06WeFum3tRSyTZGvsOpZIhq1oLx0DKWNYMvmIghh0h9Fg2tRiq0EBYlhXjqO6NlNzNxLecCmSa+VRUqHP3XdOclyJY5lOnHBD7vkDSjDLcAE+/2k6VCAoKUta0yGVgBZCiSpufDvb0ozsX3HvQ2V/9O5wEKSA6vx7rT1+AefuG1wEx2fT1Td7n+/zSVWfsNRO3Z4yPulBWutKZ+V047kFcN+geuPVt17HuLHVvchXrR9NW2I9Kvi/Pcf8uV6bgFfO3v86sihVIFnjYfymCXGYAGp6wrAtoGCBLQhiiKYMYa8pYlxlrSlqna101bpkJcZoQYsDh+hp3L28BqBLD63XqpVrxP/zDH+BP/eK34Nr2GINaLayffu7IQAeTgjmNO2dQbEWWgVLdIpXVEAoCag0yb0ekgpS6Bc6UfzZnnjlXya0qan3+ilrXBACFCLKU6b0r4XvvfBmSV7x4viDUwsT6/XyatVB2YEzjoEWI7XLXzerlApjXDFpSESYrm2PZlo0/MNRN1ULoW3wbN2BSBDjNK+ZFvUSGw4T333sb11dXmtp9noFuf+eksUKeRVmEQKSZijMxmDKcPQuo3bMTdQgm6JvrvU8V2RrUUBgNOtKxV2W1XqlknOYVYYKFRujaEeUqQwixAn5rFyJWFkGTjVEnjza5r9HW2l/qpUVBnwyDLf5Q5QS
3tDUAVmWmEFUJQYSrw2i8iuvZPScdj5/nnhah63cpWrM25YJlTUjLiixAssQiIpoDgEDVMqgxpDqmOAwKqDw/gRVjn65GTMNgSUgC2MoUBLqgGGK3baIC1U3f0azHPS3yf0UItfYeYJkfC0AZwxjx1rNrddskU+aI0lLJBdniJqVoMrz3v/qb+MEP/lLdN8KkCeeuJoCAz35yhZScQ+qclpTUImrj+O3fftfWVt1FhbwW9G5Q/cpJp4t4w+uPnYhERIQei/579XN/D8DfA4Avv/uOQhNLFDGHiFKUCHRPbJ5nIYRhwLxm3N3eYTmdcJwzvvrVdzFd3WBdV4wkWv29AOu6NK0qUYuteQPe6ZmI+gQWtTtiwiOhujrqZTUcHLxRT23QYrX64GCT1JURK8VX5mSktwNyNYOtaY36RVclH6sfuO2aUoAQtjoZD8oHNDHFAqrZnWBgkP3AkFsOO2ZFbOCITZpygtKIpj9bHIiBarzQxvmTHBKiEnPuhQ5Sgq5ZKKXex0AFfsHXyA41qqXN/Z6hTNoCixsTIIhkS0SCmgkR8PocqqHrtT4OgpSAqBWy5KatIxA4RISgVp51XtWFlkgDsIutvTFEjhMiR3AYMU4LTsfZgumzgTLbPPpy0yjZXiCLvSoNkFXBwrI5CjXg5S46qh0DAgWNxbPYPcCAIDVNL9l6lQ5Ekq2ba42qgBBY48oICO5+4XV/un3eNmzZ/O10ru7jDUVrTM73gACQEGq/ci4IAkTRc0Gs2SN7n3d37dB9ofE108DqmgggjoMFoC91/KhMx+demnKJqAIKhjI8zfBWUAphteQMHrxfRAX/LBlrSpiXFfdYccQD1nXFNKqjUaMMlwDXZbJ7WZy8/Gx/7x6obQHX48/L7t43hSOP9fOShehSH36Wy8GLA6hLoLEKOpXGnwM97Ldkx5w3Vj6ycdJuXJtfm0DzJqBuL4Rt+n2B+9c2nb7CacQr7t0JexV8FgEjqFtRDJiPD/jJJ8+thhG01IoIwhARKSBwQC6aiODh7ogiCTmL1X8sLZaGlM6OVweAqMbZBotZq7EjFoNKxrMCE7KfzW7OmVk/J0aIAeM0oph1oI9rcaDRW6sA1ARYTYuv9/MrJCxXNHoM3GZfSEf1WLPlzsuCvFFOi5VrmbVMjdF6iM5XJEIYhybcGyht23RrYxKgAk+VEZymu7XClL0iliC3Jb3yz31PqO5JvQQON1e4eXoNoYCX9w+Y5wV5TcaaCMliiiV3limfDjEQWunpbg6ZNdNyHcFWsNnMfK+syL3jMLWvSUFzNj7fgI+fhWIAbneka2hKQaixMh0YJJj7LXCGNC+eqyagm9NSjb8lWHIWKxQvbLGb8JwGgHqYieZUuEgb9ujA39voWC4FKRfklGsNtZTUalbMYssc1GOIuSpNURIygGEcMNRkOBougVwgOdd6tDEGxGFQudtEBQWglval6/ur+ITv796t18dT6dxesSkFy3rCsiwaGzio+ymRmCJUwWouBdlLdIkX+Q71vS4TTIcBbz8b8fFHT/Dh997DurisTljXjONpBceAadKakCEUDGOCwLNVX6DJO/rR/+mKlX9ZlrZP3O2RiL4G4FP7/EcAfqG77+v22SuvkhOWh3uEOGCdV5SUVCh1t4moliyCasuGEHA4HLBKQRwnAPcopeB0/4DbF3egMCCXjNNxtqLIqp0XKMIOuzxQ/cSdTZh97haUdth18bwdT4zXODftXKBQz7xIJyBJAhC2IMfFIH+34rdqWTGKoaCu8zMvm8QlnRbvfDhnl4imxS0m3BdRMKIEq70DTvy8NdK+iw+unz8/dCakss2Pg6LslAxk4EW1gCJqdanMSKBuIEzqv+/aFulcGoprvknvvSiomGWk9jNX4U1ELSgssLg4gdfxIKtDR5Y32TW4PlSVIwSCoIzNCK6YS56b06t/PBR0MREkF6R1RjDnf45RBZ7DiLKsOB3nbh7cbVYZSM1WJwbKHayKxw+IIyu4RqvUz51diNbhQb/3fe76pdTnN6mFLbNHH0vgwE21uYJioEsTBAU7AyYEkKYA9zFccjfRfeOf+Ub0vSFVEKKUPXAFHFmLegIYpQCFMC8rQhy85+rLb4qU6tJmypmStHAmDwE8DCgoTQizPuVSGteFy2cKBO0UGgHW85FSwXr/EsvzH6Nc/ynNqEeaNS2OEUyMgKQ+/rt049r+qyHLq9zi+usxEHj+uVz8fAvWGgDatnX+2+v63Pfj0lNOR/hMQHr8ciVdzzi9dXdtdJpGnUKuxfycA7BLwKzfG+cd7+aMaDNvj43xUTD7GnXs/vv9uPea7j2Y6+/r27oEojVTMCOlhPm04OpA4KBHMC0J08011IVSQdvD3QOWdcEwDCDOKEk2SlCnSTEGyKCWBvd80Fkr1g9VOxHrmoUQwDmrNavS9eZuFgatCTZNAx4eTshJ40VFLHbOeSvO5ClLqNKE9EtzvZcXGt01cNmRLr+1rCtePP8j5FQQTSFpjcEtPD1oYmtrHIfd+qL1ny0df/UUcYGjKTo9eVQWTx9ve43aONwaEdCy8FY+FxhB2SLSuuBuzRbfrTe5q5xaFJ0y157o2OFZms352s6zvsNkD5ib5saDyQft7yjVo8XHqSDK49/MetoOsp1RG3OL0+hxmH4cAAy10A4yuMblt3XW/a9tei1hP7tbGtrWy2rKSuNb0azR1S3P52H3XCkZp2XFNESMtP2uvWvLNJy2rUmfTSlhmResKVclCBOBOGAIZGn6A4YhYoyq2A5MWA9RE86wWti0zE2AiJWnSAm5eMkpL1vQXB/3Lo5nZ2YHyJoF8vz+vdW/rYfJYZbgp+SiSXq4oKSM1RKpCDRpDtnc5JoBtc6aNagxuVdXB9zdvYNhuMYwtFuOJ81YzYERhmgW7oRQvLyEbdVXZLPsryqSvcH1s4K2fwjgPwTwd+3nf9t9/jeJ6L+CJiB58bp4NkDNmvcvbiEiuJ4OIIoQK9a3vQhk1iUhQggRNzc3OEwj1pRwOi5Yy4IvPv8MgGavuskJcRgh0GB/1TY1SspEymlEWgyY4SZnacwBV09vIJFQTouddwcNBq7MZ1+JUr8IJhCaW5rvtdARE6kwyES/AnCACsW5SYaejlb680kaNLyKxkoVySaP67YPUQsD9oTJL3YLDp8LQu1MdMyJGVS62u7SzNbbqJfuXeQEyUQVsdguoj2JOesgMyMQKyOnYrVrtFyvAhibjyKq9bGJb2DKGblNnh1OZcTSrXF3GwpYTONIClzZanRtNPDkcXlmhbVtlEtz6xPJWFZgHELHMH2sbBa/jPn+DhSOmA5XoKjuEKBg1rHTZj1c7NtMsjHtnigW0VgK8WACVHmmpmauIXMd064CpmXnAoDMGvcnABJhUzuNclHXkdji4Yq1WbXjdZLd7bTLLGnMGVVZ0ZiNBI0RhOWu1NeKCnCNB1s3BZrslEBMGOB6WnO3TSec5qNqCwNhhjMJtcx1s6eaxkAQq3t4NR4wz4vOje2dtM5Y1lWtq+5/ZKSDRMyazFU4CJHVRXK51z1l+4lsLhnA1X
SFaVzwwMcqwL9KiO+vN3F521rOmjDfqkU1MLFPi+/f+GeNZr3Zux/rT99Wfddjlgw0JZBTFAdxe3DSz1sfe1bb7oRpUNtzfs8euOyI1flYXgGo6tkqxnvEPuuWdb82b7TmcvmZS89XCixtfvpnuZ7NBnLP7nOgYgq1XAQlrUqfAXgdrJQzrmNUpQcD66LuVSp8Fi3o3I9dTJHHRm8D14LOKJqBLSPVFOPH0xGffvopqmVrIyTrgrp72TAO6hLHWvfJPRdqojMoa6hCbKfpFjZ+TS0ZQt1nzt9NFjG/GmwKMHcgQXZ7KwIYRs0wXMEZNDpaqD68E3Abb3N9JzfGVUUXn2ciU3oxQMWz95p7ZCktSx+rnJGKui9W90nWn5Goxj+BPMRas/8Ra0p6sXqkACGOA/LarGzKL7a8SGCK9JoZUtRbxWQKVQy4tcuFfrU4qmaw0WvdkwyuceGELr8I3FGpkGfIdE+rUsETS9HC3sOIZ1dPMZK6dgIFnozf1169X5vl0sfZrDT1zd1+8LOpb9QSGFyLRddFrIpZPXtrKjgtmqeBmDEGwlZs7wRBNHkspYx5zViXFad5tXpqRhmIDHi1mEqOAWMMGCNXt1uXNYdhwDAdNGFPpxwphTEUrSOHLNUVWUUxrufzfB93vaeO3lCvVLKo6gsgrbe21RPvShqfH0lY5lkNGeJeLdAYd18NQjfnUs+UK/FP81v4zv/9b+KTTz7b9t8MSq5oYGJNQcgt+2Xw7KgkABd0ZEHlL+9/KpbF9Hycj11vkvL/v4QmHXmXiH4I4O9Awdp/Q0R/DcAfAPirdvt/D033/3vQlP//8Wt7AAA5o7y8U0H8yzNopOoLD7hWWwmtQFAyY1kWfPTJJ4bsJ0zTFW6uPUZG44bm+YT725eWLjgq6IBgCgEFhIyi/rc82ITBRL1ugUiZ0U2IeHK4Ui1JWXE8req/D3SE1CwaGx5iAqgFHfdABwBSYUSLa6PQCx8Ambta8SyXRLUIZgVmBOTciEi0A1koAHKsMXq7+M/6Ds+KpJqBlpWFIKbNQs3+J0VN60kApYom2EmxI6Zk0wvYKj+yWC9jxLV9UfDn7qEOijYTDwDmq68mfADganIXn0/ynuioKiHWxzWhQqfxcEHZqRKTao1apiJtwTNRkhEjTUKS25uM8NdHHHiImKAQgZQtZbKgxKZBIgKGQQUJFX4K5vs7CAjjYcIwTtDMkz3hcyuPvjLYHLglVgG4u+nBCIu6CuY6NzrXunpS3SB9CJXXdXvUwX0gQuyEAwFQmDRIHNoHr3WEIhi8TesTo5i7cHuf/iybDJptf7fd4KK6CyU6z/ZdLsAQQIFVIFyKCUcCjfFMYAADZSwPK3S3ulsW2mAAS9NrcR0BCAVYLIOo5FwFHc0ky2DOXTtVMjOGpec3syDkiJQzllWzghZ4odlmQ0giWArXSTCyABf8/mVdl6LnLn22FfTb9bP2rQeC6H6CdpaeDpD1z/bAqsZs+X3WsGpRqf59NqxHPj9jnrJ9/yvvvfB9pW2d8K5Dbda3S/O4cVfs3rOxmGELsPbtnVlHu/7UOXvFfPdAt0hBEXWrKmlFThkxDhDJyqtywXCYEELAYTwghoAV5npXDLSZt4QCpVKFrzofbJYNcOOlCChrRsoqyJOIZtNzd3izumVSRS8zQDE0BWMIoBABKeoNEZoiiNHAmo/Vja2qB5TGL4onG0OH3LDx3akg1Ph1sdhbbzsMI549OSCJ4O7FS4D6cWJH9/o90NbdY9jq1iWl08W9UaRToxpvqLFtwem/gdUiSF7HrgJwtYaFjua6codZZTMOGsNdilo6OQPF5AzN9SAVuGrcfeM1qHKSgpSUCmKIEHMR1Ogp41KlgCP7ZGz2dlOI9EmBqCoKz+iLeB4A5Y9AAWLE1XTA1ZMbjIMJ9SVpAjtsr6Yw9fNcAFkBNKWlu9cp3W4J6mB5K+ti2hz4HEHcfTFhXVesy4wiGSjQLIxDRNiF6HiMeil6Jtekxa9PVk+tKiOCuj0SE2Jw90fGEBgxuvuiJovrx+jhFcTtmeoWCg0xYQBBsoY/WIbLfs57MLZ3M68W3p1yohFkXaONW+TufqEAX/A1Ab/5T97Hn/83fl9lXzt/wRLiMJv8EXTMV0Os/IYJWOYTSi5aP44DjmlAzjsDA5HKypWWq1yzySLJZPybVFbweURTEAFQ763uep1rJPBm2SP/g0e++ksX7hUAf+O1b90/B1TtClIGRv3QbQpMlsRDVJOVMzCfTjg+PCAMA4hH8GCF8MwywkPAGCLIiMKaNSNOSRmnlGvQaSDSQsreE2fw3UVQq14YAkJOIB5BVyMO04ScFog6EYJo5+oFb0sU5HjeevuMYG548HS17qgGAAHu5ogOSpxlnek2MZtbFVFAZNYaFENEKal+70KCP+qKuprdByqIM6nroGb6gxEnTUARASyrT44zf4sz7ARyN3VnoFkfbEMr+Oy81mtqdvuJBlZAyiiKCeIOywiqnfX6MIBUX+zaN/Ff7ZkiBmbaPHimTs+O2AsQIKr39+AJUHcRiJg7pmUzMu1ZGNQ2tVG0dVo4ZsI4BIxTQC6EkjNSzkgp43h/xPH+iGkaK4LxeRJRwiQiePL0Svu/zmqFFCBY56s2EIRkigOGEnfwzqXV9mytoVTnzgQY77tldyyuoaOWbZOy1j30Nc5BBbTo+7+bBsN0VVnqTi1OnH3MrsX2cStxVaLnzwpQa91lqxlE2DFxMISkPuduxArPVp0CbsJXLAreCoudC9NMG0Pn2ifphib13Q4W6tqJIKUVx7uX+KM//H08++ov4vrZl0yJBLgiZy2CT5YDnlRBEZurF77bG7efvcm1b+fxe5RVNRbdz+jjQOOnfWcvWOkHWwF4A0D2Glc+//yscezX6vyex9zd/oVdj7y7B1sbUFiFzgZO93PognUP0vYAbduFHQij8+8uWtc23SqYH074/KPv4+7FF7h7eYtkz6wnTWZwmCaACCEGBSSIADFEUpcby8qpDLF2xc+W8nuztHitUBA49+N36wzVzxqgUVa7rEkTfwyDgsWUNGGS9HOxFRF9zIGB4gWbyfmE1OQLOgJ7dwc62WiUywC5e17XDJCcMa8Jx+Os7mW9orZbX88MQkZj0QGxKmNkr40JrECNGSNT6HqNTp85CU7/9L5UCtaUkFLBOAREispHQMg1QuASmwAAIABJREFUNRFqsjMid9+27Nek8TsaW2g7RARepUHJOFXRqi8tU0rRguw5gUWLsV+Hq26+VH44i3l+5Ii20BVUz5ZOtNf/swFWanM7jiNurm80s2BVEqtywePZq9Du2Ql1Ni/TC9Fz0t5PKuGRe+c40GD1zllXpKzui+u6QqyGbQiMMU4ITIgxauhENWSgkxkKlnVBWrN5p+l8MTHioOcwhIAQVOaJISDG7twY/yQoz+3r7Oq6mezoJQc6Fg2fDdL4oKC+uqZUb3TKXR738/V6urtVMp0BPgFAXbkHZeYa98nG5TlaBmtoaRAOgAC3dwOef3bA175+D5Rv4DBGPDwkVdya5XG+1C0VftDcc
FVGhGQti+Rz77NIgBSq4ihRkzHdE8i7vge1l64/diKSf9EXVd9C6CYvHQ8nqodlnRc8/NFzUGAtmBwVoAzjCI4RPAxmttT03hQDxsOk7ZUCSaoxHGpmQL3EuUon1CvYyiCKGMcJOWdEFpRsoMZdxIz8e2wMqGeCnUXGrtC73tm4KgvZpLhEFWIZsIyBHWI3sOC1I3RzaPrlcRxBHHF9iFWDVSztPNAxcMcf5O4Pftisd/65adz2uWfqMRfUtL6V3xsggGWQqjopcQJONTXxRvduGkgmQTKiXyOGjFEW6XVcqC5/SkhCzzngXgia/0PtHUa2dlGOXfyHaVDAsOB2ZyLGxEXaoQUQAyEG1fqCgHXJDXQAFpxrLoHMCHFQAbgExCLIZcW6imZ0zLlqqfwcu7k/ZUbhgBgHxHHAkDOOx6MKccbc+Uwc3q6ZWxHrfhdzEXCpp20NCIBUkZJqkiMIhV0wQWsnq8k/iLpTxo5IAZ7Gw11HrPSGZU1sOi0xfUUnQMJospiF0EGPXeu84vY0Y4gB14cJfYpTt4CrEOZ2RmjmSvT3AZn9N0Fzl1LhJ0MwXV+ZJr65b/fwxd1vnKkBmk3t/mHGh7/zHSwQ/Om/8JdxdX1ThQJxpml/VPcbk3aaUN2sYFWAgwv3rY1LQngP9qi7p5+j/vI29+CsiSI/O8CRs7f33+m1cWu0+eg6r5+Xx1pptK1sAHy7LjHHN3FPufQMEW1qHj3WbnWL9C0u5SyezpNtPNYXV3D21rI9+PO4vI3bjc1d/77WqJ8NnL+bYPEguvNePP8cP/7tb+PlTz5BWldkIpSUsaSkSQqYMY2TWaPMMuYk3fqYTclTBa/ufap0Yzv+2mkhdUOOofNkQTsDLhAGZgzThNPDEdnm8nR/xO3tEZJWHN55p3LZ/vwAzk93Z2JDu2zhOoVWxWSymzfqXOM64BYCIc0n3K+zetCI0unS3eN0uZa2MHq+dyUDuljiInWdAFRBvO47EAqhWv5KFqxrwpqUP10/ucbVNFn9LU2UFZhqce0irdSPep5Y/dQa19VkD6091vieCKprsMaHCyAFKWU8PBxB0MRPUs9GN+OVJKrSV3bjb/d17nTUxZy7ezo1DxyfY4SAUDIye6iCJqybTzPyumCaBohwBeL2ZNsPliehCO12je+Hbdy5dkf5Zc4F83JCzitySsZTVXkexwnBZAgPe3CZckkZOWfMS8KyrEgpQzrrtXqoaYmAYQgY2K1kXk5ge7Z9n/Q0tX6HdjRrOItZ5JR/Z5PPbKE6q5FzjG3M3b51nMu53fctGczlq5eLAJdTNHwihIDpMNVYfQIsYYre8c+/8x6O9wNevDjgiy8e8N47fwbTOGNessXmKR1/990HzPMRz59f7d7eeMomzKFLeObSZfUkMHk3W7bvysOMVhRwja991fVzB9o++vGP8f43vglG0EXzStf10uQFBAApoawC0IKF1Mows1o5ZBgwHkaEQf9RCJp5CcVcIxiCgBANnHUB7oJsghMUcEBwPB4RhhNubm7g/qzMWiwQad0UyiyF1bdWBH2RcAjAlr/As0aFEAAWc6FoIIaEUBhAdvAoQCnmzu2Bvqj3wwmm1+JiNXUPMaCAa8p5txqcarYnc93IdiDF/cphrgxiIjXcfIOaTn+LKhVMmqBaSw1K0zipv7POiyeGqaMrbqnqhEPxpB6omQ0LGCxWh85AE4gMxGrfsnSEBI3HbnpLdVaNkME5TnNicIBawfAFIQ82ZrLsnMTqIlCtURYbUFDdBojQgsfNxUeEUVIBFyDEhGL17c5pnhIlKRllXSAckACEEHH99G1gPWE5PsA4ti9NXSPhBuVc491tJFM27BiibMceiyAHxmpCR3OfEni6bIgmzektf3UgfruJmhnSmGL/aj9C8DYMwDIgGjSHXjNIRJhPC+5yxv3DCW8/vcZYa+sIiDXGpllmAXGGS937qQtwByBwAUXHl8oKtR5s2yGIH0Wd637MAEqe8XAi/PC734FIwi//xb+CcZyqCyy5oEc+TcX6IrVr57Gg7VI+ILvPOu18B/b2fJouPNsvxdZdsgmCDr628HnbVi8g+3eyvbn2ney/PRA5Gyg6sLQDN28SH/AoIOrHdkELfFEz7LziNRrSvv+b+LAdQPL39Baxrc2gCYL9c70bY9/OfunqnNp9dQ7JvhNsNogD5lIK5uWI0x98Gw/HI+6PJ6yLxnpm9eLSmBEixGG0fpEBGNQMc727Xx3zBolRFXC9Vlju5t1Gr/FUHMG8AkFjSA7XB9zd3luMnApBZdV4dw7K+8SUPkrrBCjAB7/wC/j97/4ePImVO/z3U8pm+aqZdV1x2K2OVEFStoxHpLqjFSoIVjvUQbSPqwEC1KzBAnO92q+5v9NkFJeMvDC2C5JiEqJbSUiA9ThjTRk8RDx9+y08e/sZ5nlFSnfq78OoJXGSqDweSBOVabMMV4qSfe9KXe1Tc5u31bIO1dWDiGhsVYxanN3d7Cs9tym0CVE5QECRTQCmtm9EvXRkV08R1u8YY7OSsiahUZyr6ehTWnBaV9wfj1juH3A4jBjHQROylQxQ0P1bN6xUHldVWmeEuR1EIkIqgtNpxul0RM7quhhBGC02MAatPxg6Q0I2ASivBSknrOuMZc7VehpYcy6EqMlBhiFiHNjqkG7VamJ0Z6PYuyAc7ek1wRLE+ZyZ5xFbpmWxe9jkN5WtDNp503ZWuCq19ClCQPVA21yNM3kIz15ZdekSEdzdjfjud9/BN7/1GUpKSKsmNwQJvve9b+Dh/hlOD0qfYgA+/+wJ5ocf4p133oOG8xitoud47yvfwdtfWqtnmc/dvK5Y5hUSgrnUCr797a/i7i7gvfeO+OVf/vx8OCZXF2qyGQnwj3/zA6QlGH+/4E23u35uQJu7M51Olur/UYbbtFcxRg3QBBAs0KcUTUGKecHp/gGIAQJSK9xhAA8j2EDcEBg5aRCmFKtxQoBnfAOgG0oESRLub+9xd3ePcZxwNY2IwwQpCUtZ9Hk7BIUASoQwBLBo7JyDKzEt/kABMjLyk1ILFZYSQEV3B5EgFJh2Sl0kNeaqKGAj09o0fxMlhN3+1w2u/6bIGGNAKgpqpshYsgBFGSp15iqtFqXZgDSbvQuKRqjEYpR8ipwwOXOSjExUg8EJVN1R3c9XCT1htYw+IY4bYMQWlB7JXC+YlImVpEGcRaobSG+5U2FLLXchCPbnu8VTEczkpqZ0SwPtgbbVhYId1KOCWL88Rq8WuAZqOuYQ3L3V5pQ6FycHx7nbZqL7BYUR4ox19v4qWcxCGK3fBCBE4Hh3h5RWxPFKtYVs8XkckFOCCGG0dPuGSgETAIrNe8maxKXXiFHJzdzqYzMhxcfpmvwNYNOBGnhTC1uxdc+iAM7BIuUEMNUaRuhn1mVVOyuhJjlxYZUwEGH1zKYCLKVgzBmt/p5mA01EGFCwpqIuOEBVKBBMf1PXT89T6JhIKMDKShuYA0RWDNRlSgNZJirZaNaYpGZtq3JFKViOR0QCfv87/wwC4E//6l/G4ekT1bTVVLAK3oSwSYvuzK8K
Rva/bYzHmcxSPyjorDO+H6pwv1nuzbnZU+KeqTfgJmf3bPpQP2+29HKBxhevnyPbNvfCah/Ltr/exMXkMSAmJFoiZD+eXZs/i0Xu4nNNNqkTW61tm9s6ELeXqtDO4+a7vm27Zx9bV/vk4K10gpFs5/n+4QE//Kf/Az798YdqHeCAlPX+lJIqSGNEHAbEAOSsqf7X5egaNSu2XCCi5TBilM17NCmGKvXIrP2K6fTEqhXHLT1a6NbPckkZp3Xtkpo0XjJdReSkwu66rAiBIWQxsCSYLERCVMsGMqVZfz48qyMZL3DlVJ1OQQs4FgeY3d6qwrGmxefQlTPp39XTXgOeOZfqql0v74c03hvaVwBUHspFLD6NIWC1tomAYsTh6Q2mMeL+5S1KTgpY3SXTeTwBOYlZVZpFjY2FVj6pptXqfprXhFQU0DnrVPAuBgSgKeSHQYu01z1ncYM2MR7u4LMiBsj7jVzcO6IEECmgieOAm6trXN1cg0Aoy7G2qp4/BQmCZZ7x8uWt7lMIxjBoUWnoPvNcm27pDBWhmaLOQPLmfNU1NUhTEuaHI47HGQRgCgPCGCwhCapiWKCsNxVBXtV1dbUaaZ5cJgTGOETj2wFxCDgMwcI1Wj9V1CDbB3v6bHF3HW7TMWJ3kc21jludVLiuwUphC/7q3rvgh0F+UutK1t97flR5iykyNgCSCK+i7USEkgnf+92n+M7/FXFas/HtAjDpfhg1jq1Xfy6W6GWYRpDN9cP9Le6X58r7LYLKS2RdDYKra+0/IUEI+LV/60PFn6ZYdm6nMoE6HGOzPQSSCv7iv/1dcGQEr+tGwD/67x4d4s8PaPOLw9a9hFkqHQRQT3/ggA8++AAffvihfmxF+NiK9tbUt5aBMqeEMi+QcguKETToZg/DhGEaEWJACQFEUV0uES2rUYAQcH0VMU0jktW3eHl7BNFdFajU310tHr4XQmTtb7bzy4Cr3wsBh0PEdHVALoK8LljTimX1eBlL2W9CINnYCwLIspFU4k7udietzpZZEI6nBcsiuJsCbjiq2XgYICVjLUkTh5hQTzWgUrPeMLMJnxZPaAez1pjbL0v9o6VyB7wem4I9j/8SKAOIgZGKQPIKkcn67g21NtwoX4O2SSqjKkCNp1Ih2mMLt0WVq5manDLsBTPfc+7C5+DOmJIxMgesVTvqphsXJogtQ5C+y0kTwerF2IuK1QsJVk9Oiq9zbEJ4L4GJgoAQde9KycjzjDQvoDDi+uYAgQUzF0HKQKGA68MIyUr4VfjTDUo+2D3wsj3l0yM+d/Yv+/yJ1Nu4SC1qSjZB2X4vtCPgprEkNODfGRVqXIGIgqpvfOOb9qXGBhAR1gpeBJIyxsNoyV8IFNRdujF2dWNGLkAqkBAReavdI9FYjcC58hIhc5V0tbXFXUDMRblbywZipApTLqioddzBEnA8PuBwOODHv/f/gCjil/7cr4KuvoxhiMa0TbtO1MXVtL1wEahU5uf9MDpzBrl2z1z4Wo/G1gLZg6xOVNkxoe55nLNn/6z//LE+9dagjWUIbwaYHgN5jwIv1wTDrazeYRe299bBnbRjf78JYNz3q7oIAhvh/XI/cRmcXZps/3PvQtovxv73Kly3Z0pRxeL984/xxU8+w3x/wrxYUh2BKd4II2tWvCEOOB4fsJxOemaIEIeAOExqzcmE7DUrd2vq3lpMnbWI3cKu2eECB0yHA7KtUxi0RuOynECdhWfjHWC1I6MVV3bwIYABt7aWNe7I/ucz4fooqSfg/CImZKhSyXl2n52yiGA+LRiHAHfKry315FdQaXP1dAE2SaOMAGnffPk6y63Gq2UV+AEcQFo+qWjbbAqu07zovHCElHUzJ0WAyIQUCDk1QN//EyEtz5C18LeqmItagcxP3IEeAFMaAhoXJ+Cg/E55rUA127nOvypgWxy1Sx9kyk9IqZb+Yu8ahwFXN88wHa707OaMYtlNSy5YloTTfMI8Lyh5RQwBV+MBcYyIlqxDvZaKI5UzXiWQroZoO5B9PHIPDGIMuL4+KHS2OESNJfeQlYQ1C9Ka7cwl03WoonUYRrNMaiKYweLUmFsimx589fufO56qe5nrAw62Nzy49tqtQ2ZpNrmJSQGvu3vvY41BXar/DdC6fG6q18TubFH3vbe1p+V7Ws/sWTlja7P2hWurZX+OadtXEdI6qyb3cAz6vI/Fwjc2XtUObEWqNx0TI5cZDqKV1CjxSfOClBMCBlBU63HgLQbaXz93oO1yjMIrmKAzS4snKNUdqjFbJT7tgMm6gnLGMgPACXPUApwq7AUgDhiGwWpoRBRWRjRwwHgIkEOACMwfWeteIKtG5DQvYFar1jBEaLq9fgS24UxNRSEgkiDGaxxEsK4zckpNaDcQWmfCixR3TM6LFuvmdMFKgdbplLDMGZ/nBS/jLWIMGIZR0w1bnRNGQE7r9jCQZ5ii+re6jbh/t0m13RopAfU2bF1gWrEiNQuoZ0AqUiDEFTecr3LLtlR3BQVAMlyHWeWMjYTZCKU06gl36ewTkfhUahsKdsgb9UNcrW1b8LER1G3eNBtQ/4xrjAUgRmRVOmeoRtNr1bHH9KlKd1PDCGigiVgBodbsUe2xa7SPd3eYhoC8ZqRV38c5AeGAEEfEUeNOkBZINuDpA9kfu+5v9wv3eDqgMe6QC3JQt5/EBtjI96wgW2ybvkehN0XWfVwTgmArGEu/Xl1/TIARbOXl+jURhhiQjBg39yJ9/3L/AISozL5zh66WVAioAJnQ3IxscvoajKEAuaYnkw2oqu6WdUM7Q5Tqry4FOJ1OWDPw4//3txCOf4R3f+nPY/rWr0BIkwOEnFBIC8buXQX9TG4YJbo4THRAZ7+srwA8vg2ke57EmYyc3afzYpa2PTCvS6azL3BXxm3vzlwbu3HuwcPZ/XsgBZ/y7dw065LvncYbdo2fz8/ufY9+53/vhIqL7pTYP3auoX8MUAONR25m/Kwrj7+3t9j1gs9lq6Vmo7s/3uPh4+8hz7fIuUDyArGsrwRREDCooiSvM9ZlxnS4VkDl+1cyci5a1HdN5k6ppyxwgJTUTSlVrfN+fsZpxNtf+hI+/+K5JsOIURVXLhQ1ql+vAAIPqngdYqu5yqz7YE0rUs4aR2TPsDEWowAX12JPh6r12JV1VbkEBWEp2dduR6KOidVlsZ/dmnCr/1gJIKx/prAuaIo/tdBoogpixtXNtXoC5WKxYVaHKwQMQTPv5qwKchZUcMiEGte21xGoAcb4IlGVDVxe0Oc7yadbR11bB+OxtryP2RN0MX6uCKtz4O9Seax0ewbMAA+QrPxxXWaQleNJ5gZZcsIYCcN0XZN1EHvoi8UamwU7UFuVV0da2Tib9G77ARV4AspTUy4oadXU/GupLpMuPzAFjKMWsh8CYQiEEGPNw9Doabt8uh+jN+exgGc3+C9oQM5ouDSPK6K2ts7bWybtRpket4z53Jgc62UGuvlqNa7O3SfPQOJubL5XOLCW8CG1FEZPFAINMwGagr6OpZ+KXJRWScFIE9ai56mYIoAtsRFB5Vs2uZ+EKp3OVq6
qSEFZ7PmqhVE3b8nFMksz4mG8MF/t+rkDbdv1dUFZIFU3j+3MdpeDm6ph9yxPDtY6gurF9QBBWQrKmgEs+j0TZtIDQoGBUc3l4zSpr35UwT+GQf2QxwwWwVoS0rxgnWcMTZEBwEFl5evWvYJpIEjg2r8ogxKgXKygthYJZLaUvT0w8c3az4E4cVRiZ/WPUXLGkhOWk0DkXomTxVTFGGor1aIlqBYslKYvEtEDNkTNeshGRF2I0y6VSsxFNJjcaWmIGlckIIiZocWCws40Jib0OgZT/+5kGkjT3gGVmTnjKjaXgbuilW1HKVN3xGKDcgczIQsk7dROJKr51WxELqTre4qvhbXlYJCIm/toHVYvKAqWZYbcZkzTqAUfTSsLUuUDh6aJ9XguBwfuLhQHRhwCchYsx1mtWDmhFAZz0uLdKWnCEwqYpogyRqTjCeucLgprwgywWPp+qhusnqusAYQi0NokzNVFNOfG0iKptU18rkl32DAOuLm5xlIEx5cvNQU36mNVW6z/toLpV9//SrWu52rds3fb+rmQSMVSnlDA+197D+vbb+Gjj3+oRJYBYit0TqWujVhAO5MYYGrxkS5IZwZEdO8z/Dt9N4k701iTZNpfcehi9wshrTOODxk//PD7eDiteOvqgPz1fxWSzAU4yNZd0IAgm5C21zT2Ql7zACjdfkH97FKR6kuCiNQzvfsMPcByd0bU+nO9e2Mnj14Ekxc/q/S5+3t/7SRmt5Z4fKw9uGUql8BJ106f9GHz3gCtddXRXKo0TvcO7frTjv2uDxeuqmmu7b8CbD3y+aX4vtdZJV/1vYgqPj67/QKffv93cfrwd7CsSd2yphGBgTVGLKcjRFT7n5YT8iy4fvIEV9dXyGkBQDVOerXaUWlZIaZkrSPtkkkINBtbR1gNQJmgaEqQeVlR1lVBhdGnakVzEGTnT8u20Nabx+YsubXNLpZm3bLbNgLxdq47C4b1bZu91kiUaPblYRwa7648wvbsDqv772QySw9mCMoTWxlosbCBgvk0I4lgHEe8996XwDHidHePObsnQQFIeQ4sBbxIUWuiZ9k1cOjyADMjwGMBfVzS7oHxUaINWFMWInVsDuya+6rR3mrlbuAmGK11yYdFIMJVmV15ElmKEJuwIRAkLXg4rXg4zUBKuLo6YGSNIb+eBgBjrVVWrbk2uPqT90f3Fee43x/cUz3/VeWhJWXMy4p1tXgrDRQDs4bxBOOpwxAxRU+13yVX6eawf8frFEMuG27uffQZ9/qByRz2CbXSD5B9HeVuKl5J71xqe9X32oe+vTODTPddDXupw2IcDhNG0XT/7fyqQNe3UVPLSP9uNNkwJSBoCFVaV32iZPXOE0FKGZCCtGaE6YAY1aWXhbU+JZsyP3kHyXIWWBxgMO9AERC3zNiPXT93oM3dCETM5RCu26hJPc8uP7zOrKsJv/tM14vq/cXqkrjGbcPMPbQkmTB5f8RKhGNgYBi0Avo4YjocMI6DAZEA4gEcNMOkxgWVpksQ6bz9CHnNWJcT7u7v8ezpE7AFcKsNiVHKCpgWjg04qL+9MXRBFSJ5F8jZCLsKVZr9MIDFg6hsrEU33HpSbV3gbfyQAyq3N7hPs04Dg8cRN9fXKHnF4sxgA0yoblJ7nbacM4hY0+8Sg1Nz62xjMIBoBy0LauwQIDVAvWpCHdxLhqwmBGFLYgnKq2r9F2MGKpgWizsrhjkbkPZngwUwE5lGkgkEyxwqdvQrldf7gsUEcpeIhA285AyU46yB8qyFYMdx0ExXHVGlIhB2P3xBKipkxOjpiANCKC5JVitdIC0l8PDyJcZpxNX1DdaiBC6LClJrBsauflw3U23fip2xGscFR5LqxGKFWkMuDfAzwatCNOWCa+Im8DDhaQwYCXj58iWY1c2xWhEcCDXJFwLgcDhUAu6CEFlg2rABmGSZYfR8H26e4t3330eJE370w4/AuTRa00EKrXWkezWQAFJaxjObGscTYmvBxqBJ8o4dkT7v9zt7MHIWSGNb7h8W3H7/D3CY/gn+dRwxPH0LgKDwCuERmZLKr8UmlwAOAYHDBnz1QK7stJMO3vq/91f/fQ+kqqDsbaNZ/tQR2plfAzq0a88/3//d1CWqxDo/sGfdPG/LQNFGUO7BkyefQAe2vA2vsvI6sLPLb9DORmu3ufh0QssFUFStfLSd10va44tjd373UwK0N76nCjH6cygFb18F3B4iyjpiNld+KgBKQWTVW6c1IVLE1dMnePLsmXLspLGljY4KSskN6HZ75lLX1AW3y1RIOl88RE2EtJwQuvi1Ns72e2fvwGc/+QzT9Q2ePH0Kt7CDGWEY1D3Z15Db7vWlqno8avNYLKGJf696FQ0FCMG07X0Yg6hcQuZZsVcqunJGZZ8mpHN9sY9FoGWxjD8Y7SsGZkUE4zThybMbpEJI9w//H3Vv1ivJkpyJfWbuEZlnqaq7994kZ9jCYKZFDShBEkZ60oue5ufph+hZAgYYSBBACSI4IkfsZu/NS/btrv0sGYu7mx7MzN0jMk91c56u4uLWOSczFg93c9vtMwVjsKFES2VXGWYIiaYDFKsJ92dHApKtcRFvsu1yTg2tAutrGriiTG4XoynGzqaJWiqYG6Wqh5W6p1zJ3lzrgtz3fJ0Xsv5uwDIvmN6/R14WMBGujiOiyYkQvGzCI2J0cQ/q0zueKGKlF90+7X5r2VHN6BBRB/ycMubTgtkaZTurCiFgGHTeOUQMgTEEgvcd7Puy9WMk6mnvibHv9sTOp3TZXqt8qTFfnytFn2RzFEgFzNBufk8bcBcegqbPX9Drq7XYdMk+8FLlHLUWWf4qpRRLi5SaCq3IrNzKZnA+VwAsdNzfzTghk/URLMhZ1NkrwBBC3cMAYxg8040MGE+do31qKJmOWd/BHNRsOka21hAfOr52RhsA7GsKAtOmrq1jAe2z0hbvLHogTQGtqX57IY+t0uO/ez2XKs8FlDLWR8FKhMcY1PsUA8arI8bjATweANEwbCoFeU063lw0BROonpNSCu4eHnB/d4fDMCIejziOA3JOyLlUaH1hRkFAZO9J5uNUxSmLIV1B+2hVbxUBHCK8F5Ul7KliyQpJH8W7wet8rVnvkkvBELXxYs6KdlkEViCujTWLIVyO8YCRWdP0kjVZNIIvyrV0jBZeDxwUxSsXq/1h5FSXuwq/XjgQdbUG0uriKgyxnRs5aD8tM3BdKEtWRaHYBCgZiAlZFTY1BYMDPMXB6UVBHPw6rWtyxCwxGnUb32wnOMt3JtE8hqQAViB4V2kpwHSasZzmDQvz6KGDwcD6FD7en8DBDL0h2JrEKvAiO70FFClYpgnLNGMYI47Xt1rTmEw0xgGH4wBeZ3BKwG5fqLFuaJ8uOIt5oi2hu0Yd7aCihebJBaptWoF6nx7u7xCPB7AIhuGAFx89x7KuePfmLbKBC/lenOcJ4+G63851iCSApIyVtS/MDKlxAAAgAElEQVSKGjG57g1vXH3KBR8PAzgOSEWAsmLIGVISUAad5wIIS90nzmc2CiBrI3aY4AwVTMBqV9xcMYPEC5
eLEU8wOhaBgQgVFNIGxb/8xc9xvL7Gv/oX/wIpDfg8/iP+9lXELAGP718BadFnDQc8//RbePHiBaLV0Fbld8cZVWip8OjTKy8abdTu0fPPXNTJEqqDC1Ww+Dt6c/RtO4KW2tkffWSQ+nOeMNLOMiW6azdE8cTRO142nwOg3MBP+s/Pb3L+zaXzHBypKj9PKFR1zLR9h43Bd2kYXRTxUlrQxbTQ/4RDav834JOPP8W1nHC7fB/P7u/x+KMfKa9fk8lQUzwCA8OIq9tniOMISQnJDWZxXqnpXhSCGtNkmRLVIdcfxi9Ja+U8vUzT3RZQXjFE3hhT23ewX1jRjGtj72pcGy9mxkCaBZKsBt7lT6cy2/1lM+/9Um/mne16doOiWluIALIjJAJYlgXD8Wh/tVRD1WX0PHWUoBXWdU4EzwyRbDzXwBBAhHVJQPEaKgaxwv0XMfHIVCM7JAlFNG2/kLfSkQr/n9xqMiC0ftIDa0uYXLqMqM06Kl92cDIxxlrIlFDnJ1Web5lBjV6R36u0k0Xrx+fpEafTYtGrhCFEHK+uEIeAIcaWzunrL4Q94I83xW40pPgIDizm/L1+X7TuCeJOJzXUS1FwnmlZsS4J67pomwtSehvHqFlODIQhYrB60ODOGNrWQfPOeHPD7dKh9Lh/rxbM8GtVbFHTV2jzZvW6uiI7YmeBdSjOG15IbRCXB0hAS3l8gi9u1kB2Y/e940GRrmUTewfR7p2hzyNHF7Wf+9E56A6q48T2cBwwRrG2VFo4oejghFxUF2Am5FRqv+M2p7qupYg2j2ftURyjt6ew3UH6BgWCeCELpj++lkbbXpC13/UfEVTUuUspIR869mkovkB+r81Cw42By9IgpAwgoywrTo8THoMKpBCD5qUStQafRUBBa91ijIgGlHB9PGJdV6zLjHle8GgeRaImULTnxgAaByXPYgRmHjbPdj97n46YA5mHz9ooeL0OERA8x1ZEQVGKGQtC7VlBhaOUglQIWBMcc6UaC6WgQu6Spo8FeDqXDlGKFrAWZ8Kgs22rOdOWnOaKj2xFgQvU4Loede9fdOxnSgu50WjWnGuatslrDVSTHN2hsLyeRuJfBfLoqD+CWrEpKUMprtQ7nDs0JK85+OrlpMiQpO1MtfRPW60TUGsxnT7ZTZGivcnSmsxwU3ND0dWgBi8TIqwAW7QBdT49YF0TStZJzWkF8xF0vEEoCXldAAHysgDU7TUAQaz+jhkJUI+0eXezeY5gc5lFEAQKdFPMk25F3sMwoCwL1mVBQUDKwPHqGmld8O7Ne6h5p3T4D3//Jf74T3+g982lWfV1afyZ5gXtFYBOuHkfuCEyloWAUgzFTaOPj6cFgQXHo+J0qvOBu76F2DqUxASKew6rYl/g0dwaXFfJq6elYu5/qbJrlRU4Cf7+l7/CVTzgeMUYhhF/+5Of405ucf+7X4HmRxylID//HH/0w/8G1z/8c9DVrU1HQYixRt56A6yiUtqRkUFG+ympgRtjQKaMVtBp75YL5nXFOIxNoegO3z0aTdyLfLF5b0bR3lisBiK1v5803HrjjVEdK9JO2r77xsizddh9fgnB0mVMu+Jp/WNvOG1Uuv29q6b0gffrFRPpZ9FuYWtwNh/7+1+Qixdl2YV3gWwjYP7z5W9/h1evX4JDRAwBJa0G9qiK6nA84Pb2FkOMIBEsOWM6TQqoIIAgKKjEptDeUmlLsZVs89jS1DU65zDny7LgzavXHXx4P8XbcXsEpxo4xaM4rSqazVHHTPD0PjLeXRP/fH3NANWplvaZyVIyg4RtHlutp9V+2v1DZ5N8+Y+/wQ+ePatGIaNFUqoGaIq4y6hSClLRaALY2IkZiMU0cIZGcrT9ghoRrqjXXpJQh6UiD7YUwQAokiqMb2ZpNdBgF26V2KvLvCOvRmrGE6U5jWFivY8mOj2oMtvPvmeo+Hq7vtMZdSVjnR9QMnAYAoZ40J5lQVNAa0pst0cub4XtfhfTwZwOSTSW10eGRYA1FytFUGj+edb+a+5kYQ44HAZdD/aG14zI1BlkjYZ63eXSOJ9yyOx8RRePyqtqCqeuyrkm5nNQpwUEQ0m/MK596u6Th5z/cckBdXHM5C2LnDLONchie/o8hdKMuQsOSwDmsCjIklGy8iQFNSGjvYIDOYa3HiEqUCEJQIc2siKq7/t9jYA1wk6kdaSmI9byGQJe/fYGp4fhQ7P39THaXHhG6qbEXtaJQNA2nL/pmcK/N8ouHD24x1lEjjzast3kZwpAZ/17igNlTYnMy4J0mtRIIPVCCzMCItY1YSkn9QgQrEdJRBzHWrekBoRG21LOFmYllCGqIu88xJk5M2BtA7R/CcN1xMBAYPXT6f25Kr0ES6sQy+tVHRVMQMoKOxsCqoCRnBXpKCfMk0bvTqcJQwwYxgHDOCiUssCMvBbZhBnCFaDDhFA25udeMDFB4sKiKpD7lSb1nOplncFmZ1s1Ure97LXb5fWeRYBgBrBms0jLyXevkgn06pLaMASnWa8lZHgvP5B6pHpflKKSUhVYPFgRNEhTWOq5AhAbPHUlXoBKFbaVxxliJEdrsu4pLpZGQqwComT3fJeKNpqWgul0QhwVnSqMVxAxo03QQf0rLXuTbTLFQFN3FMCjzYtdY7Rb50nRAkCkArWEosW+ecHyuGqdmtFLIWWWn33jGxut273BhPY8ASp6pSsPxFxpDVBjPYZgESOPgamwIhKcloQBGSFGpckQDD5cB99EknS0VqnL2RXwhKfMo+s6L1q4XOmkCPKScLp/wP39HU4nxrKs+PIXv1RFsyiKVQIh3r9H+d1PkH/3DNPnP8Dbt2/x+PAen3/re7i9ftaKuo2fKOx3MboTzMuKIehefZwnTGnFJze3IA4oksEgXMcfQ+SE0/QAygWH4Qa5/DmIhouC9RI37iNqTxtiulwVgOpptq33cV5U/NczznBhbbbPdB7rc+SHo+bvTDz7n3dq4n5sevDmftv6uDPvs/HhM1nljgAC6sZ3g66mg7kxoeeRMHqe1FkWmzH2srE37N3ZdR5xFJR1wpsvf4x3b94gLwtIMmokThRQIQbC1TjgeDyCCVjXFW9evwHWFcTAuiZQEBwPBzAJ5nlRntjrjRtVT+fdXyOOBwxrsu19juTnyu7+8D3n593c3iIOA0ybaM2aIwOeAWMykXra6lKrdrYxap+1D+kcdrFQvYvNfE9VW0Xdo7YMAOyyCViWpC0XjD8G0zGU9jQlXyH4GSGoo5GgfDjBFO+q2FPVU9S4aGjPAFU+W6ARPAddcH3M6ZtFjUw2IC5vC1RE+p7L+hk6w4Sgwr+uke61noMyWxooaZSV4FF/Xwx9j+PxBodRatudAjJI/So8a6rq01Ho7RqSG5xQ51fKCqYjVS4w5mXFsswG5NJqwdjSbmNgxMBWRsOG9N0ba72xRWaU+zifGqt/9hTNXeBWnQw7v+f+PtvrK2lv+hl/YAQfshp/71EVW7jT1ZuH+7cfPKQ5Cs+cOHV4bb+KKHLn6XHGdJqQVm2XlFLCuiYFJYyEGAa8u7vCz37+HP/sT17jM
Ne9bNZ2xuelZGd+Dc0WPA0+Pt8akpsDr3buxYNE7QOd12TnInExtDTPvFNedKp1nhasKEQNoUG8hNHvhyZes1i9VgMQdjwlLFFczRTgLEqgyBf/KjW/xHnwDff7YgBqpU6EKoLJMAIY4RwRxrD2qAtL+qEIHCiJf0Xdyv38U63ODJTg1JEEPyiuurawjpJslxwPF4wuk81SbswtazjMwhJ1X0gQjZjdPSCoVVl5uTbrvEk5sFRQjH4w6N/F+N0pwz8mrMWURVnn1D6aG5BKl1n2GIusn4dQOhJCW48PUvizYKDrsRwdYemwPtJC0MgIsHaaUaKs0c7JwYkKsexd5f2HJwg9GhkdDsIFgzG2QGf8kF+6u9rnWiajyCdMz12h53VT2kRpRe5/b2Fnd39xcrsN1MvXepIg4tG9To8fl0xhAZORfsBssKZzHIajHSF9dTqPqjZnG3W2jHNGd/A0bS4muLMYwDCgeFxFogq7ihSACJR0L1vVwW7OIOYRiQhXB3dwTHL3BO/xU+ePoMwxhxNY6YiUEcAazIpwkAgbEDckTOL0EQBES4jqkQ8Dp0giAFSSLW46gylgXTiz2G5xMEwO56VWZIj4ISVaPQNbcHAHRZ2pp3pjEx2nGTGx8vW4Y2ZrRVG65wSY2AkgWIrGyMq/alYo7QyEmTgiLA7tmElAvS1zs4dB+AepRSQMYQqIauNtAmg3BqG49tZNcNaEdUMFoUXMSMe6+57B3f7J8zuUkrjm9eQAS4uX2KnBJSLgqL3WmPyJyNwr5ujt6epT2jdHr7sYO6a+q+x1bb58gcM6ZyMec14Ksvv8L19S1AZEiKAsq6FlxOuBSErLyXYusxLSvO54LT/RFMjKvdHnGnRlZO2mNrHLhCT9dlxf3dEfdvwsZ6rkQEUNgeWVY1VSizYNgN9Zn8+QKpELl+KMWYCBW8oHJiOjfGaE4bVTZGt63cTokx1j0JHMHWDsCDYyAVuVwK8johp6yoh5R1PqvdIXWOqK4XD0ds4DWWme6yMP2ebjpSA6JaP/3QnegMOAAaNNlC0kpHNqU2VzH0C9dgjVyez/SEa/MqbnWjawH6arQ/IpKm4jb3vXG6/JTUnE7A3i9qd3hfzNJ9V0tyvGcmbdsjMdXO2v05VQ82x8CDbqVkIKEibqrDI/4dsozZ44fbPw/061uOIlI5J2ofZiLrTS8X1TydDhFczP+vd5gvj0pMJ5eyeDlL+pLv7VyN4Hbly2+UvGrLDWnv29fbN+rG3Bk7/RO5TAkA6UiF3nF8S5w27fXTjBeDApFG2FUJFMzzZNFLZ0MMhqmmDelEC6N2GoG2C6Y6VOZMhBbKAPUftvcd/ucHEzSib3UelUKYyepOzCk0D6FXULIqvjrEYEa+RQEJqPUSPtGEqqT9IRQ9oO8zTBFnv9d+ZC+MAzToYB9t8ntksW46vWPjwyD+W9Nm7hy3cW2pba+N8c2/d9Rk8xNoSpKq0iNwJZGoGcLSjNQqO3YWzfRAsxtmtG90e4XiPb7w+7osj3V4jZFwgNo8HT2+UyXLxSmZQcbcqRS1wdjUuGaE+/sGlDb3jz+/wedvVvzeRzM+jA2aIAX45fpbSDLg+/LXKIEQo0Zz/4/PbquBICAkDJiGj7A/HBBYc7Mpr8irNsZd14wwOPwlIhfB6V4zIt6+IY4DDp6ZISCUAjHDXsYBeVma0eePDCBCcHOT8Ud/9DWSEE6nEX/+rz5EyQKOWtNHKMoC6VkHLZ3QYn6D3TwWSdTsgEJAmTTrTlGzUWwGpCyrrQeVgSgAkjYoYIMtCghJBJQdLmfzRABRxwaFtllSdUb6KfbM7qV8bcdEYlQIZsrgEKv8SU6Y12Rrii6CMYKb6+v69/MPPrxw2h6MTvvb9B6Tk4pYBrtkrGvGEAI0+GFZtu55XRargqxrX/BogBUNKsLk5xGDKGkgpxp0KJuMt9chuTOU1wzIgnIWrCDkknB3f8RqTv7HH3+MdZ6wzGfAHGEBEEmz4yTJ5KNAkBFJswO5PouBqogRdyNoHTF99VRrgM4zuBQcv7oCAKSbDAoJhw+NiKI+uxkzhkuv2WV3uz14YPyzBj7UmhkmvbuiuiSSCkjjbKNWrE7aVkJy0qizn9dsneAbHRn8TRjD0wny9U6DBWYI6f1xJRup0Vxj82UOmrErWoMIsdMSgSTrePr6YCWbQGlQc2eGJSagMhKrESekLRuIo9bWjqPOf8oIgc0o1/YcUmnyvTa3P8yMFuDSNKywxn7tMCMBEGYs84zlbsUQo+rzQWvFCKgIjpSKlXjp/p468UcI1XgrpFHH+f6II2ecjifsDgewGWRLynjxq19hXQs++OA9XB320P6EHnBzyKw+h0gjqAEBec24Px4VQs0NWlYP1rY5DIW8pjUjw1gXReu22sQIxJwTdTgFMHIkDc2WWi7RO4RaQ2wlAVn1dDHnTJtZm+NXtGRAe3x5yQSMmVj1nWQjj/LAjOtPzz7ZXi6mO7K0IIEOlAezO2dH3P5yy63z7N1iMJy7+L4tUnW8twoShhF2uZ638zhBVnX0UDGtzOVBNkrIA0/4tcfmq253bWwiU1OW8u5r79o9OamTO2y0uYBDF6v8vsOVqvXq1h7ocu+CXfdv4yI5zPTSGXnnIWpbSc2UUs2SPXQBu/XQ2QZ08Tnmx5/YxaPPxW4+WZ06wXxeMK1KJOfIrwfn6g7vIwt0cvoOedAt8rFAhO4bgRVe+euOb4XTBlNu5nPWxaAKr/XIku6BScyIBmFdM86nczUGhkGVZMmKURfXmDWC3U+5OYr2Qst0mZCYlJRu8yeD/cQYa28WT+2W0hYNQaPp3OFuCWy9c/QumKkWyZNYyrq7txgCxv1oDF/b6S6i40ZFUNghbnQh0NRJEjdrVB8W4kYjoTmK2Aqf/+oKepOhMNiaswG5k+vfZ/L+PC2K589WM3yo07NRZhzYsjhe7Jy76/ePKFXJCCyCq1geNEjf5dNs/yrY7Hu1AJsjIYpG7yiwOQlUZWNr9Cq8JBuWPkTWSKVIdVR8bkTaOAKEOQd8+jLiq/sR+8FHCHj//ee4evIRhBhfH0d88/VXGOKA4WqPN/NQM52lCKZF8L3vRggKEhhBBOucMc1J2ThXRhiqoCtdddGanIIMHgQ5BJynGSyq2CGi3Agk6sDE0GWX3SBVE3W/S3j+YVKTlTKef/CFNthmxhAC/tk//w2cp2wQZAJbxoeIINzBg21cihfNqyDVjT4Qt6wBKa1zNgeFCaBSkIkRxeql/J8Zy15cqag3M2bhsLYLYX/00DkLrDWNYANLktXSAbVoWnWUrYVOp6R12ycH3bNrZBWAFHz+s59fXFs2/3d5cp0j4iaNZ4u331SK73at7buq7JTB0zKUb9vU/dREGOJgzqm2hGB4jrWDj3ukP4s1k9WxKOKR3oJlnpW11WozpQhevvgKx/WM9Poe03lCg4IKEgoECyDKOcZe78qa2TCUKbxTjwCgwNiNBHy04vUXCnvKIHOzCpZjhBTGtAhuPzpWfUxAJZ9yFapGoVTnvuR+Q5ZmqBZUtj0pGRIDlHxHDN
Nnxr3AnCm9WW3bogGGgW5wG36Is3yKlV5BiupUsf51xVpdsEG4USybfGFYus6KVgagxCr6XUAz4LVdiy5wFNJGCFKysfcxUFL9yOappb8Qax2nO0vkCxBV81f57USwmVae/dAIvWxkWf+IHFHSCgFwmmYMpI5tgNalMMzpYiWqcEdBTCczMSIXrIsSwMRxRNztgZCs7yBVuD+JgFhAISpaIM1AGBEZGAdWB3HcAayBOgKBLGAUos5/Ngp6giAwEAMhI6DkFbuh1XC5vIagjd4rWgVQYh5bYcyh1e+IqD4yP7hczI/qpAApGYUI67pgtbW3rkkzaaVlAitJj404B4XaOvy/EiJV+Lk6bgXafoI59LOre4gb+YTa62qzj1p9WYOUtf2q2mRiTpNLiVCFD4shU7yeXcnANLur458B6mrOq0Q2feYSJi7Q0su3/q5ZoAqSN91vxFd43DAHAGLGMMY6z96CCBbgSmtC6BasOzJ+3Vbr3Cl3UpKXJK3cwE/hwfbLrcwdNosT1drDd2572zPAV6j4BaXtR0VKq3kUbQuSxe3DNn+tBGR7ZgC257c3bYvTEfd95dIWfOQ+eyyN/7wYQXhpVEkJmRgMVvutG5B6q5sASUGWgvgWgKigZW/9bh51h0XlmDhDunZdjx3fEqfNDxXiEAKurq/w8cff1fEijU71KoQCKSPZmlGyMt8RMZgLBmsWyqvVE3RXEKgDVmqVo2LjQxw0UlkloHvf4GN1sy7WZDEqZC6tCygwJHXZuD5yaILllOL+GjMe7z/RxRLiOCiMwe/NhLaHcqScsb6+R+0ZxmRNhZpAb/qlUFPyJL743AD3lHKLovkG4kZYU9g6Tqnoxu/mafbKMHKDnHQHwXahVOH1++EBJOYQMLWMF3ULnbZKxeGcwTxPgtfSOWKnLVmvsRFzjnt3LjBVtkQqGinMMQClKTQlVVFKfJdLFZO+NovUKCtuFABSLBscuuia6AYWY4TIqpFnJpwTYUptc9ulA/ZWbXXOB7yZtOfZaL2rarsG357LCpHo5rf2EgoBQyCsUTNiEL3P/W6/kRWXhzcv79Djz/35d7sBh/2APhJc5Tkwbp7d4oMPn2vNJwc8/4gAb+bOwH/54Qv81//NBxYZ9snVqGgga+AeA2I15AUshJKl1pPq/NUJRX8U0ShwHAaMNsbsUUigZsD7uslVGDuXcY/mGSsnirQ6N9IgQrZaC72PAqZwARls7hREGd6GEKpjx8xq9KHJgf/U+KmRAdhba0oPtUMLP6NWc+sIwCa3tuloTh3V78paQKPVkvT6rjOXAWjtX0oYY9jU9l5GnT2rXlQMqiXGzNaLSWux3OrO2Vq0B81gSylISTMvOs8JJBkDR6TTGel0gog6IYuRPQSrk1hMvrI9Iwmwr82i1RFfc1ZDFQU5D4BMWI4TltMBIto4XUSdoyz63HI34J6vcfXBSWGyze2rYyQWiRdzepizBTi4wos4+DzlOropib2uU1aQwaT1nqlonVkqMBZVIPAB7w1/CAIwZW3arXNqc1U0AxICIxIhE+nVSEm9vY+mquGgjkdFSbhcQPs+Wh0yfF+qoqUszVQKwFLrwKpRy16L1ow/NRb1XD4fcAOd+u5FsjGefU30Uhaj9gRd5rm+wxDcvX4FjhEBhAFSHfbATeYJRQOi3ZFT0kwkF5QMxHFEIMJiBmE25EwxArJiuiTNCeeT4MnTYsy0ilJ479n7dbyKN2O3daFtV3xNt70CEOwGxnjt9WO2z1vaIOeMMfo4qbMXrLY/DiPioC0HAm+rcTmoAYgiyFK0Hli0vUsWZTktOeN8POu9EWE6z6pvTaaItA43Rr0mwFVmypqwrsnKMXpH3Uo+uJvnrVSY3NrrWoRY9+GqgULEeNAa5RCC1lUawQWzwpLJnMRa91v1W6nrPo5sbJvmCFtgxLwMEBWweGC+2WvFl7ntc82k6/Z5iJHxOGRa4FV4hfpBeeSwYF0JjCABQmI92QRiAQjerA9sxm0IjDW32lKVjs6pof47l9/v5sTtO3no1G2u++hrqgOKGNrFz3kRjPELSa2Js1kubbw3jhuh7l2VXM7+9aVDve6gaqtv9ztyR1/cTts+P9C7cUAMAUMAUkqAjPAMjL9fEEAds4SuC4UKNbvOaWja2GmQ8ZF6vssxJQEhvNXh9+Pb5bSRRcmiZst2+1GNWiLM01xre/xgJtAYcHVzg5cvvtG1CxWowAo5iSYYjgkGgARggBp5SylIYinwjr4WeEygTRhJazQ4NNpe3SPNgBYX3cucLdVnMPdC75f1s60fmkEDa/rYcex2sqCRRPEHdueENJpV9KRtUKECGwfG3L3aF3LXbKP9q4yJaM5hfVAfHPsR2VmpfDNNAEbDkMJY99qC88hcLSx9i5Sq8ew9f3o1YPVN3eZesrE/smY3duOAORSILLoZVXikH1KNSK/V4jq8zZBxQoJQjfnWyLtmIMwJCszY7Ufd0IzMAwIUy7Z6JsD2DBCgzKiiFhwHBpJuPF5TpvVCVA2dLvDUOU8OUSiQtALDCBR15qMSQoKYkOcJc1YDIsSIcReMxciHQ51IJkHKDyel5AzBzjEtWzOfFPolIWhQxHePONr7gjhmfOfDFb/6ZlRWtZxBRTc5d4RpKUg216Q9FcBlUIICDsY+Z4GDclH/CjNgRKw9AJAWzcRIKZDqNLvcNIhSf/ROiWfQCJrFSzyArDBOaza1NiCSGjYoRZuW6nRhHJUxkrg1F64XlNb/hnz5US1fRq71dy7lerNbRwubc9b3SBVDBZ148IIADKFu2J0wbe7La1NAjJSVdOByoTZTS7+Tk2ZsYxghwRxvmxop7RuttZA50VbbVxvOQirEUUxXBEKtfxIiLAJj+RQ4W6465owlKYxP67cUblgjwCWBhDDcTBiOjPnuCpqR1+uWlCv0fH6zw3DI2D9ZdF4669Sh79r7r7UH8EFlDiAqULdfNjLKRlihgSjRzAWKsSRbHVIuNmiC94c/NN0BlFWQUlGCCNubis+xR7o744aZDE4PgBgDezDOrT2/Z6mBBhSgsLEVFwEbk5SqGqo9Bqs8CQElAFjh5EupFMtU+Yhx3ZJq5secuMcOAtU6ZUCvmXLb/91MZNLgnhLtODKDbD5dtJ2YCDX4en11hTkVPHn/PQxBHZE3d9rPb50XpHVtJQ/k1wSOxwH/1796D0/fO8J73BXDrpai8kis8rCcJ4zxGuNuUNlm7c3GpAQh0zlimUfc3o4gaP1iloLBeuhBBMPI2O3VeRnMgdJstMIjUxIUThhFEKxGuxiSp46vOdfnOWE+T9iPsSI0OIaauXaII0GRAIAgxqi6y1lnqK3jx7IyyjjpYad+Pt1o9jlpATS9Ra77nNs7LmulFEMvutPX11+hsmB61odj0H2smnKKuul9AA0W9VkaN85F90K/Y9NTvVviD9+rWiYviXFdRG913DgGq01vdt04AqtoXz7p5HtzTTvWitBoa9hu0y7Z7w/9XLWauAxdM5u9bmPa9d97uEgdvaDtsAxG3elZ/W4XtOkdNiKDpgFtVm0PNCcYn
oAgt322o/Eo+oMIPaMp7K7URpYaZNt4iHYHJIIyrUjnGYUI4zjW+ZV6Jgu8uY4nwhCNfwCtzKKZyqrDfEqkPC4PLaBK7/T1/fh2OW3V+NAtIeWClGaQQ2YAXXwCSC7gkpFLwfvvPcWXv4imaNoRaGMiIQkUPmF/K1EB1QhfLVrs5GFDdV5fs18qdIhq1AnonBG7uq8Boe0iccMdAnh/VvHwgzltJvMtcmCWPJmjJvX1AOLUxs/37moE+r03ZeIbg9Qx75QrudLaxEz0qcz2dKOgZnws6rrBaqMZVPV5u5NcKgWvq0D9nt5LZb3cuAq+j3gFCZQliAOubm5xYMb9m/tKgtDjlEvOWOa1krdIp2OLCAZmUAwKbUwOoVOlUFA6I7spfoHVG+Ss88kKUSxMG4hsf3gtmYgxWLKADO7hSq/i8ju5q8/fRYRzypjWFYcxIa8FKFL7R5EI0jIhr/ocYRyQ2aNdLYtMUjDsRqQ04fLIWZscK1VzqTLgSvF0POPNXWNEIxRk8VYVgrSs+N3fOeGrLz5GKWp8UhHNGucGn2WLSkMEWDJSSMhFo+cAFAZijZKbDOhcKMxY24WQfbauRtfZZuhdzp8LrL8uvXK36QtO3NDvNP2kkrKkDYM22h0Gr89QQommJqR9He0eNM7h/dserr13H1asblBS+JiUjgUDUIc7ErabsbtgHeEPczWWmJQAxF8HAH8YnStlTlwmNXbH2wtd1x/9mpcuE0pigTCVhdX6gnmUnIkR4A3hjVK/SK09AQFJsrEI67MUHwi3tSwyxgLsbzOwEJak+iuvWkdmhYyqy7rRIfH4mGjNl2hbCpcV1/vFvFVd82UbrS9AoWK2g8HMSJ+51jbDgjt2625ooXTtRHLGKmreudEKgjnApTZsZjNlAzXyh35PlEeMHw8eNnRLN35ek8stP0IMZU4kpXWfJ0Y6Z9Sm3caw2KzahmjR7M6WRfVhRL3fJxxY6brRa6ffvlrU2RcgRLAFcxACghDWNaFkoCQLSlhWoMHl9Z7dMAOAN29G/OkfryCs+N1/+2uwke0kC07GMSKlghcvnuA4DaqfxYw2gTFVAy9eXuPP//wa//4/vMNhr+PHApR1RSHCeWJ8880zPH16UJ0rwLIsWKYFc0pIawITYdzvoDWMSpjl6BYSox43nUc5IU8Tsgzgw95QKgSy3rdsdV/onG0dgn6/uXQh2ry4LlUZs8BanUFY2tltFB0rDgHwjFXRIgqqOscZwJsDLjaOjoqot2frhkNAjAYzhe5vQo7Ros156qbXP4td30x51FIIt1fqB7mTYR+H7twiD1+zsfKssY+Zjz2HABhai1Fj8XjY6qX9LmI2WJbKHAlo8qNvn9Xuo7WC6N/bdkGRzbBI93r3l+ko06cNEG+rs0El4TYkyXa83XiQ9lnXoT495MksvO0wfWDnzpIRH3NrRNDAnD3GqslnFg12ZHiJQKj2bP/k9ZtkdcHi5w0axHNmFeoRaf0Y1rerXfz/5viWOW2mrI1FKKdkxbmokUaIRrRSIcg06QbKTWHUjYRoS7/dHYEUTsMilf3s4Z248DRHst1iW4kcrI7gQeNZ/0g7gzoG0j4nLXor9X+9IanXVUiLNmq+3DxBvqG0pHQlu6j7LW3EFORpY3uXA0BSjR0vW26GFXw/86eC1EXQXiNslaMb4SBV9pshBCotuxv/IQaUlKqjWJe+G26PzKZnEtzIhTWOHcYRCAFxmJE7h9f9pkQMrAkbSkqCFcjqnF5dXyHljPNp0ns02Fnv/FXnorsjH1ryfh/JHCfycbIxqspAlQ4H0t5Z1JxVDjrvYhNQSiAoNAkAACAASURBVFFcuznIAnXclNVrxYsXb7CbC6JBQVfp14AuoGIsh2vOKKmx5BUAeV2x2+8xnaatfjXRK7lg3I21DxSTkviUQBjsX9shCLFTeojU1u+aMMSAw/UIEGFdElZjQs25k22OlW3Mj5wyypo2BkW030UEIQ7Invk1Y0c3IoXA+rqvReguhxTQK3SdZtcoui5D19ajEnEwofVA1Ixr4IB1XVACg4rmDotnBiEIkRDZiq7MwBLRTSpEkxMtNkDPUurQ5EuWMDeC1Ghi609luqW48yZaG4MCWgg0OEMoNQ3hhpw7LeSGTNtMma1uGEoiEGKwhA1hGALWlMA5IRgNv97+du363G2zj6LwaCIUcUilTzEbtGtbL1Kz7UURA8gZmjoBhD1ooZfwBiUSFK093q7YXZ/x+ufPkJKRK3TrdLheMFwvdfNtrSNs/ZdmnOqrrgGbzLgyyB1KhEAoKYP/H+repdeSJEkP+8zdI+Kcc+/NzMp6ZFV1V0/3aERJwACkFgSHBChwQQkQIYA7rbniH+BC/AnaciVAO3GnPyBIIggIkChxJE6PhB71TE/PsB+aqaqurqqsvI9zTkS4m2lhZu4e596qHu1KAVRl5r3nRHi4m5vb47PPgrMlisEXrc5GvPm22KNUFpyspNZp+qx1fyegRvG32QCy8ZNGnR0+6A6vKK0/YHquMpu2vUjQzJsE2Pc6NEck7McrpJRwetAGzPDAk7Q5ckPeQ4XwFbXzzgNHVWd0M3tZnVyNdtSpevIiGy6KQ1RFHXQA+XyGDEooFq0+bBy1Dm/hWUkyRBQyb3bJ7d2I27sRAYIvvtwhWvby7Xffw+GwBwP4xc9/geNxwvmszjhDoZggUYbgccA4Dvjy9YQ/+Dd7/O3f+8sGmSXC//mH7+HhOOD16wksGafjCXlWxkbxuiYRhfQ7I62ogU+SteZR2Oj7AZGCBME+AGS1eiA9A0JU45RLAQeFo4dilehFyUU0IMYNYuhrVwMSKpPKkkxVLsn2ip+dj9aGDXFBMFp2cwKslyOC94bjajfVHVbPBDtFXUahiClCaxvg8On2aVTPwAPmatd09fcuYmZ7eA0Yuqf6Z1qjeIMhuza4fGfzRKpK71oLVL2LCnTv5ro+rX4a0LPXHZ3oDI1oTh+69653cYe37snNPx+NWS7Wu/+ZwJED9qwskNTpDfuwQxWfEAF7K2utItvP1Lu4/da9R0OVOERV/xkevUCVEHTuYdXGJgaWAQ1giPWO22JcHl0hViBalXc7K/ogT+eY1JW7vG8NVP3/raaNbHPbDOjPrEmZkCpNANW4KKUgpYCU7DCWFhHbmDQilRmqhoKCHfC2RZP/XJ/anrMdnEUU9Al+VMYYIUU24+4vsx3V6K0Mat0hVqMy3ca0lxAWoyfXMQSxzUhtCzQ7omXLaoamg0kSpQoTZXQNSc1wU/uRgIvNgWYztnfye3YHd9NBYtAdag5l/271DtubElHXK6s3Fppj+tQl4g6gzlUpDHABnPyDLrMKbpBjE7FT5RIs8h2wP+whIlhWQVkXAGqgF3IxUkfH6x30A953T38vti7+jOjOpQ/HyTVCsGibOlWBgOubZ1orIc2ornNqWsojZ6LCheO9Ntj1Fenx1f28VqiJOQo+P4UZ+yFhDEEhaDUSCBAFI5VBpf5nCKyABPurK7z1thdKB4CKQmNtsk/zik8+eaMQNGbMixoBz9++wcuXzyAQ
rA8LZi6YzwvKmrEyI88zlmXVlgsR+Oi3fgv/9s//rCpdzz4Mtt6lo+F340/nbZsx17nLEIu+B+JGBCBbaRN4xF7lTYJCsQYRXB12mOesgSAizCIQgxfleUEpBbtJDcFIgkKEm2fPDcLjT1EqTYFgmnabyCygeoN6uerEtr+cftsPsY3cm6LwmhmtbXOdajqtDypBj7ZwsfO8DjakZIaaZVRCwJDI3tNbiUgzxGOo427seQ3S4oyloW9zYvLGwiie1eozzTbOQLFaY04OhWJqwHrPqdPDCCVAovZbA444nQYwq0EeghtCgjAwQuQqRyDP6Ktxl8UaFFvwztsEaPZHZ897XOvc82bR9HBPFeJboOeLZ88hXrtn0DtR+KaxHZhjag5NoJqA19rpAFlXIHl/QyXy0PYgnUiQBmIoJTtDNWPh3VBtYXXYbkWKZkDdWRj3Iw7TDkMMBtnLKMtSzygrZ1ZKehJIcaglKimI/9vFtDfHq9UFglv+inqwr1xsBmf2jEH1b0xJg0cdzDwQkLmgLBlyMn0RR8RhQBpHEK1YlwXR5qg8QaHKIHz+xR4pBrz97rsgehunk2AtBZ9//pmNsoBIMATCMI0YhogIMlKPARDGV19E/Mt/8R0UAKuxMs5zBHMGr+dKre9BaAFhzaUGPQVmz1TbMNTzlgTgdcZyewfOBTQMCPs94hARFj3vQrQMW4z67iMBUmomgHO2vd70qXS13m46iWgwDtwFAuu8EVpdT/seTKf2SIBK0nEx31Llr/9Z+0fo5BWAZkKMERPGeulD8W4/NQjT7oiayfWfkJ1zRB3J0uW10ZDoe5A+fgdAJJgMiwW0tzbKU4Z9+2k7BzwUZOahtufBtqfvdgB4MkshpoOAx7aSsNpUTyEnyDPo/llLLEi3T1twbjsH/g8SKLHYk/DBTik8ehWBk/r0OuNRcMd/R/Y7s0n6Lc0gxDEh7SfQvHy9o9aPTErTPe6UitV1eu9b+Hmpl9MJSgh1n1TiHyLgNzhswLfMaXtSygE1vnuHzf8U87s28LGWAvXPMfSw8i8zeUoTABr+um6J5i9o5IravfvifTd845AsStuUkQa+esfkYiP45q1FLe1jvgGbawQTCjdqLjDPFJCCbv5q7PXPE4VOuMPfGyEQODq5G8c3iezFJuo3uEWt+lv1H5PNqB4rh8v7yGb6yJmuuxurc0BkBzSgxooIzvOiDmC3XjWDSdCsYuegSvefX16vMIwBECNN8MLlNrn6/26wlXWP7JmRmuMZGlZbP4sKdfWIojvOMQYMY2tq7HPmRmxt4m1fD9FZvTqVLReHB7mR17WR6NYla0QAL997CeaitOmiNW4eba1YChuTFLZslb17YXiBrvoQUg/iUopCF0kN0iUXfPH5Hc6HBS+eX2N3s8f1kCr7Ws5a9/b82TWyPWOIAct51ueHiBQDOCWFQrLOCwFAKYgioOQRSHN2uDee2+HsvX36WkmQN0o2Y8FkqLBmXvKalXCAWBmnSNdmFSWRGMcRsi7IhTGNCasRmeTzSWGe/dLY+lztd8reaQfCBlJh1nPVHyFYnxs9cL0dgUZ+m0B7g9MKhnbCCZddETXO/d/d3vAARxMpje5ztqyYMeJy0QglBcI4JFBUYBOrN+tK0Q4nNmY9+46tR141UpuCuw26HiVbsMwGUopm8OrQo8FzQmfgm6FFtYAiQEghhsSEEhhUBM8++goPn70NXqMyYBIw3azYvXus7y9QuGPslOhaijqBkQAUhKhZCt8X3h6g1nVYXZDaqRa9p6JpP7DVR1KVN9/383qLQG+bMUQNQh7NCDEmypCaI4wONh09C9zJk5KeqEPKLBii6ZCQQChY1lLbDsDG6tBRgZK1MKtTysx4/dUbPLu+QsmM4/0JSy5IoWBdVggXzaQvKygE5MKY5xk5FzAX1Q2i2eNqt/ne8D3pxvSww82LBXlNOB8DohtOdTe35CECIQ5j1fvuCAh0/w5prPJeWGvUZbagpsCaqpvtYPfdPMl0tMKhk/YnFEYYIg6HSQmghgGRDOIaA9IwIKSIIVg/xxgNebDi9RevcX8/a0bL9opHfD0YXYoyPEbf8U15tQ0aghIwiSMcEsYXzyAAlnlBGlId/5gIg5GlcKwmper4FLWthmhtWymKfNJJcyNe9xXgel7q3y/tuWrEwiGi+m6ozp/J/oVZUI1we9xFrl0/EzpjVxigiFaJprqnZ0i0bXNxqXSEi58RYE3MzaJ7Cpp1cUnn0NZnbkat68tm0wlg8hq1BQqAi6907+tngP3dnqXohG++njK5yII6m6d0dsUjJ66zqapuMI3WHxQeBFLbLQKc61x45k1/pwP3IEFvN/r4qtnph5q0cXVWcr3CE+PejH3zO4F3ahYAmQhRqEMSPB5T/V4nC77GNTEDanW5dgN3si+d/xYo/Qa72K5vldMmolE/DYyqk8IUQBRbQR/MaCgGvRmon1u4VU4hIO5GCAjBDqfgBhkp/S+jebvb7/sktrUNQftjoY+K2ESnGLtIK5lt0QTNjbHqFVRj+XITuUdhAucGvkcIXchhEWjSuYhBM0NCxnYjBgTurkCC2NMxBtqOkboxuNdZF+aJxerWw50N/bFbiW0eqd6vOTaXN9WziYxNs93/Ub8M6v+sIIKmZMygFXj2iqqP4QpIqiXquQCDhMGdIX1IKVoYvi4ZUhg0ehatTVpl2rR3ilZAXUpnRMMcZmrmo+9j2IFONlday2aR2KCOWKCokWtX1CKb6E2Fk7oV263fozhItwQiXbTa3seVhxZzBxQBTucF5+NJacJDqPCsIqIZamg244P37nSsAFphTFtLV74C3edsZB5SCu7ujjidF9xc7XHz/Ar73Q4pJOziBBojrm8OuL07IYaAd14+w4cfvAvOBcu6orAWcJe1YM0FKUY1tFKyegmH69g8MaOELp/jGlkUhh1IWQR9Tf1s7LOd6pgT9ld7xCHh7s29rWHAAIWJFmhkdj8OdieVnZQSdgc1JvU5emc3Dq9uritVuGf8+mzxJhtW3AFtG2MYBhApmYBYrZEbQ4+2Ul0fNZx73QRRch7xViGdCDELIEVrg4wZU6P/Vq8FH6PZZW7EQHdtiC0LB9FMFaB7tmbpTF9zkeqkOYKKSB031xs+H5UPxOYppZY1FZRGluRlfoGQIvDhXyPkhz2++PiM3f6EwzsnzIsdoiCDJaPW+EkxU8VIktiZZ6H3ZVFCLYIYaQQUqttxCAgEwsFq86xEkLw5t2eFCa/LnwDl38cUXpo/2ltGuvZO2IAUTI/p/VmAaHPMLJBSlCTEtmgyx8PNn5JXuF5VB0gdhFI0Oy5FSTTAGghkEcx5xfLlLc6nGZGAdVk06CIZ8+mEsxGqqGIQzMcH5MI1wEaxZQxcvi8vCgElAx9++Bq/+7uf48svd/j0V8+QAvDJL6/qcaJipvN9c33CO+/fIQiQc8DHHz9DMgr9weCmIg1uGohQ1oz5eAazIPn5CyM1c3n1XSAAzGGbxkGDMqIkaC+e3yhMMCbElLSvbIhGUKNny7LMWE4zzqcFp+MDci6IQ1J
69DW7EBsrru1jFiSvhyrsxxhyXlFKqS1GPHtZxAJq/p4sWOZFHe41Q8ahwVihaBWFew8aRBDNsgS737oWpYt/dJ4b7L00p2fj3FQHhSB9mLw3oi/tDvhZuc1Q+972cxTwIIeNqY9WQfUXS6lBk4CW0BBsPtrsLNc/aC6pv2uwd4tBWxTqkbudi6eN/Et7b3tFg4TOPGtGfTMX/RebzeMQ/ctn6VReBpjbOxK0DrXvO9nu3en6Ry9h9o6beXUlZeOI9JaVK2S3ktRM6ewXr78rm0c8GoCaDxeZznpmXc5O9wzqPvvkGpFBc7UGPhdGiLJ59lNrKVBUD4VtYLyasAaV5IsbNLn1z5G1JvkG4eiub5XTtimWR7fIxsbGzBeTp5Tm7JE406NCiokeplGdKY9EaDWwbTCtQ8u3D9soVT+cbg5TTBivBtUJ0ZLnVpXrkTyNNjtBQoMawYgoWKQpMi9WNONRrIYhGDuUU/xTcOEHtJOQc4W1DVNEgBA1g/CkeD1x2SFQU+UbQ6B3yAB3FB2z68q2fd6iWtLuU0eyvS2qMWcP6X3ZEEKF0rgxUueBmkLd7EH7d2V8hH5uGLRVQl8fyNIIS9xf7UlaQncP/9Odsto3pn68KQCxwZClzbiY0keDevh7t8nVv0vHd1uheTbIh+MJb27vcHP9DIuT7Ihn8LaKL1BHdvJ1DpuP1h3qztCvIzKZi86+xQKipRp9wfCcua4RzKAp+J0fvAbwDmIKWEvGeZ4RQ8I+7UBB2c1SVKcoi2etuR4iec24vb3HeV6wP0y4ubnG1W6PKwzVLyECxmnE1bU2Q3bWUnVg1InJXHC8P3YRU43gezAIUQlmUo289vNf1XibHRfSejfPnAaMY0JKEbdQ1ra1LIi7CVOHDvAvX13PGNIJv/pkBwqjsnhezL9AdY2uo5FO+BjqWN2h8YMB3ZqK0nhbtkub7xoExWjoxWvFsuqjMFxyvdlnQ8QGauIESHZ5go+XVQkDuI23sJpmXjflYR0BQEL6aq5LzOGKMdQecuzELcZiGqOn1brhODmReSnbfkMGK0eoRASQliR2sgHPSIYx4+p6xjkfkYYHX3g7T1pQxs8aZlTSG/Ji/FocowZMLp5ZMN0gqL0sq/agVpMUjJpdiRBN5xnpz5E/xi6+Xfe+vmMdZpOzOkVqbCuiQOHiEqyZNZmsi5YcOJGHGONjTM04BQTLumJZNUOeqv4G5sz43ve+wuHqjB/93x/gdDzjMEXEIWqN22GH/X5XnQ/XTcuasczOzuiQrOZo9NBk5ZkRvPrghOcv7vD++3cgAl69WvHuu79GKYLnVy/wkx+/ZQEtzdBPuzN+93c/w3uvBGsuOD1kXI87TCnhZz97C1Q+goQtYoC4oBxPWB+O6rD6GtmVBjtPTBRDJExTwnTY4+rmGtPhAFpX7A973Lx43sHklS6/lILjecb54Yg8rzjPswadrOZ5GCNevHiGdcm4u71DXlcUEWNVNa0TLKthZENBgHVZ1ZlirnZGII3nKLmN1YN5ptfOxCiCaRqrXUUi4CVDPNBptZEkQF5X5DVrywVn4nOZgwZnmBnrvKizC8+yXECdLw8lChDJlRTt4pfdZt1C21k6NE7nWqmdohEZss1RitYCEnkHNd2VZIRi/SI7y2E9waWJSD+6QIQUIlbkJy0uDfw9/g1d/GnUK9XJcXvma2vS6i01SLOKGItpyyL3l0gL/tTvOyN3d95txmif9715eb/tz7bB+gqJvxwJl2r3qK1O2BQ5Mjo28b7edTtfl7f1uerHUN+z+7vWI2oGttpHF06SnnfaAqbaUY+VbPsZC9ZlQYiX7XN0zbyucRNwsNd+DF992sF+6vp2OW3oJ5vgRA4UdFNSb2hHTatz13vDf982OwxaoAYnRVIWNDtESs6PBPPRxHVSo48RxNp4L8C315ACBjFHU1wetwvBYEjRw3RdlYkthVYE3BwTqkaGKkirm7LIxVoySlblGKM1+ObWz6nWBT4hBBvZAywl3TI3lZSgq0HzM7fzFgFRnR47auXNc3qHpi5Gu6cGSJ7YEN2wm7FKdVxPO6X6cz+M1HLZVPz5U1sk7YJBqW5S37B2V7I2EMUi6nADx5W5Pw5UqZP79Dh8TLJ9lBpkVKng9Tldxq8wcmbMxxn7cdl+GUDzQvzHVOf8N15m5JMzIdo+ab9XrII4AYhHNYPNdGFlpyoMDgr5LCXgD//wPfzd/0SNwmVdcfurX2O8ucGQRqs342ogavYFSIGQuyxFLgCfZqzLitNxxu4w4YP33sFbRVRJQvDm7oR5zTiMI2JIWMngls4aKEAcBzALHo4nCBHGSqRBVsOqDJOIBsEkMhY+gmc8SYBCQR12Nzr9QCI1jooAQzDImCnqtTDi8Qixniueab46POCwP4FlZ7TyFskXg2bY5wJpT7LL1i49dLNb+UfLS4A5jMFqS3WzcsmQsjYxSvr8mgUL2z2mEf4GNaFAraCfYEYQIKIQtzQOKrelgDs94XNHFrRSRJQYQYr1wDISCIfCcWHbUWwkI+ryMbznmBrPoPYYEm1WzEWqwS2kcE22FhVOVud1qNqnUPBw9wYhJCCtWAojrNqHK7OAVjYmYmV3DUQKTRQjZ0Dbe+RENRTtfJKaSQ9atujbT+e4m3uBtIbAYJQC08uMLHc4lr/cGDGqe+phU2UhF0YuOv5oY9AzSTqj3nQPqfPrJteldHER3M9HkBGCIWodZhEBl4y3337Ay5f3+NEfvQIBmHPBNE1gYYzTDsM4grnUfmcg6jIXLkNuREOdS6coJGCeC+ZV8IObGb/1/YeqhIZhQAwB87zgt77/FSIKfvzjdwAhxFjwt37vU7x4ASTrNUYs+N5375EC4S9+eQUaU6XOp6BTGFKwNjhbY1CgJRBTUhnFfsThcI3dOGAYB4SUcHV9jd2QUHLGznqr6j6MeLh7gy9vH1BOZ+RStJTCHCiCBRyJMO72ePWdDzHFhB/96MdY17XLBFKth1Y/SOePiUA5azaaUPVyDAHZapH6S7pUr8SAdVmA5Ps7VsEUa5BdbO3kBIPGl6oKxyFtAt4UA8bd0KDk1YjRP3X4AZeKzVEEPaGOvrIb/653++9sLQHZHMgqycWyisuqgf3dNIIogKmgkqS4leD2o5/t4iOx/U3bUTCr7tfzojkyT53AT9n97Zf+pfY2gbRuvq5FHVD7d0/sozrS5hb+Hts57edJCiz93r83dffs5rN77pOX23FuIzxhnzHFFjjoHaFucrTxOxq7sic6ykUdel34utBdRrGz2+p3GiqFLVjov6NutGI2eK699552BC6s2E68pVsvfy7MzqJHd7qUXQm9nHyzHfctc9q01iZAYWfTpEpxWZXeVsSwMU+nEFDDJh4WoWCFsKQRfUHtAyRcEFKqNKt1BNImrX+MJ6SkOnvR6KQTQKwGY9RDTgyuRQTDrTdDn5mx5rUaikMIoBiUNW8tGPcThmHQ4nyyaEdQ2EprIKsECFEYy3xCzqVlzkSjb42A4evdHHh0gEWZ/eoqwM/XjTBeOoLaJ6vNXfV7tk/ZaqtOMX
f/AwCkEODEgTV6QvSk4PeXqxW2WqWSNVIZT422vjnE9o/QcPhCRiiiL4XAmiLnUixW0hy50I0pdE5Pryj9vm7YbubCNrNH08chQVLnMAWCwvlEeySFgHGI4EK4q2QU27kgo2rWoTTY7JOXj7NTP/Vu/l4hWNTfMifSviwgUAxa1O4/Ltrj8KuvYP0LI/bjDtMH74DSYOyqgpwjfv/3vwPgBNhBmDtGrho1JYClILNgWVbkuWDcPdfs6Tjg7uGIjz/5HC9fXOHZzTWCRVAzCZLVHk7TiCgBYYg4r8UcJ0Immx8QwjIDYUU+qYyfTFmPU7LxaLYJnCtZhtaGtgxKMhkW3ewWGCKtCfIopBlEv/psj08+nTAMVDPDDV7idoLVOuFiD3R/3xZ1Sx0Pnvg8YI6MfS7bRCsYyprLBg2iiBmw3c21/pB1LyQipN1kRDICEINCAK8ZHAOGQfU1rZqFhEV5RQv9lP0WzWnxWgp3lovp4RgDhjSgSEEkzcoWAWLHnuvdop0Om8UNEo0iBwjgtYxFAxAkioYQsoADC8ZR16yUrNmErPVuagORZc+U2CEwIEWJNarWE9exghSTOu+FoaQyOs7K9CsBgTSrwcKI1jeQC2p/JrcD2p7Q+fHv2PaotrIHG2Js9a3CbKgLbGXL9/sTutRVsbVIr98qRUDOpiLmWIpnUgU//IP3UMrbADJiStjvRsQYsc7zk88RW3wy3ejkMgBAXDBOI/K8YIwBJQS8fnMLFgbnGSWzNbQF1nVFMfjimgf85Cfv1DcJMeLFy4CRAJy1ZdD+MGIOhOWhgBGVHr0/E4qytnEvYtB6thgiYorYPbvCi5cvkcahkWW4wzUOiOOEoQievXiOw5cvscxnnE73uL+9x/GrO5VFw0p56aMIIzlBTyC8eXOHh9tbO8t8rozoqqIKqM6Dks5obWQ/2z3LYwtMb0xFZZ4EIKzw33yazWwK1TljY4DdyIqNKw6psQ+bXJXMGMYRu13EUvT8aM2SzTDvbheDINckDNl50yCrXIoxyjou2hEGdtbZfGxMDNZm1afTWXumijJTV3tKLhBbW5+ovaP9jIyB2K/iPR2tBUdDALUbPFVbJnIB6iKgFUx092fGWrgFOi7W7fLzvj5e/lLnopfv7r1U16OPT+Mp+fimq9rI1r8xfD31Sb2b66/6uO53wV7rchSeUe6/Uh1ruBzoXTYOo/0iQCoZjjy10P5xEaxLrnO5qTeD1HOmltZczEVDdPm+Rg2Buc6zv0DckexmgCKD2BqqP0F61F/fMqdNr8rUEyK86IaLwhrFTzRpVP+Xl9TwK4MQaxYrs2CEwSwZT3jT7Q5yMXHCAgFjzbk+X8Sa+JK2k+4XT+EuwQ5+UhgXa4bteHfUaFgMWE6qSFwRreuKmKJm0EQhN8M4Yp1nCAuGKSEmjV5O04gYB8RxQOYMWANc8aLZ7h208LcZit7rsNYweZbCp27jgHksVP/e76KIbaZtazxeKo12B988jnsWwMHi+ls/2E0Wvt5ps8wUVLkG0Z5Eu2kEjYMVwguc0tqhbp5ZFGNG0+ClGpqqiBjn4z0kJsRY82jWa09fRZQ/vc2Nv+/lUOu8myxJqxVgLpWSPQyXc2WGRVBmpY8/+dSc7a73k81Ndb82Gm07FP2xHTBfQ1JEpJT1u90O83xGLmu7H4uy4psD4HVIXhsyl4z72zuEmOrzwzoDtCCEgI8/PaDkBiN4KhLoQQYuAuICCYRlVTr2NCQ90FkV7O3rO+yGEdeHgxoz1ZkiDIhgEowpIUVtEJ2LIBo0MpDCdTYF7FDIEOx9RARrXgBSGmCiiCHF5iiUgtUgmdHaRMSodP8hAGQGd7T+UAztSTUN2jj6MvhUs90Xa9KKwjeraT/b0Ag9+qzPcXeOa7YnmCHXGyrhQv6EtfYnFJCwOjgUcb9mQJyJkIAYtK63OhCx3qvKXGj6ZfNuQEdZbqncvjEPWVa2eNRVjViBBgK8d58H6pyq3k/EnLs5jTq+YvqWC2OeBcPgfcOokXYENXajzZFYJoNgxE2iRB6+/4g00OBy0wAcBtGEGt1e/yAiKIXqzzeiIFy3v/ebmsIz3KTvYc5/ZDVvxxIhRQAAIABJREFUqJl6n8geJuRqiAi1VYHoa6m+t7VTRKcjO3ReG6TKbux7AtoyQ8oKATCOCXkFmAlXV9pLzKe+iGz2kut7tnOQCMqkmAUZUftrEWMMBCoF9+cZd5mwZsGHH57x2z94g4CAvGatsbJnjWPEl59fm2GmZwWTWA1cBrjVyIch4Y//7AYffz7gxfsaiPH96sESigkyJAwhYBwHjIc9Ds+ucX3Y2x5P0Mw1ILDa2TCC4oCcV8zzGZ/85Sf4i1/+TFuTMFtmV/dhEVgbDMtyF0ExBAKzOaPGag2QwfKp6XqbT22+jsqUKuYQSWmna6s76zbExTXuJlzdXOF8PGvdIjQYDFAlXnF5aKgX/VlecrWhiIAUdR4pBlBKkDx3Mv3Y2fi6q7BgLauuTUqAtW12m7sa9q5/ukuEtUl5ychrqfqw1o2jtQLQEo3mVPaOwGXMs4cRBluvcUjWu5TrueWj6VkF+vs5WyrX/cuajDBHUlhb2jhbY39mA1pnuZYnwHU1EEKbtXJ/AfW8qSeFIhZlq5OJvA1M60NX7+/fY65s0iJAkEbCp6NtOSyBIEhRyKFs7yV2ZjfaW0CYLg6JruzJf9IHLdECAVtbx0iWNkzEF5+5+EvwIJqgsZ5WG0W6iayT0sZT19ic98627nXo0xeB83a+v+n6ljhthtG+MFeYlehARVbaTiUDpPik2Y/dy6gCUxgU2MgcoA3zuBgznW6+yqombdP1guyTzlAFKVYg7IrXP6sMVMYgF0khEKUgr+q952WpB7o7nD1Uwe+hAVLbtCbM3jQ8iGA9MdaY1SEEsNsZAUAulUjA6cE1EqUvpO/bV/n45QLlktsJz6V8kteitSgBBUKIAsmEbhvUddluL4JHWeuS2Rxrvd/2eR5Jq/ZbB0NoGOKtxhWz8sZpxLDb414eUJZcN9JmND4Qqyd0peFY/DgMoJSQT6U+xg/PzbuaEUbQurxhHOphSmTRUO8zA1jtCCpBwcYBCwQq6lSTk5oIIZux46j8WjgeYjVg2rw+Xos2VjFDb7vfLhUeM2utxLJWuCfIawAv+oRZTcTta8Gf/vGCd957reyKUKKDzz/f4+H4Aukne/zOmxNkVYKEUhh/fEUmV4473+49sUikvnO/gIIlMx5OZ4z7CTtKFvHr30SdtxICBhIsoUCmhLMYq11IkJJrsKU+x+4PkDqg0orSSzFDQLTW9rxkzGtGCFQP8pAihpQQLWDDNXIqGHeDZTGNtKjuP+rkSuryeX2Kv08tPBfRe1BUp743kL2m48IhNYmBU8+FtIX6bk/wpnuc2piCtkYJnbOtSeKgtYpGdUwE8DKC14TmgJE5Bb632+t71sKSzPp0J14Q3yN6/HqvSicUYVYCF6eyL74XSacn2hyCCGKGUIhKHELiBjCQBv17ACpDo8PS3V7z2jUOLTou0nHBmEHje1nVe9PzWvfXw
cAINbvYxNrJD/roLW1q9aq+YbGxBiuIb02Si9PrA81ElaZXYd8Tg096gEyZC/sxNqi+8hARQhwQLKN8gkIV3331CmVdcbx7g2VdzWF0J03lW/eP9pRkBIAShl2CLAuGIKBlwfHujPOScZ+B2cp4h4GwmyJIlK3yXDSqTsL42Z/f4M9++o6O3+C95/OKn/50h7/2O2ckg28vWfDrXwOf/XpCAWFeC8ZQMO4m7HY7kGg2+abs8N7772KaJhwOB0y7HShEMBesyxllmRECYcmC83xGPs/IhXF8+Tbee/8VhBmvv3iN11981RniUh0xzQpprzaAIBxqIGqZZ9y/eaPOJvR8cwp3h077ZvVazMxeG+x156j1TU6o5GevG/M9UUgaFAkR/f4ENMZsAiEAQfV9GgZ1iguDlxWAtw0ikztgmpRIZTmerObVWRe/2SDtbbn5dEYpjDQmTBNQJsZArh+pGseNnE7/X4lkrN2K3pHs3a3Ozv/d7zkfg+BRkoOs6YjXstbvmD1EhC2DoD3xqbyTT/E6LyavUZEJJCjrqnuDNeBBBGRjzG3Gv9oBvf0fvazC7AWqjep11rX3H1V96lB9Rxr4vGs9fKo9ADVxEGowIKVU5785F41Dobl023mqM92XIbmxbY6Qw+KridiOv/ad7d3qvkIIj2pPN9/8RrHr3Uu/r9og7I1uth4tOmnp9DMMpdJQOE6IVJ3ui0jAZo465++v4rh9K5w2gsEWnZvD3VZyoW0RvDYRaBGFarCg/ikiyMcTiM7VgWHhdoCFAO4hJag6y+7RQSbbqVyjqGLU1pCuwzyzwQWD9pgquRtTO6i9+aRuwnY42iehvVOiwor0xvocH1ApWmBcNJJHhs2P5CxmrXeVGva9ce6WfXOItL4mtpe3jdPseqpz2pwfM7C9RoWaMq2LCtSIYqWuk/bLTeAHm6/VOVGGwy38of2uWptWZ+GGh0WMQaieYGcA6T87ZUcAvBmpaEYFANIwIg0JR4tCJutlVN/HRuyGfg/r6F/I1Vr9vDupMCXBYlanO2/qYKchYJpGTLsRgNWD9fUAxnSoB7bUejGPkH7d9lc5r/nN+hbiY2MNQBRz5nulFkzRZhEkl31zYOZzwI/+r5c43DwDC/C9j76L+XzCL37+Bj/4csRHxyPKbDVVUGKYayH88AYVnurrUQ8T2s4vgNqstxStb0spID57tqmvJFDNAXt96ZAI0z5iSgPOyxm3p8XgnIMZ9JrRI3NMWhChKWOVHctUSxfphqBk4GRZ/eBF/FHh3iko/XioghyqzqmOZnXkYE5sd0SYvAe33QAIQtOV9XPAmrNCuqoz3+9/uycDktm9rnpVA74GNNzpAWQtlXXN21gUM0gDKbQsGiNgnhNCHoCk0fZ3vlzx2//2pHVVpI5PChF/8oMRr9/Sfm/KkNccaKJuPFGhV1S83smDK0Dn1SKIGp3uzAUBKCkZFbuBy7aePVJD08XWVkTrPsjgoh48U/ZXsZYWTrsu1dmthCJWmO6VddLRdlaEkBic2wJEvRMIas66rrVglXsc+dP6ngJ1UK1wTWWFUPdKIGW2iykgpdQV4pM6nyIoJSMEhYLHEDsIsGij6ZQQ84pB9hhSREpGCmUkMZ6ZoTg1fZsGaxq/gIgwW5/CabfTmlQzTk9LAcuKMRH2YwFRwXzKKEsxOKyeaSl5QIcRisJlD6MatqUwfvmLazCrI6TtRID1nPGnP0n47kcLDvsdKBC++vyI/+N/f4U3bwY47JnI2Dxt+8Qh4cXLdyBhxLJklHXG8f4WQhE5FyzLjHWeUbISf+RcUIrC5FIacH1zA+aCX/zs5zgdT5p1TpoRLsxV32sD3+Y8icFOxWtqQDgMCcciWLzxtDSjm8wWKLVmrdlILs+My0ybPwvgdoSrFrGzUJETLXA2poR1XVWKBarPSAOQ1a5hq5kFNPgRowYySjbD1+VxawBfZnGa8AOSs7aDMCgjH3bKBCxS7TLNUHu5QruPmFKtAV1rZzAMCTGqjVNr30WaDdJMiUcX+dCaMt58BWhtIaT/zuNbKeIKgIAQImn/QJvHdSnVyK/kWpu79qUKaHJh9pkiO1w90sYWaVm7fnD63YoyMBQKBapoCTLH3CH021mxUXmvtN7+u5xDCooyqLoUF/drCbeq16uthnr+9nbf1mauhtbmvnZU+KewBbH2n1N9O6aIUhiRrEDBN8OT3/FTHE1IiDbPJN9omzN6a339VZ01v74VTptuQDNkRbtrHE8nfP7rz/HhRx9CgqbIRQqEqa2nWJ1DUHgFBctIQTf3mgviBeEEQ6NTkcgKH/XwrVhnc9uZRVmdeu9YpMOKKwsWlSf8/Kw1CoCRdcRoiq9hqZNYBDdo9ASAMUfaZglO+Q14LZ8XfJaijTVZoL2oYM4KO1627UmNJOu4A7jlkl3Aq3nfftZnpZoJuVW0Zru3LWoOotTPmSJB3NyvMtdd4NAJqEZmvR8BHqMLG8d963BwN1ixnkWn01nHUrj73sYvtS938iFKWuNkAoXdQe/G2U2uR00deKGHlRaag1rNmzc030QbbWfvpklnpXA93Ji0ZmGaJuwPe+wPe1CwSE5xBRhqJg51BO2//up1I7mR64q7I+hwhyKbTMc0QGTutSkAApeCaNF8VVwKNQsp4nhKuD+PEBG89+4rPNy9watf3eLDNyfMQM1yuxL+4Aj8/Znwp3vCn++o6kh3sEPU3OKnv/oM3//eR+jlVKDMaZ999hqv39zj5nqPt54/xxSTsebVt0YmRmRCIcFhmDCMCfsD43w+4+E0Y2VGSslw7cWYCbmRNkBJkUKMZiDpKU7Uw8kMGieeGWHQIkgp4Oqt5x0dd/FZg1cRmYsKSGkHthso/soidV+7gZqS1oL2BxgA0x8CP+h1L3aWR2r3DI8OZHQODoNW1qyOCMQalHt7CQ12Rs1KC1SHuuMlAUMW/Ef/6xukAqSFNfJOJm8h4G++XvA//51nOB9athAQ5CwVMRA6veCHfjBjR6EsQEikxB3UHfI6nQrrJVL6c5PBISWM46iBtg5u1Eh9jZ1CBAxtb8HY0k0Hotp8WVxwRQDviWn1S2y942qU2xfUN2sfIESDOfZX4YIsx/odZ9sjEkSKeOu7b3D/8Uvt4UWE/Tjg778hTBKBlbCuBf/jTcACQQna68uNB2J1bIP1DEMaUEg19+H6GW7GHQKAN/MJp9sHsLXJWTMjrwyZ7/Hpp4zDfsR8WiCUwEwIGUBROOO4U2O/FMGyMNbzDEojCghxp3o0DgNKFqRScBUKJEXkzPjLXxL+6GbAf/DvrZbR1bM7CJDPM06nqAEED2KCcPtmwv/w332I/W5CEcF6XnB7LxBRopCbw4g1S4NVi+qwh9MZX37+GXIWY2VcNKDja3M6q14o5oTFWLNdQEGKAQ/394rKsbO6C/coUYjCdlB8n8FsAzFhJoWoS4D2cSsMloJEAQgEEnXcSmfYV+PQnhZDQAlAXh1q6YZ7+3wIAQ/HI3aHfbUbmqFMiMNgNa1aX7wez0hDssySIOfSWiHZ/fNaQB4oKFu57s+ljS1x
YbD6q2gddGtSXJ0HQ6hQVfHNRWp7X1ESaUwYUzLHdPuoFnzbwsfbYG3emCq7d/9ClSwWGjuphjq28MjtyxEQtJ9nP5gQAoR1XzpBTn2UtGzeZX2hD8bPnerUijvWWhKgP7N3eOIWyollDLZO8GR6Wg0rwCut2/nns6/ts4ge37uGfB2tQdt6XPFxMarDuYlL955Y53zpnaX+a/PM4GP0NaJN4qJ5cb1hF5CGhCELhGJD6vd6vf9uP3f+f/vd5Vl6efWEL35LMrtT+BLE+fj6VjhtgBcpCjy8zSyqVFlrW2IkcAmQKG39TMdpDdNO677gRrxi2IMVdHoz4cKMEYwsgrWPZNth6sZJydqbKw0JFG0FmbHOipduE94krN9Q7oBx0YicMnW1Pl8esQ4ApmpAw5SRMr2VXCCk2Z2U3LgWq8soRoNtjkJR1RZiQEhGrmLpMt84Wq9BJslQw7ZG8MxheEo2qzC2Wodt/iUBVOA0rq4c9LW6+hRpDiFdeBdOlND8R2rwB1euHuG63BSi6+qOJIs2D/W0e1/LZ3+pP5OqFgTk7h8TpBQsS9bMwcXj3FjcOK02PwHKImgv+xsvb1mhDW5D/Q4XxrJmzEtGLhmFBdMwYF4WSAwWPOjhhF0E6XJypf6v13327z6jZDYhM7hk8LpsiGYEDUkqzNakHlrDYAcH3MgmhVgkIUxLqSxmzWiQ2qh4B+CvHwPuBuDXA20+x0Uw7ZISHdp7imVIxCKvMQasc8aXyz2WpeDZzQFDCIjjBKfbJmasBuJfiTCQRl6vrq6w3+9wOs2Yl4w5eE2SGKwpo0AdE20fAjCv8LqhSG1fK2uryvCQAoZk9W1xsCivEi+wReeJAO4ajurECg7zgpR2IDSW2Uu5y7kgL2fQ/oA0DvB6TGalYpfAGBC6w8ozqd3qZ1HyJBfiPiPvcx0TAEJeFjAII8QgKcacaMbB+XxSx5akOranz67xn/3ZJ9gvgrKsyNJtGlFjIgF4ds/qtNWJcFISqS0AHh2a0gxuCYScXSMRulgGYBk6lpYd8J0P60nFWecmDoM6eJYlFS7Ia0HubBECah0N234PIoo0QINCazpDHXMRVKKQOkry/dhZetKWQFxX2j+oa1vg89fDd9JOHeedAH/zHvheiKAEBNGekUkE/+CLjC+D4Ic7xm2KWO0cIWZMMSINA+acrf5aMO0OiGPAcv8GFALOX97ii9dv1GGJEcNuZ/Bh4OH+hBgThJSMhUKCIxd0PxWQFJRlwTwX5AxEWbBixO0D8OwKSAeClIT5mJEADMiQIMg54M1rwjwzaEfG6glMZsTnRUsWyAJPKSWEGDBOE0JMiCFiOd+D5QFjGvD2ey/x8Scf491XH5oDwFY+ITje3+PzX32pwYUYqzsgpEy3+5c3EMwombT+Zs0YJOL9999HjAOQDSpdWTE72QlaM5sgKDF2jrPaPRWsJcDdXBR2G6MGFliz027sEzTw6/EGtyuHadQ2CxDM58WcNkLvsLXtJBbooArH9S0ah1TPSaIEIENgsLriMmzZdoW4IATCaV4wyljrNKk7YfvLz+Wnofyk9emxg18aA6FDHAy42WWQCAXq1KYhIQzRmEMHv6NBNe35NRCp/9UYnw+lHxJZRn0zxBbk7K+a+ZMWlqvvbP8TC3ZpPZvqU0ffaPZ6a1i0x7Qymn6exhitVRJpjXxxeeImg1V5tPMghog4pOboESE0RvyWESKz97pg/HbyAKDV9V1evTVo07n9nNm+1barw2x2jMI+lazJZb3xB5DNjRtPenYwTFxsjLIZr95ouydabe8YIzZp3GZoPfl+/vvex+t9vUff6X8hqLwGZKzd33R9a5w2P6xBUFhK1Oji6f5UDzttlNwtuQkJM+M73/0u/uTHb/xmuoAhIh72Gi02oU05Q+YVIefq9Tv2GtkLxVsat3ghs2WoFJ5vG8I2Jjx6hs6INiWpGbenFGaTgRACvIm0Rq41pS8ikLKocVKCRs/F4RAqhDmXpgBDIwAgI9MQUsdAB6v1L4YjtME0AxiARtADodFobaNQfVap1bUIOlJ6eKbNN1JbMWkO02V0TajVWnW/8jtVGKAdBJvvktT5IxAiAdMwIA1JiWNqNk1QFZ/PP2nU0kksIAr0CaS9roZImGNSo6MNBlQjfZ2hC6q1B+TKwecVW2dJjTmyTIJDVanOts9bIL1XRMB7r17hlz//OWq9EnWKysZVn+9r2kURO5+1KRZtkGIEKzrWnAuOD0fknJW0pwqAva05TUWU1UycIc+gbBINrhMC3joXvLwvtWdNLzfF5Hgkr//SxwwxoJBCipgL5vMMPhy2is7miFkUnhHUkVzmBXk3QaQgZT+MALBS/DsMlMakxh0RKAjGMSkDXAg4ns44n2d1jNur65i9po2V5p4isNp7pxBAMWIaIw6HvVJiEyFa37Uigrv7E073D7X2iU0OXFxUTiKGYQ8i4Nefv0bdg9VYd6MHSGnAbprqnhMwHo4z7u4f8PzqgGm/Q/SoI8TaC9gLDbYvC4MKICloD8pumuMYkVcBrwrlXg3eikBa7yhSD5wUA9KYNLMPrbHVABRjMYq4/gxk0XYJf+OHt/jv/+MX9UjfZAQ94OLjrnLkUGK9fx9YKsUyJ5AGR0R3gpI1N54XxBghMWqArv0a4v02RfvNsenwdkCHqtfYxtcTeuhgHEFhRiisho18UL4htR2GB1QY5rwJVwIGN2aEL2BO3m8JAVNh/PVzwqtMOJUZw0jgVVvEKNQw4HkR/N2Hgv/tsOLnoaAIYb8fqh4JIsjram0eCjgMABOWZUHm4vE+RBg0SARrZiAohDCao1PX2JwihRzpz4eBUIo7HCtWIby50yb0gbXxtxRGFF+/gDe3e5zPZww7DeQMAYjCeP/9e3zx+gYQhb4FIhyur5TEIgaEFIztVNdiyQVfvr7DOE149z1ARGGe0fSHrrs6OpqJcSdBETqvX9832YSSRux3e5Q1g5M6FlwPFzdVldGyErXYmVO7ropmdcl6WIKV/VCDqL3h2uDi9axxUTDx5sKYFw0EeUYUwJPZ9BAI4zSqvREIaRgN7eSBFw0+EzOGkNQhsDNFSUIGjGZOiO2HGB0dQVoLW0fbLj+b3GZBye0liEApYowBwxgRjX24vmO9h+rxYvvFyblijIiD2k8UYoVrb17ebU27z4WP1P2D2j+l+6r9s3SBIP+0f9b38eYSdSXIWsE4OYre13o/siCEpx1sJd4LFqjUeriYNBBPpIiuklfwsmLJjEjR5gZgMr1YZUZtG4IGnBztoTpVTK9oHamT6G2dtSaX4msKuDLs1sktxu1sUDeXXmYEQc2MAQbx7mTfTcOtw0PN4e48brXNqatTdAuUutZQPhLAg2BKOGj7r4qJVNncLmd3mPmfRBcy0eUF5ethkAFqc/fHwtdd3wqnzTNEvSAQBIEEOWdoc8lQgcNSax9U0ZZLpkf7M0KMOSpY2YAonMFglFkuvmOCKY7/t80iRer3uTYJ1EtRUF4v0b+UOREujxcKE2g/K4VrKl376ABIevh5poO5VFRb3TTixcb
NkNG5kToEN9fABWEYkFLAtvbd7lUKaIj90Pv/NdlGQwW7kdE9pTe37F6hTkKvoHqjq16t/OzxnJFtZnOw+siHoGHUfYzcFb7WQm1p9+5fqje0vMQhwrMlEUMioISNs9jYpPwQ1X/HQJAY9fnuHV1c/TGWM7csI1qknYjw/PkzvPXyLaRhwLIWM96k/ge0wy9QU5Bw1+TCiKzGqzl7ZPiOYEeJzxVDqfZbf6e6cPrdEGqvNnHyOlZ8w0BU577KkV2Ttb9Y1hVrpYpWeDBxwO88CL54FszA5crgVHLBsqxoWQqqzLG19pRgGaCAnDOQolbZsAAxeJxY15aNGGhZIaHB7yhoduzZ9QFDijifF8ync609atFZ0ahtCBWqHEIAi0bsSybc3R6rXDx7doWr62tt3rvMOB7PT+gD3TlEDjtVfbIuK0oxECWhyrHvH4cPBbVYARaIBJS14OHhWI2gXdJG2xwIkIiSc4U1KZJAny9Z1Ck3QQwWCHAYVy5GCEKxwvgKASFFhKTHidJgO8EC2/J4ncbFG/c9jvzdAKuVDZuAUn+5CuudOSJyjhUQxDIA9lyD/IYUWusAc5YpQIMOBCy3B3CO2L99b6XEgg6AXR1m/Xg1u+tYmjDC9plCjoagTb61UwGBixpg7N66f40sg8DuDPs+7+ZNzBE0pIStHA6r4MNZwGCEELEujLwsyjYnQAgjCIQUAvaJsBfGMRB2hx3G3VT1aLZm6TkzzssdeF1r5h8pIXCDwI5DwJQGMEVlEDXYdD9Y5oKSMzgQ1nXFWowkRYC8MpiDIk94wUCEGRqIda0VwHj13i32+xN2cQc2ZwIM/Du//QV+/CfPOuIaYJ7PVW4CKSmYknuojM3n2YIGDMqC9XhC3I1VB9cMZg2o+cu4+YV6QBERVs64vbvDu4c9yMnNzFokm6eYkuojUgbOkCLAwVoNuMR6zY3Y3lMGy5MoodnusMM0aTsgEeB4d4fzsm7GFs3ZEQFyafuKCBuYsYkn9rsJkUibrIPgdB1FtPYOQRETZEgnZ71GYcRpAElRmKqdaMOQEAmYa62v6Srfp/7vuq+7M9X+C4YgCgYD5VKQSftf5lJ0L4vWhuXCWM6z6l1mdTZjsCypnkVuuG9O4s5eaDMu3e/agNxuF5szZjszWaoudgesicrG0Kg/unBfq31Q3OYEmnPcXWIPJyLsxr0ShSRDJ5DCboWVXVLGAVLmqg9BwNLiWgDMwWNGIgYobsZOjtrwNG512EzTSINrAupYKRh/+3aXl+r2PgmgyrfiDy7svTaep+1nuyt6QHlDIDUbuN7SbcftSjW7BqxnQe5aWZkcNN2ATm66AX7N+5KgBowEGhS7fBUKVOv5H43ties3Om1E9BGAfw7gld3tvxaRf0ZELwH8twC+D+DnAP5zEXlN+nb/DMA/AHAE8I9E5Ie/6Tle9+OvXw9lw/pq7w1Pb9hhZU7LJX15JWtYM9bsHet1MrQR9bY2QUQ9a/K0KqNJCbNSCLP/kx/Nac8CeTF3CqnKT6Kbn7yYBTHqBu43bo1YBKrR46oQXQEa8xfxVjHEGIGobEDjbgAhG047W98pAJDaWNQe2O2UjasI1OPFjVivYWnjrdGtzpnwWXqaWCRo9Vu/yWBKzuRY7IdkI+i+3Ir6oZGh4/GE86oMVzFcCggqbhvQiHGxMfs7sgDn0xlCyva0rhmRBu1JKbACbGpKvc6LYvwVX96mpM2d22ZWlxGC1rhwi8oSdD73hx12u52Rf7R53cAQ/biRNs+9vPv8uNLtS3Hru1anzxWrH/TOZtqtq/8rBsT+N6QwLA6kBj0F/MUvfo4Xr094NwYciPD8+Q0YwOv7B+RyriQT/k4vz4zfy4w/+GhST6CDVNb1la5BsL+3GBmPGWIP5wXJ5D0GQuBoA2dIVoIVGZKxVhA4s/bggjkRIzANEWUJWIBKFONNeIkIbPAnd0SYGWFQGOSaGUC2eQAk503T3keX1c7tDjvrASkVak3C2uTWpzno2sYUq+HZ79JAGuEWFpyXgmFdcb4/gW72yFl7gwm0l2GMLcPi2VOXGkc3MBPuHmYsa0FKscLIfM8H27MpJQzjACnZ2jqgspntdiNIBHPXBwfQgz9Caz+qYWUy6AamG3xFNju+yqhZUQBQ6w8FSsdPphfEo1TBZQ3YTSPGYUJeVmRoNm3+asL5yx1AhP3bOicMGGmE6Uc2J8p3qjkwVR9D4Zlwxy4oM18khcSPkxIqcNDP8lp8BnRu0RxbXxo/FwlAGgYMg8LP1GnQoFjKwN+4A+IUwDkj54zjeQExW9ZBz6EYA8IQIEF7i/r66b3U4Wcj5Dmf77Dm1bJPYrWuUIeN2YwRW2tY+xJfH9Hm24ULlvOM04On7tIlAAAgAElEQVQiZoojQ4LCAbUPGmFIGgRYi+A8Zzj0ub9CiojTWJkERYC4uDMmlVF5Oc91AlNK6rSx61wl3nDUnYhgLRmDDEjGxKcQcEEypdjqmNn6fklF7iCoI3P/cMRbBmVm0+U9oiTZ+xERkDMQAogLhhQx7faKthkG5HmpNe61TtKDGjFi2E1IMYEIWOYZdJzhpjBRUB1c2Mia2lpcRvhrQC1EEOkeg82fGpqacdwRsEaru40RQ4wYk9oTaVTzcc2MzDY3RroiOUNibOf2hS3Tr+kIgkElIBK24yPC6XTG0XqiVSZSy+BWWbIAWkoRY5yq7STcIZHMwapzUkfRgfcIVg8tzR0gsz39hCaBhMeOXiCqevpxpVV9+3qfzVxwCwo+Xbem3x2GAdM0mDOdFEVFuvZk8B8aR2DNCCZH+eJ2ZDaZmkB+zktbK+iZtTmthqGr66NHMlWDDXY+XepqdrCouM0kNYtdr7B1zho6sVlG7qT2tmknWXCYqT6zmoTN5pL2DpsJgZLnLPOCNWtPUoxh84xvckj7+/SXy1R7i+bM+TrofBCIGIEJ5Tc86a+SacsA/omI/JCIbgD8ARH9CwD/CMC/FJH/koj+KYB/CuC/APCfAvh37b+/BeC/sj+/4WXVaNdGeP62aqARgBSD9RmzMzqQZr/so5X+WRxWov95IbA3quZSsDyxIVyp6IHJzSCHb6BivZjdOXr07Ro1UgdKN6Yyqulmqj3m/krX5TPau6BY1CyGuugCQlnWasTFpCLWR5A4F8RDwIvra8jVVXVQtB5OD2knCSEbu9S6C5O7Krnki4YhAOMugjmCIuqm6edGzx3vV/fYiwlEGBIgQSGJIlKpuz1jBFMycAfVHfeqDPRnfhAN44RxPyGvq0Houk0nqDTggMfRm2EkFLSfTik4nZQ5kq0fi0ADAR41DbHNBbPYfBaFflm21A+ganwZvDHGiGVZUUruFJJ9TrSe6eF00sh1FmXs+4aojk6mHn41GlQjmv7q1AxDXwM3mH2tQBgCIR0OWJYzloWBJSvlN+khUN1FUiZJgcIaxd6z5ILz6YxlWTBOA54d9khRmZl2MUCmAQ/nxbJUQIgJ4z7h/SHi+TjgOM9a91EKxnHAd7/7HbBo5n1DwWfrRaYfAtToqo1jiRBYI4sxJOT1AcfTDD4TKC
YzRBymqsoUM4Eoo+RV220Q6RoUi6zCHAVjRg1E2F3t4QXPKbW5FRYUUoiRQ7a3h45urv1+RM7ZevS4rrBsUb/Oln3hXBDHAdRHrNTWsPeQCmv1rCWzsgQq+5saCRStjma1gMXghqvgeF51/kKw6Le9VDBD36BNMSUcpgQhwlqaHGmTdlJa86BtAYrVvIUUMe0npCHh/HDG3/nXd/hXf/tZFWaFPLYp8mhsq0+1WjLpIOkme+6YsTTSmwolK9bHcRwx7CYN4h0Jt5+8hSENYKxAIOyvXuJ8vsXqUDPA9qiNxGA5TFT3mI5DKtW/N0AGGUuwMMDaGiFFAsVoAT3viguD5rXDHABiYaSkhlkgQgqxBgwq1AzAM2P3W2atZVLo3ohxGrGuubI/AgDHCI6EsGbklXE8KRyYggY7NOgB8Lpi1fAznKVLYVUBKBmnh4IsEfsparsbK4qR5YjT+YyyrHaOq8KJMWi2xDJppIoRtQ9mPUdVT/q8/vSnL/HBd17jxVsJDosmAtY16b0vswUmR+rcsgV3g8lJje3XwJrXkRRzRkNKaD2r7DOFahP0QAp3pcKgZJmXons8l9LVuKojVEqBkGZ5RQS8aK+7uNvj8Ow5UoCyUp7O1QYKMUKYDYoYdS95X7fLc6AGeQW8rhcGpMBJs/ydHa2ga0OgISEyAGbkwihrwWI6aH+9x7Dfq20VmiFb9+eaEQujLAqnr80TO9XFcMRE1vPODFXVp9peBhZ00cbirjsIpaxasx/IHFbqyDJU5td1hZCe0cua9Zxasjpyg0LffTxWaeo3qEa9H4c+T81eaEiiqn/IZalJXY8ecpunZt/Jp4TVKbRZcbKlbLXtX3c5smQckuo+CkjDoP18zXlFtPKZEJCmEfDghSiTbu5sgiDKkuz/ARrs9mAI4QKiybZrpH9vG1uVNanPq3aayYnawl0QptpvbXoEMFh7J2bdLQEgVAb49sVgY5WarDAYqd/CIzQAmAJiZy/WP5mRVw1MlSIYeuISG0A9Q+pZwta+p9m5j5aQTAdQAHc1eT5UEUFMBZwTak3db7h+o9MmIp8A+MT+fkdEfwzgOwD+IYC/Zx/7bwD8T1Cn7R8C+OeiK/aviegFEX1g93nyCkSYhog1Z+QlgzkixUFT3OSF8s3YJI+uUJ9s7Yq4obCpNQYM06gYa1ZlFnMxRaGOUV+30hS43sXvBrSatd7Dx4Vg95dniBRCphLoZAxfd10EEC5+154XjA1IDV51RmtvNqNf7ZV2ILJ+HuqIiTWBrbAvYYALCqFJUz+E3tvxGSZFq4YYcXM44Hq/19oFFqylqGIWd1So7kqyCNVlNDlEpZTmqytwKcbcWOBpVHWmqNaOENCi7y2kaGuvTu04jgCRMpX1LyJkinirEWqUTNQVHMcB++sDTucTbt+cNJI4a23bMEXMs8MASTNwAAiMZPuY84rlJABp9lSDV5pZi8KQ4nBbhbhx8aywOj2n04L1vGo7A27GoIpCWwsxOFq/XG54f5189leteOpkL6aIaZoAAUqZK7xFBMa2KuAQQCIYTEEzSGG4ISBzwenENSqdhcGZAbZ6iagNqVWJhVqEHQT4e58Cnw7AvxkJ91X4WpajtgARoHF2G5zODrfqKdv3M+teRFSmWYggs/ZwzAhKJ88FUjLmOSM52YvOsDlBoYNheSZFaZJXgyn5FEZNo+qhakEcgjQ5z56V0MJ5jZCrfHjgpFkZmqnRwIVFfEMACbBmDRKkoNldCgGHw0HJWdZiwTCysVGVb7hR6bM7kEIjTTcWC2GoDaABBs4F5BmDShCgztp5WZV90FnEYGywar0gxoQUrLZnGLHfj5h2O4QQcZ8mnJc7HF6vuH2WjOreivV1FNXIc/9GAMuOd3IsDS3gLL9iskyA9Q4kjLsJCAllGbA8jLj9+ACKAQ7oCkg4f/YWZH+y+WvwoFAdQG8Xo8GxAELhYu1TLEuZVcaJnBk3aAJZBFE0y9TXIDNzvV+lPuh9+6Cvv4uEFdEcAXPaImEYUm3MG5MSSQwxIpmBR7ZH427E83eucZoCZqv5HcwYHKe9NbFXPfNw+xXmlZFLxrIs4PPZltTg+6VgQAbYW9kISFaUvGCMAbKfAOgZlUsBZzbmY91bg5HorLbWec3Yqiyq5+Lnnw1474OMIaiBe/tmxL/6X95HCLEzdtq+ISLbf+jOR7UNDvsrNUStRvX+9lZ1ge1j/2ytlbZUcARhFQHAKKLG9PVuh5dvvVRj02QiDBMOhwnTkJAL4+7NHTIKKDSSDjVmtO4rpIRlWbGsxYINlv0SC+cJawPs7E2LBGVZ9Oz2M8FnwGSUgs+dvjNzOyNEUMtMgsPaI4AQEczGCCEgHXYYrw9a/1uZYYHazByEyeqs1pP22CMLmDG7bgYgeg7f3z3UVhGuiygotDYGsT5lWpsb7IwuResmhRlnrPAszziNqktFsJ5nnOcVwtrfzUna1ChO1W4ELAPjdlK1lUTbg3RnRj07u7n1n4VWLAvPxPaBYf9qJSzbmGQGDc4C5hkBjW38m64YA+KQ9D3MnqPgGUe299S9EIcBZc2QUiqo6NKpiLA5YdWXVSaxzV6ZkdU5tA2d5VdtsC0CFFTCQIHbF54RRw3E9FetPbPAaU3AVCdZxyIUNlDb9jutoacuOKMcCV7Hrm/wyC0KoeM80GcoORVD3JC7nDgPfHXtpAD3JWhje/tfL1EDvUyVNYKCZiHlYi8/df1/qmkjou8D+A8B/D6AV50j9ikUPgmoQ/f/dF/7C/vZxmkjon8M4B8DwDiOOFsT32Y4djVPIiDrm9Aivm0h1RfK3c/0EIwRGKahPXRI2kTTa9BKwXI8d6OSR579NoOmAgvf7Nv3ga+EZr801S9QowHZhbWndN2IHtwZArw3WTNQyQxEYaOUd+EqXKOSISgUMg3al4e9h4tBLwDRQuFgPU/0t0jMWIYVsmoRq2e49DKiBDJjoq8fdDgFqRHppA5q+ETkYlFL0tqPWmvn02hT7kZwTBGHw5VCkpghJWMpuauLgY3Do9pbBqd2jOi/pK4HLi6tl/SF1q3tBmK3KjGBYoJR0Sgxx7pW0oplUdgQurUUK7ofdwnnU8a6ZOwOA26uBzw8ZCyrRrOYyepGCmIQpBgwnxUyqBAvxanHFLUfnzfrhlQyAFRonEDIs2tex9ZPsP/0Yk7sI0xdw1VXkiBtP+FyGAhSqDnOLbphxc7WcDtqJikFAocIWrU+Zl0yhkENqFUUpuwU+ACMIpu1CBuCV0j/L3Nv8nPLlp55/VYTEXvvrznt7fJm2pl22WVjZbkaXJSqVBQlBKhmTEBigBBiABIMGPMXMOIPsMQQCSGBVCCVagADoHBJYMumXG7TTabz3rzNab9mNxGx1noZvO9aEfs7NxsPkDKuzrnn29/esSNWrLXe7nmel78h8Ju9aDIBhzMFx1JqEFKDm3bxbU4sGcQ6N2y2V1hZKbjQdO/1fDbHXeuXZCvdYGQAVNK0Ve+7GN6RZ9bEnY20ZdbFK2z54mJHzoXD/mAOi423OLo+KsSv9kuCFoSqaIoZQVPE9
NYW5DSOxifSaxs2WzabDSKZKZX2Pl8NbRGtvGVtvuoMOibRIVYtUHqcMM+pZXIFrVhWKK1yR1ROe0rqOHhnfB3UofUY5DsGnChcdbsd2Gw3GtjkxGYIfOh2/J0/nfjNX/DcXHvL9tYZbNBUM8hSlmp+TWbVeXx2OOWwSZsb6vjsdlecbnfMxw23r3qKiaZIUocTlzntJ8rYES6XPcFbOxrvgzWANYUyamWvVgr0udWY2Nt+ngVqn8PK2es6awrvHSnpsy6lIGUx3mJtL1K5o+QZ74RgyqRzMQhzEcQEN7oYW7KsiDburRQAEWE7dAx9T8ofInQ6T2ZdSykXnjx5aw6Q5/rJM62S5JnT8cTNq9ekaaL2vwol6K4SAs/HE986vaaUwnF/5K33/NF2R8r6nH3QfllCNI5qYLi4wjvHeDzofiY0CG9LxthA/MEfXDOnW7797QNv3vT89m9fcn+vc6qt9+aA6QanQ7Mk+upu+LWvfWzcdWmbofcRMTGvmuhcTaZm26SIeRs6N4vtHfoZx9WjS0ueOkuqWPIqhNXpbA/LpkZd1EEMwRNQiGuDh9mcSrMqe9ZZXtfAAlXTORH7DhEhzWt/ZTWfzCGuStb1HDlrhavmbYeh4+Lyks7WkCZEfEtg1MC++jMhdsisAafm19aVFF0p8zgyG0S2BUAx4ja9jpkLWpEX8+EM4TSXrD6EgAuOzWZgiJrMTKlAiFzf7Rk9pC7a85ZFiv9BlKDDuvTpVWe7RWE6Xiz+aJ0rNQheV8JXJ1R9gVJMwr8+n2XcHaZbYH5CSTSV6x93VMSDN+5eaHxLbevUdWqfUy7QBRg6mB2+CMzpbA68Mxirrz/jGpq9Z8WzW+zq8sEznYG43PSadtHOvQ6E6jow21WKFVF9tb2r59Ou76tuYO33cRakayxpvrCUM7/gQTSt1yALcmL5incibxWJs5ff8UV+6LW+e6iNN17rT/D+nzhoc85dAv8j8F+KyO0ZnlVE3KLM8BMdIvLrwK8D7HY7mab0YDDte2t/NgHnlHyKLcLaL+Q8flrDHAvF1M7EoEy5io2InAUDesofXpl49/UaZC0TuJKWW1ZA1lwr5X1UYy2g/JWycB8qrEzvwrXrqRDIgL5vblCIhTelm6deU7IGwYAqUaXE8XRintNivFYlY3WeMpK16lMbA9e1K2653zoK60mt/ASPixpA9w5mHxBmkmHRYVl2bRmuojfvPV4K750yP3eEf/GoY99pFj+lWTfm1Tnceo+1F+rCEb1tcirtebe54Tg7DyzOXg1IqBmjohndNCvMophoSEqFu9uJIg7v1YFcUJ+eac4ICeUOZU6nhBNn/XwMVx3sekohp4IfHJPBBWPXKQk9eDZdZJ9mfMkGA7DnVco5nMLgmzXRs2ywFgBUiyGa/HCOM/WxNkLtoaoT0P7pPJXYKe2Zr8ZQNHhbuSUG6VHOTjT+UhFhHCdm63Ok8D8NRIM5mXX8P8qBv1c8v3Gt17s4GitD52j3c5aZ46uCNrQ66LS5Q5vWVIc+NthW/VVtoOwsYMKtVsJKEKm+39veVIfszO/znhiEi4sdThJzWhrnSqzX6um60O7NOc+w6Y2fpwqytaWCuFp1d8xJ4W+hbcPqdA+D4IaBXDI+JUqawT0wXM7ucaYOsiqoAlKyJX/qxNJ9RqDtEziDWvkIORMdBrVz/D8ffcTf/95f6NQRVQzM+yP700wfoN8M1usp8uEsvH90vL12Wol0FRpt1UdfYS+VIC9LUAYmRS5Lb54zR12D3WHoCXSkmw3ZlIgdKrogokCDUgqH/T0OR+c7wmZi6YumY/AQdgyLM+ODU8XO+mwdmsxIheOLHZcfHnFOK3+x67UvkAjRZzoPp5TIxStkLgs5jTjviX5DwjGVE7sO5rrwgNsfbCiiVZgFRgxlTsxB21Jg/SP/aL7md19+zFEesybx6yEcj7u2Nzx//jk+ZILzbIaBQ99r7zGnfd3i0ONy4l//5BM248yj8UTJhTkljs4xPX+P7+wuybnQ9T1PnlxzPI3cvt2TCsRelU97yfRDx+HuwOl4XHiTK16ZZOHP/uwJz57/Cq9fHbm/e9ue7sPeVm3J2ZhUp7ImPqpTWL1K77wmIRwtSbWG4gJNsTf4FQ3DqX11QNd1BCf87Le+xWeff46k3KqnKlhWDNJr3y2FNM7cvr2lXF1DKU1FNztVnI1dt/SEKwUJviEFmt7/6tkJxRK4NP6gzk1MpY+WgCmrwEOKtgjAL02Z55RVNRU4HE+kOdH1GzaDtvQ4HQ/0m00L/rqhI1pgWYPhltuzrwrek4NCl1WdT/0hbU1Q2Gx7YtzANCEILnQMcUPhyOF40oErtYrvKaI82UdvDsT7kW1J7DvPFDUp5INvY/eQttH2NFh53qsIo71L9/oWHDtDDISAc3P7aF69J6XM2pSuk6GY7yRWPPDy44M25zSBGzzgg4nDePPZFkg4WGII8D6QglBkXeWyqSP6vrC63wo5fmdOPQxgWPu5dfiWZEK93uZj+9Dsb7uQOhoiZ35I8xFlQc69Oxi0eVzXIJZMaLbY6bXbLeh5f6II6isgigYb5ew6V3B9m9s1wfLjvmbtp9YXlir4j7/Gnyhoc851aMD234nI/2Qvf1Fhj865j4Av7fVPgW+sPv51e+1HHjVjsMC8zmF0+ls0oxwc06yVOf9gkOv7SxFIucEd1u8pqHwvrCBxZ+c457S9G2QvAgr1qBlS/bzDBwFrwlmdQnXizCB4T4yePBeThF6yrDmV1ULXzwezEqU5DEuQWVXWnLOmlmLQBKcBVwHGw4mDZYHaNa8EOtq9rQeiZZyW51H/mufEmzc37Tr9arU0jodtlEpQ9S371TKEmiKmFOF0PHFxe+Cvvspc9B3DdsNtdLq7WKXJ1SDZLbfhvN5HMLhCa6ArqFPrtVm1TgxvLRpqcL52XF0L3qsjqlXNWcUeSrHg02uVYrLviZodrc5AEd3AppGWnUtT4X4ewTu6GJfeU3bkIuTjXN1k3cydZlZzmnECsVOilP7n7PoM2mrfW43tVx6rlNB6P1tP7Jps0A3Rmeyza4ZGqxCNRdgqlILyTMP6fLm0DL/uZZ4uRnKekKSBdC7VuDhiH+iGHikwTxP5pJn5Z27gHxw6vvdwndu6dKHydHRcvK2D5dGun7Oj4Jmcx3Umu8zi3Hd9z2zy01XoZF29aT//iF1Zs6i0c9dGwK4s8zLGwKMnT/DRMx2Vd3AyhTvvVM0vxkgqmc1uw2a3Nd4jhBhbtjYL7DY75awZ9zZLMOddK9wmtaf94rrAjHI4JU8mrKHDo5L/dhOpgFHvgnea+c+12qbV/JQLcy4ENADMKeFjpwK/JTdu4auLS1tHpSU2Ys7gJm0Tsj8w9D27iwu8j/z1TxIvd5kXG8Ay4ZVP7EOHmIpmybKQykXFJZCCiwobxOYcaPNpsT07Z8cnfxxxMpqxFBN6cU3AJTvdNH0BP3n8AIj2T3LQoDzRB5Io1E+hvg7lTy0w3VwKOTvGm0fIqWeeMm++v+WN31IePeXZe+8pPyVBkpG7FzeICO99EPnW
zx4AePvmhtu3r4llw2X4Fm/l9/DDQHmTuf2sYzyO/P1PJyaX6KLy5EoR5fGUog6kc02B+ZbIi7se5w44F82GZkLs6ILj9q6nlAQi3L39kH4z8tGHnxKiCiGMpxMhWlCaEv/osx8wvHlDGhN3tid1zjF4x9958YL9B4Hvx0hKibt7U1X1yquBmVIcaZ5bYiTECLOqxQYTGBIxfltxnI5PmeYbvL9BMlwGjwTHQWr1xxm3VxahLe9VqRFn3GAY+g6kEDqVlZ+OI9fX1zx7+pS7+zuzbYszJdnhQjAOriYZS8r0MdAPvforLtD5yDxOaKVToaWgfMaapC1SofxCmhKH+3t2u406/4q7NMSJNOVWFS3ZgAlHNDjtSsm4VJvwwPt2zhunONs+pQIkNAij/tEcs9qgKS09ROcpMZ2OVkXXoNEZP7ELCo90lnSuKr1YEq6eXRd/YOg65nHUQL62ZcjKaxSiVr6d0i8cmSJKdYiVFpImSsmKEkC/881u4NmXb5RyMCbyNmtgZzz7NE8KGw6Ly3suICfNYXf2vIGz4L3t8c3VWUctNkcEqn6BtKrOUrkRSeS8+JbacoIfezjv6Xrj8xHavFz7ACJFk23BM8+JdBotKA7gc3N2W8KoqrGLrNQrfZs60hxwha2K+QUP6Rh1rB54VIsveP5qO/ead73c5/L76uvWo6mfrpK11efyribnNQlgAOPV1Z1fbXG+tSBZVH/tZxzFx/O7eRClPvxEVcF84KRQUNrC6p3vfP4nqbKuj59EPdIB/y3wByLy36x+9T8D/xHwX9v///Hq9f/COfffowIkNz+Kz6Zf8iAbUeXS37kWddA2mw39bmB/c6DqQHoHwzAwmiCH8mNUdWzN5X84PFUiOp2943z61TGtgWV93/lYr5q3OkEbDsjZZ7SprhA7Z0RHrzhyC0K0J5fCMlrlxvWEIEZgrpj/81JqKUVJqV0wKWExWKKeq/PQbXpAVdVK65m0brWw3GyVyK6Taf23q069bZZnFdfV2KySHMuQrWMk1kGjMO1P/OqnR4rznLY9f+P7iX/8PHCqwhP2qbZBVfWoxstzLbOmZIWCdwEhr5xs81BrwHcWyeuLWU9OEI93nr4b6OLMMERm55ZqjtD6KbUcmm1SXaeCGwUo2dQkWbJPzUhgcBMUGlh/VUTwJVuftFol87QMnRnVRa2tns++Y3kgbX2ss3FK6l7Bj1fHko2zql3jLtEanCM1vFweqbf5mARCKbjg6UJAJGsfMAS8BnfdEKHANM6WzAg4F7RS4lQhb54m5nFmnmZk7OiOE2w3qwyeZtBzLkQHPoYzTkFlBjkzmG1v8ZZRlxVsyjmwtee9JkjSrL3nahxbx9W72oLdqhfrpeNqiqOeV4OedUW02kGVAHe40Cmsfs4t6HaS8UErRj44U03T59gDU1YDrLBHMQNVjANauaBa6dfnlZWk7ZRzmmxOVShkXdNtLsS6t6gx8pEGxcbU46TCr0P9nOAk2Z6iaoWI0A1RuW6ptGCzIQpEA6lpTOz3R7bbrVbsshnMB4kFrfA4qjKrC1E5Ds4cJAskQlAnMATH1dUlm+2G01E47meOnz/i6vLaklqO25s7cnH4nFQwhhoManB7eHXJRVeIMePi3OYCzuG7jt7mgHdOFRIJ9BttcFzmwJtXiddfXrHdXbDddLy4vGj75OXVY+6OBVKiHNWBznJJ9IUvXgY+f3mJC4B8wN//5hXz/l4rFBPIGJnePIJyoGTPxnl83xO6QHGFznV0wZOmxJRVWGO4uKD0PYfTlrvbO6Z5bmNccGy2lzx9csFhf+L29hYkt638u999zDe/CVeXEMIJ72E7z/zdTz9lezhxvx+RnFsL0GzPoXeOJ9PIn+cdGUfKnuggF+W6jW9rtfgSKR3eF4ZeKF6rRCF4Dqcdr189Q0rGOyHPG1Ka6OIjYu/pho5pnClFE7Tb7QYExvGeEAuhQMEzW/WXFux45aOeTpZ4VMRI7Dpi3zV4eE3B1Oy/whmdBvchEHdbto+vyVNiqn1ki5DFRKmyUHcGbA1EczhrMrDt/zgNhlJiHCetONvenYuKU4Ha/FpZXufqShFOJ5V7b6JfD13M1Z5VaQKCoZDMjkkR49kmXAxcXe1w1ypgpglKoe86Sk62b5vdyemd5KFZEEDRDDE4XAkcc8H1G7ogTEXTSSKF4DdsLnsO+xM5ezZDYNsPZ/eQSmKeJso46Z7dCT6ogrGnnFVcglMulMTQhH6aE14zBfUK3erfq+1HHoyhlKLVbB463UvD87XdWX/d8swWleB6rT/Mfw9B7ZL3QRE+zmk/Y6fwx8WO0vh9ataselYWf1VkLfRR/YLqU9pc9cvFtM+5QKUpnbkNsjTxfsCGsxGp7Xuk+QyCrN6wen0VRzdbXsesOW4Oivow9YOyMv7q+6ppyqsAtJhvG9q11ftrjrvarbzybB74R8vPjsqnV4XSpT9lfY+g8N6zFijtPs9fq8WnH4X2q8dPUmn7e8B/CPyuc+537LX/Cg3W/gfn3H8CfA/49+13/wSV+/8TVPL/P/7xX7Hgx2sVwX4ASivr45QnlqsSFxWWptH1xx9/zJ9/97vtrGIZqujqtospIp0fazf64QL8YQvp4WvvvOcrPiRiwYbLRmZ2Vi20SUJw2gcAACAASURBVMVyITWg8SQQp05XW1w0p7AGC8EyOs5pfzHvQtskcsnk/REwZ9HgTSEEtpcXTV0TyxpPaWY8HJtUdoNYoM5tDcSaQauOr9RnZk4+smrm++6Yr6RC+Wiv2P856mYUS+HrJ/jTzulmQX2eDoprRq5+Zanfleu4ZqTM5JQZx4mKR68PQuGT6ew5nUnYA3MujXu12Wzo+0WQoDm1LDBdbwGb945QilUnWHEubBQr0Bt9f/G0zbMezmkANM0q3z0MvbZucPW92Hf6lWKemIJlafK8602gnn6TEx8fCyFqdWAOji+24ey79fHUqp5uKtktjkddK8GqCb5u7H4R3qmwUW8VRvqMO2GVH0coSw5Oe9OUJvSxtp3dlHn+Z5/z5sl1e45r/PhioCunDRr01i2vY4IaXezI03SeKLLnUzOIvfHt9I9bpnb13Jpx8fjaL8AZj8tI/Ej95GL4U0qM04yfPV5gnmec96RpavLNk/VCC86R5tSgVQCHYgkpVFV3dCpA4b1nThrszdPMOM4E74ghUIoqqKWsmH5f1KnxQR1EQNVJ0cRHXed4DOZoga5XsajjaEGza0OgYiXOacItdFrZoTAMA8OwIRlMPedatbY9xcYtp8J+vyfGjmc/KLz6KwPFWZBt87itm5W9q0gDQTmDDml75xADfTcgp2vCacP8asSjynVVjc0Z767KTxdWkB2Doe8/v8IFYfvsqHPBUAxD3xP73nhphXTw+BhI0rGfIze3TzkWx+trFZrhJAzdpBBMccTTxM3bN6TjxHSraoKVqzJNierk+LhhyM/56x9t2Gy2zG+3pPEx/QDjqNyy7XbLtuvJORGjtp7wOVfMMCEENpuBl5vn/P74HiXft/00WDY+Z7U1WqXVoKaLgXHOUDKvXlx
we3PFbquO8bdefZfHb2+RqLDYbBkJo9jhvTrpf/f2ln/O3yRdPEZcR27rMCA+UpwqamoGP1jj6UQpmTwLffR88L4GLV2BP/p9IbOD8oHy57KQEeZSEBc47rWvY9ffcn19Yh5NWdlP7DZ3On1yJk2jiYksHFIs8Rcqn5easKl9A6u9sTy/CJcXF/i8SKvXPVdSVpRIDcbMifYhmEqeM7vl2r7tHGw2G8bjkVF0v6jrLMRAqYm8uqmsDhV9UhvoQq2IgKXUmqOoQZmJ2NR9tCIEmiOKCvsUaddee1K6rtOf00yek+7/Uas/zlsrmJUISavQx1UAXIxr7wTfdZrEzgnJM6AQ5hA0kRc7XafBa/9TQYguEqrNOo6Iiwq99EI/C/GYOF0MeK8NukUKMXimcUZSbkm9d700HY+YEt1pZtwOK+i6jfNXcdXMh3tob2vgtvj+ts+Uc3v/8HHWqlIphdpUW5UKdY3QEsA1AHIatNUKrChn25WiMNdV8NF82rNr0jMFa3tBmxuloZp0bsSl4vXghB5Z7PfqnivFoKy/sppnWX6sl6JV+HM023kSwsa5QIyuWqx2DotRzf9cmYvV+Grj9wdOqa1n/b2tcKmu2vLeJaFpwZlzXzGP9PgqyLbYmLVY0f7Iyh/5UcdPoh75z3j37urxb37F+wX4z3/sN3/F0WB2qxqpSCXcG9QsJ8rhoLjp1QSRB+dx3hN6R2eOfS4FqgiBva+IMGZdzD/8mjwP52j9jorZPSe0Vq7HQmYudVEXofbEEFlq4rk6ldAmlsPpRlhqtsYMuCllLYV8m2TeLd8RoauNsr0z4r5KmWYR5dY568dTCb7OHB6T0P2qR75c4xJstQVTg7hV+UqrHNUxWz77MDAG+MV9wUeTsY09eM+39wW3dfzJsJrdsgRKInXrte+WBjBkOp0QyZSCZizzuuKm17PI8X7V9BbGcUZu76hzb7Wmz0a/GGywGkXvFH9ejKfo7DsqhFSHQ+dMjAGiclrqb2tQ7INvTaVbw037/lwyMilkJsZANnEGhdcaL/KhYhEQi/DtG+GjYsIxDkoX+H0cn+40B+XEKr4mbuMecEcF3lVw8otICZbRjiEQY4Eg2qYjwVyy9mYKjpxdq64hynNABCsU2Uat6yt20RBP5assbZub3sHQR6Awr2AnxRoRV3htiN4CfKhOSUtBumVdgQaifrU+26ZrG/dut8WFoP24YqgPGOxa55TUaBVtWr6/u9fnmU1FT2pQp8e8UygPAmmckSrdTDW2ej0pBoL3Cs81GBm5ME8T+7t7Yuy42PZM40S82pFSYdOHljBAQJJoNSdWlP5yeKmkcIU6uhiVF2eKexp8jfis84+kFeLWpkAKv3Z7x3bTs7+lVcYrrKvNdrdAcFNO/NyfJ/7kWwNS4Tv23pyEENR5rAksX02YQKHQO+1fRCq8+SwwvnRI8Wx3nnmcVPHW1z5dgavLK3LJpHliOh41QGHZu6QUiIGc4PazjTrvAB7GGOk3G4btAEUY74PCWg3+/mLYQB+4jB3H48jpcIC+B4om2XJiPo4cT3pduj4Ks9Sx0okR8SQ3czwISKHsr+l618RufnkKbPtgXD5VCM5FA8/Qd1xtNgpdDVUu36Ctfcfjqwt2Fzvw+rvNENntItePr3AUuhjYH1SOPUZ1HFP6Ba6n13zj8C8ZTyOh04p/daoq97TzntgFdpc7fv6Dn0cun5DzbPZaWwrMc+F0PDKdTkzjkXnMkGZS0d5fLoshD0oTJComxhHdKpkCYImWKpSUypa79JgxFVwphJjYhs94/33fOOT1giWLwumdfl8qmd4qxs4WnvfKK41OxU/EOUIXef7smSac0D5wa7TbCiPS9qiiWTATlLLkQMnkkkzIJqkQUiln9IxgqryLq752Yu17vLahaQmp5qHToI4impxQ0adlLzBLqn9c3YsNgZJF1Wi9XwIR23NOhxOui2z6wYLQekHn+4nyEjMSoqIDREwp0iMEvK8boY5730VySWfnWNtAna8biniGL490XUcWx+WUCDdHDlOi73tCGNlfDcxDxzxPJsBkMPo2g5Y0YMiZqxe3DMkxJ8frx5u6xbSxXviB77oP9bUlmfhuQOYcq0rd+eedJeKKSKNhxM4UMes88m4RkDIfUXl+gdD3hCJtPeSULPhrLvD5NTefzDWBvErBqde3+CdLIFp/WZVZYUUncrTE6xm33PzmKsC1dD+Q6trp60aQb76oQT/tlM3nfzjWZ8+heslOzpL2ta5XAbmrDywJhqBUiNqjswmvNS2IunaWwLL5zFJtWl2pX30sASHaDOCHvfHB8ZdSj/z/6/AO+qFrzuoCZXL0fSRG7UuTrcn2POc2OfRPzcIufViqkzUbvyI61S+K7rzZYPcgIlsTzOvi6bq4Cjr0gp0zvLYpBenvDLJnD3V3sdPFV3+XC3NOzKepKbKpA6Sn947WcwqAsmyaVOfGtalRrxhQydjoHF3nKcUab5rCF6gz2QfNhPhSTIjEW98mM4x8xex/uKEoIL8O1pn4Sl1g68m6BCrr30lTWhOBXz04nl0OdEElmut4Bu/42widS/yhn1uz5aWypONcqysEb0BodWKHoWdMGTfS5LCbetE66KjnYzW2osqAw26rDXgnhW/4Kq2OnC3WmkCoO5XYWEXbsGvVV7+rOqvnY6zjVeFeqOHabBARptqSwcb5b305MmQNzj95/5JPN97UzR7uXucBSBB4b8wU38CixCI8Pnk+2apBDs5bw+118O7BZbPvBhXEnBJfq+SLrEG9fodKnN8cTg02U5vgSsvs2oZvcJ8WHLvK3czsXt8zfvKC09efMx2PegV1bmkUxfuT8O19YtfBP3seUBp9DYgLiOLOnUOrgQ0mtVxvndewuvMirUdecbSqV5VbLzkxnU7E6LnYbTGmnEGYNDiTbC2cvRrgedKKu8MhTgOkZVktVT7nXGViLo/TazCtkNAVgVkEJNseoRtKdhp0aoBTcLIyU84SKjYfK1RSclEoq2Vtu+Lpvef+cNBstvUtHKfZ9ja9VoVie/I4N3L51w93lDRbki23QKOwjCOoMXRo4FvA5rJOEhd0PL0rSPEq8mEYF+/EGlc7fOiRUgj9wN1Lx+lNoNtE/FZhasFrheF4mlvPMu+jSTwnpimRilhPP08uhYJjsEbm85wUrhaVWF9EVV9Pp5npdCSlbPB14c3miteHG3y/4/33tgwbrare3e9xCP0wUEqv7VpypngPHUjtIVQf0gSews088ccvD3z74/eQoIqluRRiCHyUCgFHltzsj8fmk6xgRSjEdJoTIUQ+/vgjnj99rBV8HPv9nuPhHpFCyAoRn/NEdBZoF08oQsTxQSx8I068ygUpE+tj6IJymCtEth+4un7E1G/Z72cO9wemcWR/f8/pOJPSrCqK0twsXDQlQWsxEYLy6UKIVLh4cAYTnCeStb6QogFryRmZBUTRFt57QhfI6X2+8bPa9mOxT+YwmqOb58Q8TvRdf74XSN1frSWG7ZXRWhOBJodqa5Y1ZMwbF1VzORkfIsX4s5ITs4E+hk2PzE
mTNt7TBW9CSA6KoiO8+QXiPc5lltBQTF3SFDDFg2ilbvGtaLoKury0GteHwHbTI5Yg18SQJplzmhmT7mOx7bk1SanJk2gBRUnGfa3mrXF56z1YaxjbO1VBsWg/TAo+RELO2joE5f+mBJ0r1uerJiV0fm7uR65v9sz7mWQ3Kujz39wlhD056bjFvuP4waOV2ZV3ko9Pv3yLmzNhzogPxP3E4/HE7cfPDBVl4lw10Uf1eywwXtgIrULWEtosv3PWu2udvF8f2hPW/FATasJ8jsWTknoXjR/unLP1DGmcmkbC+qiXkkX94ZpkWXpeVoXQOl8qLNGfP8tVELWIgmGJoVpkcMZ7N2+h1EiXpQpWT9WCRGl1m9ZbELcEeizfox83X3DlouPcUgGTauOXNjIVbXbu3lbHA03k2Loptp/WW26P0vZptdGrc7SnU/eLlU/WgtMV8usvefxUBG0CZ6qJ4PBipFMpzGmZkBUhs/QYWnFzHlQWvLMG18GTgOwdYV4gcQIcU2LXRZNkXjZloBnqhdheCby+GcJSlgpLWS+cGDjc788mhbN+JmsIWGgZP2kwQNASeG18XYMDFTmQBu2p2QPvaxNSq/RYKrikwlwU7iGoulWMS4ZMihhJWt3M0ibag6ezxCY60fwCG2lGza0/ce701kBTYwA15B6Fam5D4Pks9CLa0oBFoALUmPztFPlb0vO/XWZej0dO0J6JglSUq1NrkBSTxO0HupC5f6Ok8m7oVK1xFRzUB+1YnHABI8lrYKm95Qw+9mCO6FcuvIKa5axGsjoGxdMC9XXVps2NagRsAxXncF1UhaiUETzPTsIvfz7TxdI2OZczf+3lif2HW15YE/f1Wc//LfyDlwXvI84JfYya0fWOIIWINoIGjNdjXIuCBfmrPY0V7GCdSlwlXELX0W89IRaOY5X0XwjUDfpW6vhVmKEF43bKUoR8GHF3J+TtPf2c2BShDJHBWnpcJOHX3maCwDBN/Fu548sIv/G0OlRmVHMhGX9smZ9LouGr4AnSiPawHXrG5JhLUaK8c9omIyXG2Rp5e79UJwFxnnHOeK9jGbwKoRxXEFrdEiqaIKNCoNJ4jXV8g3MQPX0ctHm7Bba1V0+Vy/DeM/Qd2+2WsB1MXl5alarOv4xZTlkCBm9BEt5RnCeJME+qoJiyNt+dk+7LtVG3A3L2TDbfa2/Mzz97yeU8U/ICiazJLkEWaLZOCkSqVL63vV57E3XxOb1LuLjB9Rd0Q2CeCve3Nwy7yPXFBXGY+PLzL5nuLhhvHP3W011tyXOmlExKmWl0lDHy8tU3lmRbmPn4g78g9le4+UhCuUPO9umcrd1Hzm0uuSKErspuC6cKW3MqEHMSTy6ZYLywaRqNkwey6oVJ5f9R2/2aoEpda1ul+KVSOJkj57KQnAbBv3Qz8WQqZA9pnttcaBlz27R3secogf9l+lli2DMM2rbkeNzz+WdfcJwyzCP39wdEFttifvaZkxqcg/CGm8NbRApplXDA6dqOHrrgQRzdsGXOwvF04HicePv6rbb4EYXGDcMAUedB9AvjBBRRELwnemfVBl3vXjVVAOXNJqtMLg6lJlpzmpmmkel0IM+qQvxbvwX/8B86fFQPzJmIjQE7WXY4NMNuytPVI09F8Dnhu86QLwbH9eoUHgGp2RCE3pvYlAjeKUy5pNEgxDb1redeToGCBjDOe/qLjb1BJfW982y3A944b2/mdCZk4VNhsw18OGwJP3jBzdt7wm7g7hvPudsfqFWEuuNl9JpL9Djr7edqix+3rFcvOja6T1coZCDLDD7w6LMbuD/QnUYOu4H76x3UjirU79PvTMYx1JMrHFJKpQFogmkcCx6t2ntxHEoBk1jHglGRQDiOXEwTswgSIsFPxkdW+xK8x/XaY44kfPDinlfPdsaxMsEgS2Q/erNnGDOPn1wTnOPzz14wTnu881wJlF/cIUETSjXJvrgCOl5ViKoFNO7cpug/tb2LD47xVAPf5RlqMJsU4uzVh43WM0yruWpvxZAL8zQxi/IzO4O0e6DMqek5VBPdaEgiTYijJkcfBhhrU+hdWRIq6wBUZ2qzn2D9MDUyWoK3lfK7ttbhzNd0nnMoI9VMLZ5GaWW5c5+zJnhrSLf+v1QhMK96yEtyRSzhqZW02Xqp1k+n5KzfrpxX41aDk8WCZbH+l6trWz7xbmi4vr/1GP8k0Ej4KQnaFul2W9jGGQAagb2Lhnt3ji6C8wpD0l5gKoe+4BjNubYHWQMy7z0lBFzRJsZTLmv/ZQml61napKgGLDAMvWW49dzzNDON4wO5VA2gSiUlG8wsehAXmKfU+qv5EAw7vgRn7eG5VRXAXvJ+ea0GbzF6MMWylEoL4nAGaypCQXl0YbvRZr7TAjtY6l9ClbbVzXkJbsE2bzyx4p7ttZoBqlmG9omWlavfo89a4UmOD8bCR4fE5TFzBOUR+KUPVAKiVd6cc/zbd5FXfss/7U8mJFDa10Rz9MTRJLSXDJFmmPNhpOs7HS+0aadetjROSzth64+33MMZL64dy4a7PDdMDWv1+UoErkPEMrZLwFYDPpu/uTBPiTRNOIGuQPS+EUa8Bc+SqwkM7Vw1u9eIxfbv6CBGfYZDHynSkXPhZyZ4exK+v1NOWipaSdGmn9KeW6gODBBsQ64ZrSCi4h323jIlrpNnlspB1HmSpc4xhVUUFkjnesIsmTXlKL736Sv8Jy9Jp5Enm8i/fN8jlxe6X9xrA/BcCkfJdKMj+8iZ0tUD46kJ++WZreOH9j60qFMTJlmUY0PRLHjnob+4xOHoNhuGPmggLIIna+bd7qIqqQbv8a5jfHu/NOVerfFpTBTV/GCa5lZtd86Rg8eL0PXOKkLLBZeixegKe8U5k6XWpBS+kFNiTkm5HqvPeuA0zUhOdH2vqpw1UIxeK43AIdUg23RDz3IXS3+mGDxP00wnWm0sq/1NM55VZRHqDqQo44LH8/6bxIuPVE1v6Af+/Ld/lTx6fOiY6RFXeYCFGKHrBn7+V/6I4C9INx2b+MKy9cZjuU3c3sH3vvgQ311wcSmEKDgXcKnj+9//Jof7A7/w5BPeH/aMRXgdl8FRxVfPPEXSaSA4R0/P3m9IqVDkgEMdrbtuyxgiLtWEmSBZq1UXVxdMJ4MIiuP6+pLLywvaomp7qa6ZlDM+KMcjRt8y4uM889g5LpLajjlpYFqSqkWqgKruaSFqoNOFwnscuAteoYWlMB4nZMoEH5FY2Ox2hnao/NC6djSB6oomFfzhFZJmLU6FoKR/W8PFKvVzKXRDx+5yp3yiUyadTpY00X06dp011a0Q2WVNlsZbL0xZnX3nstk87eclZBWccpHQq9ukIlqJoq2p2nwuOS/VXn/AUhbNAa4IGliqD76JgqkzXnBE8Ty+PZA2gvedJSW1pUtJykV0ttZ3pwk3zsxotSR0EObMuO2Ut+M9rgjdJpiAj84zijq6nfVYxHv606xKii4SPn1BuN0zPr8izVOr8Dx9fU//5S3z0DEVoQuejfeUL+9w28DBw+xcE+fwVvHBKRxfiwerUMt5c/SjwuBWe
6TSby9dSAJki6v8cvA8BmP+PxqzvQu+8KdMO+r1/35ISNyp8ac0HZ0aTXGGC1Tfv7A8jNku4vBc5BYYYagWRWKlGpP0rLjKgKhpiF4r4wlrLWWrLMjdSkN/ShY5XWBVyEu3YYB2w3A2bFnQ8bIfNIa8IQg/RzOy4Vpjhsg2ByAbgwAGWVptfeI0aH5AB2ArOKqnBkY5xmVsRPa78TGK7rWVSUHdAYpEgxwKUULLMorDBGhEEyObzKddZlBZFAsYT9MUtjYSJIT6uieH2PaQyYSWCgpOyPfZLdB2G29Jqet4xGCMpAtRhEpIDYwWPARJfIRMgsBoAjwTSPg6yVjfOIwaCssoqthYApwN78a6u87QIJqgoDqBjEVIWAMCapUoTACT1Y4SRB/TKBeGbvMB9mnEMv+3t1W+WtMqZ9x2oq6OQzgVkC/+3hS2AU5VZ8+/XOHUN/htuv9TulNGPDIoJAi4CywGCc7rcPn0S8cxXx1cdJGg6DQZRxwjBW79s5n1X9nRvjp4rtfI+dKjVp6UCdoyY90iDKy7lOHvXqmDtF3ozROlf6HWYg3fwS8/0bOCLEnj5fHbcaLQVg0JeUpAY0pVXlS9A1jpNoL5FXDL7WqqpONaNkePw+Hn9wgx+lhN+9+ViZcxnDFBSWZgpYxjrnVq9ScptvqdNlOA8JNqxCzBHicDpHDMl6ECRYozCo1BWdN0OoPqjMRBeQgDp+8qt4j6X+TV/yZP21NSDTXTTS267fhr2tR4O3F7ZZPDWy7CecsLXKv5vjHm4+4PGLn+P2bkZ+a9RYAmMxhioPjGGPvAS4LLsj0GIHZoc1M5AlM7dwxjKvCMEhRpL6tFywrLM4BrkFVDkXzMcZnLPIDgDXN3f45QBsjzfAuuIWAbcXT/DoybsAM968vlaIkQ59FmoI60sl3Ati0FTDlRx8Ye0pJ7JymQVyKKQRQHaEu2XFcV5l/XgSdmkd1Aor7+ZcHA6DX7V9653oMJsvFZRgSHDtuN9jTgXDEJD3BxBL0132Hnd3B6BkxGnEssxgYvggAc6gLJQpZYxDVGcXWFkyHRcXW1iLkjcvX+D1mxsMw4DYNaHOde3o5uRca8yMrY5MVugzW/yN7P2Cx0DSKnJeVqw6hsaQLPTp6tSSGZVZnTOG8Z3a3tIVBetv2C1HzVi168vfAI4TLpdLvPY3FQkwTiM204hplLZGXCQITCD8/v531Tlv+8Lkj4u6lxzh98b/QiF8VNeUzLg5JGLNPeMv4tv+P8RfHf4QqVxr4Ehk8jvxA2z8Jb49/UfWuu3UHiBSNnK7LqkG0lyo66noekmO+v26Ad5Wj0Ynv+nfOplvf+P6H1iG6cH3Ou/u1D2EboNWywoSlkjuril3MNn94E3Onr3dpL+GXbvKWEabR1LEna6zprfP7mYBkpIBctI3dEmY51kCQFpi0p5F30yZYSTo7JoPwQyeF+x/9JfYfOurKMuKcZDkRM4Fx/0i0+4chsFrv+EBP7j4z+ApqM0uSQFCAbnWE9De0eD3hYERO3wj/i7+bPkXVVekJSFuQi3F+lXH58JpK8xVAM9ZGlsyF2xvZix3C/jJdHJ+Lcx+cCVCo1mWQS7OA6M0A3ZcBOKBxiYJJjircSg2iYr97nc6mrIV+0+EzrhfENeEi5sDSBljwmYE+QAyp08X6QggvrqGCwFgKeAOQ5RFo1BHK5gkGRgAQlkbR2FFlAaABakUpGWVqGQRGtM4BjgvTbQzszArZmGIDNE/qMNr0DqqRp+wsTlcXl6AopcFPA5C3LEklJSxMmEYA9xmFCd7XpDWFTEEqTdxDnEYVbgwUmasqYBZHTR0BpTWdskmk/HPWeq7CFLjGLyyQk4T7m73lRXLoD+ukLCAKitYXhPiNCFuPY6HWXr+MDAOEpEBEdYsMKA4BMlKsbAlmiwJMSjTUEbKBTllNdO5wj5Sag1/zQlNqxj9MXqFvxKu3FN8OH4bP/cf1+bLwUt/EGdKBZI9KEoKgrq+O+Oim7cqzepnunD0ObhoB5hqa+pnhAazIVEteRbnO3iPzAXzslYoG+tYn2e73nZU50mf6Pvh5YNziASa1eryQjVX7XPgVOGf3zaA8d1wgx+lqxPniFnrRL3QAR8PxxODl0vGtJ3w/pXDs4uAb77rAGKsCWeNQU73yen4s8qG9vdzp/T0XU6dtUqy4jTAoVF3p0qOyPR4B9tCP8NOx+dhtrXJMF2TRdetM0YunMxj73ISiRGcVqm7WdcVw2ZXg0m9oUIQUy1onU/OBXn/BmH3VAI2+qzj1XNc7a8RD58qKYOvjmFzOkVpl2WtRqXchMEKkypa+ygJEgI5qVPMqryJZB57HMopTFuemtGPWctYWWTE6tZOxt0CRN1Ris3rgylHncTuHqhOgjnJ9Jadq5/rz3yyv+VaRMDl/gZPXvw95tt73B3FWbEcUffQGMcRu8udtCZZs86ZZpCdyJiiDqHA+RZhdtQenigFKwSxsNns4IiwJqVlVwiPPbe090CltE5pBTmP/f0BPzoQwpLwLM84RmC72eFiO+Dm+gZvXt9UUidiqWeblwSTfkHJb0q31p2TPSPwJsvIdftAR+5aWTCJZO3Hr3yhOwcA9xkhc95P147pF7j2WZ073WspZ6lvOUg/Jmh9L5TMyQE4zhL4DDHA+QAPOcdrjICJ4KcR67wCOWOj/e+IWAlI7jGvwjSZi1MQkTA6m9Nv+keyQDpehVC86JUm71H3CEgdKQAhiA7+ZPlrvBu+iIBt3QaF+/Us+0XsoIKCooHTzhg3R6QKM80+M4NrsJt0XxQMacD7v3iGN1+61e1HmNYRj26vEIJQppdcAHZdDy1gpFMHjGHOUq8Z217t/Jr6uQVFPwy/jhADbjevAJY1Ro7wYfw2Jrezx6rXrC687uf+M7t+Lx+qzdP9bsFZlejdHu5qausXuDrcOhPdPNLJefI90xNtrG0N9P/2IqxLiTz4Ww+67t/kRO701zgXXw/ObUiCWhcJHfcqQu1Zz+xW1Qu5FHh4rOuK+bhiTVpiBGgPReUK0PZQ5/ryXD/z4Yjjj3+CfHOHtBmlIXuQ4LnX2laof1F7+OaMxVpolALvCHFycM7DuQxp4KcWHikRDDMiBwTvsDoHUEFeF/Ck556RwJ0fnwunjRk4aFQsp9aT48ki2ZO9YmZ6wfO2Ok8ANbICiM/jzSju61DQKWMVKqYUmWRRQB3JXiGYQrt6eQdXhKzELwU+GfqeMX/0AukrH2J88liEdl2B3IzK6DGEiKxRKqkrK9pU2CIBskXiEPT/UQycLPRpw+BRknj3ac2IwWGIQYRILkipVBhdVMikNViWReGAnHWROIEuaWbGETAMAxYIvnyZFy2gFjhdzoRlZoRhwDAEHPZHrEmiNIEIKJLZM2NFnFEWpyRnrV+zpolCG79q4TkXrrVkKWUsSZ2PAgw+YBgjlsOM6asfYnz3Kfb/14/BEHiVD+2dCzPGUYreb29nOCStX5J5jIqbDkpDzVqbwCyEG8yoPTqsfwZpNNQ8a9aIo2gVOXeeF3gGBjcprMdo+kXJWkDh8uYCj6
8fSdbNEXzRf1kgG3/6pwW/9VttlTPbGmv/1RV/8vubNwU//3nG174eWhQnZRz2RzVSOjpkmO2R656ACtFzCKYd1nfr/HhIMw58P7w+O4frPioA/rflvaZE6hOdX/ctThsxvhuu8aN0dXJfi7YKTJeEOCWl+szeE96/8vj1ZxmbOON4PM/I0Zm2tb+3e5vR/TZH7e0ZNTl64h8zJKuiP4ucnh5NOXJhKUYpXTbHDgs46XUYrfk4ujE8yQSevYGtXy4Z02aDURsJt/UiF7L7Wt++kgvW/TWGvIBDy4DRdIU4TfjL8RLfyG/gY4D1zrP75ZRwe3sPYxYLMWDcThJcKwXruqrTGbDO0j/M+whnGQdqMC15f2oLgRpsS2+o9zaH2Pa1fVfNPvt+m7CzwdYh5w6y383Hg8qRk3NOgx/NPTM40Nna0Wce1wXP33yMdHOLw+2tMN9lMdTREWMRAdN2wsWjLYKP2N/vUbhzEk8g8g1aa8QjzlOtLRFVUnB7fSvQXZXLUxyxUUN6WRd1YgX2vS6rMOo6qXPzYcS7Tza4Ot7icDdjmY+4fnONTz+RnoHCRCuK9/7ugJyTOjcOJQsyJZJkaJyTfmMp5c6INESBA7zNtRm4VtPnMX7xfcxa94bO4YRe423zCzTocy4sLVV0tgqz1EzWPq/6d13bIUjtfckFcZTArCMNyJESejkhYNjvj3Be5HUcRmy2kwQ4mCVbBlJEhMxZ6xEnTb1lryv5SWFl2uMK658GBynjkjmt9azc3j+EgCXP+Hj5Gb4+/QAXtH0wGg+MXpizoeUbBGQbI9Vzzgh4SLL1nFYwBbCWd8iwsdRbQ4KE5DziccDjmwtYDwep+/a1hr/WeUH6uFJtIq9ytZNvVc7x2Tt0wbfCjA/9r4GnU/nda1cTH0ZkAaqgQmEC5+47nV4Bmr16srsVLcSuBQKMSMdx0ZYeJpxOSxFO1U8nydWpYNi1xGFuTi3Q2HbbOFSP0PSDrj1bI7ZSHh7U3u8k3NxycAAAIABJREFU4PoWr61+o9dppRKHyfpstWyfZedIqYyU7uS8Yl3ls6EmQFrtumMnvQyzkKaJfdNl2LjZdulWSjOICMf9AdZrLcQghCTe1zIjBwlO5X3CmpK8lTZot2A9kdcgiqoQUC1psvvYWi0lV+eV6DQ7e358Ppw2AuY5Y10lwt+K0oF3fvR3mK8ukDdjfcnKnsj9trSLQaMK6n3Z5qKOkEQtHrnGQ2OTuYAzndSyAAAXxtWrW0z75aSY2NK73jvk+3uUeUWBkjhVWEZjA1yTLFQ/UIVAQOEeORcgQzujC2HH8TjLazjt9ZKyLAzvqmP65OklQE4gKtoQ0vqwiZA2Eg3ptcQgHO727eVIU/uOhKCFCGkuig9uQso2dcoFaX+E95I5ycTot1oSwDCKZp+MsdIcYVfJDaTuLTBjLQWMAmTGODlMU8Td3YwlZTDPAjWJQTbHxQ7uncf12RiCUQ/enCltiDhGXD4VFikfAggKuXJi/LG+i8x7rguEiJBzQkkaMCCp1Yg6pQzLWPmaPTN4jtEIW688r3C3PoIyrAHTMpxIc9J16sjh1atm3AKo7I5NQDcB1iuH45Fxd8fSR0co9rTwdv0Mp+BkCbzddumO0xq1Xoi3Z+obQJ9DBAExBJ33+OHwKf4sXSEz8IP4Eh/6fbW3T+8p73HuDFl2qQV2M1JqTug4BPjNWFsa/PCLK8a4IDhI76u3RbQIqNTHFlek7sOT3x86aJ/luJH+jRR2YxaAKChdv1poXaoCeyiDxGFrjoUMjdUd0qmaM6UA1HYd/QM5cy/UwGEGpmkSOOUwKM2/7GtvhpD5P7q/gKK1IUUf7xSWAgDXThjCci5I3DlGEEU2DBHjNGEcPB49usAwbVGytBQ4HmUeRA4uYEjmPqWEIQ5nDpGtBYGrGMmDfSh1rHJ/AUBwfe9mmL1lA5w4AqSGJXWyFSpb5WEs6UDdmMnprO+ixBDkzuq0z9ejjGDIK778yd/ieHuL5e4grJ6m29ByC847XF5uMV1cCPzOgmAQHUXNcq0v3faW1qYpHNwHj+1uQiZGOkrv1LQw8rKCna/rGSTZr7DZYApR1sWy4v5wRD6uuHp0gatLhwkLpgTcvbmTmrQxYrN9KpBwVGmG169v4Mjh4slODVPr+aT3Yq7EnAw0GFQVXuo8Fcb+7l5JRKjNk851Qx5Q2z9vO0hqZMjWjAYxGEBWQ0uY77SXKgHkBTLFWVAXznvEIMgbq7122h6mlIJ1WeDcCmbC9mKUiD6A4CKSZTW5IBfG0Efc0PYbEbRvmGTbuCSsy4yIESU4OHi0Ju06ELC9IAZn8kEzEm8fCvmgoG8pbQEQmSNWeL8sCmICArQ9jM1RUMlK7TmIarsCAGJ0W0uGLqgijnnvNMl+lJ6mpPIT2vrJlnrbVVYuY4E3T+b8UXWgbM2dLIFaQ6gmUP/mKoOJ29+JLBDStLX9XOWekzH3zGBihfWrIwmA4eA1OymjFNT5Jdn6bXnKtewdAUDtG4K0PjoRcCjCBVFBnKj2ckuoml5RG5mgZSKo72XjKfGWrlSoCdLuAan+KKfoqHD31zr9nRS0Td6/LUPIygpAviCEQdiLg/Rmy2nFrEgOrzKhZEWjFUF7LUvC/v54wiZ+XifImSuHQU4Zy7JiHAVunrng6/QfYL5NYDiEcRBEWCA4H7vAdqd/2a4twYVn4av4cPgufnr8EwBGAmfS/C12SXd8Ppw2FlIOoxiuTgJQU4auX5Vi4QCczwxNrhtOomy+7hpWo1j6e5wanm9T1OfOnGPG5atbDHdzLTRtEVxIhDIEpYttGT0zlsjL4g6Dh/dOGQAZxQsEEETIRChpbfUdZpAza38syQyl0jDcaV1RUsL1zR7b3QYpy1jmnGU4nC0W1OcpuWCcJuTNKE6ZZu84Z6G990GiCMtRCT1aNs7gXEI9n4XcoTprDrSd4L3Hcn0jhaJO6gJSLjVzBUDgg8FDy/tFpThCyRIXWteMYYjYXm5wuDtKPyB9lgrhgThvfHuv6WsAHAXS5a0nCOA0OreuqUJtLBpjDobBsKTWzCuVbIBzCduLCcc1SwGrijtHJhKlTqo4h1RWDENAWqU3mDASOmQkzLxXZX0q9NR26wwo7Q3nkn5gGstKnG0T/Kr1K5JVuRNOzv9Vxz/ksJHtofoMnVNpilCDFOcNLOt5qhnJO1z4hKduwUveYPL6qCfRumaLWSTOjoiCC0q45dDWNjMAaab6dAf89vsJ5Eqt+yeEbm92kBZqcBcztO1p0f9qb0AGF/1spw1k1Q12fajz3xSEXafpuWI/VIPOHDdrwdAc987w7J61H2dmicrnE+fCvq6RV3M0tDXHpHApM0LtONGdehi0bdqOCEFqDFx1IZpDGMcBfA/8uAS8gRixbMXnBIQQsdmMuNhtME0jckk4HI6Y57VGHR3JvojTKKx8XjI0pSj7avPNYJFYAErxLc6lDKHD6as02I8ZKp+9DzRybRAv5l8JZRFjrukk6u5WczZiZaGadp2xE3ICLQu+/OJnuLu5FUMjKxlEsf5gcpFhH
LB7fAl38Qh/f/kO7uMG3/n4Jwons+yMRpq5SHCDC1xRGQKppRJDTNgXaRwQQ8S9u8fNzV3tD5lXaRLdBwL8/QHLZoM1JVkDakB/4XCNkRxSLrgoGV9fFvzNNGCzHeDJYeueYCl7LDiAWPpdLvMMLlxrpOsCUP/K2z5gbT3D3GpAIe8TuGAdIsbNRlu+tLpo1iCbGElSy0wQ2Ge/J+Rsh2UR5mWnHiqZD0mkgdOi2WB6YLAxS7+2ZV4R4oAYxOFx3ldDLucCJoHuT9OAkS4x0ga/vfnn+DT9Lf7d/f8kwVhuRFi902NrYF0WMMQOEfkByRh4BzdUaaTDadlGDQ6BEAcZ+315gws8rcZj1TFEgDkC3VgDTp1frT0kmytx4JM60QzR9RYkImrs4DmLvCSFtTEV5DEhrlGetwA9etnGGKBWgmpOhTkKdBoGIV+9MtucVRD0df7nB6m8FPS11ArCSIbUYSK9H/fyQ5/THDrLnIIZyAz2BOP1lEfW8a6O5ml5TtG5Io8HzJz2nKJnmuxt82RDpxlPavBI6s9lQxRZ+ZCMUXUMq4jiqitP/27yBDVbR4S+cLh74E6/8glfR3eOXVzuCRJTKKeEPGekKH2PyQnHgI8BwQvTrdMSJYIEWHIRhy/Oi7TpmBVSacmStxymS6gUzKVgOa5gFJTJIWzFYfQhVHZ51Erh/gXMaZFxRGF4FxBcVL+GRbBpMTfT29ehHZ8Lpw0MhfLIL7XoGMDLb7yPNAS4zoorIIUnNoPVKLsrK40ZYmzwNtRV1dYydZEqWb7OERCCCnU9nxkXb/bY7pezjWCbSBsAei+bifkhfFM3tfce42bEStJzKJJi9rVPVCnddXUcnPMw7C9IGRtzkUhfFsd1nhchDVEHr72hMJSBgJIUwndcJKKj0EQxtn0tbGYw9oeDQi26KCXQosy6e6qQYvne7nu/DreZsPyLf42ckgg1a2rZ1ek0CBIhgwDvNEKk64GFCtXHgIvLDQ4HaWC4FsBtR/jtBuQIT3/4fdz8y3+j35HnKKVI9kyjfowCFNYxK7XWxzknBuDgJetW5DvOBBIAOHHipuCxzEkcLwjk0ZlEUGFpLRxC1KwiERIDb8pL8Ou/gnvnQxQvzGbVEWHGWoC/XG5hmeMYlVu9ZlDqsL3F/+KTwMWZD3ESGfv/cvSKsN+XNoeWQaxujq6Nb9IN3Nm5zPJ5dB4+RqQ14Z9vf4H/5vCN2vagyYH2/O291OBlxnM/498b3+BfrQqxJJmTdx4FXO0cfv1ZhnexRulbBgwAN1jCybtxe34bW/vZggRtXNo17feTzzqDx57PDHxXx7F6m2cWR3OyzLHnqrhQa6z6ZVHv3T25I8IYA444NjkHNTQI1Uhn1r6QJHu0dybPG53387kuovzGcYBBbuTNqD03M8ZR58GSxyyBKgrCersoDTeBsRwXHOe5yjK1UDBupEa2lILoSTMX7W25UB3DlFYsx0Vh4R6NNMbAbZ1s1/+f2xRvG1uxN7k5K50TLc+iWQbqIPydHLf7nY4ro9GPyprwYDw+3GF78xL88iVe3x9rzzx7ByO0cs5j2gzYXe7w6slzfLp7ap4r3gxbEG7qdWVeCN5pU/uUMQynwK3CQFoWHA6HGvmdD0eJOiuqAmsWO0rZm4mVpp2loB4sE80M7O9XpGGSwN26ghVunwtjQ1v89u6f4dP1b/BXyx/hWO6Rk8D9Zw08ppQrskYylKQkJNZXUcfaVBJkXRcAcAHjOOL+7g5eoeG2rln1ha9ypa0B8zss0JvWLL3DCEoY1NgTx2lAyBIcJu8wDoPWTUP1jLDjpWwsvg7W5sG5Tm6UIr2pvMe3pt/Fs/AVMAjPwlfwfPg6rumPkZNk3UJXKmL6gSHIj6y12ikJ+YxXJmOuTl6b51M7QxAh6wz8ePlf8Xz6GpwGTB5AyE+OHsrVuYXiXYN1zA1QZDqhBnX0mtaexmbjfrvHiw9e4sOfvafPy7WvHtszyScncgfUWrCYY9Jv7f6ebENoOtxZ1rpzZrh0GSiBexZ7VVsvpqKrSO+vApUTEhQRMVGaIDB7qNeVnX3aXUUdOxtHQ0dw0wuwdUtVBlTdA5MCVB2Zh6YEV+eR+vcj1PXTZujc0ND/2L6xz+uAd+/TJkECCs6BUtEMql39fJ2pLc/iYA9jxLJoy6ey6OVMzwr7qPdBanCdQahlEcYhwjmPcZOR1kWct2XBPLc+x9LmBsq50LgWgKIQ/hGb7Sj30HeTfwOsF50O6ckicxpI7Z/XQexAcabfNjGnx+fDaTs7Chgv8oz3/Sh/sAJb+UV+B4G5iQDbdI5c7b0EKOGIDoID4Xf2vwXURqfNMOrBKXd0wI82fw4ujIvrA/yaMM4JxSAnbEZo+34pBVgTohJ+2IYii/BxQUnAvJ8x74UgwUg4iCT75YUiU6LeJEYNs/SN6sWAFeOyNoYm32CHFZ5k4xFMhDUjsiiJyTBN1SkjUrKSbBhjoVJNi0cpVs9G8DECcBLd6LI//Tojko0BQGpSFOrpvIMPsqhrpNPkmjLzxCgKT5gnXTW6p2msdPf+yRXc40fioKaEZUn1egx5t5zlHXyQBuY5ZRyPsxgfqvScl/ooB03zewJxy7wxC6xzmRf4SeG5zBLBqUaTsmeVgqiCNKrMzJkBKqBUcP+LFxjeuQC/E2H2JSACcuGM//34aRV8zgOcq3kJg4LUwdW1ZfPZZ21MgMiaLiLcqQmRbpbqdyrLJJkxqYahrkdZ7xKNOiVlMC3VzoGSNPxgeFHV8nkErjDDSHI3WPFPh1/gOR361zv5nn23vy+Rw3ceMV5gxOZRwOAFNv14A1yMAMHXd3zw8k2XdgKX0OuJFsl9eJw6aO2CLQoOwPXkIk7/bepXnMRSDXywRcz1vBrUOFXK59Fg+649TDEDBIDbPEbcvAFd78WIs2ekFtCyvxm86WSuWPsv2Vqxvc7C5LiuSdsBBPjdU8CPlZCAGUi3L7AeD1iWjB/HHV4qbHWYBmw2I5z3OBwW5FWM2yUV5HXGugi8HETaBwvwzmGaBtzd3GE+SnbOB989rxAEFWbMxxUpZVxcXgDOg7bvVsIGeQExoJmpGeqWQ2fjEu7qo/q5P+nr1RladY82g6bODXNDenR6hu1UM5YgRsIXXv0c76wH7O+PeL0/IueOHbJbe+MUsdlucPP0PdwNG1xvLuuaKQB+uXuCD/gXFV0gDKEeQBYSJSVi6mMHaV1xd3+P42EGkavo/kF7W65rgtd6w2wBllzgh0H0EivEUo+UClCAeZ2xLAueMGF3PODVvOLL4+/gng/Y4Dm+kH4HPz7+L0gsBth8WOB2DrOSJBEZLNXVn0XGqayqbIVU9cfFo8dwjqQWklXPCtQGDTZn8tQIfOSaPgQUXcdxGAC3ahcdc9i0tYQPkrVSQjAjaPHkgeDgA9W9nZPVv6jNkE/hWeuyIOexylxzPJz2q5VSBam91hiTLgkh4olDhGcppyhcEKLAMr2NGQnpmtgQWg5BUs9NziEGjwMJFNksK5NZ1qT44Y7oNweq7IEPcEGaHRfVHdYqxk6tq6QP
ULYt0wxmPnUcUd+92zfV2jdZ3F2acMJ1II4JP3gR7n+qzkPbx718r06w/r2NC50qrs5Zrvu+u07p5HtzOHVNkpQw24uYM9OybMZiXq3gk6Pe72y8z4bqVI5Bxv1E8xGdzFUduqrOZP2dO1qdpJPPzEZB2xPkzMaDsLqbfXHucJ/obO2f6zwKF0UfmP6SjHdNKJQiz5sBhrTBWOZVSeSgfAMOJQQ4JROpffqgaATnql3tnPRQHieDMZsTrP4GUFvWnIxRN2dVQ5i9RVRh1s6FhxN5dnxunLZ+nRcA1yXhPTecnqCSqkYbqG0usDpozuE3lm/jy8sXH9hcxMDjfIVesXY7pp6XXcbFTcbP5j+BX4QyP2XWTMzZg5uBZd8tEDjF4QhyDwsKje3Keo2ZcWAEFzF45JQgxcWl4uFTzlhTFmIANZJrXd1biCPIEcYhgoKvlP9mnBkkkEtGGAes8yKBsSDF36WwMDZuJ6Q1acG9wzgFOB9FKZSC4KXI0yJg/Ti44DCNG9C91Nl5B2HOCgExRBQn28xXgxeqiFsETu0BcJHNVEA1G0alAM4jF9YIrm4uCLTSMmnjNIqiDbLhSu7rHJ0WlYoBbaxcogRMuQPLmoF8kOhuDIBzovRKAZVcX11S6QVG/+rJ1T5XK64Rwx5EV+qIdUsbDQrHDKDQqdxHM9bPjXbLhprjReQ0SmTwFxGKw1irIVpW4YFwsOeSuiokbvTYZ0EK6FgZs99brnTye+87pZwRNbKc1iO+61+Lk8sBwHLigNZtX5Vmy+69G1f88FnCfjuCHkAKmpR/6PC18e0/ehhF/uy/V2dNBXbvHFa2xWpANPXTHAHSjI383eAosvbQlNeZQkVtNv+WZ21egPwbt3BhwLQZMB+FTMhvFB4DPnl35taT0sB7pZsD06uyVhmH/REpZWy3G5VDIwo7IJdag3P38pc4vnkD7wgfIaKMAVfbCeM0wUcP7wM22x1effIpbu/2gJOaBHtlW3fFnDeW/pXWo5GV/IjBanzK/tsfjiI3hwCGA8cLYaOtorrbAB1stNa0woz4k9FscwB+8DcxivTsOkf6OZnzDjXy9AqnVqGcXAr4449xF1WuFj7ZXrk4xAhcXO4wbQa8uniKl5dPkdh1xlyPfugDEAxHRZrXpiOO84xpkog0M1BSwu31He73e4CFNCuDMY7aM8wRvA+1RjoXNYocYxo3ePXqBi54hOCrkbalGTkn7PdHMAOPPOGdKWING3xl9xtwmqnZ8rdwdfkY/+7w32ugTBzvy0db5C7LaPuNlSRLanRc3TeADNswTXj8eIfXr66RCxB8UCdTBAo51LrEXnKw1u7EISA7h8IO201ELqHqDNZ1w1zgvDBBF8cIlf6bkYpCFZ2wKZPuT9L5CN5rQ/AuQKI6pAXfxMSzHnprFmc7hFCdVRgNv/3ODJpGxCj12qHqBZNF0jTdLBNbH95Lv1GnWfga6LMlzacSvfkZcpKQXLUMPZWCkhNWiFNo42KyvD9E5+rN9LNiRT4MSG80AEbQ0K3t0wud/b2zDcFA6XQEgWqLJVYPhk7ezC4h8MZaEmnyu0tFFS0yI3cKOT8VGr0VT9VxpJMYqAraWpP2cKxsfFqmsf33xDGjLtvffW4/v5XITwUkUzu3H5F2bTv93IbufrKgSBf8fOBcqk73Vbk0Mh2qLDWn1+8wPZJEIEJxktF1BLWDxf4TUnHfrX2Zq3VdcTzMwnkwjfDBCZKBpNWVkCCVyqB9OkSEL0/fw1V4D+awnXyOJofUjURPdGhjkc0F9y0IJfWuGZ9lh9jxuXHagOZFewDfClu8KAtukCA5G4miOuZu4vovS6T9q+uX8KXjB+hjFjblhc+XuUbV7PqQ+x/TNT7a/5+gvGJeUjNKuw1ktTui3aygQvqapFwQCsNRE7793nNak2WtDgSVVFDAGKYRQxGIg9VdHQ9HNGx0ATtxxKBOak4FpzBPcVAM226DG2LAZhqwLhLxy+sKF7WAMxfwKoX7uWQc5gVhGBBDEIcvBkzbEey9EKNAlJYnh6QL0mnPOQ0hAMzYXE4Yp6HC3+rzQQz+zOdUrGL0GeGCd6IQcs6SvmYAi7Q0CGrU5VVgixRIjYAVRISo/YWILQLS3d9JHSJ5L9lIRkcFR3AuYPCyRkouWJZVp1igRSZ6nbOSXhlj8g73y4L5sGA3xcrI6fXypJRzxRUkV+CLw3/15i/qtRkAnBTC2sIReCdEmKNB58xPaDU8Ag/OiQAO7fsZWBeL0n720duORF0dFFps4jRGYY5I27sA8Af/D3Vv2qNbcpyJPZGZZ3mXqrpr39srm91UkxQXiZJIjSQSnNFiSoLgGQsayPYP8M/wXzH8wRjAhgEJMDwwxtBIMAWMpJE0kkiRFFex99t9t6p6t3NOZvhDRGTmeasuNR9bh+xbVe9yTi6RsccTzbu4TYf8un1GonqGdCYpecv1Aof9oMX+V/tN1d8/sr2QIqN1Cbss+ejoe3OmaoXT9X1hBtdVFeCasRQjLH/dUh242hM3F4mihHFeA1MsZil5hggJM46LoKsjPvV7V8an/KzWGRI59IsOu90Bh/0BLiNbVWvlHMBTMcar2rtcjF3NRYTeHmCg61qkZM6Vqvk8BMxknxKmJLzh9PQU/f034LoFQvAI4zn44j3sV0tcXu5ABHRtm1NUTcZZQbknSYHNCnpCdkXbOm0vN4jThNVqhaZbwD/3ybwulU1TGU8AEQusebajGMcUUf9+VZWwszM3hPNhMeXTATlLBMUdK98R+ggxIsWEi/0BXdsgeMIwaryTCIuFw/rsBNPyFN+6+TygBsuczmUOn3j0Ni6MftjSbwQFktjjsB9w6Eb0vYEkSWopJ0vjk0hwv+glEtN4rBctUpyksTQDh3HCol9gt9mBmbE+WaIJHi0twZywjo/B4x6Ik8poh8+HiM/c/F14X6DUHRGW/DJ+efX7+NvNv8c5zrHfDwjBY7lcZnQ1cVIWKG1R9AzYQ6OyXY/Tsxu4eHKB/WGP/qzX+QgAVmis9+CE/dZaTZQaLyRR9qQ1SUTbLtSRipyeyCT9mMRxJan9hhjJKWVFNCZBhRZ+FTMNxpREIERRXJtG+nQ659AE6RElPJiwXKzRLxYYLi5wOEzoOgach2UHWVRafjgBy3I04y/1ea8NC1lCAjmH0DSAOmv+av8H+OLi387Q7sBRcjeyU02NNHNSVK/pJHMrlZKhQfPPlFevKKukdauOXan9oiqr5DpWmOWF1hybzEKlN2ZnECqLhPOpJyHIbDi6GkFXmVLi8jCpdSsARTl9uhaoswEystNGjbT6MrCRDJREFl2T7yZCyeA6WrMrBtbR35gZZCVFtDb2qPrOFamY9TdcwyXnz6Xq39kAhUHp7axti/QHJScO7KJfV/PLxf9qDJE4qZ2uvzivAWb5XFRkypgkuuycpVMWQ90F4QfTGLF6+T7C2Sl23/0hUoW+Xst95z36cIJAbaaFPK38l7mV9PdratSctsYgBS+KED2WY8pRvmddHzGjzaYsk9hDmKzTtMR8JQYM9dboJmt3lA02oCI
pkqiOEGU58Fmu6q0ICU+mdzBOg+bnX6/MlTMjFC/Ghc/FoMIwKqI1D5R6CCKTaDqkFbZEWC56nNxY4/x8C2z3Mk7nCqiFE2MiKnoaqUHBKYl302mUjrWua4qZWRGksLtbdNLIeBRPJk9JkZYY0lvGgRJj3A+4xCW81vdFZkzDhLDws55PZlsQOSw/9RqaszWICKc/+xns/+ZbAlrp6hS7YiwnLYRg7W0j3gaBAU9J0j77vtUImTwoaSSLbE0ZYEiIu3EOhynlvWyCR+MEWGFMku5p29k00koB3mkfGimEFiWAYRC7iQUqNqYk+qFL8OblqdIKkXUzniHTmVKWjBbUu/vk9gVOLp/i7rs3xetCIiwtBVQMGyX1WgagRD+ukiZXgk9XgcW7ZN7O68j5OsfClTtz+WnpDGZI1SjiJzTilIZiAFQGEVcLkgvTHSE0jbb6sFQD8zSqkEpzZTgllrQjFcbAPDXUjFmdXTFiKq/fNatQreFVpnmsUGQGbfVl5qVkSEFx9SlxvszFZ63s1Os7N4qfPU5RXBJmSpDWlWUAEQKa2x8HDRusVgvsdns07Yi2a1Xg1feq1+/IoLMxJnGe7Hd7TT9cIYQG7uw+/Pp28SxClPB2scaN+y+pokDwJ8/BLdbwbYOTxQIh3ERa9JimhO1uh8PuACKgaZorHlKL/7GBC1R7HJnBacJhe8B+f0DbdeiXPcLt10HQul1jyjMHEfTcQfmz8WtdiyM0znrvr9uVrKNpqhBzFHmkA7WehzOGkVdMzuprT97B1DY47A9I+4NOVIRdt1ji9GyBYXmKH954vjybGOb+Lgq2prgZTepZBY8APG4tVng8DdhsBKjCGqWTpqRbNsBi0UuxvXNI44TN5gAXI2LXo1kscNovJaXycoO2bdE2Lc78PXy2+xp8/ADx0R/gcvMIcVJIdpaSgcYHIDhF33Mqn4EbeA6fXf0qvuO+jsf+ITaXWzgPdH2P4BVtkx3Me51r2Z1ElUIr4ADb86d4en6Jvgu48ytfRApeoq3eo+86hOAwTQHDPmbQmnpjQ/AIyw6Hw4RIDq5pgBThVI47b7Vi6oxz4vB0rLAyjsTgQEDTSjbLfj+o7JQNMYhvEGF9uhawrpRwnj7AbbwEJA8C47Xui3iw/BF2l5cYhwnDOKHX/oRX40260UTaHaTmMYUnZ2U08/AK9ArICLxFfyKAQkYjzJGe/wqhURtzchRr2VDen8+CZ7W7IEuDrc4T5jZLmVO5n6QPzpGYK3trzheq8Vr5CutakI5ByhFgxnpOAAAgAElEQVQVBM94Y86YUMcQKWiNPYvLOK84TtPszZkBV/NlMkOFoVFezjXSti4Z6p6s4o0Ku8k10mUN5jlNZoTNDS/7u95lk8tUUR8ffTLfSVlvTg+vDB0A6N0Zlv0ZDocPMaQETlDwO4eQgWOqMbHSMaujiSA8PnNvZHTm4Ansjc0m7TUseq9ksUnJjiNgsWzhTtdoXrmPw2YLPDoHHj6egXEBwKo/wyqcVTRIhZ7rbbXVML2foPqNWQGMpbuBNiywc3s4illvj+kYzGR+fWSMtlqxIniE8AZOEbE6v49H9wccSMxwU/zk8M6dFKZs0xFhlIeU9ASQHU59SxXxt6dv4NvbP8V4GGdMToqGvXjI2IpKLbwpSkpisZat7qFuuWPIT1wxa28RHiZ1uCQs2gY4OxUGPgm0c9Jc/qYJ8MFjHA/5gNp9u64FvMdhpw11yQ6WrG2j9WQxAaHt4PaC0GVC3YpkvfxTwDq0f0yKEdMgPYBCG0AbVRpj1D2IszSelKbc74zNGFGmZDzSEDRnTNI+nyRd8bAXkAIBT3BiweSzL784L8IzQnoDEbP2zDA0KlEEqZHwt4TkrT+QCVCoss3VIRS4VwFFKKH2JvgMJc6Z4elwbH4g7bNSaKw0aJV6tykm/OftQ+zjhFFTjaZkjcWL8Di+7CXrnVS/tloS7t4VTsWJEV3C2Rnhxg2Hx4/jzLAxA8yMr/pR1xkPV2u5MBsfEeG1sMUrYVsZedUZ0++kmAT1NDFG9boRAcElOOcz+qelIg2HIRsM2YBj7XtEJYJLNB9j5gHlkNtOHc2jfq0oNMUjbGeEBJlLz68h82Uxdq0CU0Rf7WGWcbrq7+O1nys2tm9c0SYpuqilYxaDqxiuBIDO7mGFKKiM+x3IERaLXpRN0sbb1GS6B/mCaoXMRbDfH7C53GAcRpzdOMPp6Qm62y+BTu4JD6yWnUDA2UkZez4gWq9KhGmasA9niP1tLNYX2F5upJ/gIqFtFSGyWphyi0oZYcZ0GHDYH3A4DGj7Dqv1Es3qFuCbekHntGD3NEXHDDXda+Fl8z2opqfy4+ouq81WqRAVXeh4Ue3tzKDSq2lCTgF1JGd0seqxWi2xWZ3hndO7mZZEHF4j6wBwFB4+V7mAOEW8tD7BB3GP890eu+0ObrWQ2kQfwBCni/cOy+VC0q1VLWLvwG0rhf4hwFHC44ePkWLCYtHibvsxfKL9FXgEJL6Hyf0UpvEtdRqJERHagM45TS1U/mBpfACea16DCx7fCX+CR/QIm8s9xjFhuezR9B2CQeqniMQJPrRau+0wHiZ8+MEj7Pd7PLd+Gc8vXseuWeAC0i5C1lPT4p0icObU4LKT7Yv3wD8YsNtLts1q0YGdKtV27NUBh1SlNKkXq9ZBmIFhnLTVidBu13dYLsXbLuBjojxyYvzj/q/BiHit+ZIYKwR0TUDXt9jsBhz2A5qmQQimGluUSPV+dXCAWOp+ksq07Fg6omsG4JpKnzhW0u1jdpArfpnPYzkQWdHVlEkzTAv/qwyvbAAWo4JsjXVsbAAe9hLzEUWXM5YbpOvr5ixFPpO4MrkctUP+qJ5JXS8g16GJ/qLGZEyVQg6Q9ioEYV4zZwAnzELnNd9Qfcf0GWO7DORIzTxkWnSfkiKJwlNsenav4/nakMrtqntfI/Rnn1Mmn7jeHv1lLlM5j0lpsyYZ43sAnmtexwfj97Bpn2DcDeq0Fp2XvczBUt/zzVFkm/1h7ov8GF0IEY3yalK8i1w44qWGeRoiphCwfvE+hsOI5uMvwb+SsP/2DzC8/T4kQ8ih71b46bOv4n73OohURzDdoFqLHLM1IVxlRqEa4/PNJ/H+8B1cuieaRszwjb8WGbS+PkJGm8zIOcJ68RWE8BLuEMGdE55O39fVr3aeK6IGF0IqVAQptKXZITe5Wf6SzxIRfjT8Jd5P3xCCwbFiKlDilJKk4xnjt1CmWu6cvDaSTKDk58nDrN4mra8wT5mlFY1DxIcfPkG3khST3eGQidaMxBQj4jRW47LHlxQsW09TxgFC2waEfoEURzjv4bsl0mGnvXmEKH2QOToWhMmkGohzBHZOC0UZbdPAeZ9h3bNHzDwEamjGUVJKWKN+k9aTea15kAJ7DSWLxJiN3weP0HgQiwGcyJRVypvjSQpPU0zgKSKEIP25VPlnMlh+oRNSxSHGCIpGMFrjoAXewjRFkHjv0TbSwwhqVNpYZaplD4y2qjIZnb+E6P04iUAiMS7HKeI/bx9jO4yIMSJOEn1JnNCZvl
mx1eqx8EFQNW2fzcC7c5dw/3mJYiYGiBlnp8DZGeHRI2TjBpgbB7VOSyRKa11HVu+L8fbagdI44Le7t7Dmw+x+eW2onPGUzLFgyE4OrlHnh3diHDuHD1c3cGO/waptkKaI7eUWKSnNEHLqaTFEqRovz8Z9vJZHo5spWfWnSqSQ8lo7zdIQOtTorNKPy4eu5i2VYK/e85qLL7RH2QC1mj1HvtzG0Erz3L1Cl88HnOdo0h8A3biLdPcuTh98Hw8/eIjLzRZAwtnJGk3j4JtOzx5B8nIcyCDyIWlw0zTh8nKDzWaH5WqN9ckC7e2XQavbgEZxY73vRh+VoAbUSNJ6oCmKIovlHSxuA4QfYbfdYrfdI04JfduAgjiq7B5Z+UmMcRoxTqPU600Jy0WH5XqNZnkD7uxFkBltuh6iVOnwNFIjyPRz550MvxBOMeJRnZ0KLIKNhioKO9K1CjPmoqQVIsu/vn36HF457BGaBmkcxDhiYL1eY7M+w7snd5G8L/R3zUUa2Z3iJOesMvYBsTUa73B6doLUeGwvN9hugdV6ibYNOASPaZzQtQ36JsB55dJeWmaQb9H1DcZhwoePH+EwDGjaBjfoRbze/go6twYnOd9N8IqUrBH/BExTxOryP4Bv/D4SN2ASPl63T7jTvIrGdfgv9H8jtFvsNls8fTJg1zbouh5t28ATMCVGSiPiPmG/P2A4HEBEuLO8h8+vfx1rfxP/gB+C1BA1j7yBpjiS1N85X2N0L97H8ON30bcBaRyAZQ8ki2AUBGVJ9y4MlYCcLi+Q58Lv9rsD4hQ1I8bh5DM/Bfzj2wIUxYzNdgekBApiIL81fBOvNl/K6sPr/S/hyfoB9odHOBwOEo1eSJ2yAGuVHU5GZwnZgSyRGC4Npamk8ZHz8OQwKGKrI4c3ui/LWoBnjZPFKe4qtlMMhxk96nNzrS4L4FdBnC7RN6dngu1cGfMwA1ONGDEGqnNJagAdGaFZL2KUptc24CODLf8o7Bq1DXrFYXONA8cicrISEZKep7dzBEqcx+10LnWcys5nbewQisF4fFkvt6NRFBj/Ssm1J5S5XHdH7WHMLE5nV+0pbLx6t+MaPFmUGdcsn1fLSktCjmaRh8IAmq5DGEaMkZEcY5wmkJPaT4OGyqMtIll001Qlv2a5R9V+S7RVSjI8fObZjGmISFEyu2qnsPMO3SsvYHzvQzTB4TMn/xKn3V3c6V/WfSLlwYVWynqXOdqY8tqzpfcXmg0hAKOURznv0RxF946vj5DRJukGq/4r6PqXoWxP/p9IO1VTJXR1wSpCATOe3D7HyeVT3HxwBlRKKXBVabNcU+aEf9z/Db57+ecgL9GYFKWBnxg0QtTjFCVVxDsgSUG3pflNGhly2nON2c0NtirlKhfYq7JjClrTBIwxYfvoqUbGPFJkGIrhNEZYf5w6GpCSFFeS9pyon2NM3YUW1HjEaURH0mB7m0bEw1Br4IjG5B0hpohxmCSypIicw2FAu+zRtAHDXhCfzHDN/Ew9QFNMUiiq47UccR6jlgLZHpPARnN1D1ViJfLnpSCfYjF2VYENjcdy0WfGURcWAyT93QCwooYN44jDbgAngSJfLju4IMqdJ8GQKwZMgncsqGlTqWuLIvUBIljmlXmRpzghjrIuKTGGUfsqMaFhhuMkvdtSwh89fRs/Ot/joE1wC+MHwqJIlMyjOOHjrzo8eEB4/ITQkB3fhAhC4ghE1hRUvZd6wByA/379Ku41i5mxBYjB//fDU/zJ7j2deNkDcEHIlCdJym7fNmBH8Jzw681beMFt0U4C0808p4U5wy19jkLr4Z1H07yAvv2iOCcUAfZB9wgPl+/hyekdfPrhj3BAEeomHBIDL20eYVyuMYZKQa8ug1Y3tMBcg1bRa25ZQC4LAYmslzPrNV3TadoTQYB1DEwI3uMnsVo7lVLWVKPGAdM0YooRnqM6cUTZngBEphz1KZEwy+1XZ4fmx8sNFZwoGxSsRljA5M4Q2ku4w4jzJxc4HEacnJygbYE4DcpDq4URVQKb3R6Hyy2GYcBitcBi2YKX97DnHths5Bwk5c8G924qHZFCMButCuIskUT+gxnM/YtYLnuMj97E4bDH44ePMQ0DvKJEejVkkwETKTDTNI1wPuD0bI31yQkWL3xOYGHN4K3FRvZGJzB57SlnfM/UU4m6j8NYlCqNsPdtgCcWRxBHJHZw2mzYlAanKkaCy6XOJWon0mo8jPMaZKNR75EWZ1jtlxjjhP15RNN4nC6XWK4XGNcrtKvFkQpjzyhyLsaIV97/kaAN1luqLR1caOCbBt3d13CWBqQf/h02F5eYhhHLkx437tzAOA5SdxHEYRgUzTdRAHHExfklnj69RPCEmzdOcNLcxWfDr8FxK8qUD3A+gbteHHyjOhoB7LcHTOM3cbH5Ayy6X0fXd1itFgjkUZKNgDN3Hz+3/G/x1/yHaEPAYThgvz1gtz3XuswKlxyS/h8ah1Vzgp8/+TdaV8d49e9ewn/57N/PFCuOCdM0AiRp+JRlKuDI45W/v4+1+x38hfs/sN8NSDGi71rs9wc9g4DTVgdzY4UA4io9irDVs5N7uHmP7sXn8cLJZ/Dij58DAFx2F/j6g3+H7XaPcb/HyIw/T3+IXzz91yAQzsJ9/MLJ7+JPdv8OF5eX2O32CMGhbUKWeZq4CUAif6SOGAZjSlBjQGqBGQp8AoZvO/TLBR59uEFk6Wm1pucwxSiIrKaUZmNBAVQq5EKL+pQSl3L+oWfPDAOuziGY6i6FeT+Z08w+0K+WS42BGjSjNtbyi+bMzymD1U3qzwGldEbfq7f1utZZc90SFQ/mrAOZYewQEdmJQZ3va8l1OmczotXYz/edj9qmVbO3WjXOb9SzLWsr/DfLB1R7pntcP2v23PqhtRFmtGeK0OyhBGs3VxuQ9fWpxVfxV/wE4yIibbYaDGAcUgT6HsE5WATTbsyuXpvrDNGyKJwipilhHKXVCBha50ZonKRpr375C+Dgs/7S9i26sxN8wv93eP7N59D5lQZPasP3qhFcVkV0r7JUhU/ZsohT1MM1Lfz+gEmR2xH+GRhtzjVYn/wM+ubzWbioDxoJwCv/8AK+9dPfB3BEmPZTtGIAAHtGcnUMzqohZk/MB+wyPsTj8R185/JPMQ0TiIBu0aHtWkwGPW2Fttpkr23bqkhVhAcnl71CgBgJksNaECQZAIUA1zVI+wGWBkgsiuJIJJ44ClifLhHagPOnexx2G2loCihRaXqllwPgtcYsHkEIk3rNGVEg+uOo4waa3iM9KYwwxYhRoXnF0yC4juMwIEWfa9swRbSLDqENGPaHLAhEu1cjgAlxYkxT0ggEtG4MeWzeBzCRAqqU/awjhJZK6hzDBYeGHaht0XRN7sUCVsQgZfFJ6wIYkCbZ1geENA3mMElapjEqcSPDgaS2rGbE5Eqag/bYccseFC2fnOE8gRUxLyrARoGtlfHfb97AzfAC3nUPc8pkHCN+0d9GpAs8oJ00DCIxMpx3+KV/4bGjbSHZJEqlbxzutB2WTYf/af2G6u4GqZxw6bZ4c/8OY
if7GJJDs+nwO/09nC3W9Qkqaw7gy/0CXz69n40sS//LRo8aLX96eIBvD08xjhM2aY+vth/gDX8u6bx5/1gjUlKvtlj2aP0ppBowArgEa+F3jlxhRGJg0bboVz0uVmucdqcgACfjGunx+UwxTTEhHg44u3OG2zdPMDadKteu2sJsfaOk9pjhVSl8VXMUA3SZfR/C+M1fyhCHBMchRw0BS+1NGqW07xfVwzzylqok6V3VRsiDZIyaGpgdS7ZlDFDF3/J75gACIZry6QwSOmGcGFjchRsnnIQO2/YJdudbHPYPEUJA2zVo2zbzksM0YtztMQyDRGucw+17d7HoO7iT+8Dqrsw3St2vpLQUNcE8u945EANDLHWOQs5mEBEMIp5oBW5vguNjrE7WOBwGDPsROAwFFhoVXwgNlicn6NoWoV2Abr2B/cggTABJo1Ikg4lHdnbJdRX0BlVtWJ1mJS1YCLtdzPJJsiOQ53rlclZ/ZSopACQk+FIHWbxDogykiMMh4nxieLgMnrGfItKQcD4k7PYDamUhkxnUGGU5tz5GVa01skEelCLIB4AEut8dRnC3xMmLn0Z48F1cPj3H+ZMtnN9jueyxXPVoQitZCszY7/bYbHbYHaS5dr9YYrFai/F965OIuwZTGhEOQCAnaW2rXwZWH8KPf4Hkbori7z02i7tYLX4d3jfSc08dRFFTDc0ZucItfK7/TXz/8J+wc+do24P2PpuKcq1p4otwgpaW+PzitxDIoseAH3yW1ykx2o1HYIcmBXRpAaCkVpMj3H3nBm5tboHDhFVzC8P0IZ4+eYr+/nNSnzYOUi/DANS5K4IIqEI0ABjD4YDdTlr8NF2LsFpi+QufRbNt8NIPXs6672lo8dnF1/BXF/8XJmhvuPOEaRFzFmZPN/HFs3+NPzv8IS735wABy+USwVepcoqASuDs2NSMtqxUs4K6gAguNFj0DeI0Yr/dAAws6UyygVCML2jP01QrqtlgJW2oXsuWmqcpcIxSbKlvo5pFZz1tzvHmZkfR95D3/1h3rnVkyv+k+fevuejY4BCrqzhWnvE9G8Isw+aI1yU1eIuOqNLEdB2ivNa2FqCrRnH9zDLU+cB4SjMjzpyk8j3bwco5rfefW6ZWFVf4IB9bh1TGa5+xxZjr6XImKvZavQcE9PiF1e/h6+l/QYoRu90hl4Uf9gdwa03pkfUTs/TTNcxXRCoLXARLsOMwSFoxp4S277BoF/hY/3N4bfHz+G78Y4x3bmK7GwRkT/e8DT1O/A0sm5MyX4vu6ryZ5wQ1q3Ej5HIKIZArQ8XnFr+Fr4//K4LfYtIshKb5yWbZR8RoO8Oi+1mQETYpGpTO9R8/9c5c17QDRIVg6iJUu2qBVpSJsnrvjz/ENzb/L4bxkJkUK8PsuhZ+IU2tx3HUlEaHlCJa1yF0CqUfI6T5tRg4UGWJ6u6LeQiM5uYp+pdfwPa7P9K5W5SKiqEyDRj2AW3bYLVsMOxJw7dj/qzVIgHIqFXH8KR2pZiw2+zQaEPSEVI7Z7D2tmycBPnSaaNw8YooKIR3mA6jrPk4om0C9t5L75XszTelNGWhJW1lNDqRKibknEJDF6ZBjQlOp+mavnhUGQCsaXZeToxT1LQORQqaptwaoVkvpRGpQi4nJEX0lHFETVsMzhU44JmwMbKTNfKLDos3XsX4rR8Aea4yP0nxkEhAylFQB0oBr4dfAQCcxw5PcS4RWefQNA1+99ZrsKNeCmMdtj/e4e8/+93cN8/2vW0Dfuf2i7gRzvSsAGCHQAQEh1vDKcKPPd567T1MIeK5t+7g/lt3xJt8UsZs9Vnyt6aHKmeuvf/2vi3Ib+I1/EZKeLK9xF9d/B0+gQ8xTiiR1IqWiQhN26Dv72PRfgmOThDjBvvxP2GMH2TjbhjfwX7/ltCgO0Va3sC5fw5EHc52l4gxSl3bDLgjYZxGXG62OL/YYAwDDBI6qWC0FEnx8orn2QRKrq8gBtjyHY0/lGfIMhTUquQIrRo2c3ZtBpYq62QmnoMZ1o7K+oAUcVHp2owcu4JGt+XJdfRXUO9QvWavA0kVIUmVTtlFzjndlVfPgZZ30fs30bQ77C8eIY4jDts9Dgp8YfWl0Bl0fYe2bTG5FS65h0sL4GJTZp7Zbp16K7+MCuFv9yvcV73aWaNQ/tzcBNobAL+Frmc0iy3SYZPp0TzTzhH86o7WmQK8vosxAcRTVvqzkC9WdKbjq6oOnmF9yWXtD0RTMGCfck5yShf0/KSEUk5ee1ev59HMDKdRge+dvYCX0rvwhwGH4YDWR2xcwIP+FLPQ/pUx2tzkZ2iC1j0pL1aEu+EwYOwCXnn0Nt68cQ9P+zXaW6/ipHuK3eN3MBxGbDZ7bC73otjr4C2FKwSPxeoE7e1XQIubSARc8iX+evG3AIAX3fNoRg8aPW5Mp3A3/ht0oUdafCXzuZWnDOaRI/6xGLP2PwLhzN3HF/p/g7emb2LPT4AWlZ+lZAzcDq/iZngZlY4qZ48sNU/25qXv3cPp5VpkFhVFy2QxA0BgEDp8bvk1fBP/EQ8u38TDDz7E3bu3AddiOgyIueeaUWWCI2mFQADiNOLycodxnND1HbquxfJTr8G1klp6vr7EycUqK/nrZoUTdwsX0wNQJLzRflWycWCKIuNm8zxeP/sCvvHw/8OwP8A5h8Wik1ZAtXFRRQQsopbB15yD10yXpuuxWi7w5OFjDIcRvgn4VP9rOE7AQ5bBNc3a+ivtaQSPiHL/KeG14n4nNRlKna6mR87FjYB1QbiuU2PCyB6o9/7ouu71IrqAojaWK6fuiUVxJTPLsi2ueV6OLpbFKB9lhnA6w1nU1gdHkci8kFyiszLHYog7hji/aTZyHcM1Q9NafYuOHr9vbOLY3Jj/du2Es46cP8mZM8jvbv48o+2s4xFQG6L5c0y407yKB/33EWPC/iDOKWZgGCQzoWmk9QUfG5hcRkvquLSSlBiF3w2HATFKY+yXVj+Ns/YmXmm/AOc8frr9Nfz48bvYLd6bGaKLbYe7b9/J6wwg81+T2/VMGYVcBJqU5COmhqCmPdEXCIDXrDo3RXBkbeL97OsjYbSJV7gwiVK7YYbbnIHM0IaOCIDp6DDIuzPiZTDe3X0H3778OvbjttzPhL2e1dC24kV3Dkk9ys47xBjRBg/fBAyqrBqUaNMENaq42sF6sph5jfPLTmq4yBEwRkzjBO8cpqqBrM2zLuS09XA+ANoGoP6saVQJLEoZA2kcxfszTWJwqFfT0oO8l0bXRB5TarHoW/i2xeV0jpQidvuD1JsFjyF7nMn045lSYd4kRwR4SbfxPqDtgnj427Y6xPkfRRWD5uEnZULi0R8fPES4dxvd7RtInDAMg6CLkqZiJkZoFdrZW2+XORMyTy5Y01+PlaBKAWIdfzSur6fQjAMThI6twTmjaTy6RY/e91gsFwAS+kWHc0U2attGvdkLgXxN9YOllk4akY/5wDtHuHV5AzfiqXhjzM5U682W/s62Q/92h6EdcevBDVAnghOg7LUVQVmUFAKycZszxCsnCANVo+OAu6en+NXlz2Cz
32MY3kRS1M4pJgWYUeVseQ/r/ksgOlHyXCC4L2Ca/gLT9ACzBqrOYbPZYpwmLJ9cgF1Atz3Hg90Wh/3VyIgYfJMwZDiQKynDNnzKKQ0Vu9Qznor7We5Hcs7jFDNNXmHOmpIDokp5cGKkcanBECVZ+FdpOsy6l5WAPKIz+/1wGADLxKp4mnNVUTYq4zNL4SPvrL53bIjRyYsIywMWixtq10XEp29X42KQC3Cnz8MUY7e8lc9n/XwR42VWFp0tKdH500Vw5X+OTF+tFWpuviLkN+7B4272MSkCd3DL21ma2rrYbc1gKze2UVepm8BsLbMqdawcVXKk/HpcVUJXv3d8cV3zVo0hP0ONF+fxZHWG53ebjBJ8xPCrB8/loL3z/uoWnh8OCMFjynJBIxmK0ouUcPvpB3jSLuAWNxC6UyzbJRbaF3O6+AA87GyCICKEs/ug0IF8C3SnaqCwlgLI3r3ZvQN0QBMDHh/WuLe7jX79VXhCNrIBi7YU9McypQoYhDR7ghkvtZ/NtJXnWimfzCnD6gv+lFgTEyfAObRtwM1HpwjbIDXNZOBg8iCrv8pj4IQeJ/hU/xVM0x/h4eYtPPzwA6zPTtA0AQ6SMl9nkSQ9a9M0Yrs9CGhIG3Dn5Hm83H4e/P4K754+wdhOeHzvKU4uVjDAjtP+Hp5f/hQuNo/QeIe2bzMoQXWEcH/xOt5f/wAPLt7GfrMDmMVw877QecXaWJ045dzK2odAWC9bbLcbXJyfgwG82H8avVsqPZfvX3dl1Z2rh+nfUbxQufGSOFFscKWnm8jRq/e2fq2ad4Caa5a6t5oQys9iJCCHGM3RPzuh5hzQUtd8lIAMJnSdoVTTSaVhzteGLN26pg15pslY26ycaVBPxPDvISUbSPoukSBTEj2T3YgYuP79wr6O+E9++tXNyPpBfpdn716ZO65uTX0fAFdCGp4c3ui/DI+At/nbSJw0jXx+h+ikRUBxCuhqGnouSzlRSiw4CuMoiJExoe1avH72C/jU+pcQ6h7QAJ7/8R28+6n38yo458AzEMfZrunP8uwC619F4uxfRzmKyzBWaWeF8PHuF/Ct4Y/hxwkxcnZePev6SBhtwNFG66G7zpuovEAOnCkESglGiNaAGeSywiQfoPzjaXoPA++lT5c+E0QKNSow8WmK0pMlMUKrufokNTcE2dhu2WN7scM4DPAaIXqm6FYiY0bp58GmRApsvfce0UfNJ5f6FIMknnkmSDyVXS/NAPe7w9Gjao+WVF6YlBvGiOlyB0unIi9RMo4R3hXPqvcecJIi17YBoQ3gScafpggfAmgYNYKhDAjqIXOEpg3qTWV47WljY8vk76C9amTcyfpoQASvnVkBfiFBXbzYYLrcor11AwTJ0U+xALEQkQhVrbWzQup8sMFotRfOOIxoulbaQUCEbq10G7uylW/u3IR/6XnEBx+qC7PKC2fkHm5t26IJHp9f/haCF2SzoI0UiYDTpye49fgmQghKSyYkhBZX0eHFd+7jzXtvwxpvOwDrwwprrIBGPHPWD8gYrynR3a4FtgCywWaKJQIpvWkAACAASURBVCoFFnm9Zq9B0kWLeNFidJRUX8AjNDfR9V9FihvsDn8CxoApRhwOA8bDCOeWWHRfRkqrnNabEoPjEi79PNL4R2Bs8oCclyavzhHCuEd/scE+CiO7olyrZ8XQpqYpAlQcCNmRoJ93Oc2wKOYlumXGDP0EJUVpITFGjkXgZEVh3m+wKNG18Kzonwu4xbFTphb0c3ALujK28vfRd6pnyHqI44bqe/glXLPI306rs2rsJII/9GWSs+fW4y182OqMrG7Xe3/FyKyvIgDzDWyqOsY10K9Ax0ZSxUuuvHbN5xg0n/vx+7Vy/Awltb53+YTN/SrXP34lc5PM/2p6rJRBYly2S1ye3MCSE+Ad/vH07hXHElVjPR7xk26FF/QEE7RnETXwNMF5YNF5LLqAFsALm0d4a3ETAMGvbsNG165vKrhU2RNqetQRRptP/kA179gkPG3PsVvs4SePT52/hhQneAU0cVR4TIrW19QjpgmHYZDXnAdrA20wY6qfcbT39d5GTVnkxCBmvPGN1wBitENAmJqcpsVlByRtEJTR6kgj1z3O8NnVb+Cv4x/g6fZDHMaItu+xWrRYLBcYI4OjKIjDJLDiw/6AYZzQNAFn65v4/Opr6HCK8XxE/3dr/MPP/EgcpWyueMlG+dj6c7gZXkHwHl3bFMJBGe/a38YXbvwW/iz9n3h88QH22z1SSugXvcoaWw/7kkbrbLWZQX2LxXqFcRzx9OFT7A8D+mWPm83zaLiZox+iPu81AdYpdFw+lzVbbQlkMOcZvVuly7OsDgUqqvVB+6Ouo6uYhw1IdEBLYWCLdM15+uwrlWwvtXa20nT0vWKgpNk3r+OQFmFTJHG2TnPmKC21c+Wq1oNc9ZbKJUjJC4ME7M6+UUP/Z7ODZA+Pruu479wcuf79+Sv1C+V51+QulIshdOhKBLFedwbQUI/Xu3+ByBPex/ew9zscdgOmcQQnSVMPjUecWPQFa7lkd9K9iiliHCPiOGYHbL/s8fGTL+CTKzHYjmUq52WWd1zyePl7z2s5gupJ16+AyHCbP6npQUfzM/Gmnt6alT8XXsO33dcFdCVOmOI1qfvV9ZEx2mzhGJh502aCSldOlKuKqZjiFDkbRYkBSilbwHUEjhno/AKLvkdME8ZBFomIEIKgJKYEKcYmQtd3iNOEtm2QWIoYY0pwCvl62O3hScKcMUbExPBmXJaZ6MEjlMQPSzmxrvDVvFPCdrPBQA5Je6+RGpUZEpUZ+52EfKejKBugxZZE4t1UxiGGqR1SaKNvUR6dpqy0iw4hBIzDIAajpkw2XcAhpgzMQo7hgkccOSPfyUJK6t9yudBtklSivMdqhDJH8Y4mBWWJglJHnNB3LfrgEZzD4KVOKTIwjRHNC/fQvSgF3MQWHS0M1nuHrmsQtEebfBAAXC5EtXqrcRBvTtBaBSMoZmSUT2itFDmH8eETTO+8j6Cpm8waFWTGYZwwTQneaoT8EutwWxROZq2vE2O9iQFtbEzrAVLd+FmMvCY1pQUMGEkh7g1d0CtncHnURZAIE5olYRh3UbqsaMU59QQZ6zYjj3PUG5CzaIXtSIJyCbRgbtA2vwOkhMYltD4hthM4AeMhABCwm2h0mxjRtQiLX8Mw/j/wjUStnXP4/q0XMZDDcBixb3Z49dHbWKa91A6ypNNav0JqA3584z521MBp2kKZW2GuRJTz8aXWo0zfDOKyguWy92oDK6Oh4fgrR/egsobHwrCuOy1bc9X4OP69vmoDpQRp9UwT5RTd8hzKEf7je+Zz063/yXFmKqscJHJvmXNJky38wFXC8Hg6zDVYzD897+M1mH3nGZ873oF6/54VRbj6sKKe2l1nZ+0Zzy3vVp81JUFfqb9v0fvIwI9Xt/HKNKGdDthL19lMi9elGOU7sih3MZb+noA2hIY4MM7PL3G58SBO2K8YY3OqWSKuGPtugXL6jXHMlbOsN+dx0IzuiAhDM4GaiL/pvoWbuxM8f3kfPAH
LIPJBfKxivMQkKIvDMIKI0DZBlBmXMOnYrS5YlD8FtfFWkjA3xCRdi7HeL4qBkRXNqtI1GwksCLXZ2JdXW17hi+t/iz/H/46L3Qab80vst4L0xjrXpOUFk+SLY7VYY708wZdOfg8N9ZI1khjdtsHq6QIv/eg+LMwj2SRA5xa4s1jgcJB+UtL2QkfOxbm5cKf48q3/AV93/xuenD/GYT9gmiIWixZNI3WIRLauMm9Wudcului6HoftDudPpIn5sl/i1cXP4F54HXWpg83fnInIFCGCyeSGpT2SRjVnBGJ3YtPdYr6PI+VXhkJGBEYEsyCkmmO5pu0rB65ixin3aZ1jDtZn/tjW46PXzFSx10oGpXJzmq/CdT/LiDR7hRQ50nidGWGZKmtjRjR/+V7hG06fyqoPyLpDgPrys0kjO8eYiyj3n3kJ6zWpeAoz+MqKXc/rZqusVnuJYFfff4bcKYvHCNTh04t/hUQjLpuH2LRPsdvuMQ4Txomlvtr7bLDZWOU+kpo8jRHWYiQ0DU6Xt3B/+Qo+tfgKHPlSezlbBtONZI0njnjrtffwyW+9ivrDZk9UnfCqBXCgWZCMZz8yT+FiADKAQB1+dvXb+MvpDzGOMfckftb1kTHagML4az3IqbJkxGD53XZ4rrsEil+iAhONGClimTq5ty7Ya/0v4nF6F08O76mCq8qgD1kopikhskDch7ZRhU0eGqN492OUxouhVSRJAwo5HpR5J7lEroZhyvURU2T4KA1ODXr38RNpJF0LaHKk1rpcKaZc5F8L9MzcsqIGgeyHeDMJADyXBoUEdF2HpmsAlp5Mg4KlCJoeSbohSX8lC1E3Icg6KVyqrTGzNvcmCw2LwjHFCeMotYCSFVfVEmiqnFcABbFYkqBiajPRDI6hSrHzDv2iwTi63Fuv6wQdbcaVk4SdU4wAQdM7HabJ43Kzx5kPENAYtZIIii4ntGH1IiacJeqkyiuk99F+twcAdF2Dpmnw2eVvIFCbU75mChZVijeR9MYzNsCMsd/jndfeAzaiwDmIJ5qdKasu38eUpqI2yb8lxaVivqK/5SbwUucyG9aMPYu3Ub2F+XWjR03VIwDJKQgLAYlAaABK0jQ7pZyHft6eg5zHe8sHuGguQfS65OyrUuCipsN6h261wDvLT+DVp++jSyP6aYRzhMtuieQ8LhdrbFc3UFKby+zzul7zs76uRH6uUYiPvfhsysQ1kZu5elCP5+prc6OKcRxN+knXLEVcytnyrdOsOuKZd4DJ/hnPuOaq16h43NWTTNBoCGdUTHcsFOm67AO24erezRV9e27+3jXTqQ1XU1h+sq0nz6mzL2Z7mG8k58Q7U16LwSLKEwDiqqzCvPJX51eUIsogSXmrrqM9WQQAkmL2gxNzTtnTOcuXHAmonlHT2a5fYZWkJrtWMMzxAyJ4AsbVEstljxKRv7JsZf2AfM6MH+Tt5iJL7EvMCu7DDPKEx+tLPF59B1Oc8PrT1xBigzVOZGaWeaI8XvoHEqaUFLJX064JQlMKMEOksoddlpEyLprTEEpqbDHK5HnFaNDPJUKpNZVPUgr40vJ/xOP2Lfxg85d4uHsXl2kv7ysIFQCEJuC0P8OnVl/Fc82rQAISaePcmJAmxsf+9iXwMiFZTTsn5GQYPVfDMKJtFTjJGrZXaaMOHb50+nv4G/r3+HDzLsZhwmaa4JsRXd+izTXhcl5dCGhCgzhFfHj+IYb9DtOU0LUdXuk/g9faL0LqslNZt4oYsrFQmSjGe606na/hYVxERmUIGI0LFuUYo9TycI5ZzMoPys2u/lq/XZT4AjZHNlpmM49mJ8XaQtWcuuYtuZpFf5rjrjYY6o9kHlONzMHk9lweMPzMWLR7zCJpCh5in7IIWn7NZDkg70Wpr7S7ZTfL8URyo+f58804P3Y4Xifp5PU53zkGHLkuk6e+Z+mXC2W1Dp9b/BZGPuDb/j9i0zzG+eGhRLDHSUsYMNNrJArJgofhHIJvcHNxD6fdDXxu9TV4l3soZZrK2W4AKBGWmx77cAARENMEwW+29MfjyastwPV8rJyrnJUZrZl+DFROEBlU61ZYN7cx+PeQpn8uzbWvSyvUvXSelAoKo9W3j4NLapzoQQSwbfZ4s30Hr+9exSJ15swBWJhhhl4G5aa+RBCEqpSykbLsGkSIIScIj4TDMGEaJrReDJ6Y5ilSM2biSjykvXsL8YOHcOMGEVJbI1DgEZzU45ISxrFEAEUYqaHiSAwMIkSNwj3Ta68eGZ00wIzEUefrpLATANRzPg4R4zRJlMmJ8JL84EkVNMI4JEjLg5CbTAtgRon2xWmSSJ2mpwTIgToMkyCXaY+7oE2uJdqmeGchAE76c0hTbokajkhXCJqI4JsG06QKI0HTSqjkyqvXKEbxYtheh+DR9YTd7oDdZov16TpHMSXl0xWGozfzJ0vg5hmw2eXDOMWI3XaPaZzQNg0Wix532pexcGuBsIdFqUxRrP+rpSI04jhhiiNimiSaxSUKHaeIaRwkNTV7RAt7tZo1ud2xr1EvYx5GE3ORU/1uHKlmymLEG0qmRXwNvZLBs6ajQzfgYfNIaNcR3unfy0a6RwDAICaEo2r0el3eW7yKPh5w47ABgfBgdTPn7BvX+Inpcj9Bk6+NBDtHV5wfNu+jtShKTH0/e/Z1BtBcuNl9Z6BrR7t2PMb6e/bp3H6MKkWqehLNHlvzCRUrqqBFc4xdM3Qicdo0oQU0YmNKrim9uZVBpRcTJM1S+JoJZn2O3jsxFIHzmHdpI+5QvzbTBgAADhIRT1zT0LUqnZ6hCdNUpwXp53V/iaQe02u/QLqmjYGaTGU3jOBBQJXyZtzKoQBtZAWrUv6kX6E0Hc9gJWyCvd6QSonK79m4ymAYHu/ceRE3L5+I0aaf7VLEzd1FXr/oPTarMzRNm1fIVUyEbT5qwNZ0zXm9AGTPflFwiCA1GilplEXH6gIa5/GjW2+hnVrcHm/jbHOKxRQAdhijOggzSIlG+PW8JVZZmWnGxqFNrjMcuNKH6RZkq2VeevuynvlUr3Sho8TQ6IQgHp7R8/jZ1W/jneY72PMldvEC7w3fEdgJcvh4/3M48bdxAy9inKZ8LwP+AokqN41TpdDqeebChT2R1tKr3K+dJzruntb4zOpX8W33J3i4F8MtjhN204SDytiyjsIvUlT57j3arsNry5/Da+0Xsy6U97VaW1uOQnJmOMh8LJmvUGRmhEWG6DzndCMTNvZvdGJIr7LlarT81zq1jow1uyxr5PjcXL1+wntceF8e3zWfNkMQKDx9dtcMcDI3eY4lhPxu6boybsq/FRlfHL5Gz3Y/kcXOeUDT+pNmJznntdxH7mVlKfnJVOnXx7KOZtyvjJtQoon1+4QcbCm3qAwa3ZtSxgC01OFzi9/Ek+ZdPGnfwZuHv8FhEkeD1Lla9pr850j07FVzgpcWn8Hd5uM48bfzSMRwqsbLnB2wYfC49+ZtPPr4U4Ak+DJEacUTFFei7E11no2pQAZQEt+NlvNy5cvN3pO7rvxNvNh/Ghf7B5jiPxejDZjPTF+9Lk+2kk
35IJjMtPOYU/ES4yJcYu8PWPLCvimfTaXozzlC8C4TcFZA1BqP0wTfNkhOFFT7DBHA3gOc4EMDjEnh2VkO5DVz6u7exLBawl1sMY1m6OkhZ2mwZ7/PomxEVX8zVzHZ+Wfq15jFYPBO0ItS3RYAovhDIdCnKBEwjiJkncH1x4g4CJEmBuI4YrL7Bi9w/BRyk0+owTSME1j7pIXGz1oSAIJC1jQNyAH73VApvAQXgrRvUP2HU8K4GzQlrygyzIzhMOJwkKhgaAKmmBD3A5rGq0dZ1mGcJkwRCI7Qtp00MtRG6MNuj80lYbFaSl1hjIoCKo8y9Ca/WoLOThA3OzAxppiwV2h0R4ST9QL3Fy/jk4uvoMEJMlslZE+XbG0RIMwAUkTihKg9RXgi3H7zBt6996HQhHNYbHucvXeCKUrNo6/SBBwJgigbmh5JulBJxE0Vy4cdEqVIE7NGq3XnHBWemg7JqRig0hA86t8JlCRdNrQNfrh6E4MfEH3EBW3yvnryoKxVVcr40TGZCy/C0PR40PRFQQOunK0r6XK1klMZY/XFprDh+NxcH3l6dlqaKc41KzYvt75CAMF4DFXGGmlRo91bDZmYrh0zPCH4gCZUiq0qgs7g7ZUHOo2IxRgBjdqLd1ccQuIskb5kRBUAxDUXJ8YUxxLBIFIFukL945LCVRRuVZUJsFqhrGOgRKhM0ckjIFMyUTH3+e4BEqUV3fBI6RAmfeW7FhFhlLHWqKFgRpoiavzHaqfmdFW9/pOuBMzoKZO/yaoEpDRdnWZej+pM5vEDxSFXT1MWd2Lg3cUZsCg0HTjhYnmqnyck57Btl4D2oLxODh87Nq6sSd7nanJc1omoZBHUkQbnPGIX8X73Pi66czTJI8WEcRjx3P4uzsZT2UIWimUqq4BkqIjIQt9o3pxS1k7DUawHJccNgETryp5mpVIbSOf9IdMvGBnQgwgvdJ8GEWHkPe73r6ti53C7ebHQA0vElFjqpckTvPNwoiWLQ0PHw9XzAJHJbFvMnAHp8uYrGuGSbuGT3Vex7y7wjcv/gDFOmKYJ0yh9DG0PiGzdHXwT4EPAJ/pfwCvh83KGZ0YRl2crKm71YG0SbdEgjesQZQAx0vnY2hpwhMnvrN9APljbUVa/KG10kir8jFLDVMhs9gfPX+ej1+dC5Rmyghl41mm2Q8vXvHx8H/19xs+OhnL8+fL8Sqed/SzGEF0ZS45Pwsy7FCfEBHhP6IOHM33A+ezI4CR1xylFDArm5kwWGS/Je1WPdc7LjI7nke35xxlXli6vT+Yh1SbZ2T9z93HW3scp3UPChIfTm3h7/OZsGMyMj7U/j1N/Bw4tbjb38+tCb4VH2jJn3QvA6Ee8//JD3WLBl8CUME7CC4hQmtIfPTfvoXh3ROc2Tasi7LxnOnCbsRHqnfBx3Ox/gHH6x+tWMF8fGaMNwFwYQQ2GIyK295X3zqggq5waCSBVZEhz5pmjKLLMiJwwTlNlCROk+NPgsas0EkeyeU5qu6TQcRJDLkja3hgZLg1I0byaKMK0nhMhb+a1EP1EaLsGIwGTpkmW9MGEmCQlUGrB0jNh/mU55akOCcxOBVEFf60KVpykDi8Ej7bxoDaIN2PS8DAkmgRmbY0gQngcIlJM0jzVPEZk3hKboxhS0TmM4zRTSFISIwURGWzF1qwNAS4oCIsiAcU4lT3RZ00xYhwGMQ4dwTdBjIhphHOSBqnHFnFKAEc0bcBZdw+fW34NPzj8GR647yMlYLuV5tDL9UK+J2cQoubOmXNKCSNHHDa7bHCenqzQdR06f4Len2o9mik0VDiGUnGpvWBYHUEIXpoRc8Ir770I1zd4+tw5mhTwUz98FQ0C0AlVEYDkSTGqaAYTQoDihpBECS1N1sBYSHPsMzOulVb15HHMNZpWHJ44qaEGWESAPOAbj2+efSs3mx5oNFEOp8kzMPOwMlbqpEtjbFnp53LW6y9aGlk2tYgzOJD9Ewnw+RiW75nj10QLuRwfkAhDvon8PcWI3W5C7TxyjtD3LdquE+Urz4EyQpgpiFafSCSpUVzVyclHiyMjsSs9/rJ3+aqyTFHbVUwTPEmua0q6lmR+XY2Ac8rACjDj2vnKKDVlN83OZlEVkJ8/TRNcVIAilPEDpAaiVpKQwPowi5I6jxDM98n2LwSv9zOpGgGqKRo5qj+/GA4JcB4MlwWswP+GDEpAGo2L0ZqPlzTOet4GuQPKVABy4rRwJDwH7HTe9fiiAh+pYaCOOyKHGuY/d6vS9Td4gtkgKuFu9HJV3ZPRms3G8KjreEw+WiN4o3GY488FGFjXcg6TBkPu8648jdkkQfmMsRXb3mNQ5Dw2/a4oOOacEJ6XYsIUR+y2e2yS1OKhJ7xz9h7eTYYuK87EcUpIkfAzF58G+bIijovDg1n4WmRTbo2OU65zB6QnquOo/bOUI1g/ViLAmqMrvc5YdxUNCEho0OKWf1mNthItsyi2J4AowbPPiqhAk3PmGbXiavtv9JM3NMOJ67kz7w4xVu4MPZ/hi6vfBwC8Ofwd3tp/Q3iQko3VzXduhZ9d/g5AjBYLOIRCMLpcOZJeUV023Yyf6BvlV9MrbA0lakwoCJEZWyDTeznTbHODSQapdXfsxHBOU+blTvcoj/X4eHAlN2YPKPKgeqmcqivOuOPvl0gyUzlbMqUqOlg9Z2bs6L6XmHu5rui49diOdMkc0am+ZNoe9H3nlE/HpGA1QltN26BvOySOiClhu9+X6BsByVpW0bwG7qpjzAZDpccdVZn6JW96Nh9OmslStwdg04b0CcZczCEI4IZ/HgBw5p/Hx9qfqeYs56GhBaC8gBUNXbUvzAnBFo3t/5h8xHa1E8TmlIAgsiROE6KCJpGTsc/2k1DxHgVbY5GBTuW/0UFK4vSx7ycSViLTlKj551dfwyHs8Adv/s941vWRMdrmNSOcN1pfmHkgZwdldhdllaZ7MeN0OsHH9x8ThsmAdcP43v5PcTF+kL/lFC7X5Il4ICR9kLQWbBhHtE4g6of9AE4xW98pJUy50a6OhsoBzweLURl9pVG2aasE8YxE9SqVqI+lRtr62LpQVkCsxiinfJqnR/vIEEmqBOexWdqlHA5OLPVkjVNDTcfgAHJOG13LTx+sUBhwOh5LI2G40l8NkmYaJ1NiaGaEDodRD5msd99L4/Jhilh2LWICxvGg7QoYTdtmVEhSTwknoG0bLFatgIWMMcPi2qZOg6RrEhht1+HE30TnVvjU4leRDgkf4ocAWBr6PjrHYrlAqzV8IMm7t6M/xgn7/Rbb7YA4RnhPWJ8usOg63Ayv4PXmK9JPLtOv8rNYFeiyyn6nKX6qfDMILjAAjyY1eP3tV8BvKztjBuUTK4TlTV5xiaXlx05Wm8AgK25VUBdi5GbIWZFQxSpq81WjpZQsxUnWex/2cIEQQsAHi4d40D7MzJE5wnxIjhysFxKpkmbiOGvFmU51zMQZYSmBS2FvDVmtNOsqIdy1ASFIjztRDBOiCp5sQKgQk3qV4jlmJE3HIQDiWEgpYRylF9xxtJ8TY787aCNOHF2V5mPqh57Db
IxSeTblfytiUcvyOk+wnG3KHx0RMa/QsJ8RUHCFUkVlb1kzcPN0z1Mi53UXxQhwzsM7QtN1WK1Wggyr25hYFG5SoCRyXmuUoghPqpSYJM26hX/UzWuVfo68omVcBpJR7x2QO7E6M4nKehj6oTSqxk+8crrMNe95JzRqNbVEEGdG+YT8SIr85YXGIgNBHRmcUlFkNEzUmKGnyk9ul6DzuDbTRA1BMb4FKTnBegeqGUDaZxMyJ5dNA5NxUosdo/Ffa9lxTEeQqpsKzY5B8M5Svoyfs6b1mONFeppyvpWcWSlTM9njQEGiXWMY4aYJ7IIa/Ql2Mjy8/MuMcZzwjfW3ME1TBpX63OYNeAoAM8IQ4NiDNNWrNHwnpVXKMisBOTJUoxVKSYHUXIOSRvlkRI7JOBMAYDLDNtOs0LJFxZyDpAYDIMTMoZ2HaG0wQ8hIwBwfgDOjhGUcMkCVE6xRv6RpzVqT19ISLjF+qvkSPtF8yRA04CEyDApmVbsbGJwRoBPVr6lOUhkMZprNHTHFkEB+b64UQx0lKad+2X2tXquiu8RILuKD+4+wvuhx48MTpFzOArVVGeBJWu2oc17vimdex0bcP/V69fKMQx8xEj76vTa2akkwf6A41Mr5LjV4dmUk69ow09+56gFX68kmP7INToREwvsSItKUMEwHXG4P8HoG7Ax7bcfkmOGcykQg9wgk1XWhETug6Jh2nvNY1JhhG39Vz5V/cjU5dXJleQS2ElZ1gpixSyAmNOhVV4GugQRiGKlaxoT5yhfLQfbQdBpCokn0Hp0rW8/IFDEOB4lGKqaC1Jc6QV1n03sInCS4IbKSAHWoW99G6Z3q8phMF4wozrdAHbyhxj7j+sgYbeaeSlylCujrtW1fvTz/O/9ruffGXIB7wx2AitKUvT4mFJwr6I1kJKBs2ZUHJmbs9wcFtJhEmdWc19A0mEZJPwQV5lF71ctcATtVZsRIHZsaXE6UXYmoMRyTeodTPti1YSaNmsUojTHN3jOF2JRGp+7TJoiR6h2B4eBIIlWiTDkRWgrcQSGgb7VxMUiMVe/zesaYEC3N0/RxkpRIIundRTEheH+lcSCn4uH3Xnu/jQnj4YDmbAVGwMX5pbRdaALIO42YFYJo2qANgAU+v2uLcg9IEfjhMCBOEU3wWPRrfLL/l/keP939Gr7Nf4z3Ft+F8w6H/YDtZoctE0IjoXICgL4FkvTm2VzswQx0XcBytUTbtbjbvI5P9/8qK91CBSZ1WRlQtUbZTV0LL2hxrAjnLIpo3q7dFFsDVUhsTJazMmxXUs+uI8oeULURZ15w4fl6L07g7K52SCHhafcUznm81b+Pwe1hCpptOJE6OYCMcCn6XlXzQNZ82tKPLG1OTySbcs+YJo2Gq6EyM2Ls/KujY4qiNJq3CxVi6Vz0lnS961K9dHV1jZ/xtm5vWWKu1uKq0lueQ7O/swGfXyvZASUyUYSifcc+T0pn5gw5vjfI5RTuZ02kMm+O5l/eMYOLiNG0Hdq2QZwGHAZJp/Qk621rloweqhrX2Qoor680etR7YyfB/wT9q16zogTpWVMhL/K8Ui7z3tv+m2bDmRatzqw000YWqAC0b6Qqpmyfm6l0Qs81m7MzYnszAwQo45/LMr1XVuvmn+Mj+rJ02xmtVUJS5IsaGzrfen5XRBQX58DVFK6Uowy1Kn/lvOTpKW3CUrXnAIMgiWq5IEiRPhvYpgxqHTcDQzNhnCKa2GTe98OT7wNoEWPEnYtb6CZR6DglnIwr9LGDpYX1TcCggAZyiE1WKK/NNUtlByTiAzuVArZhixb1NplnVOcvz4+qfmu2piyyxOJ+gwAAIABJREFUlqg0vTYnr8kP1rpy05yVVi2qYUAdxAUExWVSVmedrq+duxIJkNfB0Fo8JQSq6Z5AWk8nPJpyFreZrUmtCIIon86cLyLSitLPE2bsESJnjAeGbUB32WAIBzBHIBKacw+3cRijcISULGposkCzSExR1wh/psvKwWNy12SSvqinq9SXzfjUEd3PAgv2L1cJfcfppXz0WRmxymjLEzH6Q5bdc/7LkJrGIpMsQltJD/2o7F1SHZiJ4TwLJgQ8wCF/3zZ15qwDl0ionTvda/ELJCARYhqgeG7woRHEdRuv0jMZDaDihfaRrKMAIHMMR2QjVPUX6aVY6jHNwZI06yGqo8n4XopsOYxljsxHzAazweSMlxFYPV5hWJ3PdOxpjKCYwC2pnFNnDyFH5u1plprNJCnRnKD9Y2s9KWXbgJFA2dkOCZzU6sozro+O0UammM4VHGFmigrFRxE3KgIwxUp4Ui0r7OCWg0D5gPDxuSyHxm6QGHApvxwnxjQOwhpIhHjUhp7JDk59O74qzA7vPMDw5DyHiZ0q7wkATxG+UsgsuuUcgUkAUGrDJ0fCdC0szF0rfICME66kWraNR980GMBIWs9ijJuZK0Zvh4zzkpA1rNbFmxSEIis/ukfmqam91IlLnY6loQLIDa6HUby1UwQuL3cIbcjzBKT2JpgHl6Sv13LZIwRB3iSQHpJy8MZhwmE/gAD0ix6vLb90hZH+VPcrOKP7+MD9EA/8j5GmCeMwYhwnHA5ymG5++nWAgO7+XcT/n7p3+7ktSe6EfpG51t7f951Tpy5d3dXV1d12t5tu3/C0B2MxmvE8WFykMRIvg2Y0EuJhpPkD+AfggReeeAGBRpqHgRcLDUIgEBJGjASMACEb1L7IHrsv7q6+uauqq87t+/ZeKzN4iIiMyFxrf+e0x0g1S1Xn23uvtfISGRn3jHjvfaRTwWs3b+Knr/4yAOCT0xdBIRlsz8xHPDNFiZ0fQzayPBpPxZt/uJFxIVKV5UB+E7qsmLESQrbC1RzaMuFAY9AVj42mgDVrXiLkOeHPrt/D7XTGmgven37c8IAra/0SVyZJ8UcUe1ZPS2AsOu5GUq3/F1x7Z9esLQDt7AM6yEufvWLGw984rr2eB1q0GZc+xXv3aXju/nk2xjTKx8ybvykwoT7Ucpxv38YuXQi0dHw1qeeKkmS7PUwT7s4rbj/6COb59ffJ8XlQMHs4tG9N4ebu1xEmOv5xgOSs7xJsx/Mk4c6979gdDv272iGvlzDaLnSV/FwgeUPdLFsGwF189EvFioCF+/MxRbQg4kN4Ugdt7dlvTajcxZmI37y9B6NDxossFDTAIjxr7UR6aDhvnm5MhGkyw6EKRQQ5TwsxrM7ThGmaxQK+ytnedfX+33vlQ1h4VWXGo+UhjlWiN6aUcDzM+MTj1zBhUv5T9XmFWyuPo3uNAGYLkSSfV1BWbT4AXGkZANZWWr3CTAQUKy0QI3K4KWqNJ2ufVVUjW0UaFsoUoBWQ8+XZMxAWpbuUWD14esdkLjuLa5ERrMqc4U0oXiwKKJmZBpHOmqO2FRNuOOSKkCVkISZwEv53/OiABx9c4/GnnmirFdfvH3D4cEah2mznJgBLmDJpSQddG63TKUZ/4cMZEgLcpL+2N7f7P0C1ebloeHa7U4yXDQqeLDacuYb9u7/dVezkhlOCZxKt5FjXmW5hqp+Ngdvv1gL0PVIZu0FCWgul
TALH8rnJa1LHtnmTJ5cVwZptNMgVPhlvkfx9wGmzGFdE4ZJSD278M5kGhm/sCY1MpCmlYlX6nMZ1MvmDAk3vyKITQSqEz37906ifr/jo9WeYKOPN77zR3qmltkQjpMeo2tajwJ+tTZZT48RFs9yS5nwIsRNqIKnF5sRb2rFzfWyUNlvA9n1QfgDfF3F/cOUWVy/WKxcG+ohj+8x4f/1TvLd8s91LOSFNqRFqy1xEGtYjfQqBWBYNy6i1nT+pldRSAPcuXBBHmRnLR09R786dYgHdjqVUSWeqYUayiE6YIuGJ40oargizXrFY8tjaMIEKAqNyXoGccUyEfJxxnjMIGXMWd3op3PqpDCxa88cKiOdJQs1KYZSlYMvYuaXwNdjoje6ZlOQcFzSssZ4XTZ4APH78FIk0QyYY6yIb5fT997C8/T7yZ96ChXyiERGBD0GUwHUtePb4GarW2ZsPE96kn0Yp7tkSJpXwqfRlvHZ4B5+fb/G7t/8DpnzCfMWtttjNZ98Cg3D1idfwlbd+A9ePrzGnCVf0SHCpACtWx5kgZrGOZV0qEsnn5bw29LSUIfIeB7+bEUM0XKlVQndMOLS6XGad4lCM2ohxR9gtxkS0LAmJnbIU+ibg9x/+kaTgB3CX7sAZW4JiwobO0cItaxNALig6cD92hE9rcnhv9FT34RgDo2zj6xWH+xQZ7/flryjQbt9/kQLXC/oSNtqtUP9mGP/4bj+m7f671M7WkBQ9nD5iZrHSThPhvFYkWvHg+oDzPOHu9oQSioBe9lrGMQ0CUMCnUYGz/i+2v6OM94JHEG12x3Z5vO3O5j0b4SXchgtqcDx5EWy2OIGGVC5ihZ7J1k/5F1HztGz78vFeGsV9eOVt7ICjayPt4KB90OQI4b6d/TRPSIUoMXe3UofpMM+4OhwAFFR9XuQmDXPU7L+1Fqyl4nQWemo0jkiefXq8xWN+1kSlRAkfvPoR+JUs/Qf6/AuPvwLR90rLHm1HC5glAzJNSWp0IuHu9g5rISBxq5VFqGDyM5+kckxtylxQ0BqNQhuDgIsbbzThtFb27NS+aTwKROe8VomiSLUCVZSVBFLlVDxlicTwIx5jFTItpZ0K6rbuNm4bn5uLY7hcVEwCyijiMtmeMI+Mh0aCPe/km99/A09efYa7V+5wfHyFT/3gTTmPC1dDDKao1EIum9Jh41Otk21fsCjHmRh2TlbgWJuh0UYfemnTaFEQ4OGZPeUvKOjWCvdvWaIZmYvyxM3xBu6+7cqScV8PtDv0YLOAQnyQTF3miBKL/WUmgAk16a8sETXT5DQ3jsg/6ujNMBPkeUvSYRIEJyClqTVhRpzKLvvYyGo1I4J+NuVd5f8WyQO4cYC0Xi8rwE0iYgtRlSvXCV989/NY/mxFAuHq2ZUkcdItZ+UzYskpm5HZximRRCjBulBcV4XZ97rKb7UiTUmU1n+elDbbGE7UGWAjHowv/c7n8Ud/+RtYD2vHMkkFSzAwEZDThE/88A28+aPX9fhMgJBtG05Y+A5nPmkbAtQcCMAa6r3QNCGTeMOKejYQigQDEDe0ecOMyELiyCMfI2gSg+sZeOVGwjRuT425l1XORwFALUWLO+vGrJK5T/YCN0TwnWBCvzKnlubXYKXhlyTM4EyEelpwkwjz8YA8H8RKlkgSf5SimZ6EfKyagGQpBSlZ0WnCcj6Jl4NjgW05mzEfJmE0CzqPUBSop8OMwzwJUy4SblpYSx1IAp9mZCcS5k7LGbwsbW2LJslo4QUKhrVI7bT1vCDnhIfXD/FLN7+Bia7D2lEgqowDXeNA1/hXHvwdWLwzVM34WvojrFXOEF2nV/GAHkpyGOs7CucNswGQMP93/vBTOP3iHYgJn//mZ4RgaIhWUYLSxMIYllvXZhUWO4EyYQXoUnSj7ySloYbjaEyboIrugZAmCa997/BjfPv4XQBA4T5hDBjNiNEiu/QghgkMvq5bodbWj9tZFbeKRc9Me43vV7Yu3XM2dkl43b+2+gA3PLbxx3vy9wXKaZzTTtdtX3csdIRb//2SUB2Vusuwkd6Y41+EeQb6y+IRP1xf4dHDB1KLUffY6bzg7nS+mJbYh2xet73QlN1JeGgOepBtRYwo2uzNMjb7IhY4vrm/nqMwd/mZcc1erud+54RPvlAA0BI+jf02C3f7x/bJ5fFuxjDsScelEer9O/fD2A0p3a/s9+3S5HWgWvB8WfH89hbNs8AsusGF4TB70jJjA/KO9S5iYmXGU7obYm/lhd/+xO8FocIUNol4qGD8/Ec/g5s0YZoqllXooSTrkjNyTGLcBEut0ZkzwEnyGoDBmpCIiaT4tQwajATm0hnqwIw5SaKW03nd7Dfjh+uifDSUK7LASNJwWC9lXcFF1OAl7MsEKXEhEes1ZKD1sjls5XhSbjzFBmImuKSlezzzpt1LrQ1C8vN1CIZyMA7nGV/+/S9CzmgCqYazsBtLGUxwlLFR1rA1QiEGyI0n8rhwHkqlncO18FBXLmzYirM7tLv7StBWU1NEennPcM8VMBFp+wDHqJ6ZoyDuZTNnOa9Ispbuf2wtxfNm9teCRsWkqkdxOELH1T9R7KQd6FtN8YqwN3jF/AkbS2boganJk67nG8wDrySAkIGEVvtPlDKZv5ylbYJkkzuo65d9odrX+Bu1x2IJcmbgyEepc8cAjjbNni/E8HijTYqKqowtkrUziUxr8CH4uWoike0LgGAJkczxZSvHxetjobSZUGhfWswryXZPmPGV3/si3v3SD9orLvRY1qqE43LAO996CyZLNX2Ggs+NJAvZmOkqFvNkFOAs79SyYjlLxsFlrViLH1iW/kVZyVncnxNRS5QBvRd4CXJKmKcZa85YybMhmQ6WkhAf1NIYp22yKJxJhJRkumxHtlN/mNUtDYSbB9cSfqiJP1JKKABuVdsnlrodoKQFsOUU6GFOmI9z8zRb/bvKjLpIenpYhjMFMBFwdZyBVx5gWST98HlZcb49w4SJNGWgSoX729Vc4tw2VM7ChMoq5wdbbH+VkJmEJPHWulMs7hqAutslJPJ0d9YC3Ef89INfwWv5bTmnp4RHPJEm5nj4QufGVmJcSoGFA6xLwbp4OmVjUvo4zPUtYxbsIwZ+5ms/1SkEiqH6VZhdrQyUVft0YdrSRouVqWpIgStmjdonUiHC8SclwpzlnOSzq+dIifCd4/fwJEs6fks84tzCvWAJpEk8/BoVlqajNMGvf44ag+uv3VC6HSUpXp1xZxjDvvzI3Xg6xkq4cM8figqO7cVxDvcJrtE7GH+LnCXej5+t0O3L9BfPIcV5S8he6dqP/cZnAaFBx+MRr776ihizqtCZZal49vy5MNEOQnE+tp4pYFCEHYfnfSwmZO+FLTn8muzeZhnHYcLWxlhlfbbx7Cky3MHjksLc/x7xr8fFS9dorY+JR7xth28fkmdj2Y4PZpgkauFrl4wKo+HjPvzt90fkLfvrGNuP+Odt9f14KGefRdAEdUsRT0AXVhrb7Q0fwzrqv6wKnAwr7jN/a9XkLA2c1jpJH7//2p+08Vro8GY+WlYj54y
310/htbtXnLYq/q0FuDpkPTbAOC0Lrp9dIRuPT0BiuZ+yhmIt0CQeAd4EPS8kkRtYC+bjAVOmluUUTbRPBjmFttFklbhqRS1Co1ey5FhC9xuUlRYgrGPl0mBYqsom1qcabQzkpKn9a/J+TR1gzZ5MOtZkPBSsJXr93JkrcIrHNAFUW2IXNzT065tIDNetdmLWogUkvJXIhOqqb7scF1ZYoahGgB3NjmH4b2e0Im+L4cuNCtgq6N+YwbbfpwSjc+6xldsDjwC1PaQrJS23PWyJTACnR6zn4cwIbbhv+9PoSjjvqr+OxcTj2Iz3m8zlV8hYTCT4AZOFZexZo8Xa2eFQc1EUdtvnJOGLCl85eqSNVa2DTKosq1GBWZIsOb3pRtaKdYu33AScCGvxRJMldNPnKhImlvHpqZ3horaHPGtzabCou2fw/Pp4KG3ollsXQGGom3U+z/jpP/hcB4CouAF6IBFirhMhVqwKlq7dZOWJzSol71vI3yhALuuKepbzWutaFPkU4EBLpzzPU8tqOOWM+TCJZaFWCV+IihuLF+t8PqMs6yBAAbVKmv12li2MkxvBUjgRWpryFkppoZ2mkAA4TAnzlLCsRdtWgp3EklaIME9SP2tdK86nM+pakAAcr46YDjMqV9RVEGxVeBT1Btaqaf9NJCFNjnKYcF7FIuTjkpSzAiPWgsy+FyTc0dOKr1PF+aQMi4V5Hw52+BUwN7lckulwWQsWTTwiGSmPePXqk3g1v+VhRDVgTmCoRoZ6QQBomS/0oKkpWt1TcV+zC2YmkG6eab2Zl7c2j4Y5zYzhc1hbsI+0tWO427KOknp/BdcfHx/j2XSHQhXfP/5ARqWZIZudjW20FmK8DUHshbQ9QdYFNH8ngHnzvN0TnG05se5RgpyJyXgvKWpxPP7u2FY/RhN0LwnvLgw7nLq7e0rozvd+Dtvfbc1HgTp+3wrKvSC636aREO46t99zJlzfXOGVVx4gG/OgirLe4fb2znEwjCcqG/2k0/jL5qk9oX/vIsSTHReeIT3TOzzRGHnXc6/ImzDkty+FqbBpFds7F3EGbdTBlw4geIfG8e723bft4opTmqQTsiiBPXjtj2/oP+zv1t+uoaBXpMKdQF7H9+Le3OI7gObtMQOXeWOM6pG5S9h5e2o/ROFRPSwMWGZS2SeWoMf6I4w4YWP0PaV9YyuktitNrdbr9+b38L35PQOCwoRg4a1J6Uhl4O3DpyUzpR5vIDKeJ7A5r0VC+hUezZjDEuKVsoZ7JsKURdEy3nHAjLfOb/oQIF6Aw5T1LDmwrqWVwFhLBUrF8eoAEGFZF6yVJawSGqYGBqrWkqzc1iOlBE4Fq0jyoCTJVDIlVEvI1XiEtFZVAPcaLwCQ1PitiRsMnZKG6Nn+JKPHCaTeU2Lo2X7Htb09VbVOnqRdJ2Pc0tZmacO4G93fEcfJVU8PyVUcDmOxjIw9TYo0Id4h751r/zv7oQprsY2XI7zdXyePmxJsHjgSWEJhGYDVTtFRAjXPtbTLKSrTQO8KD3CjLSUysY30/aprmiiWQvGotjTNApfmIbV10L457N9QbkCycrvS2cbe1RTyJSAipHZUlxts7H4TwaHrmEjEQSVEiQVOMiTZ6KmNJfJcHRcDgJXhAXJvH99cHw+lrRHn8BNRsAP0f+W+/a9pxJNtJruv1osgUMjy2nmeHJQFQXA7vCrJGwl11U2nodiyxaJFmjAfDpjnCdM8t9THgkzGjICoxBOCxUcZlSgpGrutcfSA35MwAmEyFh4HIklJnzNub0+t4HcUAowxHADg7gTSdxhotbcoJ02jO2FZV0l5fzqDa8V8nJH1nBOYsNaCWrTwLDM4ZKq0uZES4NN5Qb0743jIWM4LVlVQU06t6HVKCXQQIlm1fo6XChCmk5N43aqVR9CJBVlBzniVKmmglbkxS1mCq+sjHhwe4eevfx0P85u+Dhvhx7IQ2UwYzW/Nlp5cNyEDLZU9xedjg9QYbhMwAjFnlCYYMkOLVgNErLWL5MdoiWpEWK0/NmDWcEc5HzhJqYqc8a1H38aaCoiBJ/QMzyFlE1hrejkMbRzumTMDxb7w6L/2Xo3+qe2e9t9HQa7zKFwQ4LdW+svj8jXZayvuRm+rVzTHeY0MmroxXVLW9q5eUbnfSzd66MZ29uo0WrujN89oyvZZEfCnQ8aD6ytc3zzENKlRZV3w7PYOd7d3WkvRhd4Ig5eZd+sT0W5rfPPCHGHMDVrQ9/7LFNcXXz1sRuVpbGFjeAj/tk87eH2579h2/16E5wgVq68o93pBcLwscsH2d/R4jcLKnnEARm8D/l/2zlGDfYPV3sybEuYJB/q25K8pJa64OU8HCJQTrm6OOOYJbqA0K7oo74nkexN/uaKsC57fnVCWsPpN8DMi0M81JTtxHATSNjmDISOAO+BhlF6oyTomExh/+P7Nj7r5u3WFXPjr+vNv5nVTKRf+RfAj14Rn5Ta0L21OSQx7ldkVL/3MBFxfH/F2fQuvPn9FShkpr11rkVTrqYocxZASBiw1Dc26wllr42E0TpBmGTYVIaN5m1iUOKLUKo4JflCbj0QNeP08OUvNsLNNVWHCDElrT+o18d0CU/FRqwbCSLmYpLjusqFFKwF+EktkQW4KFEOzcrUQQX/Kcc94Lqu8Co5eI50dpfa2DLb6mrJH0plh00YkqldP6yPe6a4K9Muf8ZMYITLCZkoU3qmtvd63bcd14Dho7xC17JGsbVDD6ZHnOuztsrIeslRaYxNWK9hn6PAaYGAiAPnK+6E3XSt7T+XyXvG08h2s5YeolbhItSITUJHEmGBLFTGNqJULIR9GTxv1SNLL8tCPhdImm6+hv/7VuHpEYU83VbK0/kM7lBrRNNGAiAciqinJQ0aZWiPmaNYhrc92vDqicsVaCspSNFOkjO1wmHC4OoJIiBDIMkgGJg9FeiWsImBnTIcZp7OERyadp1lJ17DgU0qtHhAlc7dX5CRp+/NhQmHG6e6kGZt65p+JMCVhammeMR8PqGCczyuW0wqgYqlnVI2RXs7iUZymhOlwEAWveO03LlLLolg5ABXypynhcJw185cUsn7y9BaHqwllXdr5vGkWRTApwzDrWqKM/pizXKVIGGITTnPGUhlrLZjBKFxwe3unXkt5JCXC8eogXsJ8wJyu8DC/qbimVlvqFXBXBDnsrJCljwlf/P2fwh///Dfwye+8gZuPrmQ9K5pVxdqHWiSJoNmtLLxSGIAp6w1/GcKsih5+r7ULXrD8UYbjIAtvISATKBPmeUbOGc/mZ/j6g2+DACy0YGWvr2aTjWQxhm6YpdXXoCeLL/IqxTC2rbgZ2t0RbJug2mBD3d84qr22e4FTR9918zLCdHy/++UFz7+Y2Mb5XHrvPkUWMCGvwpI+jAL0OF//bs+6Iif4UNtvec547dVXcTwehJZxwboWPH78BHencweBKIyOa/WycDDbU1yWOM/x+/Dotl27z0HI2b1UaB3o5O5zjIYM9sie19MiKWy/bNcxyhO+ZjLufU9xa2OAaZcpf5BLpD3lfAM/7daG74elPeSkoc
fZ+xRT72f/mah8GO/olNTQvuG4ed1McLVyJKe7E6brjAfXB61ZJ16iUhYJ2U8SYeLnKjNwPGK+OuLpkye4Oy0asmf9yT8ie/Rjbut/EXI78sgFKPTg24ZbG4XWHQuThxVAQ2MIJSQCfPWsNEE8Ch/mJ8Cws7p2d0b8uD7BB/gI+VpreyZLyBd4ZZhURsZfevYLneC+uxNDWnYRi+wpC19z2iK/UfCcyLhbZBRXiSoJqdNTOIuYKGuNQm+fubbazRlRSapa90vPXqWEKQHTNHfJ1CpXVNazYyGaoAveGWUZreFbagFlqSkK1uQWyA5JFkW4jXVYEw9WHVbLwlC17A0lC91EyxBJLfQxKCRcu2pvbelI18VoBTOoFhRIZJrI4ErLOGQNJTTcs+ggGIRVmGc1qFRI2H0ynBi0OIrftQyI1b+VhDoabeVWqPau8TZOABXrX/d6q90IRGmIoUYDpeVtfkp3lAG7csluIID+9XJbgG8a5wFWIsOe59YXmtI8xHhtro+F0gYYwCxDi3hb2mebRMdEOBCsHhC2QUUGjACQzxNd4ZhvcDedwOLHR6kshxyZARKCzyQei+PhiHU9Y52rZDhcFmUIWRGbsei5rFIZU1k1RjrMj/VAcyLcnU84nc7tLFiaJhA0S6ESspahMWckjSGe5oxSC853K+Z5xnSYAUqYZ8a6rFiXtTsDM+WE49UBdJhUscySdpQZ00xYzlprDkA5Lc2SmbLUJgMqlrOMSbJaasV3PeDWFLZ5wvXNUUMadb4kCtezp15EfJoyrq5m5CkHIcgtOtQijKHFjRec7yTMkQiqiF1h+sxbOH7+Hc28JEw5TxNyIuR5wjRNOOZrXOeH+Nnjv4oreqW35DJCbD4giUKSxrcTwNtNwwBueMbxcMAxHUX1V1TrjYhC8VgVv3VV975TV4C5ncUzj2t3kZxJs/oiosB5OOwpL1imink+4MnVE7x7/T0oh9aENRwIQBSk3Gpr9ND2iAnzo8JlazMMsFPw9hQRGuDyMmFwMSRrL0zQCS2Hd/baudhFu/+SRi1cEv79fbo4tz0ld6vcxLn06fwbDLrv94Vu9u3Zd5mvKhZBaE5Ems5/xsNXHuJwOAIQI826nPH4yRMsa90wkTF8bm+dbbz3KnK8/3UfOr2QMgrEGyHmwmXFULdjiVKHt+qwi96vft73GSF6b0t/38Z9adSd93kcZnwO2/l3LRJdvjf01ylV7WkVJS4oWPtrLGdfIv9t49zZL5c81bL/axP0JMJBQonKueDJ8hR3z5/jeHWFq6sDQOK3WcuKZSWs66rFzQHKCSlNmNKER49ex4N1xfl0i9N5AdWKUq0I8dYjfR/cLkDzBc9fwmy5tzV87WE3oYtj4xBSR7o+QOMnDHox4WNZ+VpXCVFLUg6olabrxa0ef3nB//Xod4bxRnrlKTR8bqZcov1D6PllDCK0ZG8EK26vD1qoo9I5w3lTwqX/MHcSo/FXb38OmSalhbnjlQCwcMXCC9bzGbUybnDEhEnqnkLqnlnmZhllMJJan+qRq4kgKWkIEvkqCQmm5MxYxqxZyQMMiNRbSZrQo1oooXsxmSWkVo7BAHme5XiNwc6ba+OsHIs72+LWlgRXitA73ERVVeOzKt4NOXQMBM38aLTP1leXq7KFxFpvABdJUuNxhsr34Sjr8op5V6uMp8kupPhmqRwJvFrH/VEGkwGYWaOW7DyjFdg22mvvBJqgClmsPxfb25IO1vqG28gYazNnavO6JHPY9bFR2uYpN2VtjwlQAEybVHusF+IkNNHOCRlBceC/Of0UPjl9AY/xYVOSaq1ILMWgE2XkXLEujOV8xvE4g+YjUiqYJgYfD2iIAGjxYAjyrHKmjtlDJMMsZKOskoADQAsXJEDOYi1y7oyIcHU8IM8T1rVIVsUiSmMfemmfa9s4TWmbM+bjjJQnGZO0LOGYLN60WmorM0BJ3e0E1FV+N+JgikCtXmct5YT5MOPq6iBKniKzrReIUNaiBcclU+Q8zwInI7i2udqmlxTOy3nBcnIl9Hh1wOce/hymfMRyOOIuJ/BakecJjz73aazvf4g3ps/hmB4AAF7Pn8Ynpy81QbUthRJFOiKZAAAgAElEQVQM8UJC1zCFDR9wSf+1MJu0ZrzxZ6/h+rmlFaJOWmK4UFjaGT4O59LQCqFK6lpnRxYyAxY8irXmUs6gTHj/+AGmnPHDq/fwfLoNY2ANq+3bg9mLxJwj5NbKBITwFFjfoH5/Xbguedjibw0H4IrX+OzeNd6KynavHMRnLxM6fy4wniEu/+UVuK1QGcfZPAdG4Ll/x63gfbhaVA5ie3t/Lyu/OwJdG3P864I25YTj9RGv3Nwg5RlJz1Te3d3h6bNnkmSIHZf6NuPnhM5zfGE9OiG9G7nOLagJWzH2/tn2v11e0Et5uRyVoiKRuu8mRCCs20gzur5qvRffL91zXqce1PA8EGBjeBR/uwT7vf6xhfU4Jm8t0PXQwrj3+/mkHicuKLB7eB/vEVTQA0Aq1IkAJ3h3XgqW9Tlub+9wdXXA4TDjcDzqEQAN5WeAyxnreivnnRKJ14IIx6Oc3TqfFizLAjBZxFIYX6/IRTjsL+9lGmew6/fMzhPRUNUE6svtYdyn5G9s+MKwFqJQMZCyZigVQ7JHQFGPX61hb4e6SI0trWhKvmGU4Xbkoa1gcd+U0ct5zqKUV4CLll0i8/xEGdEaFroSeZD1yVzwO1d/gKyer5ySlkPIgXkVSB5w4Rtvnz6Jq/UGUsYCjZeXVaKxWjkdwM/dtT3MTRliEOz4Q+P9MNgYzadBITBYcTefCHEGGo+/4iNeWx81ZbbRvsYPCad8hw/z40DnfT0+sbwO4uwrzzLCVQ3eHpKpEVe2rmYkCky690KOn7nHLaiizc4ZkpquPXGHPFcgyQJbJAmCEmuzJIYf/eCQZDs+5/tLvmvSH52XZSdv9J+DTKdgkzyGblQw3YKZUVb3m1aIxzSmNLQxyfv/HGSPlPA6OWQYyYzebb+bpu2/ownLPVOh9pr8sdj/QAqINFNjVc9R2KfQ7IUpYdU09GmeUFZN0JjcUwS4hg+wnidKSHbwoLnmEQbvlqTr6yOOVwep4VVkA0D7z6rI2kastYArY8oZhRnLsiBNCae7M6p6dKL1szJwOq9IicXDpmnacyKpQ6NFsWvR8wCcAkOkViDZCETVkJOUM/KUcJgnTPMEIjkX19LoM1odsVqcCHNlnM8L8pTQDsmzWG64iLezllWKdet8QIT5OOOnHnwVX7r6VSRMeD9/iD/l74LBSFPG6z/1C3jlMfCJ/Fkc6UFbF8mAKQqXZR8iQGPmgWLmnkEw6/gtQePlGViBt/7wTRBpPTYN0eDq+MlEEk7KZiF2AZ4BAUwKSpJ2kpSZVpLPeSLcHk740dUHmo6Z8KPD+zbI1pYRwJH5mxKvAXCtv2ShKc7Xw5a4X3u5ZA0fr/s8QTF8YM8i5aO6X+jZf9fWUz8ixOO3/iK12La3HfK+oDlee8pU720Zxd/Q7zCYkSHH9zvlkN0D2Xk0aCv86h0ArAmTEo5X17i5v
v/tSCgUNee+BITY8Nm0Xxs+vP8bdPPyWcpwzau10YH9ajy7QJSTY3ohPeT56vwmlTVdxu90F8c0S6ylEcv8nJ7/NvElZhh6aFyAyzyGTQ2KspQj1BPvVVKoYSD5Denwtr8p02o9faUF22n5tLAg6Qi7NCfRbhREpoksGAGX/vMPejH1LcjHlI3qecZ7hPPGQKAAAgAElEQVTwCACn8SLhZSMAqBdgH8BwIZUpXtl/TT5ghZl9akRy3NiYoSv72CTxnqos96voVIbpKQ61saezgxodmki0htIJ4Khy5RWPgE6IISfJjrwlw8zbGgdqaPltfjKtjBRKPENF955C0mk39jHpMCDj/iQBmrCS0oC17mcbR/G7wSO+hEFmgfKARGIO/Td/TSdlM+rAuez+dBprKUxjzwASTnAS0KHOYfn1UO8oj46rcTM8x7pHZ6e+d/ri71Ofs+LXXGVm55BF4ShnvzujFACINoCQpRzoKA4BKk2P/kccfV2GJFmWBfu+4fPnz9DesVwueHp5xrKu2LY79m3Hvu/Y/Rg9CJbLBc/Xp5HS1YREZOVRP77e7BBcRNCFcCJmfCnGpbja48Rh1RFIWxqAbiO2Q3vG4Q5jT4UCENuUDwgWQ4YAwDr01tPzM3rfxh6NXfHd6yv2+4bPtxsEwF1uWNaGZWlY1guenz/ivt1wf9ug24ZPv/4Oy7rg+fkZT09XPD1d8Xq/4+3T64A1JP4JzRCNstM0r57D8QUFXYRIFJRG3Rx0Y6PvjMydV0S4rL2RnLPZDyguYVV59NsUMgsyofa0wu7Gf+80VIw6ubfLevOmZ6fQ3rN2NG5CYMhoSYA4bc9l26zDGU/8+FUooa90Hu9w6kZsYvCHNERLBI0FE4o2Defj7fMN2+2OZVnw8eMHPF8vpEfDtcqahSwo80Ux7i7j4NIQ0lAILpcrvhHg7e2O29sd2x45qKGceatH2DiTOTPzvNO+lvnQbIfw2+34+HGAkaDvdnG0zeBYRc9V7uPjgsPHX0ZqH5nmxrsy5a57uId4MdpnSpgtAy86cWd9T98BYgNJfArTqI5hPe/PQ2/fgD/7xz/Dy3LFIoK/vfw9frb+HCuuaKJ4lTc86xMuesGlX8CB9XDaAIulM92Q4pWUUQCw4DKoWHhlX+Ar7v/rp3+XMAeKhZrMfv/w9vt4XV7x88s/GpIauiRIIYWCxwynocySrjsj+8HzVThtANKIOnl13GNRqnkpIta6AuP7XKC5giIWqjnu7/B2Ty2rnIBD/wegg1i94rlBdVb1nXLHJYdq4Dxq8x1icIJpyFN8DGSwQom9ICRAizAIx+J9WB7BJwXEM1GSLcvhg5eU1Hv6zphNTqfR6FbV1Lu9qsoDWZbmI3AsJwLNOvQ9LJMUjs9p2HNaj6WAurMR/HDOMfMepOzBo6GV3nNPEDMACz42hAyt9q5JvjuiOuFzoy32vFklBcIR9g3fUL43CHEseuKZgASA2PcyzAcBRTDhThjhKsbtX1gNGR7k+KqO7yAJpn+znMgRM46T2kZdJchymQr3qI+T1qn/NMSOlDnTYa6SeUQ75WOmnzjx6ISHWXSl0ZgOdT8o18rG3WglDe2k0WGUaNBioCWME1h0tOHpehlkJg377Y7Xz68QKJbLBZfLBU9PT7aX7I6mim27Q6VhaeMuIVmuSQcgkugaCrfDabMPOm0CNw81BqbQcVkR1MdnVnITl3u+dNDtOH+r130TvMlIMX4x+TXE88DJdr/D85GkKf6nbz/ift/x6fOr3UHW0TFSM3G/YxPBhw8veMUbboKx182Ye7uPgyZerlfst23sEyoUk3Ihf3c+Zt1L82eyDELpQEVOJX1GeiQ7Hk5Pqtke0Zqv3M/BkpMY1cQxUt7Nb6TJOHgBlA7o+mMYEYNHmzlUToQxrDEnM6sVOYOz92dBDUeu89/4G26dtRFBPg+gaNpXJdhoc5CZxanPIsWbevOMhmYpvj7XIbsLnIoyIQCgHfs25Mh3333Ctq14fnrGZV0DpnRQLeNEFTuGjB/xkT6sFZMnfm/Wbqh2EbUsK56fR5DifrvhZqtuRL2B+7KSO4vEmJvcuza9DXzFMAG6dkGAJthuHbf7NlDSBJdVsCxFxKRN4Xhg/IdtIdRfysLD1YchE1nmZ+jFPx/HC8OOB+AyyyjG7zRDvBbBTkJZpC4L8Nyf8IP9m1G2N/zL158UfdFten58/z38Dn4PAuDT8hk/ffoH/N7bj/Fxf0FHr/LA/g1aAAFhcxuk6dkQ8Sr5YFb3ogl3dtYDawgnD/jU3vAmN8Dk60FPe9FJljniFMS/c58nz9fjtNFTjcOjJOXoOEdf7G3UiTQwl2amLDwdsEZnkcg9gan0iZz0dwcAMriMUdm4OBVqX3xO8FG45RHcVek4bLXlojGsgVq2tbyHKgXckYlG32TUleaUGGUee7aQapzdwBQyB8PBiOZ8cdPnbV7RO+JekDoWPlYzjlxbR0TSJSUPXPOD0jvt40Slo4Kz94ow8GeDRhnaQOkZ9K7oeRUrxwXUE+LSSZDSdxo8+d4VRLRFDig7rEdDI8dXn4kPNTc1D3iMbwQlsuXjzPY4KScAYMgKm9WV6fOovcNVgh0zIpmZpe5rc4PDrwyZwEonjOTO+ZMvRVIhnBqkDNP8a7lUurYxt5MGc34Pp97p6cSgrDBVwDgtrjVOrXXl6PSVUegj6kk2TOOeqcPx35ZlrDz1HSIN69qw7WN/2P2+4bpeoF2x7TuajM/Yu/HIuPhVlwFLW2Tc5aQws1EA7BlNVbU7uMT6tzcKoCV95jQOJ328Pkn1hpizRfpEGiDdq5ts0qA5R4t2xae3Vzu8q49DIwRYlzEXy/UKWVfc7zv2fcOuwHq9YJWxujfM4I7bba9BIOv4UVDzMAskf8bltDyHcizzoMUStLE5yEPLmqOc5sDwV+RGSgkFJhp0zUD6hO2PkvkgJJM0AwpuZ2A49cK4cifTyg2nasIhBTNmfLp5GavVxE/jq+2tM/tGrGC3d3l/WKrsMIuQK8mh5z1o6JNjcHuAIKSSi9LJIGXHvc5B4v5223C/33B7u+Pl+RnXpyfbR9oiyJ62BiwNeeAgzj1QxWzHOF11c+audj/Ysox9ddv9bnd2aSnveA68cBQ55tbL5cxg+szfAKDvg4f2MCQsZXnvkNXmkuszXZKKSV3ge8tMTmeB0MfvPWeyMhqIefZRplD3ftjZjb/C48hmfafopa/4nz//IX6wfRv2xZHlmdfHLx/2F/zJpz8iDHSDJZQ54puMu/Zi1SwwNtm9Vj5kkfM5BzOE6zvWKOhNAfN/Wn+JX67f4ZDsG/IKlslFgoUMs6QaGtc7z1fntDnBsgGQTpyW3wCaGs1pZUZebDM5R22LYOaHbLnckOiIdSL5MlIfO3MGMIdEfgOH7WyviQtRRtgcg6xGqoFyIrAKnP7PwbFLBREGyWkD836mJH0pJVnMENPQv1JasHdUkGESLud0A1c2R3BneEqHJr2C3nwZW6dpU67jOK2EHApulmr8mQ0a+pQ8kFg6TXvRLD9oZdobRIVzWvvh3d
mTbmPFoRvTIwpqGf7iZK52QIkQ/9LIbFy5AgQcZ9p/y/fsrFU1aXQXRqZQXRbaaifmtVIP0zdO0SQLJMoWftShDOr+OR+f0SWJkEzHOBtvPm5E1JUHkBN07LMOJ/F15uidbpSnd1w3V3aV2kunIp01PW2vjEq00LQ/TWCnefHey9EXp+fmHJy0bvNyv2/Ytg1La1iWC55exj4gacMZe/30Ca0tWNYFT5crpAm2+zDqhlF5g0iDtJHqtIhgvYwN6QDs+H8Erbne8VMa0QYNiWqmf7Ps7xhpU2orrDL2YPh8LIIwFFyAqJ9gpmp7wBHyv62CfduwbyMavW1bGuGLANKwrOMktLf7De3tFTsEbV3x8nS1A1iSGu63O/a+uxVDaCfn0OU6zVU/oTMvc6Zvzhw2Xh2eazi96UQDtR0WjChK5NxBrGmfpYymPvP00DQUTyLr0VdVOLVbK6T1+9jNNejpEGBlQUTzwNB5CpeI0R4jaAIk59B0J10XU5SnQyi5Ohq2gQ4cZPiObCaXwbaH2a8xTJk96P5227Dvn/HcFc/PT1gsC6qHbhA0GbrM9x2njcfCLrHgZQBz8lTQloZLG6eU4z74hMc/PoeyRbkG50AzVROmrIxwbpRptlJ7uV6xrmb8W86s2DiHz+yyrhv6s3+yIrJPHbJXFWNvLc0nz+6XdHshdh6n2IoqjXciiVNshHOjwE/e/gA/2L6FqODD/mE6TPARXNU+D8qScU3KwE0LPer0yDYTKtQhBxVE1p4THHj3BRalfr1Jz34YjQrGeu+n9hk/vf79XLrUTzFgf6W8BTvgWsnq9PnqnLb6VANtNhaGzGCjJKNNcUxr4JmVzvGpzPE/9hzwboIn89j1/Y6E4PUola+miYy77PwOl5DA6ayVSEo0o9nWF54ZPhZqnYnfWWWKwuawj6KmMvex5/KZOPHUuSTFTkDZb9lMCpoTAaY4XiEw6UeWaTUqbIVcgVq+gHqjVsTnJV2MjLI6bhOyVGkFb4fxT4YCwaYxD7XOHPzonVIfaFxnkW//WjaruzFj/YkJI+dLQMLQZoctjSyOdjY79U7ImapwA2qOlFD6pEcAc7JG382EuRtAiFlw5YoyD0camL86ntJxGj84uOnkzXPlof+GNCbS0HjvKcLeU+YMH3M/wzlkJuDmj9rgcQDhkZTw+Zv3uU3G5YNRJU2dtc9yzMfCfSbM3Q4Z8Rl81JanUYbh14YDtq7AelmgfdDTvt2xS0fvw6l5en6GwO5t2nfsveN6WYJHOuxghW3Dgg7d9/GiLePAkmUcw78sYz/argD6SPPVvmO1nCjF2JumZsn64ShNO7rkBbEAoGInrIngdrtD+47b7QZPWY87sHqHrCtkfUK7rHh6apBu91ndbxj3EwFP1+ehShRx+/tIid3JyJllDMl2p6fQKUr7qSRoRHDkNM52qM8ZjZ7LvSx1lH8ZQCD5arxa5RqPL+l4DF3jt8KD0ZZGNU9cyPuulWCh9v076ZuyHxKk989Qojxq4nTVMAQd50VngewE/yUCgBmIVDca7U3nzXiu0uK9ydKuaLIHEuZ5TVnRIHbxe1qxEsJ933d8+u7XeNs3fHx+wmX1wyYW4+OhH1hfeOCnNZerA4KuQN8VrTn/D9jG4vmC62WcEHu73bFv2ylNSP0apldgniplSp7PyIl8b5ZO2GwfFusmJK00GatT3Vux35kMGnZIH7hz/G73Pk6CtStAZrk8f3JqVigWAKKKXcoEz5gAUQsWbWhYYtw/uv8z/OT1D+yQFcRBMk3HWpuoQqXjwM6Ff1PIh+xTBYT2/NkGYYn5Tp0Tc8T6NXA35k1iYEn3yU2mMyBlo5Aa/N7OGKJbdR2bbPOgDk/hvjI1jzN9Hj1fj9MWWrrOKht4tbiGcA1ijhO5kgAeRffee06N5QeIPctTnYsGSYWOcEatRl4aPBOXAhEVcKJj0ZDEVVcHYXpH5Qj8ASMHhy4VWmVbMvKtXhFs3AEZXmcrWmWIk5KHz6/hIi845yXs/D2Z2htW5+Awdt9DgI+ljPaMbNggcIUb9CU53gnnZ/PtNTihIlNueKSngBx+KQEzH/6DmsOwitIpvE7rUKRK57S4Cl9xoMHpQ4I0oqlfqjf2AGjo8jrS3K/igtXhzhVIk+kOjybP+L6UgN0dRpKgnKRCmETvucdOpNUVOK97ovgD8skJPaHGyVFWmj+GxmnZ8fGINuw98dxxX1xt+1EaeqGR03K5oj5HR38T2euQlXm3NEo2kkoikQI0qVAde8S8/7IHWrud4jVqO38ty4JlWdD7jmZHM2vfY0KWpY07RDFSLQXDmUPrWLrJqMUMS1pKkDYMjdv9DsUyTqq0sW1v93G/lZ1IiSZ4ul7R72Nl7G3fcb+9YZHh+O22h25ZRkBDliuWi+BpGVcCjP1mgosdtuKCf9/vuH9WWxFseHp6wtKW1BdqKVu2l6064qnbyhQaYdZ0W5sRMvA9C4RNP9ZTZ7ImgykJX+GBCHoiaJnb9f1WKc8m/mBnAVSRLqEUTIc+gdOE2UBMJzT23Ma4GsqJm2wjGEjF8UPKM41aWZ51Eu/1RZmzBFKRYwm9Jo1LnUgBsx6829gnTMaG+vg4zdIOKHFel1lu8chqj0NeG25kyGe83fFp73h56bher+OSb5ubpZm5qjsUDao7/GTY1A3j4Kq2rlCM65EAp+8Viw1jxbg37nbfcLvdzHlTgC+RD7wwfmn+Ys5R8MHCXAjhYbPIuHKkXtuUGBO4Dyxx8akyP+1j9xR0rLSPwNOOvQmWy4q1tXF1VvhuNIbx30iftdW5pQG665B7dvQ+D0oAvPQnPOkToCNl9Ue3H+LHt3+eMyoImgcsGET2SwYVkgb8IKc8vXyWCRrHL7dFcG0Ldh1ZEug+p36x/Kjv6eYhdwLpdQ6VS81iArNkSXuKrTUF8M3+EX/6+Y/xn57/H9zaVurI1CZZMtVerYW++Hw1TtuEovgnkJuaJr8IjJkFPOEs5H5zh42X9ycAT56aHpXK7rRsjCWN09KG9T+O7xVgEgCRKkKEqS5YrYn5LjuHjQCerCsm3qpg5lEY+yEiS/lPNbhMoZzXzyIQIWds6m8yGB0irq80NOFyPetxexUPCAHK6aUsi7UA6x85ZRYhD2I+gjGn1bXZQUfFD68EC/zaCQJ5mtaHB+WQQRAbxe17igxyPMmWiK4YbTT+NIqqkK2OGIIX3AAJQjlCi7j7x50604iZbmLtTMd4OxghCtw5E6RzCLVTviUUf3U65hSk8srIeKJD23dS0woZNsfPLPYRjunhxdw3/SZljEf8HerQLHMb5/04BzFA1WCJ1L8+reA/eKrziWJ4z++4TplMaufYXkJe4DVa4S2mie8BufrKsqgdLNHzKFOMPTDrsgCX6zgKvKsdWKXQbUeXcbeZiKCpjHTEfVQep1PuADTSDPd9x7peIXYEPwCsy4LtfoPCnEAdBpnuO9Z1wb4LrsuKpW0DljZW1ha7G2u3i7HHeMZKyLIssQqxrBd4etWyXhFGvOFj27cDXh8FeDhQ40gWHCdR+AOxw0F2Sa0RQ
YfhmRc4GDZvIOSh87bYReLD9HO1E52GvgpZmilVKbETvirH/TeHNZCSe8gDYAogawapRnlzcKcAS0oIAsL4YJJQ53xHco9QGHQfKYzRrn8LiWwVDBIZ+yoVeSJfJI/TXAyZ6g5alZ0jw0RH8DwUrB764UEMJ8tGLQLtHfdbx77tuD7dcblcwnnzul3NkBcfj8J3meY9iIBfLi4Aukg4eMA4aEhF8bI+4fl5HEj09vkN2141V2A+4O5EC0ixpR1otj6jUxX6Mut8niOXUT6vIrBjZRllQ8btvUP3Ho7fuLqgBV2O4XdzetJaEsU4j6ONaxuaAOvaxmbXtzs2pDz+3dtv44ILoIofbj/At/u3ZS/gBBb87k2eX4SzOSNDop9SnuTLwGUGP/agQzY4GtcMmpQJjmPgXvIv60e3LZR4/0C3FeofbT+Evir++uVvsMtGci/lTMzBrGr5+5fVK4CvyGnTEH4UwVM9jhvIkx/J+JxXL6KJot4fCEDUCaLKZ3bRwydWyB6+tyaJUsSiHDEGG6/6MXtMqABZMw6jhvLwfXjZ2WMq8P1HB3xoOhrejuM5c8zZNZmqAyfbdDRAKnXCOXSD3cZuZVkhZX2NjegPx+e05F9dCEu2n2AdGTnq+WRFPUUFMOvE2ONz5cQ8RCJXQMZUnom00fiY1jml5+yhkU5a5ihgXchzhxJNzJHbgSKiz7m10rxGG6pqFw/nqoYAdvqTd+tIlPhe9ykotazg0zL9FM6CH6l14l1pk/lwMh79s+HjkZNUVlaZPl1hGF0MB8IUcpy0Nq3mHjF6gkspbRenmr7PgYEZfsZtCRRJnfMsP+AYcuVYhh3ULF/fldUbZh8Cbr6rzfuRMDYfOHzEP1pohSE1hW1jV7vzlFPVE5Q8dGOkCnaIHWagELQ+DiVZlwW33rFveQ/cvu8Yp4sJts2Tm8ZgRffAww4M4673WKnvXeMAEzdIFruBuO97pEUqhu4TdIiOu4NGHzIuCQbQ9y3mDSqV1sOBk9hfBMDgqLpr5nWXVaxNc54T8ceZpCCXB2WIXkQ8na8GJHmVNulwJoKUkxoJZdmn7xP0C36rtV2phAMzGchLfg27RJAnqHJvRzVSHM4SgWN9Ng2J9/A5/Re7Tt2lKEAbDn3VweZa2JnNq2jUKqTNMxygpdlF1t1tMYLJFJZh3Opp/PVGBQi6ZjuVbZ6Yz2ilOZHCbZveFa+vt5EKDMX1coHvY0pC9dVDBWScfe1oHqt/G9AugLThrJC8H/p4ODqtCT68vKCJ4PXTK7bIc6U5PaFt140h/5Wsm3dEvE7figQzh3+JQAPtTfSU8CboOvZU+aowbGVfMZy6FgsZEj0sS8NlXQDt2LpisXJNgZ/c/gDX+9MIColg78APtm/HxdnqNMirWs6/NBKi9wxckO4q46/f/Om9w1fs8qRtxb6P/wfPWPnS9gnO0zZgfvdW020GvQXAu1viMJUITFgLOe+K37r9EH/z9HfYlvvUyzRCPX72YIdnKnzpeu2vw2lTEyos5JgZLKpT03Vm44GbY9UiYPFw9si7bx89Q8AtfI/GWSkTnBr70HTMim0grz4qkbMef7cvCXX53f9xBenmzihbnVGNMVczNxvLuswgSH1HStC/sl6SByxa2naYg0FMRUrOSY6gtuJK5cDEjIOAm3ufRsqDe4cAwtjmiKkMwXK2l+kYOEhjvawEIzESK0j/Q49HwohXpjG6fix9BTkJtcOCOUt37Zamwx0MyyjIozWwc5NrwAegxrcpfDk7nwyoOz+qaaiMNlhReGmNMfjn3aLB2S/RmStLHzuDkdYSwglweGJTc2VLFJk1vSvjjuKHsQ9QNcbI8Na6RHOnmqKOc35/5hiF4eaUOhnTrMD5A881+cEJiWrFc4BjMCqrxzFuXiGRoBFS5DpokI3mwQpJayl93Ni01TkZOQ6+Z9IbGfssrb0ObNsOkeGg+cl2YSwPkwkh4En6eiRr753ocKQ1hkMWBKfjvji7KBkQ7LsiTklUmDIRG78i3eo0Q/KIjOGkIeZBgcCNS1q/KqPOT36vfJur7j7PSUP+vlzA7CUm7ybFKc8JTE7R6ZugebTuOjspEHA0vQQyQrYnrx/pdcgol3vuoCi0OmkHtnNjG2nccnGmRRe+/Pt0qGvquYon/4nd3qCXB3rLqTj0gUEbNommDt07LKXfAUu9pAZ/o/nO4JPRj+8zsv5ELf2N9iPNB0YE6IG07Nt/63vHr371CR8+POPpuqK1JYMYIZvcYRmj3LYNt23HZRFcljYO9BG6i0673aNV9xhfr1cAwP22Ydv3ceKqzrZTxW/oGjmuKPG0NLGDuZRXYh3imCk7tKgPbblLrHqO+xoHxHsfGQTr01hJ33a/SkDGiZTSAVksA22MeVlWPC1jpb/vDbfXz/jLX/w5BGNb0XW5YsUKEcGmKTtPg2GSadPBXm67Sj1LIgDgrzjqvWL70v2NUmguSYM4vcxGtjEzaupAi8fAr5XhVbmD/isaoz4+hUrleJQ1UMHjyz5DmwlnG73/fBVOmwJx90wcpdyap7MGg1anzZnQFeT3f3gfWjFe5hkLgXtSFmr50u+3X/eS2O87tavHOnMUuoBIsE5kiuqg0pDElXQaOJgEKD+uXNWJiwQkdT90SRAgGSgV0iKPk8hJV5MhForeaEFonKGwOilIi6QmgxBz0FgEEhupZ8VawI2KxqyaP3pUPld4PR//BIneHAnzOr9VGM5zWds4gbeWKJ/5SPWgCBHbI6QVNwHEkRqqIe399BMIR1TbFblHiJ1nfZ8ew1jSC6P5NIJjD4aM6FvsbbCVOsZJRjcrXsecj4Ic60iloQHLiQ8z5pr50vdYyTDyxAgp58bbc9ic57PRYiwRfsd+LIQRVZJndaaWagzW+TkqsSyfjupxv9n3WdEdhlhAQjLu0K8O2jtTdg6zGm8J48YK8OXTEoZVlgn5A4kUOUS7E4XHBdwOX4eqKxeEL6BIXg3lbjC2BiyXNfe1bTt0z9MZ08QdjlYYF06a8NUfg4vQ5TI+XilfCs8XUPs7mnf11YZup1wGlsf1AF7WCKuQi8NRjAuird7JOM6UO6YdPhTGMyMA2N7CpK+Y6zO61YGoznehSc5beST/jVTlSf6xRGVZyzB49zHbZ1e3CCB2f17dO8P6D0Fvbq/w9BbnkZ7ZsUvYkjDSaHba58DfqJjUBmBCl8s5z/JIZ931mOHR6CPWU0imHWzfAn/lGF+9Lo6q9pCjASDRm68KuuxmTREjVeDzp1fc3hqenp/w/PRkKZjmDAUxjD5aa+OwERH0vpmtk7wgMUEO0+ixNb9YXtH3HbfbHW+3O/Z9H3McZHXQZvRL7h90nnP8jFRtl3FZk+cr3ocWyNVyBXAP77lBZay2ydpwWVbDvaAtdJCHtfSyP2HdFP98+2387uffwdvbHbvRwaoLdBdsUkd2wg5pT5Hj5qt6OMhmkj02tpT7xhNhh9o7t/UdEp8v4lv1tkJ8JG2HHCpIrQMJ8WJj4TE6THX1nALOEcxJWhYo/uPLf8bb8hadVhpOHyD6jXlW0j/H
LUVnz1fhtMUjwCJtIIYQrxC0drQohn1fHbbKQKP2/JxtkD93zKbqMwXz72xjZQgIgEUET4R3NdyO/XRqJwRh6b4CcRwpgphZYXJDZ3rR34cCpXENQ1iDOWg0p/0DxF9sq5ggn5VZHlNcWnCWdv0ev5USLv+pU2caLpOFUObuhL8D8UFXk5Fz5pSdjd8fLpKyTMLAe2Q4H8iyCKEqDGfl7yMaqwhE5zT2SHglA4J6KJ/juPYgTF8dTeFNZ6nVNszA5/12KUAThjBXLId+5OwXjARNRC8lmpWD8378l+NR/3rg2YGfjq4NByfb+bNnfyIscjWNEbbuAGQkrU6o2zSh6N55JJpLB5DHeeQflzXuiMwdnAiGAz0kPqsMO6axDbisuPpR3bOgSWDDFBXAV2tFU2mnysx5iLRiGcZW777KZWVGI3VIaT4VRHl6CqdMshBQo9f77RY4T3gIwS5kO6QAACAASURBVBbxP9O8vbwgSWODZMffHQV/X+iKGk/KH+a8r9q4g9HN6SoRi2ICAoKOEXAZ9O4BumEXtsDjWOlE1k1BHKfQNmITD06M7qtu5WDlMF405oPldTw+hGh7ltH8g9YfNHd5cxE2nsY/JgOMBgeciMAMwGhMWVYcK86pOqqKwIGWxryfmqjFkXn/va6FWv9lYIQrABonrKaN1EToAIxMkOzIK1t4iPVzzqNfYzFoZqzWhGSTlnIeOARRSO0Y31adjajp4nPg9t533PfPuPeOl6cnXJYFfmw+46n50fAYPOkrqCEvc7cePIAFILIvmghkXfG8LFjWhtfXV9zvVJZVmVklYR8439pcuDxRgOQLP16IsWJz0/IqhSa2JUCHLOnWrjvkKyRPkLU2RQTf7B/wYXvBv/z8E6w65nhXoK1ryRgsKoFAyTFaUQkJRUEc0klaptERANZ6OVeSf8N+IPyxbjegeJ7ThElHb85iymBEHV7SWf4V5mvuw050KXdMprAyGSBlTN66WcokdSnIAcbTNAfvPF+N0yat1e8+ohMF+N5TIgWmsA5C/kttTEomjf1j2W6RYLcRz4StARa/ZPTBKklNVTg77fEUgKDxLy+phlDMKl+sMMlqeiVmbOFECE1T5jg861BzbxxH2HzaXV8K1LKeNJjjbLUKyDkDYAdQaOjTUMBFafM8j5aG/BzOWLQ3of/R6muSrCt+SpMSXoXxz0e88OENjxz7+tvcxqQWyrynQQJgvkrIIH8U8wm1GI4Vwzy3Mge86nobRdxAwncWfWIbzrXOS3F+c/Z9wAgCLo4cw3L83NX52CcpDUoPYNTYqDci8AhcxZZM9IHTckkrU309DiX1ytHRnIMsXm526s/2JkYL0QevNDmUvRhhDIt/z0OWWDUZNmQchw+MFKaQMayrA+8mu4dAnIyfUbw7LzN9kHIvuKUZT7w55TEG0rAKI0sVuZ1Kqc3E73jVY34bQcDlAqQwZNiMERRkFBmj4fD7gdTuLMG51fWdcBtE68qYmCLG/rtqSX1OtZWrfyknpcw9cJSLSU9yoFdmhOIICxfTACLSnw1dvgroepN5OfGdvzIP+fzmXj8PdJluU+86A64+jnM1zIOZPmjWiTGFnklqzb9kG1CjlabdVUjYlAGPsUoEQnObCcFiePJDSCAYzpjrJnKmhfim9T5WahR2MbeYLq1oGDBYWmAc6iTBTzP38UfDEjxFUQBgV9w+v2Lfdnx8ecb1cg2nrU7DSBEcoqyF3G5uE4QOOdPjidPr5YLWFry93eyUyR4BqFlb+Yy5fSeSJXhMzqtKq4Pu+DMNl+HYN5fNzQGEAHYAyZhegUjD2lf8i9ffxcfbB3zcP5oNNXrpE+JPVDeO2TFellaqc1hjnu3KHgRPodg/UB0HwhB+jzL1GAQM+MhhRuhhtrmY9di+OGFYsh99znxVOPbCwnkGpGd41C7PU8/X+atQHTWRzx9vvZnn/Ph8FU6bE9t4SBSZQJP8Es+jVY3xLhX5aX9hJJ0RpZ5+PtAvC3I9li9ErSe/nbWDo6jOIimIbRBHbjtRhg8NtOSj+tNUnCNLPC9e+bGAQRB+kKIkq1WwT7UgvSeFVsboysvbPQBfGwyrFcfJMMPMn3ZyTDt/CphPSIQhmVcqZ8M5Ve9sACU/1BMMHwwxoHM6EWorDRwRjUpKs+ECibuubbNBz+/7KY1FuoK0mKXanv1KRmaucHCkqx340K6VKqIwle+ACfB6joA06l0xjDZSacdViNaQwxa0pYA0hWodb6wcBPJkkmlV4MeY1U0RjZPQHK9HPLticXk5/u8HhMXc2WEYp0/AeCZl8uFDKrKurVgIR6mZLtMQcIpWHzjU7tdV7Mj0PvINAj+zBPUAxywjFOMuJpGRUO9R7tg0DkBAaWHRp59Glu1m6mJiToR6dbyTcWWlrN103CJ7wE9FDWiSfzxjoUZrBX7MevbpKbg+aC6Pw1N41+g1Dl0BMBwQwiWlIPvKmI8nnFfdwxATb5dmYaZz52va4glOgJ0VaTj6hjde7eF9dR5Ac9yEvCLZwQHTDCpV+ZvZADYSZchojLzZbHKCk0imCQjxVYMnY0zH4MnBnmFbgmAbP3Q7gI10oFOv/RQyxME0I70ZQniFUw9/nQZGausIvlj6nVXT3fYy237OPvJZx0mqEMjShjNmObrz+UKuqQWw02jz+llmK1+7ao6CuFDZ5npX7HrHp96BD4rL5ZIOt8mLseq7Q9DouvIOYDE6yfn1YKqncw8YjE5kXLXx8vKMy7rg9XbHdr+hjxOFUAjC7SF1fk/dwqtIY0iJnOQvk0feJNskahkHdnCK84y482f6y32Mv3z9U3y4v2Dfu10D4un3vYzSdWI49+EwEXxtSSdRNU5P5el1edpVI626Ulaiieuy65XqqQaNvS7bTy46j7b1BFi8VfBeufhZAMAPNlTEHnlFsQd9G05MDdV3CcQrfRU3xvcnUBUsFHv58fNVOG0AGwmPjYnfpK3Z8OHHFfVZel4pR6+CBqc2PY3nUSuPmvf85krAx3b8tzDWprYLMziIJ0oB009RZOq0Cq0ccJmeInCQCoHbhcRx65HqJIAf/RtwzvgUE+iSwrRAJ9Vs4CXtigQaFuMtwqwJbRjuQ9NZkWOqiEduI000RA2KUjnABqA6ZPnUAz0I4BjIcXiP6HrUkDKf1VEgw0X4oIJ8/FCG4vCc9UMKqcI8G4G0ghrOjadYTnPmAKeURuSYE6GPZvgUylYMvWbz5wopHSItbaQxmQQT1KmeeuJ1UQg2Vk0j5dRTNMaR7CJLwcdwuHg+K+Z9Va4oS6JbN4DcQfM3nDauplCdRavz71qux+c5K4DiM0V+iik2lhmqtVDu07O5rRYpPECgtn8r+8m0RMnOIGCjT8NBZfUnflut9+YDcLmq7FS4IZXoQNAAq3wN2nXsSAONhmschXus7JPjRm8BS0VU+CqiBr6jiNVi9R/GlZ0al1Pr/JyOEcu6ysGNJRbSkfBuiBvUVzVbjslrNkFbF+C+j4NaiqE3oHdZExCSAhkZXZT0etD93kYGTny+tO92KiIH1dzhrrOUIyIssDN
tMGop70SfsjLox3HgOoQ+jnH53DMMo8WOcUpfgkLZCgq6q0otfbEwCRDH2E92guaetvm8lKKltVBGHojh7SNx4fpXqZ0mY5Vq3/cxnt6x2anXDcBF9sFrkSYpdHBJzChUkTQjTr22CuQ6dABh+1qdUTmoKdCu2O4bfvXLX6O1hrY0rMuK63VBWxq0LUN+R1+jnd53mgTbQ68A0AN/JVgg6Rxdn55xuT7hfr/h86dXbPs++Nl505sSABRwccfTeVpsP56nOIaulXGeg69CkonowGAEFyXkXGg1ARoW/MnnP8QP7z9A04Z917jmRNVXCFkimLOqElePOM9pjD1PgHYYmMbKDCuGU6iKWOVkiW2AlkAZfM6Nh5uid4muWC6WD+Uk2/y9rtLxI/BAjOva0MsspyVzJNKWOkr7sB3izxj/+f5Sx/tRX0yDe1iCn6/GaZuf2eOP5cd3HC3OF2fDNgXy90HJWbsoAjPbTWPHy53V9ccjuaEYZtn8Tv/ls6Z9UgsilP4Ma3z3aMnhxUSUQi9yBAawMRmV4HkRLjsZSUAaIKwsGT6/bqRyLVyuUdkDwCHA6vCmzp2WlGimqLh0yNipYkMkUaLlva/gEEipjAjUbEbznxP6YYUQmKX23xMFMyUk/YHohJQGEIeYeOSXI/A5ljqeAYtHLusKpadHKEPjxo8PwswaAnV8bDNCHGIB0MwoUPieG2+gT3PUzZgM45ZSNuISWdXYe6riKzYzkwnRF68cSuBqNDqlbzLCTxjvGJyYR1xpaXbs/F+FhFxweilpeW5lItOxEwx2phIur1cwqu52JDyxr8lhtpMRI91lxofRXK5kOrzUn+NLXS1aHR2CoPCXy6XAo1dOxDpu1CY51ui705rvBct59wtgC03xXJd581Q6oxPDzdhrAnDoNh3JqtCDJ0FOC5VwI09p1e0R7RC4geEaJOopMD1FBy6c1P4T4nkMg74Duu0HfZpJbDL167RGczHNy/hXgx9dRjqvxSqAkJNuONQ4El0D92Nlahkw+XQaovywsxK0CDvDBICnYroa46Eqw5ID8CsJahrsGEsrPMsUn0K8GqjcjgQOefXXGb0h71aT2gQg7qDVmSoZBDF+dZCTN6y1fR/yUYFxP1hnXTImSkXHOEkXB3p4zISelNrOzKgIz30nLCUC34pxwiS2Hfe2YdsWXJ+uWFdBa3scMLMsSwYgCLfMG55yvohCtRXGGuAM2rxer1jXFff7js+fX+1ibqZXiUVxv1S6rqpTQMiAiFRdwwHP17p4AJBmVzXufhQB1t7wk7ffx49uP4SqYtu63d3orcwHybtGFkubZDpweHgbRM5Uzou11Ic+2O3Caz9MjsMo7hCLyxUKvqc+y6tIuuOzwEyyZebJoIlZtmL613S86xJrxzM1ahCPeA1EJ1JCmIaOsxyybCXWWjVJfCr0vZ+v1mn7TQYBpIFxnMzH7X/JiTs4S8cmHr47lI2ZKubH9+6/9mnpA2S0uJz7Iij6PmqZ1L/fb3JeABZ16BnZj8ck1Sw2M9WFRGlMqBRqjzG84zEztk/xYnVGsK0KtXbSnk4giQnnhCppcK4zdZnvQ2knNx8PEfHIVKXvR3R+LtCGwEuaTUOAy+SdWQkoG49zl2k4n98uEg6Liy2/l+kAJOOOnIqJdBxWTrkS8MqdgDnriKIqrGP11BW5zUWuvg6Y60XlNHbhefNxpFPjyjoNMg1nlOwbwtN78kS/OPdIEMJYG9FPdgxrP2xsMI7m/kLBOe2bMBEhZz+Am6SbweSGojpNyTRHaWEjzjOwEjVlOZNO5oAIbF47a1zqxWnHDbGoBKmnCIZCJ5QgYTpyin/RODRgwJScM+CNkAAPPD91IJ1S3jlD0emGwL1BHpDEHARgCeGBf02u6kkJlpuxDwiAoEO7HAiVZWjwliLmOQ0rgkdnLJph5hPE/B8OUaMxH6EezTiPZfssowNbIQcQdOP1fbW0BmdsDHFMp8kbOVKEAEDTmmkJymJwerI/sz1SAy4SOGS9U8YWdJsCR2wsTIHZftIPlaarBFJADdm4BDW2ZimwCqDZ5dUddpQ+jYPgIbYKmcp0HmxiMjJFZtIhy4vsygIZXbHvrjtGKuSyCmCrP6J7UHQuCnSj27HqdubkM57jkzRcLgD0ittdcHu7Q3WHaoPYipnXGGmLenIY00hXdjgQJ2JKwXtKZZJlMgKL7hB86C/43dvvoNuF23uvs+pyrNi0U64NOzK5EkxzUOQY/fErCjAOc2H4E1yDx3UlUoeRaMXQ/Yxt11WpM/lfHo1T8uHeT2Fd1DIApYkXn6NMIZ7mSgh8dTpH/Nihj6pYqXP76bTwF56vx2ljpQg/Kel4lP+jVbbDJufSNBHv9Nv/X0/SsMSG8GLMAUcZ8U5jJN+KUJspp44ZmBEk5V0+pwZJKHiPp9IqlSk8N7a5vTa1l4LblcRZvzXNR48FDkBz+VH2aKLMVc8UppeqSnPu5TH9xJyYVsn9ZXIAKXXj2eSn4PV0P45CP4aGFY+V4zqzcDHHJVuajDHqTCbKcAEZWSYFqqHcPN1wNDbPpUY00k+LOfLmPMoatc272qojcZSirpQGBfjfksbaFU3aSHIMB4tvvALph0loTfwcKXKWayS2FwEo9hCVn5UqdWNj5esczmjJ6U2t4lDUExcTe3ifQZtijgvy2oXAc+9l2Nwf02o4RVG3JT/YnHuf0JEO5hepKtxotGg3GZ8PZRJI/vGcgPsTk5ta8JnHc1ddHYp9QlqmGyVPpF8ggdBwNiWN9FwhoHUMtl5QEJvGh9OWCUo3jHxl4JRmCAsPNCahh9KhAMO9yz5BPSMs+cXvL/O76WJfERJHs8zjNO0KYZ238YsiT9LkEY4SbFzXEIFO/6aeEpHUN8Kw8rhDVED8DrBYfc05ZoLk1FDXJsVvprEDEot0Yu25M54HTDiuKXBygoXguSqGIgAV/hdyJdX5jWUtyzV1+0J8rkZbDnuzsQdZMP2JUJ+YD9QkfBm0MXcumyeZCoU0v1riOH4lJAuAbd/x+vkV0hqWRbCuF6zrgnVdTulE0QLmTO2T0oePi+s53VyfrrhcL7hc7nh9e8Vu96VxMKkJsIhgl1ZW8ceEt2iL0xFdIA1bKq8QKQ64AKKCBQ0/efsD9J7XNcS8+QzHSjph0Og8L/EZfVddPs0eiSiFy7LUqyJJP6UFox13qHNPnqOjSiuyIMYYSIic2lHSwNdylAAX3nkk2+/7uItL2jKV4TGctXHym044JJwm6ddCKT3ef74ep61YQw8A/9JovvCcVT9Hkjx0GqMtNya+h6d1MOS83qPxzBMOIpjgnWrCxN8JnC+hrBC1Zssles2lXfCpQifkzfvaQO1M4uLQnzXglQ7wZYorHyTg5poaPL4EPXGCdcopt96rG4v+5LG6LjykNhLjQqQnQNIRUJof37+TdfXQRjrcbBn5SW0uDL28w5+nTp1ZZg7D2ETOZpz3YyaRugLPqNoQ9H5gQT4BAxiHBLFLohmYOth8xxH2KfJcUxTO2psV+7Fo5U/fByRRj2eVo4hjhcYMt7ZA5z
07bPSxtRBDyrGUOOZBTHxRnVgz5/QXrWjeD6Wc7Sa11rkSHqksvLrqePPjr2NlDMTLRI+xAqsWqSZ+PzgrXJmwAJzRmtEAAe9xZZYXTHFh64x8R1KKSU+qefGz+njZUPO+OUXSDYDIqk2+52nktDe3XGP+4iCXKtt8iEZ98/REgbLiAo5G5+/ZpgfVyHkIom0Ayb8xhDQwR33qy1fGNUN17CzlSPzuO9vDpbv1S/djTeM4e6o+Ffi9jHmwkKdGjj9+R9+YewWvjAkZ6czrIQOINlw8zcagFtlpE+P85bKmcxkt5RxXPHYeYgnIGMx5mbdO8Gj55Jx2DGTrgZbYjGYHL1oxIGmWC51FXXg5/u6ivZd+xX/zA29ozGkDaOjadBbcBjG9Yu1kMCQ7UCBPTjK67b2PVbdt7HtbloaXlxcs61pk1KAbxkvD2CudAYvEFI/Ydb2lhLcFT0+Cy/WCfdvw9nrDfdssyCV2RL+O8y5crkrqqLws3HnNj06h+VYE/fM9posCf/Hdn+Njf7G7jt1mcBhnfooWD7SVafqVTlXnulnHrzIRslOgPoNHfvI+Zif8UTaUy8XEV82oIUICGAYAvslBrE46dQ0eZBpuO0bmj+1DH/NKSxM09UwRAZ/Tsx7fnT3N5uT3P/8YP779Nva24X/75v90q+zUfOLnq3HahrEkgeSFT/eZBFBZFSEJ9N7qWSje1M9e7fB8yT8PPV9gOtgj53BMZcrXL8xXGoU4UIaf1nVK+JOW4KAhNxUNyvSdMUuR77kOnStX4Z5aO4My7XaNA0EetxKFq1RwI+5kIoSjimFYVmi0CASjGRIUsSkXwO4GPClrNeXYqZ+MMDnN6AE+bmU8ecqRTASiyoZTiZMenkzxFKL7KsgjyexwjHH2CaTgTt44E035mypybpgwac6GApMQphCmHafzGaKZ1k7w53Mj6WyUI9gJLom7vdKgy5QUduaS/+aYZZJgt/ltBqdkqhHhKMqTEMnoaO4JiBFLGmYH+4x+81O9cnbMiHG4NduKSGy0S8EBjL0FTexKk5i7Ub7skZsUPIJfyACT3AFQ4zKRrMRNxTUqiYAY0ERTDk+O1fEVFb2ur3B6Q5bC1Hvyse/fArTsp5zlQgVoeihwFXvvYEYBtcjtjGAAr+ZWx+OgJBgxRjPpGLvsaYfAymioF9pAyDU3bFg8jUkcHNIo7qLTCoorQYMhnJPzlGnGlfOnO9wBVgl4DVhSlrpRm++Sp3isiTuJ+cv4e8WkmM7p5JwjZZWRqCmY8V8zmeG/Icty3xwACGkVK+6kKwr8x+e4foET2MhOICexOnZSVrdzqJ7WLSggIx2qpJ2kc6jTaSP68ZRRN5yzD9ftTtoe/FRyaFIDk/xXCzRCsYyNfJMstVXTHVAZd8+NIE3Htn3C9fmKp+sFy7JiWRYAuS8s5Zg7PBNBpiCxsWWgs6GjS8MiwHJtWNcV27bhdt+w3Xe7nFvH3XZNSl+OxNgzK4RXJC6XwKmAtyL8m09/hm/0Zayy9dSj/iHuVD3YRJKHDZFW9/qkpmPozGMelHZnmh/XyQl//u5vl0hw8MZtp2mKJRpLthVZHEqvC65mSJilnP9Ni5tM0NDxEg35IUR6aI1kf0Ha8ZktpGu/YNUL/t2v/jV84QNQ9LeGf73/Mf73b/+D0fjjNoGvxmkbw2pxGaor2PG3mEmTvjxPYZtar6GlQHZJc5mgkbNVNrYj5ojp/EW47Nz61CxLXBBBMhdx+1Ib/dL4M43ljLDfBS1eeCSzjjXbbZbGkqk/FdwHkHnrIayO6TAMnJaaMs3HewfUfK8nxueap3QQDhyE1afQ/01xUDpkMSiQND3QOeo92HE3gWZRIYo2djOqQq0Ilx2dxb5H6oNTqtgA70p7WIqUdi3aQ2+1lmZi/FX+wKxgVKAp0GPPAiFnKJFUkmFsTDwl0aLaZbGJp4wYV9wN+sgIZdKlGctRJg06SkIGICU1Mdr2duLPNJfFwcnxquGgtKVOx25oWUn7PDt0ciLDkmyn1DRjmFmBk9t0eMLZ69aAK01e6QXKat1QqJn2IjbvvWda3EivkrTOgmZM7pszO8s/V7aA5oFEkvCn/Wb1ee9UYsiubagR23kfxBw8EMBz18LwzEbV5EMvMDtkcS9SGRPgFxO7PAkMcvPkzTh3nBthDHOmJrphAJ5nN0pDTjC1171audIkpZ9maYNxyA/xXMX6ZLw5Oxth+kl4vsLttOA1m6cqqaVVeTMxfxrtjpNTxYdGcu4xbCEb7GVmLE1CRYSOZ3SZzsGwEw5iEUq4CwmjdU7nJk6zeWxsM2ULxj4b0kT1fcsVvKraSgjc+Dcx4OLGcz8O6a4JLCBip9smp07oCP53dPpx9VAN9NJ1eYM3FGO/l44xNq1ZLEHm/y9z7/OrXZadBz3rnHvv96Oq+kfabtppt93EjhQkIiSQMmESwQwiYABKJIQYIGXKBIEyZ8IIGIEiMghMLIFA8AegzJgQAUIyEBzHxm5sd+xut7uqvu+773v2YrDXs9az9jm3qp0wqFOq777ve87Ze+31e6299t7xwwjcwiZ/bqGz3r/7gNvzHU+vHvHq6REPDw+TpxwShC36+jSKPnJ34ADgfmDKwUyWjDFncDYzDCs+4Cx/C2aGT7mL5M2UfQYNqeig0JkZPjre4smfktf1OSTtWCZooJyqWvXg0U22yK2mKhFN345Dz1JS1CybB1yrFUw+PmGPvGow2zNppHqy5MubjFy5dIYQUavv1Z/Oqo48ciLdNoXL622yBP/SH3GFD8Anx8f40eNPcNhxOvvWAbw9XuNX330fnxwf169mcWzG3EznuE8e+oICPwBfmaDNE1ZDrWfzMPg09jUbdxYkVREV4lVAl7N4yYjChD87iD/LYwBK8Xzp8yJwtjBbMaxcLVir5/i9K+zl3lmuLweQ71vBYkArP7yiALym+KsJArWysX4r19HXx6kMSOH0YM/tlEOuBhcpceQJVcgkkx7USwS5vNVKN7wcYgZSORYxxAyC1NFgcNCJMJqCXg2jzgy2ACQVUZSYZClkbVJTO4fVbFrOOJ+4pcasJqKm7Eu1jWDOHMVLsiS411p34i5tEt3DdiaPIwnUeLdmcopSa6ekcwUQp4yg9/U16gjVow7O6zHw0NIZ036ij3IoCbMFHDZnnSTRVI8Fl4u+41imXr/Gr5ZvEg9Z2mvaFrFVs758hkH+XAuvM5tljAl/9y6DDqM+lzNT77a1PBbte63jSD7wohfbWK/u/HOGiePy0E2NOYSyXla28TO6YR4Q+qh7wXGKExIBKB3/iYL5qa/t8PYniRPP52bUoX+azuDj0tY1O5TUFhwyftpYo52ty7PM8GRwULgtTqcunLCV3jmXPwYhKO9yjwHMBk0udEuuwT7yuc4n8C22jPeYveeMUfRFerh+9Tweg7rRWV8MYK39r/y80KHZurQkMj7BYcDNtYi1+bg+s9QxNB3EH5or3zAdqnG2Ib6BS4LBgTwjTNeD5cPWt+qf7Qq1nXTrHZHfMslGYVqw4xrcGwDu6GjLs9mGyBrIu0HvI
bbUD/BQZOXLhiufs1Ef3j/juN+xbTse9nlcwL7vMfu2XrM9sYSJu16uF7+PA8cYc6btw21ZQrFgg7txhl6a6kuOQxB9mno1xmdm+OaHb+Dp+Sk3H4E8rxU/+X7cd8LihjhwLvyu8ggqgD3rmuk/zHHNYI84OtObdBijaKI0bTpB2zHkcvCiJ++qPBRwqc+oK3BxGQDn+lsmMQPa6POiabgZdiAO9FnuAfgnPnwbv/f0Q7zbjxN0r45X+HPvfhkfj48l9ojAMdbRvT5e4Zsfvo4/evjRFdTt+ooEbfMq5bC177xOAcHJsGo76z1RoWdf4HStxmFVku27vXxvbTMdg+WhaqM4hzX3mlFS5bvqXCSTR0ZnHaj0u+hUbWL5ztp2v7zPZk4GoAF19fv1s4WX8lJObkAG4rqFtSMRoM5aGq9O/7XNXNsmz+fC2gb/mdg6c3UeUTlNdFLcHbuVytIpfw3Ycoa51euw1X6pgrxymCx7l5DMGFiqi7MEj/DcKW3LcsbCEWFO5pKu1dCmQ6M6mhDmqx3ZczaqZhdzcxCTQ5MdQiPeu8ANjZQaAFd+Y1BLeZs3xgGYjXDwwhgsOiUzkcD5sKSJKKFv6a2CI3A9PDYrUVoicE99hvZuyp66f1azwnlQ9tKnGTe6kHJQWzlrSOAkSmixWjozlmV5TCKEMzCfIZwUnNUl897X6Ttf05Kp4O1UkiVv/MHVsUp5NaybHpyl6kxLvlG6nBpjJgAAIABJREFUYhI118YpiogbIdq2Wewid9a0LyjQEy4cDotZ49LKfDdoFgRN2Uh5lBLQbNWTR3PTGFD/IHmncX00WrjuAd9aAVN0TuZe6FSGiXCmVm+o8pN8GHks9QOqhdRVfD9KxqL1DLY9dvLj+dXeKF26Tdpp1QjkJyHFdDrLCfWBCo6xXtTPCF0XO9qJLFCuKK9i0YqOaZOQfgRhpP4/neeW8mxRzi19dhCl9H+rFq2StTN2cKm8Uf1MO1szlR2SM05Kh2zJj554Lvpv+45j1JEbKZvN0nvOaj0fBxw3bLbh8XHH06tHAJ4zu9vG0szCb6lzxYrMlrjjuA8c9xvuURJZZ4QSnIWv4HH4+U7rks+z2mugEo8uNtIjONTjP0otnrUZQDae+J/mPLWC2A0d79muGAzbtmUSJO9xZC4jifZnqT2afDpRYmc+oD3S1ZDF0vI8ZYGxQfyzzjw29KT8MHhFsQxhy9+qfYc8c2kvcPnjKzzia8cnAnbo2+PAGFPuH8YD/ty7X8L9zQ0/2X961XJeP3PQZvO02P8ZwA/c/a+Y2T8J4NcAfAvA3wXwb7n7s5m9AvBfAvjnAPwRgL/q7r/1Ze0rk+1xHkYFAoaBDRuORVn1L71UqGPvJMNfYCOHlBGsj4hvcH1dvLOCe9VmOnTxgwsj8VyrUKu9D6/PdBZyBuFL4ICjMarCopeeUUSns2WXCOsLHa5jXtXWom9SH6vRtvXBNNQCMwflgSeptGQpIXvs+OlGoHVFBeR0YjTD282vOguVja/ZsN0M3NnvpFSao4BUJsyIaeaUzkaOrAUb1hSjyRlmzCq9VC6aqjHA6I7o+o6feOclxq9+2Vw3YslKhlwHVDxuOAadw8KlzYgTts9GddOB0eg5zxhKnObuXCxlC+fDEwQCBndgf9iSVjnzDwBGHbUaOD/jwegMoTleLYiLUrk26xcPkDfWGbXUFVIq5CIwDAZXmc72UEF3yl7rh7OGQOe5MDrsrwlgLap3Ol25rX7HiYcTtW3UabSG51wqYXVdQxHsOf3HK21S1FSNk0kU1pM1zbNcdIJOgXG0GcHoxKEFXjRjTRgmjU7rC8dowXffqS8AWAQt4XfJ+m7Fs+GOpL2oIkevYD15P9qMtTbjGHFOpkeuyBa+a/OWJ2SVWrs61qCPo7+q835SqhZMtjqhE2d9/mvq3XFiBUOVisGiDDNZiLohgoLV+BGGVYhEYWTSbTispgfiducZ6uqtHSVQz65VN91OJDPGLK4mLxzFeQuimFSRZScqjqFRZWih76zgt5Nem7qJoRt1EcDtH2JEIl8ur7bSVlgeTE6YU43JrodVfdEMMDYz7HBg2+Jg8NJ5hR/ObQIsQYQDAwPPzwPHcNye79gfH/AYC65s2/Dw8BgByhzlLH0csdkJ4McNx3HH/QDGOHAMxx44rbWa5EuHQTdIItdSNsPGgXp18qTuKMr1/t+4fR3fef/tCQeQJadUSeo/IvnKk76bQ6qzZ0g87vfYMGWbY8Z6sWKmxqRLQCycqqmCvB3bkOMVHvLWsur66ytniJM3YmwJmzoSIqJrYBe8m78tHTaYreycA1UvTFlb/eCOrn6usF7bDtsBs4FxTBo+jgf8hc/+PAYc/y3+uxew8Kebaft3AfwfAL4W3/8jAP+xu/+amf3nAP4dAP9Z/P2xu/+qmf21eO6v/in6Odt3zECuhejyV2dURFpPykkZon12nJWkdv6nuWiUT6bNTuPSTnRofQt8T0NPZrT+aulyvpPjoQFgJn0tbOlNpLKoYUggiXLG5CG+pyU42rZr+2JEv/SiYUpFvd7vDWXpVjiDiqfMAiovtGU5K1JRONVb0WffWZLUtpy1mvBs5yY93jLMnaUs1gLmGCp7CSdwVEBUujNQKLpswrPEtKM2ltBZUuLxTIE0HT6NomX3G/piY+cGdKU9RQmeUOhAZQ9Di3ndpBFKSyNymIZAnBNNyqgfEwhsGTdvjmDOk0U7pNO8txkPqJ2Gdd5jGcj8vm2IzFg/M8ykB5nkqCGFLI4IPlsZieoyJmvyazVS/gyD/oHDa4ZM2D/f7cF7YWIcIR/Co9wsYTCI4KY7DDrcYyYw2ts22Zq/gJi8vZrePjNQn407XsfjvfglbySPAZvtyBkOszSq9W7NtpKdUvf4fCrxhThuoO3cGnKw7uzkotWNsloBAku8Sg/FO2ntZfwx1tygwgqulA11OloQSx1HoApG3qur7KDqj3S0YiFC0jfGyAq/XHOZ/HfWGzXbPducKBOGbCPvbeSOlm6hZ4DZ+dyJMkedDBMUDgc6dY4YsDVrn88mwDWPobDl5iYUINFrxPM5KKrsu9U/Mm50egROSg+fLy0f4zt0/Mey6YPk/uU3wQH12SJ8tBtGWMh6Xu9XOWwFZDnboLo+5Wqenlczl5UUSsiaHJBfLHG8zs7GEMAAoAbgortmEHXQJm8lzVNPs2zZoh3O3RjyhLZInhw2gPuk/74bdhhux4HdHfARm304jmPgdrtVkOMDmwPbw4YHmzDsm+G4T/0wz/TilvQODC4vKLty4ptigEkD0obY8w0+uAFd2G3b8uw2YthQsl92eMOBOUuH4dnmFgkN2zfg4RG62QqynaLLtAkj9V7NAhdNS3QrjUTqGIpXabev6K++sa94usQZ9Vwq5+w5RZvPKB8uLcC9zUoTp/XXWxX1J+MjvN/fv/yCijU8ph/7TN5uG66KdPX6ku2dogOzXwTwLwP4L+K7AfgXAPw38cjfBvCvxed/Nb4j7v+L9lJqvw3hTCyP353/acAwX6v/IYbnxFwcR3/16urR+YWR
8t7m/FGYOEWGRvIa3C/DSDYcsGw5Q1D9JTx8cuHl9n/8eIZqeQdn3Jgaw8X+ps4FZOz9XVy8SkEvpU1Tav1wxiSIdog0NBsNiDj8OR50Os3n+WXBhND9fHFkYVhKZKch9eo+ecDr/zGOmRFL/p3GY5Md4yoKWnpuPK8alGqv0yDh9WkYuFFA40ofrd35vfaLt61oQwf+pCgHDYVk4q3/nzA1OV2EMJ9pmhmc0Tz5oF5tkKeXVrNhhYul97Uua7nyUZklWunhFyXHAtxwT8M+uK7cyBNSbmOEj+M7Z/ta7X8aRb1X+oAOF2GEz/KebduShvUe+2Twp+8GrWHtN2CW8GTAxjYZPCw6uDSSZ1/6UHKzif7ymWgYrljfksensa7ynSpvDLjZJs6zFSmtoiQrDrTFElfv6a87SrNRLoCciWIDG183Ga8bj+nr4zc9IqErSNvq2bM2ZiIhHKRN4MT5Ks1hoW9mf+7Sb8pLlEBb8Q7SCUwrHOs6mIbRmbjoySIYN1r2dLdmO9wtkDAY+WYU0CIzi6LAvu+1mYMhdauOFm6yMyDhH9WH6HMPplP9XHqZ32TtaPC+QXalXc2Jc1aVfOC1EQjxEH8Hg+JEY7VZsk0mZ1LPO1ZsTQdVU6pL16fqmwmGy64aSm8UdrsTwDdVdomqjTIVBjIm1UsXG+1PoXHlZAfSllXAW8HGnDnbUkGp3WdLm21za33Bvp5Htj9seHp6jVevX+P161d4enqa57tthnHc8O7dB3z22ed49+4dbrdb8afZLAzZLBOQavfm2qXJn9u2T/natpn0Cp1aEnJiI8E1ebjbcID6SZMv5G3SxIUfi0APDw94eHzA9rDDHnbgYcf2+IBtfwDl8mTUCV/QbxwjZ/t4c5XYhZLJb1d31W9afaL0RzY77bAMTMmuN87acCSu2EeuUBMMdxydzL0YzLWHX333fXzn+efbb+Ybvv38Z04+BzWBbTZ9wX3yBHnji66fdabtPwHw7wP4JL5/C8Afu/s9vv8ugO/G5+8C+J05Pr+b2U/i+T/8WToa2LDlLlGVoX3JVYp+xKFxYe/OHhq8qY9Y8cEFYq8sYeu7PS0wiwHh2L6kLSo2TsNz5G3NBttWwNHHuK71W0vx9BK3L//wUe6wo/lvx0uR/pWjZGV10tDp0+yrspgVLJdwaPaOkkV9QgO24sbB7En3ZqbP153kpLNZtG15kK0JLbrxRA/gvBwZRaRL9zSA/TlCbTnW7jEb2k/yfBn09bICIXA/cUuZkDU4IlyJ3y04eAgfOyKrxBmjVYY6DwWam0NQo5qftnVcjTkEa/JQBvpew+SkSBo2rtWSA2ybdBou6FTw0xWLObX87AJDp125PBtx58Ef+mykS9fflb6nEkiDtC6o0uckwRExDY6DjjHXZ3Q8n527akNho8FnW4nnxtee/87fhbu9jy+TFsbAoa5rUziVtXt3siaOC39AbBTDsQYuqsSr8NTwTZjj4YxrHO38rbl+luNzidXm2LdtC+csWw1c8pEz0hNfmfGZThwPdE7ZlFdLFJZZ0JDduSZS7UPpUA5cZ1cIrYRUmSAoHSl4Fp2cPOIDW6yvs4BnJnWYdYeUCUUQmImROt6kSsoXHGV/7ZdMiGTiiLSRcVxdqcdX3SN83nk7eChthKtyQ27AlaWRJiW7HvfLNmkZXOI1Hs+kUPACP1PHarJtXaagWDOgDslme43gJQA1NiScaYvVNnD4EB0mBxrT3miQl0eHQXhByOL5DmSr41SG8lzodgfMtpp5ChhnVX3paJf3Jror8VhsHTzjs/rg5jfc7wf2fcrzvu8wAB+en3G7Hbjf7zAA+8MOs1kGPnwGIWOMuXunV1ogkwmbwfYHlNFaeXmGXEM4X3k3yRY42Mcj/uz776QNd4syVfLYxVXJM2RPkyw8xshh274c7n7ZEqi/bdv02O9FnLy90e+TGQ3pW0bwS0viAcfJ7n3Zt0VH9dgAye9VLcZ6gwlDqdyyRiYoIQdVhW6Xo+9/+B6+fv86/uHTH+JHj3+MP//u+/jW8zfjUTU6c+3OTK7ucaafzky/fH1p0GZmfwXAD93975rZX/7yJn+2y8z+OoC/DgBvX300yWeGTVIklo7z9UXjlIczXkTf0ZQY4CKJ6GngBWbH6b4qkjaibO/lll66Y0vT2pcXY8ctmrorh5c8q8o86/kdbdwlxyJg6yAc0K0wqbSXbutzGjUtoTC0nR29C/VqrCubJspgQV06YzKmaKxgDedktPel7RgsX2nLEYwzI4KMhb2YwaQRY7tN4QZBtlgzMv2zznk6i6IobOO6uFrZIxR/qAxBaTJJXDmqrKz5Hul0uQDAXmxYa7KMMFuog3vb+kYPV2phltVp11kuy7Ivh2jzjjPUBuV07JloUCM15UWxc41Vk5lPYtQUOYLndBgF+i39gnqWDiJlow45Luy3d9QIiNOVjqbCxJZ5qK0MjU53c2x95nQvKv/ynQo2C1UV5IQjbux5Bl7l6HdDXUdIrPc0371y4ezMNsMxRpr1enNIGbLSiiicTown0iw3g6BDzNnnibr6fO2rFM8rHKpOKFuieiZ+2owZgryecjNJpoIPsAyvBfESjCufZAmntDGGZrIdCqjLKNTuqfYwfbvp3JDJhqii3WjOOmebD8DmDF/aII7p5JAtiSxlwP7Y3Fc69Af16WRxkyFbfymDjEWfh7LgrA8kYcyzo2BT/Dfb6uBrlTX+54BZlZymfcpt7XHuO38qXqZzqQGpoZIU2q/+5c2ipOog1SErVqdMV9m/tO3FGdvSL2mWXCB84WjoTXs2Ui49B8r1U/3twnNixq0fk0L7EBqFScW8RduUwlnjL16c37hODQBu0da2kX7Tfu/7JricCRofmDPHY+qlO3dytA1Pe82g7eYABmzbwbJRExwNgYU6sLCYWnQG9mPDm+fXE+bUsQ3jy1ibIMtzvDdnv4cbtod9eV/48vR2BBzeZ33XqwdOldyjHUmKxZmWamsJgfado0i+KtnOFRGGhOvqqsTUKP0auxC5O8oTYFu0mRE7MEHAf+LhzTd8y7+Brx+f4P7+wKvxWBo3/oxY+2osk/BZeaXp/C+6fpaZtn8ewL9iZv8SgNeYa9r+UwDfMLOHmG37RQA/iOd/AOB7AH7XzB4AfB1zQ5J2ufvfBPA3AeBbX/uWx2+CVH32/Fs9WGrKfX1mbY8lQYux1+as+uvwvmTUeRMN4QWzCtN12+VccRwuv8tGCEH3y9g0nAE9/mgo18l4m7h6ZZD5aKqKZmc8f0jDgo7DVEJ8rtnN1P4nHBWIMgMkhLDFWKnREr2d42GgNGjI+U4a7EXwCiKkhKoSMa6VCkSJMVWq6/a07cqsNM3hWckJ54AlYBwjnOGj1qu7yIs8q5bywunxOF5AwXSngRInQJInaT9tZJPJUa7lhBqor6goxVa2XYTKHSuZaTD7FfRr69XmInAadXatzoXOTFaXbdNz0HmZ9yKbaHO74vpeCn91atffzKpd/lv3KeMrfXT2tpBR+os7QlrqjTEsd2Vj0wWnzPoRmiD1usGQosLbb5S
DEiNdu5dyS2dEdNAUe+2nZEAdknJU4pfIYJPPaOwNXH+oVKsxtLOoDLJ9f7m4Na/vyJ16ufB+MHNfssVt2isw1V05E/QzArPXUqTJE+QVM9SuqlFKKc5G6gKrpvn7ySaJLUynqLHXKpQFJxuqbrrdMsFawq90JUzxbeKrynNzRBpB2JTBok8Ba8lPA5xyKlmxJp/kGziDOS9eyoPuL7Ag6wFzbBFMzmdkM4YYL2e/POjVggTQXgv+wwHdtq1t/z4vmaVCzaK50ISbkFUSaln7Y4nKaacKXOEja7Savwgdx5hlWYmnPjuRVoGyTdhAW1FjSmy4x5p0b3gug2Lrn5JlsyhZFR1plcwwGTO12mk5i5ROT3Y4wkkuOHRjljYOi319nHp2S53GdzNJsBkOp+5wxL7wgA/cMflzsy2n6vN8sGVDftJ6tj0xUcevlO4eDhzPN4z70RMliStd0SdwNssSHJAiGTPDo47YuPLvVNeSzJloDTm99pG1yie+z47lt+Sy/mbarRjnRfs6qi3GmvtAZILIz+8ZwE2P3CGzlepnUc4oE37i2TmUFHjADcMGhh14tw+8vb+ulow7h8uoEwXnmcWr60uDNnf/GwD+xhyk/WUA/567/5tm9l8D+Ncxd5D8twH89/HK/xDf/6e4/z/69Rz+z3ytyZIUcDJWEIVOAuJvOg7trZf7OJeFAKqs1nstwBP+Ez+0X84+xNgJz9a4bIGBRorjQLfjXkqaTW/1mLxb7V2OwZa7NGBWD1L/luHr75bhRs2W0CF3NKbUoLD0YeGmoy+9nHpZhtOVSynxmrUjkLba8A6Q/G14WbVY4mKhhzqeIO63/EVdE7qusuS+OWwm/678q4YdzYHrSjCVoTBvvWqy86Cl05uzoVROphBDZiStYkPeYMAetGo86uJwCAHS4PAZ3faeuF3kpmS6Ltt64LJZwQl4bKOt7oE6omXWLNpKvCqDOsp5UFh8cTrZ72LpmqG8MkCpxMLQhlZnJYI3YzvHukVJXMLcRENwQpuStmzVAz2wA0FJpBefVom0ox0SjDkjEEVDEwI67lcKYx18IDbXA02hKtozuFjZPOAw88zGDylvnGOhk0xn2MAylXJ8AwxuxhF49jFyh8U0rnxp3dRowff6eSbXSi/pGgY6WRmA5jPUM+KIKW1MKa2yLtzNQCe/Cx0YlIpsEFrnNnMB39x8IEZCHWhoB1MbIEksCXrlPnljjmdL3k91osSwktf5njin2TdHO5F9LjVOLyF5NUnjBtc1ddJasdjqUgZdZKf5RH0NIgNLCzqxkzWrXrMQDiSdJv7LhBmUSkLBHFfxRiUZyuallJavwONEintKDxT28lPrO3lG7H4HajnyzgQHQLUZMhuBKwPwCiT5eG3oNHWhSzMFQVJYzt5zeG7qowGYfjI4dttK1uPZa7d/8u3+kMptOtQ+133mWjdTeWLfe7Spx/lMCDa3XJ/Fe+SBcR+4HSOfnYd1z/JO27esJqgqFk/5UpyfJwY22DYiuN3RA21itX+u80DJZyNsgSZTSt4K28ExjqLfCa8rvoPrvHhQqUI2J7tUzFCz5SddLEPcXuCHMPiXV9nG0GOhPz5sz/jNN/8PfvzwE+zY8Cuf/zLejNf4+PhowWD/1y9guLr+cc5p+w8A/JqZ/YcA/hcAfyt+/1sA/isz+w0APwLw1/5RGs9MSu7SVQbH9Rmby9XdeUSxbPkLIa4BY+DyUqdC68Xr/uq8ePKP/p6MmUqHRl44CmTyviDzikzp6FrBeAk/QizC6FXssTJ5593sR2Bff1ND85K75SgnjgEkh579BbFa3xng0Emvue2VDuW8Ln1rZGX1fHdqlEhqTfg8lYiULulDGXGuirdUUZpSxdUXyF45sNXfhUugUNQH60qtyQNh4B/h6XNGgrN+XLRfqnB23QNIDRgMqPUzVpRRxzydF4hDRCNPLyffv8AXA2O3Foiss2w923jFzfN6eRMRJB4muSfNyQ+GMg8cXkdjzOrGDY0Lyd/aU+kG5IMVKIjQ5F/vOKtuKzkjCQeTfjS4TH7XTWcENDpgpEXKBGWqaQB+K3g94SiuOQdr2ma4SY5lvZrBGoIpXTW74sPz3J9EY7g62Tt3Y02wba61Iq0h74Y8O53C6COVIFnVRB3kltbKdzJO8cc0QeVbczdawoXzidqciXBU7FpylCjnjLwj1lRZtju4S1wESaR91+mN6ZAaSeWLMqZO4bLNPUiFBCTaCoe8bAp1taN0qMLDYXGW3Be+oC6QTRhAnNWxAelL+No6qLXzm2FuA19yZ4m33jGVFmf9HVeObtpfcU4pY0y20GYmZBKgXjm1yYfifBC8ocmy1NVqRxjY8YHkwlM/s71zX/U8GUjTXk0CTjq9S4gMhl9jZ0WT5NMGmamxbnvLT9RWywZkgkR4pJ4o/i4dOTcXe7B9GXPXuxVy68ZpDh/AQbnMjaBUf2riQpMHojetnq3uN/jhOO4Hfun9LwEWqV6nXCBm81SWi67dHxLZTd0sMu8O2AxwX1oX2jES8GGWmNq2F9+v/mf6Chq4vdhFrtfLNvyqxLbUiSc2aZcsX6YtMmlrMakN62qhxUR8wbXhZjf8/Te/jT9++BMAwLCB//ujf4C39zf45P4xDIbvv/9FWa5Rffzk4U/wo4cff1EHAP6UQZu7/x0Afyc+/yaAv3TxzHsA/8afpt0vvobQxlrd8uywPvbyozNDnIIv9GdPiqllI+TG0s4Xt1skT8ODZp7ZpDgAyEyJOjNlsDvcemUmNW2vnZlPfmj1xm2IXZE1pVWjkfeqUZMHu4I+fYhvHmOdgjbL0agHJYPXFI8OvgRT+1q3fCdAqfTtTLuZjZO2xGFCe470lOw9DW0q36RcgCsUNsDafIUXgU47vSyKL8dLTUT8FG11fUmbbWm09MQ5lA+MFD4HhsVbVv4bSCvDvs3F2c1R9mitlRZVgwZtHPF8KVbQICW9LN9LufXYkh+xfi0I29wSRzNCjjq4mzgdPsJYAbUOs4wu50ZrDHVY7ohtw53GzskXlRRICopsZHAN5PJR22S2JcY8YoyqlxJHpIHit5O6nrFtzkIJIbbN8rDt0jfVUAVjDnJV9yvoRF1lxgrea5llQFYz0rWToIl/vzW5TtMf5T1M7LRS5uiT8NvOw78HINyh4y0nlTqYC+6jvQHkdvlJuMAcHQOzbEM/g+dOidfgEbVuVvZNZ2EyX928h1La+ZMJvry6mPSrMZZuN22wXUoqbps+/+qB61P/VAsrsxFUznjM8Y8xd+vrvSHLvVro5tRfI/GSyUgzrMpy0mBLFOnsUfkDhPisP4UsUS7lot9QD8rVSx/r9wqe6121AT1cvJh5A2oNXQ1o4opbree9mGWe9bVnCxs8yaNKSk5Vt0U7wFwHWqyd41K7r3NrK/T5VPBA6upAX2oSqqEokzWbNsQlw56JwWQvL/URDV7Dyo7LFiY+BnDkM1OxeuDbfGDYlqdQTH4fqZsZtLkz8BbdC8z1YWNgHMeEIxiLZXEezDTGMa3HwwP2bUPjK9/KfwieOe533I+Bbx8/h8
ryMWFoMB/AMDzse54tt7nV7KXSO5BT/imv2HiLfK+0bNTF6Rf3DcCBcRxiP8tm0G5mgiqqBtSvLb6o/lttknd4chsRl/c9+BeLCbxWddGOfDD9Xi/R1ly/OX8/7MiATXXoZ/vn+Gz/HIDh0/0zwIBf+PBt/Nztz8T9d/iNt7+FD/Z8DaBc/zgzbf+/X+pcpvPJfyK9WZkBIlL+pQ4WDrv67Xy5/KXzocYc58xvy+yss2CWwj3HUhK9mLX8lhAIA/N3VZNNE8a9wUxLYy79fj3kNGQNdFPboCOqoEkVeBuQN+U1z0CCEPF86evlZJSyaG86q41NhIe4rlmyqzWLxrEByUOpuM6okT7VGMgdZgITX1dYlraDHuqQqJOdAd+SdUTUmgcBGl+o9tKxqOJbr4rzODCP9SNaNhWqsk1/eI2HiQezLO3yzbBHffjmU1nbFxw4ctwP+DFKIdNhVt5WJRAD9thVpmZqOHjH8CibMs6YlCNQjoXM4ooB4+YWOvNaDkrnRHdPYw1ZsjwNZeHrnFuy/r0lJLSLmu3JzORyKLp7nDGmmUa2yV0VUfRmyalT6BO/8/M4Jr25M6jitV8lQ30mst4pw+ryVgXHc3ZwOmUVflnDl8n343DYVruN9gRE/zjRZ3LAeuGrXWprQ/nQwZ6lppvc5sxbtWe6KUU8OfmLayTSOwFLtCz5YyJow4RT02rpSxMWVElX6caSAyFJwDAy2XAqLTGSNGa9mNgoI9vtjQQFFbys/AYJSkWnKPbYBxvbpDyLtDHSfJzgmd+2lrAlnGobeXYXtyDvZacX+jkWgHOmTx/ZFvp3nZFKNMY2/5+ipRZN9WeXJGEDTN3l+U5f99mv9GUkGUs+0XNCkXBrv/rF0vinrvPokwnIiIRyfRD1VREzWnIZqvByimkzUu2aAa/Xa5SvrfRqwpq2X5qjjGSSEqmTfOGh5B2liSMCtSNjpDEcN9zxwG3Yk+e8Zt85ttQJBGZgM2B/eAgWEfsWeKRsOGYNKveUAAAgAElEQVQyyA6dmEjMFC4A+HHHuD1j3/bcIKWOtXB4BH1jOA4beHjY09aVHVNbdiEP8Ttna/mOo9aqlf9dzwuUoRdHJE8XmxV/j7Rb3W8ePHoIwEy80Be4gDVtU9E9VE91ZiIvSJENHl4w0fQ1f/eVLRGLLK6Hv0hsg1qe++n+KQDg09efYR87Xvtr/O8f/Z8Y9kIp4HJ9pYK29WqOOEr405ejEV+E8Kqd5ReoA6bR/xfCYp3hqRjOa8ZoyGqxg65VagpXFOmESlglwZSSK5QhN0OrF+cQRqyF0HOVfOGm5CEr9Ua4ct2aWcO5Zyf1rkEUB38vFJVBUE3r6M8mXOxLnhc0NPeQgMp7cxxbwqrT8FOA5d0aRSStxHD4+gztm4lzEW06+VONOpvx9tlao5Uhm+2P7JrGkWsNOJejZtLi1w6qQx3WPoRYGyGGlBBTgTIoTadbgzmhm20b3rx5hddPj9j3PY14oTeYb3FYLMZ8v9/w+efvcBueGVWTfyuonoZ7klq0L1AbTnA9xhLQpIwK3emY4IJWWxi8LL9z0uGKJ2dg1PgmdZIE1mItOp8Hb0RZmeqz5PTF1vdEQ9FtBpD1btN0ZhHoiAyThkb4675zhptr5NIxRTpGlfmfu6kdBzP+3SlY9ctsh2PeUmcWihwSqcTZZqTxbLdE1PLso3TM2RAPS6vsRI49HWMvOCEJCiYzdDOJZBkRKJ43X+MGYC5HZLiigkINH0cGVC68RcXNM5XqaAHRWUV2QLLIKF8j5bVlz/NBK34mbOLEGvTgZpP21hlqZsiL0yhXOlHaxqnyB8/zKR26yW21n+WG6jKFTcrkiuCw7EIUYVv1f74If6wTq31OEmWz+oAKPniIMsvnpFxPtYlaQdpP6oMGhcoiu1da6itGGpGUlWDTPxr8w6x8BEkO5cyjsh76M9kZDFscbTDGxIW75xpsjoFuLOlzRrmdfiLVhg6aY8ggSZSg1QhzNroGlTqDNKjDtxmUGwad7QwMQgGKKsh1z+64H44dTOaxrXhY2Gun7nDAHqf/sds8LuDwgfvhcGfwJ2Pdqkiu1PrZ8XB3HEf4C5vhT/ZP8fXjaxMP3JWJaPSBcR8YpmctMpGnbesUcFNU7blJHr7fFu9evM9xIcrQDZXA6QWBnjRVvp9J19ShvLfatezR+vfUa+UX9HHVs77cmfIm4yMPmr7TcThBVkl07L7h7fEGn+/vFoBXugIHBn797d8TeT7LydX1lQnalIgAF5EjlTWg4+5O0LqDzs/Y44t37Avv1s2+zmZ5Px1eyTiBzCUlF8HY1aGMTYIMloCxITfts2ZHTnBah+1qnGS+pkBUkVovmcpAeojjASw0uWbBdBApqD2Fg8qwdxMIflMHOMcm5ipLp9qc1syc+rzf14N5IUiDFNFfaSji93xn0GiX9WVWNkmrCvBFrvL+jOvzduLvBnNgqgLz6vvc3VS8FDXiIQOQeGJbG2Bgb8C+73j79jXevH6Nh70sF7ce1kx850/HMRwfPjzj3fv3uN+OFzhHFfKESP1vrtW5wk1vxeXw4o7paq8CaQNk9oxYXg0d4au1KPwOsFyGKNuwZ2m/lwMHzDU6pQD6qIm3IJKu0Uy44+/8zPELdFeLu+mU8Z9goTKP84NlWyY7ndV7zay7y7bYdaO2Bled7AmDboFucFlzEjNAHmVe4vRSG+jINMZom+U058gbb1OPmmyak7PF7m02LfG67ACsnTuVkIhK/pbP5j/kaMiX+Ctyy7FIgCxNyCyb6BY1Mtp4I26VJHXd4LluTC87HbVROlodLm5U0pzoOpit9dPWPOZOn/P7nHERrXCRvJuvyS4xK8zxe7Y5sxLt95JdHVy0J7uGzvEF/0QQ1ARA+Sy3NC2YWwmiMGHZL09ZSzk6jbfL6pYKoB5qozDDLEO7ytpbJmJ0DQDtWyskyY+x2UXwpc7il80+X375TXXV2RpemiztI/6xgF9FrJ0X5o4jdovcIgkHmxt8wOesFGfCfdNFCvO8Lo7QfWAcPhOTC7+RDPu24eFhw+PD9C+2fYftM1jabPL0h/fPeL7d++Hjy99slGNWZJhN/tt32Ab8/be/hX/2p38RucdD6K9ZcTWDo+N+ALtUDCwqKWlvqqMXesS9CqrWREglOMsWhYxHGf845sz/psef6KDJymK/aFFpnbrOX6uXLtBXRgypmOgfiQin+Hr1K0MrW3kCul+isfDkj/jeh1/A33v7m1cgJlyeY7NZ1gog8XuR5NDrKxO08UpHaAHcQrNwyMMdl/t/Xrz7cjBXDo6u2SgYFCZ1OgqeWkza30lmlGzv/LlZagWl3MdwXEpRqoN4vlyYK9uI4XBXrWmwW3fSlrquAeGSFZr90DHw9CeACye/jc7ab9lj4lYypxdjW6+5W5Jk6dZ+WuZjVgjpbIMH/AmgFU+1d8UutnU1zeHxbE/lO0ejuBdMOArjvjgqORQnny1KCwxHjUzYHNa8vPN9DlfOxNHZO2/PlUkxUYD2sOGjj97gzetXc2bKfa4/8PMmHypy7sDtuOOzT
9/h3YdbO0vqcqGzI+WGZ2y1UjR6GagxpqEFnwfWZLvuJNaJad3RTgcJDRueztWoyjOzNFhbOyJCe5Z1D+7Zfjn6MpbgNZ34rRdE93DECfaqt0yeZ7nofKAcxOq/Eg3npFHDcTybPM/3MWmSRl7OOCpoBCMZiCwJuOBraqIay6Ihwomo+2I6r+QhnjGYJJlL5nU8XZk3RNWGHqlcrT2XTXI2jg1a4ClmM9chlcTJv5F1Nqu2vMGDdq+UDJlCSnkMAjf77YbuvGkAS7TCwSCfKcoFlrY5ifuS7Ra9yjZNpUtgT7ojg4w2E1TaA/2y5W+tb80ZJh5iy7aBRo8sRQXSN8iYPIM/k25EMJfRalKoEjEhPzq+xYfo6KASG320ghNF/Zx53xqJe0JD2g8dPvuyhGkkH/SZ/Wl5rjZ6K3ii4TaeTBIsJd7VSPSXQXokocM+9568+ETlYMVe+FBjE7Hgs2Y1Gt8rwDXDphuWhDyPODyes7fJZTZxxLPZDMDTtmEbjtvtGdh32JiHctc5cPH21mtnFA/rT0wED59r1HIgi0Q4yN9R2gqPEsVtlu4Pj4QH2rv9uyV+NeE2P2/X9GvX4jtwcxI5/oPjok+1+ttJ8TY7WDqIQbiOIUxSbwAy8zsqmb/65HzFob2VPNjy0BkD5Uc82w0/ePp9cMfbNifuXN4zB2spm1+G0359NYK2xOuilkWpcmyGl3eBZGM90xq/Ujmd8KN9dOekGExK1xTmdkBxdz1wmdGs/l4i1Opo16Hg8x3XMeSYSkla8pqF2fAaixe/ZgafgWi018Rk2U53GWH2p6Zen1l/t2JZgd/SMb8yWxo0ZPuuRmq+tYUyrpkuS6cmhZ1wi/6vrKWlgWswikOzJGE7nNGgGjEAESSJ0yMotY2wXRgzq7Hm97TCaDdJu/Sm2otdGSn47QkPs1znPdb/cDzsOz756C1ev3qsdVT5b80k6TUzno7n5xs+/exz3J5vAGpjg3w/ZAkI/rWNGIsZFw0Qei0TUQLBzXSaNdvsGVhQPjiPlkYAcqiuIY235T/TIB6HZfnUxJeV7vACY9U3xKWo8OQ3Psezv3RdGYNVh+42J+U46bB4wq7XmqhxQIzopZ9Qch0MuxRjFRwwkf849BhIXshqA+onm6VViC2qYZMXuGZNEyNnl2IpL26w672QXzKYUfNQT8RYpMQvN8kgklIXAHPWwpNfGia7lZdn5r9bHsarMy46MoNtXg4H+x5SajgULpfNEMhYDUmt7VJC9bloqRtneH9f7Z5jWV/EwRKu0OFjNF6rdgP3GnRuSpEF/qQZTvfMEDOCJVR9LRM18JmxnTKpwWQqj8CMKe/ULBht3zQXrkPrcLMjFLZzzGpzoGbH9bX+kFyVxhBet6mLLODiRi6rTHtCI0mtuLeh428mwzmTWXQYdDil7c2AY9TAMllDmEVHMSGlgeDsRmyfTHNXwo2YJ02Vb0rf5Vo36+9N2d3meV9hW+qROcvmUcpflSfU3ZRNSxtTpfZBjeCrwbPZjnvMhgLALfTt7EvP3J0sce3IKo61HNjGgB8D/9T7v5g4T76UoY+wm8MduA/YHpMTFjzQ/OPCZV/m0/UUWy7no9O5X5Y0N7OYZSveTRR4vWsLeZvdsjnS8tsG4DWDmCCKjKWtT1Vggtsq/V99y5JetSf626JXasRwzI1IPt0/wwm/0S9hsVXPteYvjLJcX42gDWel1eRvuW9itDJji9rudml5+X5hEKDEpb3Rw5kbJC1IKnipWPhMtZtgpEPg/TfVQWd70yGvf7qwiNKp562B3szuhVFbn5l8xcy6gL8I1iIv59/yWW+sXKsRLPwZdVC0sYnX3HDFvd3iO2mcBDElnBDcqROmCKfRDkcEuA7ULgwsFVTijE68Z+is3RerRPe1YBhlixo/liJqXdMgLuOu6KRw3ktGC1/F91UeRojNDI8Pj/ja197i6fGpcJbN96x4OcIzq/f84RmfvvuA59sNJhA02yo8neuewLIWkoNBlp6lJOMkLI6s/QfohMz2tk2n3jr/9/WF874BVcooUDtE73gZboDJAwgeSlanGBWNnEeZHAYmVrqMNhBPMst+PPblbvIqtE3HacV9Bvqtl/ifUcPklLkzJs/SQvAjmRI59iIGWoDNx8qpmxHqoVvwS+mVmePIZVaOWbjkAZmnvkPrIeQ2+TmQTh7y2OFznQUrgFELeis4Vs+onGfpX3gyZ1GEB5KzrMoTy5ld9GvAobNnSSIqDCx6j7qHwTKzyrreLzwYb2NeUEDsSsLBc3yW7Vis30tiMXha1M+c7Q6Zz/0caqZx3pcgItd4Bo6d27RU2zo/wTWAFbhRb7vI49ZpxXuZdBD9wfPnZAMmC56mfvaFHrpxkV6arMlSaB/iJ1BmiyaaLCsb4tJe10+l0WMDqA4B4F4zae5wDGDbaybNi2RkrVmuXIA4tkiy9YANmOusps8yZ1H8qAfIOxM9JjPPTF4SQZyl7+NMGT6VeheNy0guinMx1uadR7qdhKxLr7fNbG5dD+AYA8dxx2aPeNDZ45TH7v8dqfNdjy4NWaj3NxgSZRo8iNPBdMm2b4A/AAa8ev/UxiqLBWRMxR0e5UZ7BFA+aoY1R0Db5LLMQfU5n2lwSgUJysbV/cTmHIWPqNDaTnReXXdtpz4T16WQJw9fr18t2Ejf6szDpqSdzDbz7Q5Q8BFEXntN8YTnf/vk16WZUHJO6yT+z1WV4MkWX19fmaCtX3ZigJeuLBG4wEFr0VaiXD1D5qWyuW5n9XWaM2zyaygVKtmN502pQleFL8qY99lW3VHjw1tq1MQ1W4QuM1/S3fJIe1av8FmwvYBChYul/Tqz1kQgANlO/QYtdfc7vswnFkdIFW03pgV3v+9pgCoe70rfWPPuQHp7thoX0qg7cFXuwrEXsyi+/UJpUr8lWjIjXLMWmo06DTAckqSxFZ74zoryMtYdm3RQtscHfPT2LV49Pi5O8g6zIXJaMI4xcDsOvH//Hp+/f8a4H9058BrCJsaUuO2zYCKLjtp4pLi8oYHf+kyXykDx1iQN2+l44gL5CYf0lXQnzNV8JhRc+eOE7cIxM69JO6tYO52A4i9Nfpx1Y/Co8EqqhuVAZs1jNIiSFlaJhCDAZnPm0p31RmHgrTb44AxqtTqy3wyIc7OTyvF7GMCqrovZZwDMcvvGwG6UfhHFn/JKXlH5jxfaFtMcw6iAo+lwF0fVe5VB6RniXGaoB2EnIQMjhmn0RcxYyjY/S+mUEqW815XgQceayeT7aoOSw5WH2UHAUuOO7LVXwGArTMGPaH3UAxy3x86trawxO3JMx212PriHKPlSaEIVnet1jDIR1S9xDgYTGOVrCneJPJZcdHwaEPpW+GnVMkLSdQ0t3/c4SoPHfpACJvpkjrnwobrvJRfl9HNkHoZjbhaSCPSmONMttSlL1qLtpV2fPMDlBNwqPrfGly4KIYWFfbOcO6IGkvQq8qw1AGt+tmiILm/UpxsXomjryN9sK12Z8q96B5BKFUu+Lx9gscMyRoPh8AG/32B4yA3CAPW2Fh3C+yb3l4sl
lWnkEv7ObxYw2+MD4HPHTWOibpsDWY+pS71Hnj0Gxr7Vbt7dFE0SBI4cReNNxlD2qBIMTDx90dUDNK9y09MzCf51OwpsfK5nLwYk45i/Eq9iILA1Hl0pVa9vi6w4eJg4mNSA4Ru3r+NHDz+S9gn1bLcOCPqC6yUExPWVDNquSq2KSc73gNU5Ov9ujVDtiXguHKZ06rw3cIIRLQBag7VyZOv30nHW70sgkEpIU3UoOtKR0r7PsNWdpvz6L/2d+KeNCUBzWNEKfZqdMVnPs+XNwl0zlM3gCTxmkc2odpWBWyaffS1j8OWRl9o6K4AyBKdrocX8WBk7ne1degSNVdL74llDOX1dEa3P9XEUWFPTHrGjHmDz3BcdZ9K/y4GoN1Fqs/Gnxx0fvX2D16+e9C4AYLORu8J5ZJANc93U8+2Oz9+9w/PzXc7bCegdcqZWlOrBAPNWCqKy02YRGJCsBycTbN1MxNSYrlit/GRhoFyZVn8vd0/Oi9X3WoOK0+XJmC4yzDINpY1AaFo94NmOJSqrzEKdKV1bUv65OksFA+V2Suvsq8oBZf1NwF94odM06TccOJx6FmhEQOG/5M4T7jnTSH6OtXfGXU05YCFSeEe54FyFwvX54BvBXRJQcSQ63zk2mRk4eznSxWqnxKFL/zBgTv71eo5DyMy2y496uXSausfLPlB3rjupNJwswX7yRkhvBD0OyOwQh+NkSMG36AsAbc1PrIczZ3Ix+kdxThqdgJGBjualEyUSnJvINaxm5vq4i2bG8nvraM03FnvKdW8AwuCN8h+ir+JqKxomS5VD6gHzfKh0ytW6Sy1b8wy8ZuNzMxCxWl4zDldl6YLZwoghZ5WiiYIh26vRZdAptrwWUBhJXE2kjNcYFLJavhDjl92Mq6zTq0H9k2WYJa81YOKLvHm27mzKtC2za67pXcM2w4659u24H/B97zuMIvSgyOfVzOTVxUSUfCv6BIydYI4fvP49fPfz76Re0QmJk9ogTodjYNrsuUHX9WY1rgBAj2SJdniod1oMO2PbOh4h980MG8akfcIutjBHXvrghDNBx/nul1/etp7hWJR2HENPZWsLlOd5FZZ+9fPv47ffbPiDxz8sg5NKddlDc7GtTf9/wfWVDNquAjb+7dOzfP78/pcvmMxW5L1yOrTJ5XzMRWesWL7Kgov98xKuUvxltI2f81FRhdbbm7zDVti7ONfe+8yVeRL4Xqk3jmgs48gspjyoTF3GPt8on8gre35Sutqx0gP1POFcUSuqPGCMWaJ2eG5/KmFI5bIENG1XM8epUxmDpREraFqgrsPyWK+0CK/YUXD2ps4Nq3Gln3OFAK9+N9borQ95uuXlwBgdDmbQ5rtPDw/4+KM3ePX0iH3bar2EjJaLsIbPnSF9HLjd73j//oYPz7cTA5WPkOaz3UveF2PvEOWZZZOFXz1oOR3hkDWdy1k5XB0KS7nfTnJhYeQ4gzl1i8NdHTOrXtyEz6mzqucsWUlHqM9OaSwBqDMzf9isj4a+XcHGgBYNT3Sq6AzqTplFl9I2G6zR6+HhoQfP48DhdexEvp8DUH1Bx2T+OOnD2Tx2UcFjlcVK4L5NGhFXFfBSEQrf5D3dxp44FASnwip93GQj8CE+EZqGFcJqrJiJRUPwZ2UptNwc4E6IRN6iR0rzA5g7t+77PAfpfj+kpLccKJjFTBWRKShInqvNSebvffbp7MjoIAWdSTybM6c25XGOnX1w5n++O/dHoKx4yYipJcuUATSYpG2dZK1g0JIqskmG2krouX1CzBHrf2Jmz4cBIPydFh0XkYgDeY70WvVu/Gpip5PPWo3NBMerMrNbdRmL8FbJFntSmpFWmmyz4ocYIs969KS6S9GYmuQXbDYUrRMfIxI5J7vIkZOWyRCLjObRM17AEpAy9CVHhm5TksTn3ahU71fx9jK2Kx43g8WunOO4A9s+SxblmaqOUHvGf7Cc6wh5p3B0iWerpwccP3z8I3wX3wmURGAzmDiNVr1WoDksN4IYAZ9tewuYrnitg+LLjSkrCFnXnFOOpPmx9T2PVFnltSUtIK11/rjyr1/iz9Z8B0bul5Fmv/NcyPWIA23UFj6Zfx5sxy+9/x6+/fxt/Obr38ZnXN9GPmjOQPgNJj6DyPNL11cjaDPAbYupexE+cIxnRcgXi7hipQBxYF5SNp0ZrgJAkw9pzhrHaOlH/Z6LxYsMwgz17zRlVnSqfy5Gmi2d1L3mO8ImngLfdZwqZGtnto5HGVSfubi60yuG0mQWLZSx5SN9zqOPjbCoF3LuL2H18n3UMZbBgPNzNB5NUEgDZnxFxpWe8iZ0E4rK4i39W/7csp2FJnHGU/PVb8nGasM8X05Dvu7wdMr4eeHadNx8ZzO8ev2Ej9++wdPjY4K5Wf8LzOzvMQaO4RjjwO12w7sPN9zv92agNMvt8X3aeBP0GJEmPB/f3es8wtR70n44DEppBkd1mRgRj7HQMahqcy1/VgPYdX3AJXhe2568s2r1iYcsCW3OcmUbWZrEyQ2dyXWhWRkigUv6Ut+zG9JSdgzU+ayW2ZT+26ajclo1QxyPDPbXiZ7EJ0U/+jPIuWnxPDeiyplYlklvNf4KoJVIdpZ1QywnYOndVmKYTFKwpW9IRgu9xZ7nIcDL5h0iC8h2ehCLOBQ69a2jHVpfM5psU7LtPHQsSr/GOOB+5FEMTIj5UKPPdifsusaR8MlD+Tu/c3MkyJ+merNMVslc9KB8l7BGeOKhO7bitZLjXiqFkEknEWM8WVPY1JboTeqTIkbpZQZ+ou/mbKrLcOdmRCx/Y1B4ulqEDuRZf83uoJJInDEwxA5+c2lAaazZXtuBN/nbOHqxhRcgxSBSr56uTnfai+zTyrppDxY2ZerhftRCT2aE8lh5DPqMChrfEh2aj3Zln7mZhElkPCBW/eYXLbrAUGWr1HdkZtHHXjjVJRAWJTFtVoiNiV2qVNxWM7Cya29DHdYvcmk32wbco3olN7xU/E0aMA+U2tpjm61Ysnl3YPMD27an3ahqIMG9Ik7xTxn0DTPwOqZ8b/tpXCJu0hz7Y6MaHPkyrkQs1G++4vDeI9pz/sL9Pl4XcR8v0+RycKUsH/0Rj8cj/unP/gIOO/C/fvzrcBs49CxMdSgoM968wBevr0bQBiy11mdsqVNCZb86b7zfHZirq7PVmnHQzECxr9VvS134qhjcBzhLsoZYTaEYyklKopXS13HR12pJEWPWvoyXBk1NOTizz9G1rRkLGta+lgOGOHhW4NPxpg6mgSoB4/hfpETqbpNWv+BhgXMOX6evLZ+yFwwbs16JX1cYgJb9PBnmej8NKqGWxfOE65S9Shusrs4FjGJQ+8t2/q0wAhiwI2ssm/CXQ6oK+GwsbTO8fvWETz7+GA/7dKT6uhTH5oBb7JY17rgfB+4H8Px8x+12w3FMp/IsxXHQM+GS4TDrRjnTkrYyHisi41n14PhY7koWv0ltPwBo4FLruAotzDwndl2fUAe5BKBtKJFnRE9jlGsghLU6Beom254GWLBoce8UnJavUBUGFYR6ISTfaDNYfF6ct7k
uKxatI7YAyRktgjspNM8/nOsCRhwerXZpfqw1S5MesVPnSTHMPsYRhrnIlHJGod12LXGxPLeSpWkOj3VkFttrx5uykwQD4tyGP/ZeOQf8MfO9GRSdlYizpI/SEoKyhIBKM++pN2RC0HL0VVfHuvbCCwPdJEvpoJVJ1hJM6ni2MEe3NRDXS2e2q+25tqOSMEQmy8S8cJg1dSntHX5pljhJWmyo+nxSOTYzcXnRUkY588fH0/hlb5oMMz3rdFHB3v+pX2VDtErYXMjnRF7xZpoXLUOW+anUgZGJl9/TWq6gxO+n41fKOiccqTPAI3Qsfqc+EzQ1W4OCkej04gvVWaoHanyCoxOchaf8TJ5lEnBTjKqklkZO/pZGLbEQ4xR/xkh7BYc8KnAbMAOnTLoRxECCwjxaSN6atIV0Je/998Y70c++P2A/gk8M0BlnRaKh478057QhM0F2xAHc0r8oF6FkbxckR9kVPw4cY85sblucdrcOQGASLio+94Hc/An1txoJu8BvTMScNJWffks+aUG9+CkGVJg7hHn4vxij9VJ/Uz7vtmP3DX/pT/4Z/OHjj/EHr/4hfvrw2VzD24JtysTV/Pv5+moEbS9CaZ3x2oGKtvzt733x1U3ES7NSyiovPXPdPA871FZEaIFSbmFMKkg6m68MFuK7bE6ef92pyD0ZMxlaapfLflX7OrY2xQ4pMfPCVZMTAdmBtqHOhGluL15ICGU5/KS0f5bryqivQdrJ4VchXgxdGvn47yyA8R6z4xds4PlsN94Z4CU0S+ZHdQHPHaOdyPeRvoEFLdvZOvJ3grHiAumnpN8yCQtm+Kfh3vD6zSt88tEbPMRBodT8yZp+YMAxBteAzLVz92Pgfr/jfj/SMYH050AtxC/r0aHMMapiFufh9F7HbSJPPArnOYrCZjorLRgqgImj5fDW9VqdJhq8dBhtQ1/ZYuoDLI3VOM1YQrtYPEdvS5U7cSw6wuXZ6z4Jkgao08F1ZpLpILpnIsFc2y/6kh+qz/nO3ANkyyqlFtgoc2Lq9zZU1DvzkQgOAEl2IZMKa4KBOLfeWOHvbNur9+AlTeipzoQ6+BzCFyzId8WbL90aZ31Lj1eQJ3aIQY93ZyM/R2AJWCbfgCjnhaojfvD+bCqlZKjG57rxS43LJVCP8Vm1DUB4+aynihrxTc5fdMzDi5kEgmGeXk8alxGIsY0EPXWyXTh5khRrNsjQdnVrAbA+I3o5+Z23l6M/uC5TSy5Zwo/EAHlOZ+8l6QZE4FZ2RYP1NstGuCV5Y7DcsvG43gMAACAASURBVB+hY2u9GtIWp+VXAeQ9A7BotCSBYVnsDiV13RA9VcNclGjaYR2dCLHqDyrMpL/n/ZZQkbYpy6a/0fbk39nfBoPHLPd0DRx+BA9tWyu/R5E3rrkoNmVZd1x94Wq+7Yo/ZYZ9j0BTAlEtF95qp+1M9mg/mFUcI9Zrbts25SyaL7/VlpiZGmW9DLbvGOPIaoSZzFNZKvw0/gobNPX6zEgNGIsLqHSgEqPjYVzQEWSn5yav8wh2PnPWIwWXXQ30hcsppNKrIB8DP3f7On7u9g384NUf4Ldf/W6AKXrYC66Xrci8vhpBW14nc42u6H/2li4Z5LKPc1+rw1RtfAEA663eRJwHYvlo2g4T9bw6atBsQyiNpZ+0H+7JO+yDiotK4AoVV3E9TYCZL76JZDZMS+Xk3dwm+2zcayQG3co3lTQNoj6vQRBotyl04UyV19I5ZkHntNV+/r0ZPZPPHr/IGpAMyPinxlmlQwsc8i2D2mgj6WU6ZsGfxSZQY2CYAVnupdlXCC21ULZDqDaB391mwPb2zWt89PYNHh9yuqH4KoDaYrfDDfP4KMOO4+447scM2JrM9DKUqX+v5UfJoc9RoabuhsqL0qGUX+ZeuY2zWRywi1KMQhxTIkzAE6rutOnamkLiZmXQNs5Ip7BJRjscxdxcQ2S6Wpz0zXJIKnVhEl9/zq68gkXb2vEnkr+fOCIKJMiZcG/Zf83aMSsYFQaio2qWLlsWSp51Xd2yxPsM7Aa22J3LNuvbIedAt3ZmJWdwcm1e2sj5wXRXRm9dR5a5zy5OQZsPXLFpznY5HVxplANt2fqaoc2zBuXMvXIm4x9DOfwD4MYGdLCkVWhSjjJaenny4JaOdukMOrU6t0bvaIuz2IhjJ0KTeFbDc7UtE5CJ01lhklUG6/luYsvm/ZKBVc+Tb+gEFr9brgPk96RF4wPRD0xCeK0t5DgnnwhMwnoam01cda2eOoU5yVxLWrZYrR7xSB1F6fTsV21P6SzPd0k7mW3jO/G+IY4qov610orlH1qTCcBhQwNGuei1Q8YjOskHn1kQhws6YDrzLbEoqCFvFWCebbXzOV1eLASJTlEc1mxcwp/qMAKprIyYz9VkXvE3MT6YYBsO53mDqYAUb54Ds1hjrnTUwSdVpZKghhnwG0tUHTe74Xdf/T5+4d23F8KkONdOvi/Y3LQZoWtGyIht26ym8SnPRM7L/nThyWyfm4yMEbukT9lKSi3vrm2aWXTnyIThyXvp8tfrTjouLiAEvE5kPW1s1xpZ7wU+bEdjUt0O3oQXGhyl5/7s+2/j9fEkCDAcOPAbb//B5RCvrq9M0HZam/QlgF+0oG+fEzjoTLPeJ44vRBylJLWHpXSSSkAeYjXHFR+pA9jrd010lSjui4BNp7LbQBNGGYHJr6egAuUoJiSlMNvM0dKN8plhhftcn5sZQislucI6bRjLNDVbVso41V1o6BZ8XhiQNNaiBEqQyylZx6VttalrQWD9Vl5DJmoSdV7GJHEdpYCxBe7wo8GkZZzsLndNzDFYbctbj59KZEZYK9nVGgDw9LDj7cdv8OrpFR72PZV5VpmbSXBugA8cY/Y8jjs+f/+M2/Nz23Et5UgRAGuBacJHK6qbjAi7pYtC+YqX+tqFeK4xdKeTBjmBETgPe5VtNxR2zVQicJHw5wHTSRkZj8ANjtkBp8NpXR7BeV5DzqSQfwRPbaZH2nB4OjX7BoxFUais94yntydYtphynBuNeAZ3ura/cESZnkC39Yyq24QAbMvbLwbE9vnnxfFeKHb2Fbvq5SYYlBnS1cT5QX0mXKGjKkBFOrpj8MBtZcaSOhXuxucZ2QRdHeHE1+89ccRA39twE74o9ZUIjUpkyvlmgG3YN2B/esSbV6/w+PSEbdvnO/sD/LgDkQV//vCMDx/e43aLcqaW/VdnJThyhQkApCwqaSOsmX5Lk+1lrEpHsYPuDmzzcHsGyYDFDCJlb+7kl1duf77Bx3Hh/HCWo1tw2lrlrRxOC1hR9FLTEnppknW2l2stHVMnt3JOYnUBTz4XhGUvqmtTtoNYEQW9rTtmWXOpG75T/oTTflzAA8ud4xvOeQZf4kh9BdKfCSwTq+tLiWj4GZVA6QFp01WiZxyebFggkEBTSVnSQQIAR85Gr3RnO2JlQg92febu8HFgyAY81QDaszFCGNuibsjhcDY0fl5ma0gtiAoZNvCDt7+PzTd8+/nbqm4E8rjamZQCYuBzbBtsAAPHDNhskzXEW6iczhu9NV6laK
duH3I8T7elVxVWOWcoCYBti2UYV8J3HhGK30ntZvmCz6iOkknRiKbjMtQ9tusHwGUoq27QdvKjrNeLd755+2ad5hQMvH1m+L/e/CbapgwvXF+ZoO3la1Vx/sLn64G2RcUn4iCNiLovcjd/9aWPdNZMabbOIC1QhhYuBwMVLJkIF20JpN59GfX1OOZTunZp0TnyRfKtwZxOhebV3nQM5HXZLlc+LlcxXiqIBLQ9VXqztPr1GE3eGef5QdNPwhZiolAWVuRNBY9/05hBnKT6qPdchJfOHHdanM2llpBAzlOAHR7ZMR7Q61LlV7M71UOZaFV+M7taCOWMpw7PULRyAI+PD/jkk7d43Le5hi1kYdgGGwObGlNMB9ndcRwH7seB5w/PePfhVjMgHcVQQhAWNUKJRyVY41WOmQZcYaHHLww1IDtMFiHHpTycCe68l8qaRN9zDCRfD+aK3ixt5KxUPSezcFd6CJadp+ESuU/VkHwl8ArGj6UER2cxNFA0010NC9550pJXQJv4jGIgOXeoVKqjdiQMvmsHUamiKFw5IGtJSg9ZGLgRQeSmGUwi34BtL91OHZKzjN7HrviIEYneMWw7JIhHBo4MSt0Lfo5LZ2k53r7AHovjULqplbdRfyz2Id8p6OPvBphj3ze8evWIV28/xts3b7DvO9w2bD4dFo/Dge/bPkubx8DDwwOeHne8e/8Bt9uB+/2O4xgYRyUTuh+zKOxATtm8ZbdIUBFZvZBDqaCaPDfP/V0PbkHxUQveLQ7ADjlSZ5q4ZvnkiISD1+O5s+uGTDAQrvQ5C7x5i0ensDvayLQvcVBxzlB6BS5qLQIQM559JVzBL77AoWTIz21+DesVEiZCscxVjOmcrxsZanrV2j9q26L6Jm7lbKXQT+0LW6y9uTzZwSPQzqCSSTjqKW7S5H3ExZxqaCg/bQQhS8h7yU+swgi5znv0t8RrYD8efW/5PY4m8epjMX6Fh/oH1Jydc4UKaXu0ukcfC/uwA7/z8f+Lj376Bl+7fQK6XIQz/aqgXZYb6kWbhblWHT5PTNw81qXFb2VjKIuqIES3MXEcSoSyO/FePEId65oUCzrPHM4GnsO4bcDQ2Sziu8Gks4qWNqrzNGECFgUX7aqNaiRZrpxWL3xc6Urob8JPS/tznbrjW+Ob+BX7Zfzg6fcv2ujXVyZoOzv+K2KVcxfNClGqyzXG2sZVf9rOitR105H+jvsFXQV2izYbFKn1vPMeqBwLltVopXKHOrGzAWauSukuY6IxP0HJvusf9+ks5XEHXs4Fec/k3kXyJInCtgr22bvBS9GscMgQzPqNnLERAc6s7TJ2wqCzHoL+eiw7F/xkdpsP8F2xTN5MSr3HodqqOiY+ho/g4C1nXzxTmhD25gBnt9OlXltEo/lqLHWQE/4NT08PePPmNfZ9h+17ZRiD5ogMYu6YFwO+RcD27v0znj885wGhlRUuOrdingvZPGHuQpfq00m4in7LfqsiPhk6RWg5CUDxdKO5IItsW7/50mYYjYTbU1YqmLALWSYMgTWDyBedkL4uQYNW4vVllIX+yTUBLI9ywPYwjMnRZfq8+DagDBkeuUHLJkGl5wHabMGbASYOmxlNhcGbE8npyMU62BmAjT4ki/IyygrhSH3EDGvxuuK0yi8Fx3q232LUBzf+CJhs60GgoPryaut542HOUpxcH/5jVUZWpVPZIrbd8OrpCa/evp0za49P4A6f9/szxnHHcRwYBzCOI4JYx/awT14wx7bt2HbHNraZjCF0bWwiC0FD3TjLnKf0lf3VhNWKHHWxJppljsgaJspOUf6Cp+YMi8xWZ5RpiauyjdQZXnyAAR9MENBuki8LwqY6lOZ0/AIeJhSq4qOQ0V0MKRW1jkfyBKCz5DmSZgdYOsw1S3xW56f4du1i3eEw6iOWnuL6Ur2OCBQAW0rVL94S32A+J3yE2ej5p4I885cctxuyTDsDktIonjSmToIkY0PHLokbdjDRX7v78OzHZh+Mlq2cAQM3akL9o/Y6+p4JrFGzgvQFm4h44zpdoiEoFZxEFxvwk8dP8cnzWwweQUP9aNU2A5/GIeK0uU/w9sDlTM4ObJvNsmkLP1ppFIA12wgtkBdb5Q6Am1TFjpPLGEse2dosuTZsMis1Q7meFKtxTq6YFRKtbkxwse87DI7jKAfBG0HEcbgKKDhA9+Wdmrnu+s9kgGe9sKUPAHzn9vP4zu3nAQB/+9xzXl+ZoK1fisx51blCdvnGvLrir9/shd/1b4hlKpSX3aHLPqVrvt/LlBQOlBMRn81qg5EyJr3hlVVz3ZBd8BcdJ7F6OmORAnx6bzIgcZEgWJUqrOWKZyhLMaTehUdGtcqt1uuLKAtQ1I0ygoYeGfg0NOFojFp8WgqiFEYpMs9flPwvcoEr3xQS6PNVa/qKGCt5wN1i9tDToWxKjbgvmw8Gw6TtLGuYswxOBWsr18wyo8fXr/HRmyc8xdlbyD7CLInBG9LfGAPHMfD8fMPt+VaB8qDjznVEZRRnOwS0uRSkVsEo7NhdENKsnksZjb9tFy0v2ah3+VedHVWw1wq6lR65l4zmTGaZWranMzwusOoYqowonA8vvGkAVzmjTkeWF12AHNAQmZwxM3EORpbLXgw4HA2XwEFMslMOa2wvy23dNExH82zsy3ng48TF3NVuK/2WLHSWozUbn3pnSTqUbij97CMmaAJnHntjV/Zfi9yDhswqJ91MYBLZy9dCM7qjByiqOcnflv3P9ifGt23D/rDj8fUTPnrzGg8PTxjD8elPP50yeXvG/X6Ekzhwu91m+eg+M+dz90vOps3ZUwbkjkokUf/XullPVBftFv2ymlPqT9LGqI8Z4GxJo1KchT8nHZegiDLUkhzRDmfsxnBOfEWbW1WIUA5Vn1K/0nevDNQETRzW+W4pYhc89LWFynXWZzpE1jNBc2HQck5G5MyTNiVTPXjKOfBsPu1etF4bcYjOWXgRZnW0RA2kbmubix5Kea2Ir9vRNZDiGR+nhEH5CUWSWdY+C4IWzZO7iRTpfXjOjHYwQyeaYZ3Vos4jOUxABlC8qDBaBbNUrXlky7ZhjKNVFVsg7rSOs5AXKCKM6zVH+Htvf4j78Yxf/Px72FY8Cy7NKiFgAHzrySQP+77lWuAuBnmWX9it7GdljqYT+tjaLsJ+HlY9Xoym+xVsrYS37Fkf8YoBa0IwjgFWxtC/XqsjILCerobcxSbQY7lKXF3Y6n/U6ysXtGmgss56nYOTbizP2ewXe2nt8P3UqQmDME9zqHprKtB9HApnwbtAIr9MwtMZnL+QGSS7bp0xdTZRbeZ5vCVM5NE6MqgrybYjj+C3YOo9lkKe/2hte7vUWC4wnzOUZVwLg/UlYXdvMJWhh8DgtSU1nSz0sRvQ17XRYU2reFZGa4aTA0qeiP55WHYpimjPUbtWxff5yEjFUqVonnXiibg0MCMOhy0noJtvALvh44/m+Wt7zKRxwbI6nJplnAZ+4D6A2/Mdn797j9vtlsGEKZmTHgsyVjgWju/P8pPesWteEtlv4upoa2DqxfNanDUxQmNu8
l43AWcDOkaUXPEtZoPNslwzF3VXFNecgs3Q114g8B9rA06Zw8y4Dyi0Tu/zBLuVc4py2vgcwTzGnD2wbW7j35ym1XFxTIcrA8+STROlmk5M4GXLtrv0mCpXE71mdVzEqg9qgxCAnnf5wxVAA8ETsplJw/WItqiRU6eyz5o5zWAKesV4BMdFgKInTM7jqqxLBn7bvoVzEjUTdiB20pbga8PtduDTn/5xBGYjNhTopae3+1wj+7BZlSD67Hb4gGPu9jp5qAJ0qt19J8+Rv2TEiw5PZ80QON9KTQq+ikeJAi/mUh7jODLoFliyRE+kdEhChQmowGnBsfClnuHHhiRhNZ9ZqCyVN0pmAHIOl1Rr6Dij3QxcITRzbfWsM8uBFtwJvUtAy2ZVEqJgIcyDjBA3lAyB4lCppQ8zdeF6CDjnRDmDDNmcZD6zIUELmCRV19S8lNTRNuYtwQfFM4BZkzqrdWm+mwyWAe8M3mTxl9qU0IHzCBSDehpJ5UBqljzPAwmhu5232S8dckzX5RpK9OqKggM1OC0XxMAP3/4YNwN+5d0vFzwqexMJNfHhArtcczZtZrAyXWOIsQg+k6kC6aGvci7Z8ql4rHRSW0spSQ5Rk12xqj3I9ZmO2k6kI7U0ApEgtgndPrRyztTXZKIrYHovp3tmMcNbCWRrz/fLY3xXa/y+6PoKBW0xPG7YcDGQXucvghjPD2yA3y8TEyUILxNBTVQtyT+/k2r11BGNhLCTMHuVhZSzEGqxZcQgcK49z7GquiRjEn9so5x2sYHqJ/ShGZpgNmss75eBKci6jC019DQ2oZDnmp/CXcPVFXCL0Sn8ab+m2h10sFIe+Z7Xoup2KGsIqdfHaWy2MiIvCZ6Ov8PUb3Rnv0xzLcwuI8Z7M8E7oo5+y+wqgHDwAy53HI5UGAh8WwkH9n3HJ598hFevnrA3p2nuBLl76tbcM2qMAYwDtwE8357x4cMNz8/94OxhQpN0RAAG39WVGI0FJ9fSxc8iQuQP3XTF+c/sbM06178Ip5o8pcHpKsvX9J6b4RkO7gIoBmW+NanIktEyrTT8BzjqdUYu+TwNiMqhSlSHawvBHeOAI4+Nb/CkEQ1neoy58U1DMw1veG2ZMCARlxkrEp082Mu6TOi1gG5zLZO7ZQJiQ61fqzW+lo6Mkr6c17Os5Xg1AztFB/0tLzDpLVg9nzjJ/kvw2lq91TzYfL4qOuU54dOrxCNlaIvdXCk4Bzc7CFyNMfD5n3yK4xgl44G3nK2HNb2ZsXSKieVzM5CdwfpcLwbYCP2y8dkYmgSEfa0er/mOHy6DNMArgIin8lZIQ9EyN78JnGdZ4VkvcHBjjJJfCwIovE20mQ03IVuUay07UZVl8vNQ87FV27BDrZVY5IBwQo7UoeFBf/xaR6KsfzjjI/hLaQSgZqT87Ka3Z8Umzx+pYKcNmkcOWKOEp8yFHdPpxHhwQ8h7ufTI2d5oaepiSMxkKbOqYnXpB+eE1ShyucUJYb7yTQWYzVnU6K/RyYPPSh/2at4Yh5B6BifBB9ShpFGDJXAPrZwpsFTiTuMitnbgj9/+GL9jG773+Xdhvukb0V7YG/QVdes13IHjwEZ/2gyGIb6htX0i3Ies5w0A43tL2kDkPj2lvqwgLE/OcVWwLXQJpV3+bE1IdHe8Y+CMdVWQ+Y88Qd4MxjRAj7zoz8q3TJaUjbyGyS9oee13rNdXKGjjtSK/ZKl8sxocA7GBDRh3+U0Zf5lpQjHxvDz7dVysBQpGTQc+9YS85/35s6IolaVn0fAOA7nrCe4yzsUMFZhcGU/I+NL5gtWmIl5CZVJ6elXARLDXUrKXWEz8k3x2rqsQB6NuXTa0KhbFitqF2R7HXD/alO32HIybGlwALc9M+F9+KA2p1XOlhgTeYAP6jJOHvbKgQDOEjcTpgKEcWeVfkF8g+87XtH+1adgfHvDRR6/x9PTYDDPb3wHURhOGYwzc3CPIBp6fP+Dduw847geOY5ZTzcCbjr/KFYLFLZyILXE1JFV7NVsLR1bKcHTlhka9ur3Ec9XmdAC8EcNJNzd5tkrFWvIAVyxyVUJRijpxD+GPxMlU/Opkr8kAj+c4c0a4LDZBme8g4ifJztH523bkTEnCRiFgqaYDfsAQ8khcOnExZ7SOeFYPGefMWwUEdK6n4S4kU4BKHFP/KKOHLMC9J44DqSkhzrKe0rV8eDfRE/l7p1wcES/BvZ+fS55Vkpx5ogaRKjjp677wn7RDl4Hrd5M/nLMYVRVwHGMmBdwFhrn+6H4MHMcdsHk+IgEzQBIpsy2uBaQuq9VngRcrnOzbDrNJ0eO4T4d4OMZt4PHhIYLI4i0PmnF3bkVIyoOR/wTvsvtGyaPg2KY2MjJP/gYp1fPWfs1Q1LPJW8kvEKwXzWtX9gmbHiXDsWSw6bSTgmfubLcoDW5GlvC5p/yU8RLHU+DsBV9qUc5GMvVB4GBucMIZwMWCKilaD50nZBioIHC1PnEPZb8aQMITHnaCstzzOKXdXcbeeophUz+mk066K0qTJ3CiyQSPvLPBNtnVDy50tSbn5dsRCA1GAPNZTTHPk1fdfgWCo0qtKbej0WlFYU+OXFm+gN+AP3jzh9h9wy+8+w427O0pld0vupj4nf70wMO24NMAsx2t2ivkbU7UefqYpNUWwTF5ACBeq9LFjAeG1I7FZ76jTgTaJiZthLZ8jm8a/S+4q896U/XS3BilDpCviplripCHrvbDkJn65k9X/19Go69Q0HZWSkAR9uUpxBjgOACUYF88gZXIGgya+VysafSBy4lxIA/49BNhPZPQmWloI7pQtmEIOttZ3cux08eRNgJoHSLHYWIYW5aitbkoFKOgWipi6ukT74jB/Vku9jzKzp7xIPjAAutsY8H30rmheGN9eyZ8PA3uashagCzK5IvGeMWFbRdNdQRMevA5zpbMlDFNyGoHNjPLWRRVcKU7pbQsy8AIR4YkeHx6xEcfvcHT02Ot+QhLZ3AMC+cr5Gc4cD+O6fiNA88fbnh+nmtl6DwbuLEMDd4cCzdVSxmk9k7L54JucXJRMqvrKCpGorxsFSBYGbc53sIdbazF31pMzue3wp3Sli+wJa/giMZllYmkIY1J/ELaVbt6MC1lkzvTvaTbBLZ4To2vrlM7PZ+0IQwbamZ+vQh/v8vxmHxm2wzYWxxE/oudF9PXYUBjEnTpLrvBu7MJbpACcM9uMy5K57olr3VLaybTOcMcZTC1ih1JLXc5W4u8WU7ZFY9KI6i7wbtfoi00w6wnl7WAwrz0ejoYDFzn7o6zVHYehKs9UCZW/qBjsZaqqxOjNN33HTDgCFk3B+63Ox6eDJtA3pNkMvbshrPJMhTKLNfMjNJ7qcmG57lpupayYJYf2Ibg130ssjT5nzseOtDoyeNTKolQx5ZUqVfprsxNqO29sO+9lylzI531SEEZNSn1FB2N3pImmIpvOhCULXePA5c59nyg+pZXyfZ1W/AkusuSHkWrIizHIvptQUdCutiylMeTbxC2sNmMsjcapVWSGdImYdcZ3pVOSqGpL+bPPHLlWie3ZGDqsRIs
UthDl7R1zAie80h8xvNDiFJrrQpnV9hcfWKLBNvvvvkh7nD80rvvovPhbGvb1s351kveGPN4n+nHWA5wHJ60rHWBAAb5qzYawWYh12g+aVY85WAmKjmBkDLi3Q53GElnFZyuo/lU7oeQv9vyzhXTCi8tfkFVQNn53ROcJSs5wy/+2np9EXWAr0jQRjvVZ9DOoL/s2zBXw+eUIC8ZXd4791NKLbIH8q61TzjzyKWM+YWyf6F7AbuxmDie87aXYIiymN/PO67ppSWOlQ3tQ7L1Nb+mSW3niwawZqhUrqs9UT4TqBZwE6SsO1/7SCNU73Gr796XzC62X6EYFFp0Z6QZIm9vpJEjuxE+jx95wC0DkSuj4T43DWD5Y6CvkaTQPn9Zy/KAOkxTcbQ/PODt21d49fQwD8YOB9ZcM1qhZGN9EQ8PHePA/XbDh+db7iA5x2NlR90zUOP3Sa8q7/LspWEz/i0lrjNQ+ZSXg1NYM3oSL6jczhf8XD5G4V0LUzVg4yyYoxwWOi9ArYVoY6IjEoxaitmaPlntcdE2HG+EAx8DowPlUKdW9CTxJFk7U5roxSTC4vBdlXjRSQPxzU0q0s+1nG1hxWC244gzs3xmoYUWgLVZIaWJarU5bk+9Q+M+wtpbHPROvGeQuui03CL+pID4QA9kdXa+ZrgbuIEfZD/ZH8rJADxLLxvVGYiCfKH9i2MQ9J7qYe6QsT/uc8Mh6asRBROWAa9xi/7PWQ05U3FdE7VtO+zBZuA25nb+99sdD48PpacD3BFliAW6FS6Mzk3x6rbv2HfL8QyschTOF3XM0Puq2CkYShNv9KlZ78Uv8PAVzBJHV25FBTTaQvWX0JK1XN6DZ6Jo7n7HwM1rNo/bkvKw6RQ10U+JitBE2WlBRL9plv6fOan0UIefdi7hVPRl7SAqWRM7LPq2Jx/w+YxjGh5W2Ra4zMCF3IEuAUuUosks9EJfDmYNYOdrk9P9KjhhW6uVpfPtsQX+VGLhB1G+z9UWHskN//+oe9cmS5LkOux43Krqqu7BAksQIAiQBCiJFE0ymun//wWZSTKJpgck0URJxGMJLIHdmemuR4brQ/hxPx6Z1TuC9KGRu9NVdW/Gy8P9+CM8Iob2FWkHrCne9Evw3owx5WXxHAt/aECprdr4icaU/jEG/vLjX+GwiX/+4z/rdCdNB2AS8NDV3dxbH9/NmPfC5Dj90R0eNg7xXQ8L9GYYBu3WwY4A1g1wiYRmsMCSpJ/IQ2Wl8AnezENprBeq0ZzHT3A6MdMZg/ffa1vSWtQp2WK779Vr8usV3mg5O8nM/nwTThuAxtRX3+lmUn02XwZ94jYD5aL68uarbFedYlA0Y/k66r4fDKZ/nsbYHIPe79U3GiLSnxNTS6pLjpeiWMbEbmQ661eliFS7AHqaqGmbrUQbbjphJ9oUDqeSUpEyUMlHeat6N6Jk4zSqMqosbbK9IZF2Gt/sZ0K36fysL06in8C1KQj0cbBw0kuevD4huMqn45gTtyF54hyz1VywmrbfCNKAyAbjSbfbwP39HYCBYwI2HCP3FvR+O+R0FKV55AAAIABJREFUM5/weeD15RXPzy+RDukpZ31InK9V2+Hs867YzjxTDpAo/PxmB3xspfmrJ8HWKubY3lUZTQ6FEAtAj5VX0KDSjKrGKXQfrT7f+ka+qn1r9XUaLkqgZhyGAjAs5T6XwzTi8uBKNyqlnX2PtseNBgZ5BKUwQsC6qhOk8qprGRBx3D2/t4jAwtsl7pW+5PDjyOAQDfnFB5Eq1vbI5VrJ+n+LXlgt1XvsXxoL3UZLu6JkJYWLtOCirxW+LGtJ+uAxdK5r5KDA4IxSa9XN3Z8d9D2CGSphrLefaoitLFDXf4RhNNclH+PuTpCbh3yEc5Dp0dLiVExejZaBJqOReeZc2RjAHeBvnicnzuNYK3FKhxQlUTJSffFf0GU6jgjscGqzD1qtl/NN3GHfHSZpjUDzslHyVhTVf+vFJVOc25IFog+rzhM1BZ/q3sBtJYUpqRxf61jpRVtLChxsYcpOz5wjSFvrd99GV3hTXR1meZqw3UybyAXoQw6wWWP2mhPymAwjnfvQcTMd616/pRFQQdWmK11+brYJ+T9pe5KVeKtOQ6k5jH9W87EXy6w7W97b0nl0OAYdJV0xKjAEHVZiJrMAOAc1z8Ff82COIHJ1d6yzHuc8MPaNeJzDNG7VGe80WK+ltATtDX/9+EsAhn/+4z89qdFlN3HcwOVGQEnbLNty6cBUs0cEXm7haIxabR23OtiI20QEkKruTENeYeQbdaF7+Kkz5g8ia7Ot1J2zyk6GQ9l2G952OqpkbSoaFWRjW4mt3PcRDnFBqkufok3VJRIcURvuOoxUzzfhtHVY7c6QEjAN/yBuV6RXte6f78S4KufdWZHjQdt+oN0r+crT4FXtKCPjCbwTdOKni0GSo2rIJ4cAuFLRwqEbkmOsCiV+5v660rl7XcRNdkMVLsfjOW9WONv1aQFaNMjVIbaf5ZJxvaVOnBz07HSZIiPpF8qHSsX3OnrqU097kCaKBCdxP4s/aYpQJGWcmNDFZ6R3TcdtjOWwUTEP5QgxHqQhXqKdbQGZ/glz3N3f49PHD7i/vwcB8PC4n+hmsNgv57HC53C8HQcmgNeXV3x5ecPb6yuOuKl5OuLwEm+KmQOigs8DTozTIhLqnJeiZn6bc9gl+moVeNk5PdWIVfDF3NxPAzjBdvHoyAt2qgxCIemMFt/WDFs2JqdQnbR1fZyGj2yhSP4HDYVS8OwjyT3MMFkfLfEMi0tUrqxbcP+SS0Wc73zNGNbpTnZdaRDyBCweCQOmDqTw7DvvHeMcrnnzOjGVJ5tayBmOpYgnVx66YWjxnbtj3JYTUSmA5AHHkRvkq7AJ8Z1GSbZbj66grIPjeuK7CnyZ4PWzvq5/04BwbxHrqiXNDDBSTd1gKJw4jlnOkjvu7+8jWj+i7CwjaE1asEDNiZBz0S54NJ3meOGYEyoOiaQGjLs7vL28rr6+rUN07u7EcVMwbsaJ6ihixOqH7gMsOu7U5C8ugUVDOW4QR77qo1ypiVs6rACEunx9VCuXy4iuQxf6ivsQvl/0sbxktNo8B5dVI9fYNACczmnSJUbLAnItSPG5VJ24vL6b8d6gbOeplnx9LXnMMDIpV6vOUXJMms7SS3zqBgXib+mHGrdLXYt3c5V2UVjmTafTCyfbs219SSHQ4KZgNbEvpgw6J6pgeSqkL7wYGOuArSxHHUFA8G4QgNkYsv9LGHlB3gpijRF3lYWuyIAbNntk+9nHHG2aNqUBq9W///C49rj9k89/gBsvt+Z8DQaQGmEu26Ray+81kYvO1eBx+rTBeDp1cL7r+5WmO1BXEcCYNumZvTGibOONpFRaWDCpR+UIUuZM2bPuAXrKLbbf9hp5l+Ci5QE/RvD1kIMou+yo45ZBktRI/O/955tw2tZTHW26AAXG/PzyBoVG15okdfb6d9ft4fTtXh4NOPay2kyXXddZRiqkeDcNAwEJddZar7c2dyfX5fNlkJMOQhcne3gaDAp+JRTR5w1EFSrPBoq
8tHWXOMH0qvxQfqZC8l5mRa10Tn05IIYECK3IoO2hDaCgT7BY+77PufZnH5fLZy5txpd50qPUOYbhdquVoRLdqFkAiGkU6dxkGzRqKj3hw+M9nh4f8XB/DzOsDcU5Vsu2Fityf9Bq+TgOvLy+4Xh9xetbns8VBw6ok7UBeU6INQMjadDmxXJ405GrjBkQmGLMNpZSAicaLnCkEUtlv6+gsHhEGD3agZViZ3LisjNmgnFGry3mknNrljK1HJazQWHGJBDLk/hS6rzjCLtYsks8EJwQo+MUvMgy63few+YSHDGkTbbRR6Q+2l2rKp6KPVdphI5LHkWSDBhuefjAGIZ5HNVHKrBMRyiFmwEfcHVptbOcoEhjM/IhsT1wUuYE8K36DbiEZmu1zooETbEKbXP+Kw2JHyo65xHy7YoG0xIc1t5IlI8VtjnjgltfaYliMK/u7BoKSce9Zr6abY/YsxMdGbexAVbN580A3N1wxNUBx3HAxlpp0kCTKgeHzHO2X33d0/GSL63LjzprCQ4hrwxi1QqurMxJ8KMR2z2syYWoPdgj9DzNzcUT43bepwelhbxjiRi9Lxd11ffEdwlIUfYoL1arwZTPRff6Pu0Hk2aiCbMh+7Atsdj03QgqLOy66Lc8a4q3oAeAWk0MemkfcgS45D31tCr4MrKtnYhp9Hr1P+dm71a2ZcETHnp11XLwawfqLOVqiz889M+aNrlmhXNiaHenua9A7RgcT1oIfcHY67stuToxl5ig2VTVOHLsf/70lzAA/+TzP8ZwrdugjrYikRApquz03lNfc3/qWPRbvLVwf8TVQuymAUlz2gjsb+oxq7R7Sqo7llMNgBfgeVTF/uz2X1XcsZiyoRlLqQKg9nCVqVWzeJ96KPTW+uomeFI/y37g67ID30XS86Tcrz/fkNO2nnK6ScLzo8T7zRVtSpJS8RUcUmxN52C3+rf35NPWt97V6Eyb8P6u6puq76r9cFIE6NPxcq7wiFGRwVYCIccgwCnKY9kyooy9Gx7s2WRRocXujEogpX66YDX7ry+xjJdipxL0XG9fz8URDyU2rV9S9Vf4C9ImlZyHoHLc/d0eJeM8amw9X58Ehd6xpXB62gWVT4sYSsNlFznGuOHTpyc8fniIk+XqYuwaSxbAnGFkBi++HRNfvjzj5fllRcxIA1EsajDkOFEGoNrHTEUVHbcUkNc4C7jm5sT4NnE6d9F+RLn94sJNDlr1QRE0xpJsr3JaK6NUbGksnRglwLzRJwwFubvsepWO89mEAOlUsj0xXHIIYUNo4ILvEPjpdDKNK9cKxsgLVPdVGSqyxbcrHbIRzdhCV1zrC48py6TsU79r6FWHUaHDYmCrzhkHSvBagjSCkgdzwpJWdCJbqij0903mEKnT+d5iMs6Jbfynv2oAI536HGA468LL6Rc6MsiwyCZyHpXPWGVbq4wDt9stzT5RabJK16YkfhZWkX7TI+2L6XBRaO1TFad8WUdYTAbc7m6Yx4y74CbmGLDbiL2FgQfGPSwlJxoBV+eB/TvpX4v5UQunUydhYdc3OU6RYVbT9G8eTW5JF2dAABQ5Dab61sBJGrGfoqErfm2cQ7InRGdk5osoRR1fYupSWo0vU/6TF1qS81pBkiGoQZn7ZjetXoGHcQl7mfLXKOP5GVO3i4SLPrkKYpXyWuNiPaWfMsBIQ130qQwGgOfK4uVjTM1U3kTy7mqD/2xTvgSt+p39SCpKM2PrwWYTVIWYkeKeuGCSaicyLOTIZ1aXCo1Pig7ZyYGBP3/6BSYm/vjHPwo6VN889PZ+ZHoGP7RP7OP2JN5PdTE9xro+WZkCxdTrLtDqJ6b3FeHmfFaNiYNRzr3oWn0rHbvkY9dlRCfVfsJ/28iKAJLh02wMMoWta1cMac9NRGBw3S9xqt/C9it9tb9zfr4Jp01kZvv0SnH+5kF9rZq06zZAKn6yKreDtgLeqb8d/E4CFL90xtoEJZqoVLqKCKgyM0MHnOj3es3zHW3cWwOeP0q5EySkXhMlqRSgPKuIloyhUiuu6NR7VftVvQAku1F1WAJoDhgExgKxpRBGFHBUXj9Lqli0rgkdHJyDrrzVAKCM5rtswZQW1srwztECJukL63RRTTEna+hScdTOsY0x8PT0iIeHB4zbDR6nzcEGzA+4DdzibpNj1iWa7hNzAm/HG748v+Ll+WXd10RFCY3W1kA8J8jbWKd7Xboq/N/2zUQdVBp5fH0YmrV3SidI0k62mdP2L0SKX5K4NR6v+cprCaYnoPN6UR15/mUV2c9PJUBy5bhk1+ofGcu2ysA3rP4yxP01W2rk2vcE2KjADftyHEcFHTxSTk58RJpU5DUVOhk2hxmHJ9gtlQyN9masJvvO2neSQowGLoJOILMPXvQcTdO5ezveQtI41qJB2RgB7olESPnXyeQBAggZ3fy5jV0tsTC/27Bo8QzAC71TzxDDyJ/FdilH6ZTF/kWO5+4ujr+/0HnpZnTrgTMqc+wkTI4bG80KxSL9UDaamgG3+zvM5xdMXzzFDAFjGV1tjY7MOPrbj04jo35DNgE6Ks1R1ioNdT/erI8VfzgmUitPt1XQ961ufjk5X34mtYpK236SCrQFCPZH1VlzVqLPVhVmF7W2IkPpQk/9ppixnO/GBzpO7bPq+B1LhVX3jCZCgcV+Je7lgi/9NMa6/N2B3JPJOd5XC084rX21QCFiWZ505HI9iAMmezujrHBWFHEkJtNR0oOQzl5tEYONJa4ICVO3B36fhkPeKylv388pK7/F+7zy5Yo0hsI8fi5rjDUE7UzQ8BdPfw33A3/y+Z+e+wkUTnuNKFfxLuzdTOPVu/mUc4NGdNabYz0Wv5D/uUVnYT0Cs/v4VTYAi7TKtZ4XB/Ln+zymBM4MmpVqFkeooEZoK5hi2gZxUrNZLojKnliBi1nRc2W4OJynIbeghVc1RNE8AdPjEvH3n2/Caft6F/dHp3CvZRfAM7O1Vy5qSRDodI3vzsq/mnCZ5L3Wi/avIl2+5Y6nYt2/R1MU7HPfuKsOIvGookU6BL5Qwb5yXE7RShnjeZSWOOU7DbOZJeSiKrJsXTS6Is5jSBpNKhOCm/SH72g3vYTwSqf2AJIYgTEKG2vgvC+NZKXu4y/Mv24buMHUOAEbGkuoNM50eoWHai6lnxEJ7yACwAz393f47tPTctjGgGOlWdkw3MwwcR9HJThe3qYQbd3H9uXzM97e3vD69orjKGJm+ipZL+jSDdEi5oE4znyLBDt4/L10W4RHadqM8CbmQZ2ZTKBsWI8KqwU3NiYVZjGg3ZdoaJcwe7RnQ1VojDr3pylwE5BLsWSfbQUkhm90Q4+cVxvrvbG1vBxjuceGAR3qzYEySEbs3cO6wsHMcl+iRhRpgFrSwWrPZJCLKxMRkl3OWA7xgGPd2+PHWrUYJsa/0t6BOpadjFXztQxg3lPXDSiH4+52WycPkk94Ah/TUDNqqWUjcMNenPhB+1dYVEjU3yUuahrwooWuaoQMye/rxmr6B5Z0z0654zjecOShM7c8eEiRbX1kGcG2dsJizFukGKejhv4Y5/
bqWxsda5zpkAPTj3Wq5P0d7sYtx+/xS+65Q9FmCI7MOfNagU20o6tc4S367vqo6Q1eXiwReguZOKUU9w1A8RVnSHiG16fwpF3ZY6wObtceq++lyqptOmqpjSiejP5nwKOCYT0bkfoyNUTQS1ZHkzocY8clhA5R3Vzpp8j6sj9oyLzFWjy9uRFpwJTZ6cj90gFPURP7ZYGpgUHNYeIEVmMeOLACUtscCNO3IED+wvo7f7tEThnLoF2X/Ywnj+1KWWJgQ+kG0Uf1TQbRUXbIbp9xQjTNru7GPK/7OGpOF+6wdu32prTZfQN+8fGX+MXDL/HHP/wT/N7L7wZWrzrOW44s50S5fddXujrYbNX2zpZieiyt0dImh8GmZWBg2MhsgrY1Q2hxQZwVLBhLGTr7F/N3YOGD03ESXgbCTuGVJEn/M3pC8SJ0zmqC/S/+3BMeSZ9kY0y4jdSxNa/vP9+E00aD7UpJXn92UcE7TxGn13ZmwgLDc9VXfXnvkyqokSYK5fslFDD8mlf4rRoH+bOMhqtWNJKpjFMY6dnHxXRDcbE7YHx3J4uAVxqDJfsxLC5veyqsbIMMH33NTf1ACMdZjFw6ot3RSLJu7i61d364aVZTM6ShKpf9tFo9qyFVf1zukmJUFbJHrVkQYqBdTGApi1AwA7j/cI+PT094fLjL8mbAuBVN77Bw6Qgv1+ARMX/Dcxw6Mo8j0iW9OemDDgGA4aRrjF95AyuKVSMQIM+xEQjPsqrvkJC6r+1CD2xlhe7GlgBdIUnjxqoMNnlQDKITxNrYhbV9je/oWEopNxpEOxUpFzBK1dCIiWbogNcwrEM7xuiXpu79ZuRwJv87btz/kPsuNP0RKWMpRaGAuLcsjQ4M2LiV8RYOU/ZjWB1lrRPUbYiCCwfcj+SprJaKvI8yDVXiyAkTUrnHWKlXMdb+rByJZ5pgCW+NFaZZArLyHg11/JXPdaC8aBojB5X3HNYlYCCbxeJ3kut2f5f81NKGYXVoijiZ2SOyeXPYYjXsJnSOeebuChrDeXBA1QiztffN4qoAn74MI7Nywi3CV4Z1pYjIF+sZDIDMOEEy9D5n0TDaMf856qajBEO2KyHSEaEDzaBGYq1dY6scbz954uD0FFWLi8xtynzLvBjpl/SsYFytwMuYSlHIdzKm/bMsUkEtmZ0cUuJvfKCrMOpsBBcVJgHll6D6LKos2yiniRUV6ht4B5joWN9XUV2GL/RDdvpC+BlQVn1cbiz7vA0wB0Vcm94DYWsYEqDY2uCfpvvaiLUuNg4bbHzGz8r5dOHHyi6hvcXqh3Sj05F4scRNRr/xc6GCpPoGb867iX/7s/8L4/sbfvfl50l9OLOSyoshxJO3W+V7Y0Wxk4j1U52BS7uq7TFf4xtmcUjMeqfS/Xv902ScgzIR+mF7mQ5yBs5ELxxw8NLlFYCOsTOAs1pD6W2I31eWjdqA3Sa/wGsYeJ3OMqu71rt6vg2nLR4FjXreG8RvHtwVQu+Oy1dLZxNNbV8Kyv6cPWpsoCNdlDZ6mlShSXMGAvA8gYP41FcudBwundZx6OdseTSDrg+VQNWxr9crw0n/ro97U1Wt4DJy9JSxlfohxTkmKch6CDJLCMoYzXRNKV9jMtzSQN/mzKQ+lk1pd5kh0pkGXwe/1lFvJGqP6LLs6K7exwAenj7gu6enOCGy6tqyt/PI6nV89eqx+7pq4PX1DcdxpGKj8aU9Iy248X87RGwD0E2xpCLbB9V5pjt5TDMA6vJUKsJy5JIH35FDC9qxtRYcANaYlfipXMcFNli+XwZIJpDUaJqRWH23jLeZrApo1F1oYpaGa5O7kPe1aiQpjOnQxelsQRMqJa4UlxkmWqQzWjhdZXDebjfMSLFcaX8TZjes7eGI1RwL8ggQBS3WQNe8FTbFwHjnjRoVVJC+9ctU4QG5L0z41CJJpvbVhbL3tZ2dnlKyLw18oQpr4j6nHEaxwMYbxGNrKZjLaLYTjWvlo6fe0sFQJ4Cn4HJOGSAgvhSm0UGn69MlcmHChHu/3w8oPkwp3McW1WUwB+v34zhwdxcpsiJEwypFcRWXUWbggp+Thy3+X0GFHSmQzWwTBsuTP7nCr6tnLF5VdYeh5rbjRMGLyLXI29a7rIPYlZ+nqO08vZVtTtjq+36VRvr5wmcwvHNRaqINdrKxL0xXE/EDCU1MIQhxXuGyqpkr+p6pkFzZByBZ3JmslmNJcuy3apvYORfTt3jexdgmb+yOsYfIW9LFzNaeTo0Q7c8WHOFnbmiHsVC8k2oy2UnHPJjKs0+7w00OIxwqdklLbVyJoZdvIL89h6YpZ2urxL/97t9hfv+G3/vyDxfPu5ziKL1ufEvwFIjRMZXNqzjTn6/a3d7rmAaeZ4JhwJGB6cKdJa9DtsGUIWeLeVNuTr1S0wIVxlhplLKtZtbLKUNR0dwCMBs7V7BBFYnoSJYpZ9nwHu34fBtO21fm8eqhYJdQXA3Szzhp9d258c7+guPxgRf4XQDKub6Ltq+AyH3rl06oy0f7GMm0nv11eb2AhIwO7ALIjywZySPaqAAkisCZuAUQCKlE+iwIGOf4THjVUJHodx7O8dfeOZMDNK4zFWEqyF8ILUSZQRS1vGM14hRwPkNL0GDxVjx/oxOzcVqWXW85ylGTloKkwwaenj7g46ePuN1uCVa9NQ/j7twHGl1fvjzj9eX1pBWTTRhYDJ6q+PE2a46ieeMYMdRd0zs7fWmkZKriWY9fxjoAplBuK6Oc03dOPKNR5rA6PIE1eqwgiELKVhkgGUsxqEJmH3a5y3RJcRCbuJ/Ua3CGldPGZm5xYXnU3Pq8c8vErBROYEU6Rbm0FWfTPlEuLUFu3G45vx0LSBnP+6zGuKUSL1T1NPCm1kDnY2gUOr4bhmM6bE7YGCtVJiOxRabOjsFJrrSPgEVOTDCXFQU9y5RJ1Mym6Ns6wEVAxHvzHVdWqiBXHstgCknyfiXD6sPE9LVGebuNNNq9/3NuS/7OtYcmYBE6kADfqnvWuwrXUu7Uii26zqMuA86FB+HXxRN0zbzJ0TJMR8d/RxzUUUd+FxqzdWt7thikZBCR9JQwXQ/M0R5wB1NAc88lENef1Dg55LyTTcwEnu4JiyPcURdb32QPU6YkNjoKzqs8oXI8FEc7QlU50nT5SRWkQOrzerv71qWrEPi0uIFYnedBJA+kYWqthgo2YOFtHupCJ8pnHqqY8GE1s40GyQNRf4+qgtLmKUt8DxxATVC0LTnAleqfCiUwjrZOV8PZJT0gpvB9SyaUOrV/iRBtKF3fVHAG5Sg3W7NjEvH2FFRic2CTQkvVOBbEGMD/+enP8Td3v8LvPf8ufv76O9h1cx9jYWEFVd97u1o+oxX7sUlFzHGteq/5I/2XvEXowqqOMQyI4/VT52sAzKpOvRor+WZ9AU97wcBwILDkmzoSkWmQVyUk02hGioVMhHwG03jSlnZgv2BoBUC5itdXJvfnm3DaqFTsYn5zSbl9xlJaQ9FRDZAmhA6YdUYr2u9OnjLe9j7f2AR9D0q8w6+lBMzqyNn+wrmiUGYQ5ajOayrkL
H0hzbYDOHH0vAJwJY4U+NRjXisNuW9J6n1PpnX9JOlRpS7pFlNUjK5t2d6egpTlmLSJ/cnDA7SPqqs3q6bmfp7GmcAa7QlMb4ojPtPoY1PZfTTrwJEP+PjxCbfb3brglNFQAMCBw2utrd1f7AfmPPD2duDz8wveXt4abaibmNpWKa3VP9PlTpEtghX5scmFAenkeOpmIaYFtZGKMQMI0Y+dsZQ91Pwu+8rrpOasc0vRIAdaV6RDAHPtv9gmITvl7fOUd0cZUPmZQ3P6S5HOMnJBpbi+P3j5Lwecxo72H3kSZBqsvlYLIf3OI/RlLsUOSkeoHB7FWJlzhDJlvQ1ieMcVBUxcnzi/2TgNyVuWe/7WZ1YYg4pMc59XDA+a2mVjJB0jNyZkjkqb2rkLZK7CNwwPZys7iKxDMVbIUbNpVhcxA2vAPnJOOQAGyFomBmIVPKDvdncXfVLQKmFWjDqBz46dY8APzysEyhmkcbGDvTC6gJ838nGDvwtOBM9b8L+LXRR8152R7pDpODyMHjMxVd0zGm3raFS5GxCgE7hOFuZKKccqDhuQh0CY9GmG0VRBHeF9wSeOY6BW92CyyuhskcCvOrsk/zRnF4cP5OEXl3rU5YdJcc+LhxfWktrSsthAqmWUF7Hxe3aTnxvi+hDyYIIIWu16WvUEzCJYkMCDgplZV62kjJDmxiBbzKmSa+voIqcSzuXgv1RKbfTSyxyLBa0Wf87qc3mAyiLt4Uzb9pPf1NTuBSc89pFqXco/K6U8sFLeS9uXU+LLGSF71WJDCO3N8TdPv8av7r/Hf/79DZ9eP51oon+knoqhvOuzCUl4EjDrPe/h7q1VCn/JvfJW7f1aBDymw24OHyNX5VrnJ8AvSD8eMtb2vYbi5Z2oXN2evvZSr3bHOqhtzutAOXGamGiGI7+uttw9t5N4dHG4M38FG+VPzzfhtPHpDkWBxtlx68LFd1hHV4fy6oWOO7fbP2fR9rlW+TX6Ku63iiyNpX0lIWtnJC0kJKPXm46tsuuXSWky3n/l0Gh8laGxXkb2FKDeI+urn0vyl2ITcH6HplVRqGink6ARlSo8LgIMms66N5MsHnSmoJzmRZWnS6qJ1LMOExFB8gXi48R31nRNOvFmaRz3ussESNqqgWJx/G3oAk0PyFSdME4fPz7i49PTisSj5pR3lRxycsg6zGNFbqbHoSPPL/jy+bkMdkBWAkLx54A68y+QK94xJnxHv9UoqCiXVkFnwJMqajGv8e8O67aquxneBEDVnRh1+iAcrZyjVn30MRo9jvV9ndgPOmBMP6yxKf96KEWNblKJWrZe3GVZR8re1Phe5fGbvA9EtDCr8jowgYb1iS6WdF48Qd4SdeOeq1l0UmkgcTWiVo3CKCRtJRKdrIPiExq563wvB+aIu75qlaQcpgiUpGM4ejpujC8oDq70F9HTKtmwIvAm6JKrvkKvXKnhnqYN9XmNQ0kkdVJh+ZicCwdzEkSdATBM3jykwL/pJXXeLQtLZgl1QmKRCpoBtq51OJzBgEXPtzlxZxGYMHmfPNG4F1Wnezuym31iGv1+uAv5K/8OvkuAl1SVkVizaBJxlihp6yLw48AxDwxu2CfNOAdzOZEjjPs5rQ7DguAQZZYbJ1MX0ZAraq9fikejgvo55NiszGqgVuy8w4AK9W2PAC4X0Kz0m7vXxWkOsX9K/5xskxxvn0PSiEEqQ2ECdepQfU754ol+u052oA5dsRxP2tqeHFD4l+OOHgvfJ1mt+kTatzb5TgFAPU2Yo0hOlSprvpJCL5igj53+FZCpO9La3BCLuozwh1s4+vlBaDfvRM6VJamX/MFj7EX6AAAgAElEQVS6pyPnrnjQ4bwjk/qD/JPt9Ye0Pm4T/+N3fwqbA//V9/8F7o4HeUfJXPo6hKbgSPin9C0psdsFndrrs6DOyaC2VqalnNLwO9aptnn4VtqKMSdapU8cVk7bqS98N65NMvd1BdJtrc7fRtwdN0uuES5YYoAe4hDjF0sGQF+rXQG/vQPvP9+E06ZkK77XiH2B6/Y2VJgu6xY+2z+/+qzXLb/JJCREfK18fnfBoHCUc9Er0ajEii4S1LS9At+VlkAFFHWMkVGXVAQXtFo4uQR7zpmMfPk4SwsIu1Z0XYzjSJCMskP3z4DzIc5lYeqJRuzPNkAWS0PjEuyNEY3q8ulo4xwso+SL+GvIS0mLTbL4M08SaFudm7CmU8Tu66W58U870MgdGIb7uzs8Pj3i6fHDsnvmxJHyccCwIukTZQCrBT3psH35guPohKO6ZuRd6VLcriQOw5qrTUELy7HV/JV+9UavYIIYYgVkrLWDc2pNOqVOwtfnGeSwrDP5a3f4ojyVmQHtVGO216WF5tDO6J48Xas3HBvpFZQ84VdVkXenUUbyywD5uNun2qSRkxcwpnHmkVYIcCWl5tqGiTx6HGqxUt7Yv8Rgwzqy3ekscjfdFuWl7KWzISuHQAYzeKok2+hPjAXLUC+5v8B76gIXbAjncP3fy7lVk2Lmb4V9OiU55xaKv9on21IhqzPNo8+bYQ3lB+TvqegnecKqco5SAlQyhJOqK7YmXYBjrqiyjYFb6s2gwTFxuMOHY9xuWZ4n1vEUwKQrdRQEp7H2tVYin0MdANNuhpJU316PQO8UljEK6rD/Y9zOkjelZw7aTjUPlCOr+TQoH7IfTPesVVtt36OiUefNp7zDPfeYLv/R0xhUvXWSmRpFUo37NFmMwbhNzGKhv/he20iyS/UTVYdcAZ04JxxbmSxyWA4rSlzwiGpl29YyEvT0aA0aMWV5fypgoNjpwTtLPhhsWzqY76qU1Nw0lKZONvI0QKOe1+s4IGfMW1WjdZOleJ+YvqFOHzE/ZaT6qEe5e1IdougKPxq9LSUNTK0eJu21QMB6NDB9ZT7RRsjvhsGH43/67n/HH335R/ju9RMe5oPATdDcPQMWyW2ck6nj6kGI91fYonaeRDr7nKatqe8SfZuzbiujIE823sAyfx3ZhqNkm0Nkz8d0vEX2mdkBmzzNN6Q97QVef6J7eo3xlqx7yD1+J3uW9oo7DPt9f+fnm3Dazk8x7vV3X/v7pz6hniXq1aMKifXxtWex39jiV15Y9Y4GRKtaa/s7eApTCiU2Bo0umTC3Rno0VQNUGFiH45ycYlQz+5gJkjqk5HF5NxXdJptqqFKYysBHo3/1FqFsEKtF8Z4KYk1HgFkpIdv7a8BpHx1xWHWf9/YdctqU1YrJ7uQkvVokWIwwZIXLtrC+osRpIn2YMWsAHj484NPTB9w/PACwOJ0P2UYa2jmsmXPpAHxOvL2+4fXlFfPQyRFYFQOD860qsOmxkyEXSpWU474G6jqH0I2mgTfg0jkQ1QkhvTAa35EzmzbjuOlwh3ReBiQzU8tLXLkqXqQyd0OuNvSIIR0TNQgcXH6cPmO+tRMhm6SREYu2ecFSZrPtFakBpZFGhgUNyZmReot8/zU26cEufz7X/aa68UyVrTj0aWR5cYEjFLrMPedOHV1jXVmtroLF3jtI3/apUnxSSAyByeh04majelF2dRhcraOpkcaM
Gk1ZGn1VqfFw8A6PjLf9NQY3aj4D9XMwhoqidzAWc1wxW+pdAb6Qq1EulZnFBdkHpjNNcjn6XEFlLU1tmQlfVsYGLE6SjIGJpCjbFDrEHK4VMAYYutwmUAjvZXph7PPb2ADloF9Ohw5EyZZ/s7LNPYYeROTsP+VVVsPYftMZIeekjY2RnVJ29Jq89bfJMpXynNe8rG4UoDVDNos62iRa+5F0KJ0e7VEy1LnZiSuVZoqzu9ChAM0NdSmpFYdnmnbTC5wTDeAGT2dfevtK947vnkqkHMGQoebYWOpJDTXICLahR8COWJEpf2cSJUfTeCcPtH5StlcqJMdEMlq7S6/kaPVhrbq1G6Nq9MJWQl3hp/w0D2YKKoRD+eXuGf/bx/8Dv/3yMzy8PeCP5x9FzqAUVjtzC05Un0zOa7AcA0B+2ygs46QT3CFcbYayXSkX2bVcoS6HX3UNtq5qeu/Sr+tQnQMW2UTRzvTMvmTwRWXQlM/lX7JkswA0pUz1T9pWX3++Cadt7yQZd3eicBrSDuUXdV98teqSCOTFS03OHaeUc5W/nB+VGmHQeqlWQhoDGdJhY3+6UaW0UAdsGWce1lge0V5eV4K0jpORQDqGLUVy1mAS3KPc2CJMneyVS19mAFLxwH2lnRGM1PlAMba3asVQMgIthdpa+1tXVJNKhFBq9W6kiE8OndS1Z1eMi6jffer0ySXCaJGlNAB2BUriwPJ4WWohg60Vtod1B9vd7S75YMEK98p4KIeg3XyFx4Wj0yfe3g68vLzi7e3A27GiuOkYKTCm4q75UZqWAeyNx1W54mo+RFdlGs21XbE5Q13PTZeUtvaQ2zyUqUwd7Q65d6UmT8ZGOd2DB9GB2odiyd+sz0V+vIrWPNrA8CEyzLRgg6ZDJRY1hqqWVNW1kTPaveFkv8OQSqnays6yLJVQ0lywSFKh0rhwRjStygLCF9LJoK3HSn69ayJODI64HO/eZbZjYFTujjwZMtJ1K2XFZNzVlnAc6DzWXdJ1B15P07KU776/TeQckUaTvCBOuMyFWZzOmwyjy8ky5vjI0+En73AFg92jsxYGoOiGAcPTd4+4u7vhzYEvX77g9fk1y9jhsGnw26jVM5WhpFcZidxHmwZ/08u14sbBDJMA44h9j255bUYCUGaL9KDejp/7UzB/1feYo8BTymuexJncUI607jVBjBVCb304PyMmpOrLyWntLAzp8uj5Xk06dSEA6GXL+q+HTJmh9hpzruq1rcOWTpnKEnlnjBhoOhMu5WpeysQxqL3R2xWkFFwsh0/KWfSajE3h1xNYc35rFYcpdeurAC9nSFUESfR2mx7pZRr4O1aS3jwlNwZp0rfkb61xp41pnRySSYBG4N/zn8TVxM7FdOvgjE3p1OKnaIzEbpftBzpF3UIFABsDf/vhV/D7idfxiv/s+z+J8VlfZRTnjfVdBa1bkyf5RNlzMfimCxFBpupww55zM6Jn2TWu0p6bbuUSq/nJlHXpo68QL3lQ54v/FeYl77uDKeoA0ibJERkdxvj7YguHPt+E0/bTHwL5+ovOlz7XE7nV4tdl33sXWAybABsgo8ZJa1MVcKvHy6iq15DKTsFNCvfxLKVTRqUFo5NJm/h1udY+7zTcSZFKyPKdXUZ1DBxf3o0Fwlm8ZXkj0AJsQ7tPUyO9bQ69NmoWgCLAugMGHz39csh3dSy3psRsIGORKkTlh8xkyr4niCLlVAwGVeDl+K6p4acoQ6DNB/vtePr4hI9Pj+syWgA3ON4ccMjhBl40nnmB5erMdMPzyxuev7wWPYLmbEkOqao+0nYQfUn6FBtZ9l3poL/vMuGogID4t6mMfX9Z6G/5jvZrm3tvv0S5/QIEKWKizEmBptuFj63odZ2v3mpe/+qq1nS4efJ/IbwHXbkKQwdCQV8MgY1EKy13Yt2fpmhCnvQ2xLYKKUoypFy65eIo00QqxuFfM4zGMW7Jh+laem+/7y2M96bnRnWX1U6GSdZeu+CbOSNCWXRbgS7eGxete/WzU6uwJrFK3m8XNItUNOVrNXZYvZeKNwiYsyftLD6qVTg6Y2XE81QyHhojrVtW1h6urOml8w7A3w4YgDmAt9c33N/f4bc+PuFn333E85cv+P77H/H88gYcE29z4uYO3G7tIvlyHNbYjzjWfdEqTvQUc2/GXKVhL7ySVEi9JnJKXqxoEpJpgOS/XHHq3LjaMqH5RM6DMkHuPeVsmLZCR9oa3U1z/vI3HVH81ECEthn9WyeJIu/Xq3vUqlZD6aLNPBSKFFZcK5BtUBvLmOTUMVBixn3wRf5y2HqgjHyeByglMPIFWU8YlEMKVenurEz3frCihhmXbJ/9ajCGmrfEYME4Ryi/BAHVfNGmDdlvroRMwtRxUtbf4SLDurwZ/cqBMiIBlB40rNMJk59DwXEeyjk90yEd9UbXksfdZtSfTVNoWoR+jBtwu+FXt1/jv3n47/GHP/4Bfv/ld3HndwuHN2MwOV+mdG7yUP00+duzvyN1xJ4pZVt5Las8KnNnQAsmKA63usnvQE/dVbqcmXDZ60cbC3XckV2o+efprACQp5Ok3OyVn5przzfntO1OwfpMFP5vGFAGaS7e+ykO3V5X/q7lL/i8NQIdx/biRZ3VVuzpkIiIbrw8KzOkvlugEgrOxHjJNno/SvVZr14Ua3c63xswjRgBfopJSDHryRPSgqFzfwhpJnWmMFmAVzoePZLBjutcpR2AuhaTh6wozXXQCXdeAJyD0Ena5e0idNS+a1EjS4Mmo4tpEK5St9vA09Mjvvv0qa1sppwbeoTOJWVOuvn2+oq31zcAnumWba9M0J3dL8UxZWWtq0K2vz9kv+rg+tS2ax1qOGIctja8+qXaGJw3cSJUH76j2FMBpEKrjqyVSW80YzELZaYBm73qq8+UJxlM6Zu1VQlY+y1Xf5U/hN/2ckoch+eC0zo5svaOZJqqxco6o5A7bdxzHxygfDZldvhusTWPRFeDH4Ac9pA9bEqscK72r/F4ZqbvOdaq8hgDBzRIsyg2eRGze62ExffaZ/IngwVqGtQqVsjjlFSV4GMe515O2SxHMXWKVTCFvUwcdnlRV3q4KmIYGBhjwg+sCO+tvH4XR46d52oZgEzpzfuExsLIMQaO48APP3zG6+uBDx/u8fjhAz48fsTzyzM+//A9fvzxFfM4gOOA+cgj7Jf4Bw1j71heL2IisfNIUDj7y6Syfm7tJe4zSmfOyWKMmBtqVgHYiMNIPctzXnQ1Pec99bHlWNgeea1wQM1+WeGEzjN7IquxKJ7RqOj6Lfh5jmI84TklVTcylQdXsRm6KYVPVZP0uzCtaOgxQVZfh2zHOEeVyVHue7fqC7ADVgbIhk6i2zYsT5V5U91YLTAwuhkgkrq+JqNO8Ou4YPJ7VRuyN4OGRHAGBeKdVXpudRQfM7PZhPj8dwwJyJOfmu4PyNBAZei0ZRd46uLUJxk8I2FCHuQo+6zeHfN4i+wrIasceEV6GBzTxNYUPTgyTRbAzTGH48/u/gJ/Zn+Bf/Xrf4Hvvnx3mh/VwzXnpf9ddLtySm4xyEB
PlMvIrslUBvE3W7T0wnbyc00E6r7A0m+k9Xt7LTfmLHtA5Auil2quOY81RtXr71T/k59vzmkDdPAKMO+8aBefb4/gaGPQv9NzZbXmx1ffvT8z5wiBlBKFsN4lc2t5ASaWMd1TsTO31q/tmESQ7PTuXhVXBXtOL8oee2fM3QnudeRIvKvOPoA+l80AC1owmJb4BmQ0u29olXisKhYU7Vsv0jArxXKa7Use1RZLcGvldn03DLi7u+Hp6RGPj0/vBC/675XuQyW0ovUvLy/48fOXZYzxvqQ8kc/aCht7JWRdfXSAqXGFnSZAV1QM9dz4pHFm1+aNKGmYbcq7IsqrprHf6p3fzVYKXinjmuKmunPwOwtAzQuoix7Fhbp/oPpZPLe1RaqIHDLyqtWkXIthpDe3lCwgHTgq96bQcoJWSpPOizIMU6HXieizjDfKmzsw5yYji3ZN1vg7Vw1AxS8zbppqicCkqI57XYJmNGKIBzqfZj0tcX8U/mjo7Y5dnxsxFNpnlUXQg8PkiBxpGEfiFPN/pLttZamwWb8IRaJMEGpB/DrpdczuCGfbswewOD5mFHDe6XQDayXu5fkVry+v+PLjZ9w93OP+/g6fPv0W7u6f8fnHzzhe3zDnWnkbvCfOyPIe91362s+WvG2ArGZ3+AuqiXHX9BJ5mTotDsTBnGFMFkJvdh6AOO3R1XDtAaLkx9SJ69MxgtcmAOPeQpf+IBlB96n25z2druBoSZVFmUgLFcWh2Jd4lHgsl/tyfEG7ndcrGBd18OTHNBYD3ajXPDDtdDSkjOMdO6cITAyS4JbI8cJVvnuhK/lF+71jaFfAWt/2XnvKoax+JgWlvzpGS513oayAOI3YQ3lkGKKxAbdtkPd6Sl/2IWWUdgqdFd/e1b6pfqxVUqhOD+ywMZoDUtMkmDGs7BdHpXyWWJYuj7vSbCX84E+/+7f4x/aP8OnlET87fnYhGzVsEwfrvMZ1pnPbVsJTnCHZEAjn8yqBxhA0UZy5klQXvbfqXvbCRZBMK39X5nvdV3/vJMoDtxRHpZmr7Vr6fJNOG593nZpmVNv2bilwOIpB9+//zn3h3/ygM2JtZL6eMAKYeuc979n7aWQpdbuhw8/6Xhckw444fQotItVS0wLI2Yzgur7R6s59a/LFSUA2MONPNVa1XAFHawpmXDUs1K/DhcIIDYJeKYWd+fMdq3S0fmCBZ/e6kajao5yUU464GH9psAAn3tFBLhZ1fHh8xIcP9/jw8IC73KLTeeg26IDVqiESYBeff3l5xvPnz5hHRfR3gBSVhjJ2vMlTOa7d4J2ku+seymqhq5soA8/UKG8tF526k+GymtqpX70Uw6RxdMlV/hLzTHrpRvvaA5WdyQEUXVVJc05HB1wxVs6ZAaUh1ztx4pTgW0uD5JyMAV62l6uM6gDMUtpXm6wBGsU14xmJJ38ZzZyeknKTtppuFHqo0uGqV65tm6YJkkFEUGm8qKHZZHzVu1Z3lpIbZkljTioVIPfMjU0ml/kkeyYgvGFLgSblOX8gn4lu2YS4VrsXAs/ofzuynvdYxZMrgyolHig2whlKuk8M3HLONH2+9odussSLokVf1JjXitLL4Xh5fcMYA7e7exgm7u9uGGPg9eXA8fa2TpmM+tZBCoF3AG5jrACKF71mGjlF2H0OkmY0naXvrafFnj0DenNQ9jRLn3tQrlEGVDK1gseVr5lGrQ1btrbq721/CXmhqZawdHPlxKm32LJd9q0SHT15FxCsYi9q4pMGpNveDZYpw9XB1Wwtq5g6RZ6bXdN0Y39kTZ7KGtlVT+ZIpzTnwYhuCiwVENHh1gA3XZyfqX5DzoO0FjrW6307G/SdVy/Gy4wRr7fKLtP3pGJi/Lk2COUEt4s+Tb6Dnm7YeKp0FtXWglKD3W6dNHbRj3fm1VEnUxpcth6HXTsc//d3f47Htw94evsl/Jj4T3/4kxrpdbXRZPHIZrVey+4Eur3gedhVzbVHuibK1+XcpI45t5N86aHHA5nqnjhvZdQRrM9375HlNr5Mm0Fd1woepI2g/PWV55t22rrNLYRJ2nQDqT0X405xXpe6/LQO5Ho4Da29XJ8gpiTg4mhYOlqW5RQsq0mZ1tV8HlurS+naujUghzBHY3rPnVwCjEw5I4CsL9j9VYc1IXOoMCxlmAJiSWXaY8RvoRLadLJYHaRU40l90DD+DLfeXtiArXWolEypTI4nQD0ja72dlr6wqNhoq0/qLqgxH9/NAoBhho9PH/Dpu0+45emEB6bfVAcBWBdJFs31aOpFrx8/f8HnHz8LiTxfb7ozXuiGwU6LUi0VIPAMGuiEcsZXdlbxk7ah9elfZeAWx5bRDylhSa88JU+VUdDYyY/SL10+6Xwr9SuBRDY6COvn1XCJjUT+fSvnaHtKgOVofBXDlF8pgQ0nRAFh7CVjKGFAhlFbONOlmHeQsTkGk/ICYxGFAYurAwAaq1V2tZWrnQAqhVfpy8lTmgTNYm4X/PYrRz0vU/TIlolTCcF9SaF8Uc5BOfKbwabwMJfjNMbAnEfdOxVvc/Ui8Z20dKbqOQZGHoBosfS1VjXbDU3FUoJxw1b/j0Bonw7cumxZzjVKDoFMayWd1t6ySP2L+a8ZWHw+p8NfX5sDHr/gcM8rWcwnIPM7jLLF1aAp9RIbdO0+2SYJP1MLWfG/eroyMdQfdLY4isFDWaT2nBfOWRi5tdrH4Ihj617J2NoUxwmqfggWnNTPhv2LFMXxI8eI6vNWlt8vx72+tqTJ2re820SpW8LA17Lru76TlnTP1XUjrchDDBz0IfbxKa6Vcum2xq7zY66d91FaBDXYCjGuSrTB7srwCi8TV1x04/aC1ZaJG+JS9U1HnNoK4jUHIL9ex6P1TCXVIVsBRwZ7veG7NlS4niuzHq2wrGB7bkuT7mlwThpufeldTMTMOvKuuGQei0Aa8HL/iuf7F7gD/8OH/xk/f/5t/NPPf1j7a12r4h/9ZNbkN/RHHSddBCm7AiU3q6OgLBDHS52oNohaC+o2RxgwFwewegRstOmf/7RnNwlQPbqo9/3nm3PadvyrL8B5x8la2540Mq0b/6Ex3qHzScqivSGflcXk2k7rQ/y+HQyQSv6sLVrdTea9TgEjJK4ozLms1u1kVkfLJPAUAi0v+z9yY/CqZKKiLjyKurdTSjoNsyBMi+6L4ijsotBZApDuwC4qbWBDJWU9f9yz3gvaXMyPeYxJGixl5zkW0luGU/3bhLB4oiswgmeezBmYNm6Gxw+P+O63PuGml2dGhD2LZ/usk9taVyvHnHh5fsXnH790g5sGTE6pt88z5UPmVfWxKvnVKi+ZlsEH3Tx4rXwWl3pCkaaRATFAdW5KfUAUxuLb9d50y/u2+G8aGKkQMiEo37sMtsj8tf0z3vtkOdlSju179USTBNNhjw+YQli2WxjU0JW86GumuCiDlYxZTYwWWw7McchR43SuttX5NBRXtesQCUvjK9YjUECE4iW2awaCaV6Q7KuP9WcFAeac6XDfxq3RPJ3eaIOb81nHanIpXz0cwqMvtRpU3/QUSJVNAzHBNv
oFbGamAz/kmZLSavxua39kM/DWCnxOT/L4podc5CuCDXZ3gx0H4I7DJyyOyDdbqdPulYWRqzJWznJ22JEOW1gyy1C0wujFo7V30GAYNwPshjGP5RO77Btyx+02cjy5Mu4eF1Oj5iNBodilzuU8wWbJzPZtzqfZ2jsXRqOlDCqdl9MxxXHTmVJeMHoLVryw6OFFo+0hja4fByJ1S6ZXJbf22KCwl3I0oPNSzKhrRAiqD1m99TnjFEnBb8Em/mZC4+5NrW95STr1sDqlV1stTPTjoqesChEb8h3LdpKZAyAXDUJ2t2hK4xHF2+raxRyQcKbJbjnDnEHSYjZ6LA6tQ88unl1dRf2jfgUnnxra9hSi7OIF/aI/bYx68gnnfXOalxMaunjTcy0AG3IxPQ4um1MObQu5NitdAFTQiz+tpG45+I7X+wO/uPtr/OLjX8En8K+//5fwY2DOiQe/x81vCoGihy7I+S6he2ZBBq/in7ZVQY5PXuP1pDGnelJHGDBi//DIvi1SMxho5NXtIZZX095/ig5K3O81QDnZcxzv8F8835zT9u7TnJX+7AGZBjCK1wIq7wVuKvqDvO8GmyB0njozVxp5LryaRuFpYADIFEY9ktXSSK5l3VVG92lYSPfOWJlmRDGznmZTAFkNcvnZUyj2PvdUjwTmfvFK6PYl/NqrHEeRSPpyFleNTuzRv3Jmgiatt9Yq1AikdtKEGMwdV2Wb8yidaqZF/GI0VuIN03fivbzU1Qy3MfDw4R4fPz6ucdgIw7QDkMHlMkiAeezLiZ6Yx4Hnl1c8P3+JO/jEmJORVVpd5+lyAhSQaxIcRTs6HWsurM2dGt1CXp0CSDNIA0MsHQ0mpBEaBdWQQP/qgkPrt+L92oyddyCSwsID1vhVma/92Hik2tuP9M4qnK4kaUPjjYa5gncZOU2hk5BpXBbPG5BBF16nYfCKbOg+BmknP0tmRtF7hzbJ/cvUTBTOwC0Pz6EBPmzkCZHAwjMT4pUy8xXlBOo4eB3zGk3tWBGGmLmKaGn02Nb3om8hTLXNVTPH9CPS5Gp87dTbVVka3iY80pxv2cOovM/ZSpPDc6ZhAO5uN7y+vS3n8ZiwYesEWcVtK+VOXknHW+XYsYx6GXl1aiOQS73h9A/HWnUDoXxs2JSdwYBh4kBf7Sq6D9hawXRUUMEczahEBZXcAGupiRe6s+F30D/Kj9stjS5dj2vG3nZYXOFVueihEupUQMh8skw6erIygk73XZQ0GJiosEURGNAh/vJ0vXQ6x5A93Nz3K4C56zwvOnY7SZQYsAJkXgE9bNhEnZ++L1CZEoHpiuU7eHLLw6qnLnT/KY/JLx3LNo0rRM/0fHbC6l0Xemv9ah/uuLlpdiGjZ5tjfwedhkny7OYFf9NwgfIky2j9PL31vT2KDXHCMRtA2BzUv6a0AzKYoOPPFoNAGYg0IO5Hwr/57T9dsz4n/uD59/Hx9WM6Up9eP+JpPqKC+udx9zRCkdl3HKe2kkbnbKghX23ME17EZ3E4mfoZ6uxfMeg7vb989/2ndDx58dJNkOfvj9OGhh1NoM6PX3+uDsPFd++IzSUh3ydsGQRpTLU+f31CW7oKFWBDJzLpXi42NwIywFSFSO2zDSIVXbRJJST2Wym9+L6PnQJFp0TKCbCWUkYanK2eAFIeNU5Aa0q20WjXSGynnOPQPaBRVsBXzNMFr9Cp7hKyE5N1xXHFCLLCqzxnADBwdxt4fHzA4+MH3G4K/qNdV2CYOAJUfB4NmI458fb2hucvL3h9fU3DtdOixqj02sEmI5BNty7joFNfeWgBXuofq8Vlnbe9PxesU38KqTPwoc0ZypAz5fXqOFf1TE/VbCqH8un59x7oUa7VNLQ1/tpH1t+saHlVR6PSYMPhmaPfVNIZS5TdDC1FMRNraDzFvDZOtvKv8tNThgGVLpq8d51jqAE58sZCV7p6/uuI1LJomPuGBmqvVu6DvLyLpuRNU2Gyay4pO3y7vUQXSmbM9PMziZfRoQhQfdcUt4jZpqwopif5tnoWKWQFFzTmSDUTkAp+ug2MOdYqypyAG2Y4IeVKlHPgWNjvvhwsOq+Kg4t3vCLOgcN9fzNS+JK+kftCc3IAACAASURBVK3hAG63Wx5CUo/BrPiQEfkk0IY3ievx2qKNrnLFXKUxqNi4Ia2CMDxoUPpuitN9CnTyZ6SAep5K5zGX1mbZOYZdN7MqceIJVBaeboZqAr+KOy3nxeANT4q3NhnR4A1rGguTzKVe1jE9Vli1aIV5KvWOvCL8pYTehMcIMFfYBaz+ZJu7TjK5Q0dwmbgBoB+9L99XJSHynI+9A4LfhZjgQGp0tfqZhnleS9lxijqudYIkIwa1LtBpKLxXjC7HTRArowYiODKSzmfYpiaCV3qKb2Boj6eLrDJoYtrfalftvuRXpe/GM2vOfQWgHfCb4RdPf435+Fe5Ovvdy3f4+Pa4ZPQAfvvtt/Dz198+kc9PE17t7qtRa0SC1Ly+hbqX6bJGvldlGIGvg/ScaBN9paaI5Xb1guh3CbR9zRHjfP6UHVvA3zOnDSi+2j6FSvX1O7+5nivZy2/2pRo1Ji5Aoz7rqyfluOmqhZSMKBWNlr3ubfF863OtFv2mhwGF2jESIBXlGx5lS5ZgqZA7KRAJvAGCvhSiprlx3MMZ6Nw7G4pdgJoAp31yd7lipECvaGkXV3l5DWNVkv2m/DkKXpuiv3o2xZGxquiocGQasHcGPD19wIcPDxjjlip8d+gXTVea3Jwzv59z4u2Y8Am8vb3i9e0tVo4sFXVjLCu6JRg3JCmHlqCc73KFROi4H+oAEGxKAW86XtTlRr441cYB2SofdamicZ0fccqt2iuDqQqWzeBZNvmG80JOXsKAVNHCrxWwK2XflFco1MbK1JZWSoZKZIxO0zKeRA8m7k8RvdnwA9gvvCd/70ZQJ239vua8TtsMpZu8S0e0B4N0NZL7yFZ3R35+FdgpGdUVxPWfJi4Ve9pJLkomV4mDBn4cVV88AkhuqlxCvyGodeVuUIOBBp6BKeAmZT3GUzZED6cVT3Ja9hUInY+a09vdDW9HyAcdrkgvhdVpuDyef5GIl4wnBydOFy0ZFNMJUsncjBVi5BjLYWvvB11Nr2zYg3EVJHNgXUXAbwZXW5D80/Vm0VPN7tyft9MRWCtPOeYdt89YyMNrkt+D93Vv9eJq6sbrmHvumUueKzxPfA8D+Wp/HZ3EwjEkFgfZOqalvAlIWqFSwyVHo0hDb52nvFAbgFwEnDqVuiTockrf3pz56u0OakAdAnNBTJCMVNLIskWGDQ9k3PyeWRyZMm+2TijV1d3UFZ4nFU4r7G+pjcRzkpWwcDL+gmeJw5xj/h0DVAotmqiSVctIZNOkjMISRx7tZaA4bbGiK/WUbzTE1j9gscFe9mK6Wi9dPiBPDyBXYn94/BHfz+/Xuw78zduv8OfHX4bumfj56+/gj778gczqYjpxq6W9okmlOM/gz/VpZRY5ui0n/DhGXqStdmEF3H7zk/yw02VzMH/TI7cQvfv8vXPa9kc3l189zWlW4PvpLSxjz
moCz3b8iWW3v62YQD5VB0P7eLlK0WRMUt+IicmoNEBEGW39TjvcCzstQEpTQNoIpU9lHK0/aDjW6Wj1os+61Lrd+xINN+VmNV80FmXIzbxQPHcrJ1IN+UY/VfQOMeLW33q8cu3Vw+Wzq8YagcuYYvxiON8/3OG7pw+4u3uIQ0fQjJwelfGF3bbSo+aceH09cBwTb28HXl5ewgirMqShqjr203SuzTZ83tIRosIcXxAq9VZe/uXlbOX87GqVE42aoDRWNpqmvdII0anMgwoyiiX1JECrjq1WWpppdjo0335oFOm59aeidXQMyiiiEk3l6uSKkMlMHURnkobrIoANLDg+X8ZuGNNpxpAW7hixZ0y6tQyJpB21PT+jAUMHRCk0+kyZ1WZvbO/ycBJ1YNu532WI0UFMugLISbC1t+QmONUGhKWcuaHfYrXBU0l7GBgOOA8nWWVWW1yNqjlKwyqpJf1DM3WVFDWWLMV0Sgsfytq+sH3tjiDKFRczpsENHPMNgOF4O3C7M3hudQ3ngqjjAGzkPjfPtthPy5+UjxJhMknNzZzr8JEZPHa7u2XgLfnZHe6HnKCrSKLKBwocJ/rlGFBG1RjMONiRDKV3hN7J3zn2Pu72YZTg0S46x7ux4LkKIhiColOtdG+rgrHq13WmZZnkOQN4cFO2mQ4sy9VYZg6+aClw2gsA2I/0V1zPFfM0xIspOj5KnS0gUN9lECGVZuFK4nE7BCPoajEPDnArhX7fHwlBXAXIrp6GY/GR8QRthmNKTjI4bDVXbE7BmDCRMjhiZVBUncPzfnH2xaMyHstwRpWYk5r185CSuautkq+xldgUkgp/O3hFRueozI6hewMhdHb50ZRXYkztZ0akaHP1HLDbbbGeT8yHN/zob6vwdPzZw1/gz57+Eu6Oe7/Dv/71vyobN5q687vkhTUkz74seejZDYC1ftrN4Ae5fo3VbmO71T5mMWHhzL+N0pdOSKfN/l1l1b3zyjvPt+W0iYF7xbDbywVWXxl1Eb3ppZ/0pAOgm0HRYW0xvL8zNaHCwtCZ6WBWPd6Yob4DfNMhnOAu0DTwy2HT6tTI2cbUmwLTONSAUYUBK1Dr6i8EXMBO4l0JAKQDmy2DIj7ZQbgsGijFVUH3sVKB1u/6jmDqSVhanrN8kadnFqpnikuOP+2CHofNcYVj8/Bwj49Pj7i/v4tI46INL/7WhweWmB/Cs47bDbHC9rZOktR+x/Az0sReedFcI4+doBKdk+EpkOuAT86K0ogA7RTNTTZE8ffVWfRyjcq9M3U4hUhiZ6jTnTvF//W35srbCSToVKMuyN70Hx0PM2t3ua0Ajdx5piCkYwllmzzNSClZ2evd2gdU9TUR4Riznaq3pEGUUABAyrQB8ANqXLKrxRNeByrsQCrYWv2umbT+qhjEHdNIzxFjW2lBqEuRk8wDPGyp+KEkv9IvK/UN1eLaZ3cymha/ZqqxAFU/kKHwTkI/UV7nhILSwLfRmMaF3nZmxn1sa1+bu+M43gC7w20dDQm7VT25kptMifr8QuG14E4IHPGBexXd5zrR8jZqNZc6QEAiD22RVYiOu8Sy/mWTW0PKTOJHN8uCZwA6NukoBY/xNEnj3O6GffSF8QrWaaNWfRKZPDQh8aYTL2nLlbKWpaD0tvNgV/+WVI6oJ/nWkP3mKlrhr+ecGdvRIIjQ82tGYNs60A5L884qCvw8HTruyuSWiiorcuBZW9J8n+z81yCyH/pTgnINbHsV748PEgBwxEXzyN8NVnd+HhOzNbH4Y23NWvMyT10gCAk+uo5dDiZRS4W6V7DO8txZ8hrR9iyzZ2VcaMd2DHHqt7QOWPnu5Bd219l/FHCFjHOIDuqasuXO87BPEMC149VlSnT0avYxpLN/A3CL7CI3TLzhv/vwbzCnp030eDzgP/n8Jyv1N2q59zt8mk9gACtX41uPiteWw+baBfgR5UwXVwoY6HAikEn35NVqGlegsbUsn3j/bpfVn5Ii+W05bXzeEUwFFdKsGRAZUbzUVfJKKV4p9vUyIjDttXeZt75OcLftnYxK7OXLuHLvxNDVOjUKu6iXoFVTEZ1N3CjhWoLaYOmEl1k2CdIJtADDO+0LryKyIqAlEuXaF++wlZCUZOhtJxDw/bR4z/CWAwHh8ZzKZzI8RtKaiygGs9efaGtVaYSsn3f3d3h6ekiHbdAsidWBK77LSJWv1bS3SIP88vyC4ziSFBntAy9j7Wtd1is9M7sQoCl+LawOHfq7BOVFbzFeCHZqPEn7pbqQ+50AKsyYm6hX+VgjeFrP1PYA8NhnHSB/M/7TlKA8NIJDkZ6iaEL8kqVK9VL5W/2d3anju4xsuwO+p85Zu4g1ma2Nm0rRypYgU8T3Vl/keN1F7uUEMWidUoTBizz9NN5iPWkL+ESuzMlF34ssVFKS+GKl8EiDtaFeRhm4s06U9BxfrUjwX9axaN/S9XK8G3YIneoz4RORJa5Ou/d7L9F6gToYAoy0u6SwFia3wEMY8QHqKSbjdlt1HXH40LHoy8AJ5wCG5K+R1wt41xHRyXIK6sMxALOxZGgiU4XG3Yj0VzQQZb0Guc+PxNvmelfmysYphylPnO+ewpmBEYRBTpFIGkqFXod35SqOmcwU6VXOQz/hz0RhIfvDPnJvzgosjOS5SZqELs1h7fBixTUbPMk75JPtkl+vV9ku7+/an3Iq92dNvtZMXi96kwyFJao/idkoqIuxilykUbv+LiwVFaSp/KmENFy09TsnxKQImTkT5GRQu321nhnXcJSO9JQr1rfmuKfmZurr1kPqmT4NHSuBwr4K2gRNsr3zPJ4fBXMRSh1fXt6MnKMRacxTHMyKs3j+3ELdqxX3tjDX9SN1ONsSW0z+qTo2vdBGFf/ayD3ZaxodccAj3u7e8L/c/6+xCLJ6+HR8wO+//ENQlz74Pf7R6z+IPl7QVPR3tdFxOWADdeCa9lTQ04Lzkqlt2doo/f+1ufq7PN+M00YAf89xOr+fyL0+SPD7TcR5XzCKmUp4W5ELgL0WtI0VY/L373MVRICepZi+U5vFu/JNBXoi2AaC0d5uwMo3J3Jpyp4Oh+S9GnIz/E7GkQhlUxhqIFqNSdrs/VJFXSmN7Q0F8uiwF7GiewJoe0PRDQLp/p2dnNs0JaruqHYMx+PjIx6fHnF/d5dGqnntpYAfsHGDzyPvxmPXaCS/HhOvL294fV3/1arJGmTOuPK/Tod8zHcnIPx2kate+jrRnU7olXSxF3VRa4lncV0zVzrv0lDIP72mJ42nzsFu3qbQBA860DI1g8C8A68aKy5gbrDBRi2IRqOh5lssmhZISeOaTpWmK1HTZZ8d69hwSdsjAMTfpVrXM6xfDZDpx94NlbJFii9o0Krc7qtLq5smdUiqI6wipEsYu4EDF3hWAwZprC3arMM2KrBlqHvdipfYF+cHTIfU1nQ/ygVOr3TiKWmEceJa8pe0Z5DxhGPiG96nY7h6tyLE5eC1Ex2VI73wIk8b3XCVmHl3GziCZsQEM9TVN5wjQ+5xwyJP4gFb1/2P8zhwu7vD/Yd74Jg4psOP
A0cUHKMfOkL+G4hVTK6U8kRJrpJJUIXtamisnHtreqaMVq6kDaWYCD0x35Nga2VNU8PEOE5x9PqsRiUYdw7gpbgY5331rwdDUcGy6IdGyznfdMSu0TMCgyErpVes6jCTK03KaV+8VKi5BjS6LJvJG6ILdyXPdxtwW/6uq83VPtvgu0IYdn6rc3LV3GrM9ZQcZ31XfdX3Q7F1nc35LEwqOF1XkNBnFCrncEmiDP7t/TR20QpzNmskNIj0uvgEQKV378X2cdL5zH8uCmUfgiSiOH0Cc0jgcNOOLKLonf3fdHLqcSvu7CYoHeXCtJQdcdxYF/leNYSf5sVyTMu5BtwmuFXg9faKf//hLyIACdzmDf/x9W+qxsDSf/j6c/z85XeQeyrNcl+rBjoXlkd7eh1GC0Sg1U/8qswykcmL5yf55+8834zT9p6zpkJ0+bl4Omq0XdVtZGZhErWx2jvaUIBCKsl2eeMJ6tvfuhGRvxUGKYPo+9HNSPMwItI2nlyKVTrIS2UA1pi6Y7o+91Zsi/h43zfFz/qI8uXmfF6BXPv8nTnnl7rKsI9jL6p4lODjivt1tPpl/+TRGTupCvlAo5l6ciawAPnp6QM+Pj3hdrvD3Vjfr+jQAORSWp/lgNGITgfJJ/yYeH058Pp2xFxHukAC53lMPBym2LbzkLXRhTGhCkuKJV1TfjqveXxOY6KAfX3fs4tLXnKO0Nn3fC89Vw2kCijOuuwXq1TckqumecQoiG/FvoA7DtmbaRpYQSmAxBsZkTq0y7cTmfMwLvPiQ6i9AT0NRKVK9+wueBOedGCMdX9XGllgJLn6XZVtFE3aM//Tsn19M42s3eCN/qm5rGj4XqBfA27B9YLzSzHup8hqxsEqb2mIqbFRcs/1Wt5FtjDAPY6xN71frnCGF+/SCNohzrHzFso8tD43mvo11eEVZVNGcPCvhK91j5ePShFiFY6Jh/t73N0G3g45AREGu0V/jM4kgEh1fLy/xzEnHu7vYGPg8+dnvL2+4nhbK/jrpMr9WZ8oPi1Zm53vaWDG7FRKvCUEeaxQaYDL4BLw0XfzF8C8pTc2Y174o1Yay4BMY1GK5Gw1u0AMtfh9TwH1AALlA49LyKfVHXrNQnCuENLhwXZCIhiLwAommGQgeONN48uIPWEyRySDBjetvSHXXCbp6ChDcF7S3FXvRjnFYWh9kkOe+lGVyeC4dw7TDslHX7MViulkfGfO5bgdHumDEpQTeyLT04X3agYDi6W9fqCVjkFWeHgWQsx9wzEdAsuHLHhgXDrM+6DzHh4TpaS04Zyu97iHCwBuY+D8BIYlkrI/FcxQHa08uCjr7SWjsow5ZHqkXsXiWllOMzExZNi9vbCCVwsrEipvA7cx4MNww4Bj4m8fvs8sJZ4w9uv7X+PfPf37iC+V/gIMD/MB/+UP/6KEMBVSI/7at633xIrBtDBtoILCgSXmeem4KQH+js8347S997znsP3Ud/8/Pck8YriVvo1G45/mDGifurhdpyzwXf2+M8UZnzbArxLSVhkO+164rENAukFDOn3BaAQ5w9pPFeX3dnfHOJNQvJhYacjtylzqXp8PwSFdbmbV0laXqQUfE+3oW6tBdtJdRATsVN9GLkddIE26Ci844tLsxw/4+PSI2+1urZ5ZAXk16UgKGC8cpjU/4/CRN3yJY/2pNGYrf1WnGA4yt6l8A2s9SSCGbdCrotRWZban3mVTBP4qtveBn9kAeIg5FTsvX+Z4fKszVWxd1kUmbw3uhyO0h2VFgHMlLOTvJkqW1eQ+muhc0rJptFKgboAfsq9N69l7poEaROoVjXhd3OI7ul9uM8kmmPpTT05DHHG+HMchSlXqRsm5GnOcgaBYnMRWRk+mA0ubloYKpB55yajENcWuRrWMRpMYQUV6294f0h1ldpB3pnulAflabWqrLnGIAPex3VB9TQMk5kij+GnsybiU5Sfq0mnyLg/XYB/yW5NVHit6WHwXpsy6NsLLoR23G7gCeP/hHr/16Qkf7u/xesy4JmQ5YLdheDkOPH14wMP9PQ53vLy84PXzD/jh8zO+fP6SuoKGpR40UysqXEWs1GUHYM4V0o6lmYolzNX0UYDpjPThNBnF8dMagJCLgUwZpZOaczEnfFiTgdV/zo4VjyndUb8kBtsAV+hrHts/BY824OaFu1f0cuQpkslT3nFZg08gC1JvzAMz5qj2Fyl1IPeuGCooUnRobSX/hWS5r8BGnsQasidKsa2jkqSha3u6I7KfM9Jta2/VSF26KYkOIO0RIz23vS154r11lHmPl2gvNBVWrAz4ke+uvcWla5bjVsH5TFUP2iY+sc4khzTIMY6OSyRdDVeQI05VXUMmETZiKNBQ9trHQYmTrlkdOuIgqeLDXrlJHRxRsU3wSY6h963ru8JoTcXs8947MOSzmRRefRlj4HaLDIa5sgMGEAFPjxR6Q6D4qs8d7mvV3odj2hF7SpdupY57wSv+64f/FtyusuTO8bvP/wB/9PKPAV9p+9OBe7vDnV+7Tn1PW/wdx9G+n7L8/+755p22/dl5Yvu2GPXEja6vYS+qTpjgZH74LsFbOyKsW93qkGUky6/yoE+DijKimKrhHFfl3WvXvDFgT3skIAnIyBicyCY6IDfw4mjjX9UzWpxoDNbO3080F1DIT1LBeRjfAaRF2k6jANc26uZUAXCDZI7lz6u7onaY5IjzzaYTvYApgOzudoePHz/EHWx3+e5k1CeMNU1p2fXUOubf8fZ24Pn5Fa+vb1lPdoNkltMq0rA2ncOaheKuriBbfK05Qaxa53H1P43mYJGUt+yjQTZybUT0/llTeBcSEMaJSxQXVKIif8lhrnVF39PA2akf9MkVtQ0IzJJPutnOmev0MlhGA3UsXU+JceNah8yhGHNZNh1xzjMAnyVucNgGipVWGb2z6nmNqHDJqWTDsKp5FbnOOlMTnwI/6bDVCJB2JudTDQ6LVWVXrDvXq0mYJatzYyc1OyxOtfTEkLbCXCAhJxb2VbMdcRtmbWVZ6lYWKhArica+cYw5gDo1s1YXNYWITbFfHne4xUqQO2w6zG6wuwd8uF/pcfd3dxh3y7F7e33B8faGt2Piy+fP+OHHz/jy+RnH8ZZ0zkDJJoBq6HsSQvA+R1n6ROfAZVbanlTyInVjykSPgie3eewPnHZ6N+dqjNJTTr4budKZuB3jOmVIEDMH4pJ1F9pom4X9HCv3E5LHHY6BSjMlXOeqhLBQpt8KI+e9es7sgVF6nhPBg4w2VaZ8UwEnk7kQRM9VgI0QnGeWG30fX7xYMpQdIZ2WAw2ruTTt2NbjZdwLH/SZQc7pRF4aT3kinyUGNFurdJ/e0BXXeuf+qfQ596GU6mjzlZ8J4crZtqzH0AfNzyrXhpVi012dPqcn4VfTtKUpu6iPDu5xrPvUhrXgkryYdfXes9Eul6vq4pXi+U7EFQRZvL5OqV0TcAvC6xxeXxYegZvbwLh1RhKYWm1mNlPInS3cuMU2Xb9ZrsYljQiCAHxO/PXH/4i/fvollOV//+3n+J2X314jiusCyj9YPz8dH/F
xPrXe/f/grwH4e+i0qYIBzgpmexs7y52f7fuTcxYmwlcpzsnirWe74XTV172+pnrydzWmF89ZKY2r/hfLZl3Zfg0RZC7XDopAiU6MMmGcTO2PGD8nzWFnWmyTpcfsVxsCYpegI1TyRqH2fQpp0CDvWhWjVQ2HudNIxpInK5pBtY4aIQBwN274+PSQd7Ctdyamc+WQ+48cuc4k5T0UtXs4bC8veH17LSAzOx1jjzD2+JFFGiZ0hUbG1u6uE9oyeniahzAM1InoStWLmqkoLce1Xg0gFNq1Db+Jk6SrZfcdtB9SRbeh5+EF2S+PLspcbYbdMojmMu6SBpUWtd4xLVF6bOM5wFYKZe1rzysoijrNGorx978lwUzoqPJDGjb7Ie/oi6NYcgycg2IDk/r5Q8HUggd0fmaq0Lqs3JNXMnVLGIu9UZdnKeAaMzFnep2gl/MmlLIwiHQWyE/kxdXjMgoQ8rOmX64riD5W+qFl+R6UCJmedaWArjaaWUZqOQ/liNaHNN7VSF1/U1esetVgSh7VLrusCiptYiXsOCo98Vd/+z1uP/yI2+0ODx/ucXd3DwPw/OULjuPA29uBY64rQ9aplDywQPBV5q5rFktILszqs5wU3VVqCHnKssimGsb7ninSzB0R2Y8V0ZgHmGYdKP2IJn1c5P0KDKTk579kQg/DNrEEWI5y432pW+2GlN81RzwYQueRvMFTYT343wMMBw9jAHJ+N9O6Br2Nn/Klw8ZVWX6duegDLTBluu7BKhyIO93q6woQloL2dQfsZGlPx3TdO24lk3AMq0ASgH7wRfZnES1xY2j/LNvvoaIuOTzuH+5bNo4leSz6nrg0WL9nUKtdrwHRmdRls/SfC/1FAwq1fdEzgQKls4z/yPxtdKHsLAiXIKXYUAL7VCxrHudIxxraPt9rNodyh2TYWFH5FNbc7WnKN5jaH6f4EtcjPTh5WfKhDQzcBhanTO3c7akH8k/qzZinYUWzm4UOCV2oh6aIuQuba+3vl3d/i18+/q1Mgzhkc/3+s+M7fDw+Nlr83vM/wKfjkxL4YjJ/8/PNOm20JdRBu3paYIZiKmWY2of2Wbay/Yw69F1oA70jvX8EirZukUqoT5T2Idlb2jiLwD7Wtkn83LXW217/RbtplOTwy3CIr5lu0zbk6xhinB4Sot3JSKVE51QxZ5vJ/JUitv6Miz8TVEvRVhtsBAlQrI9GlU57CXk4oo5yeIALarFh1hwgEG3fbgOPjw94+PCA2xBTzCIlLa0Qmbeo6xDLw90xjwPPz894eVmHkzSyYavGamzOqdydlHivUcx9myPSblv/POEtDRhv1x9kvj+Vlnul+mx8m/scqrpoqlZEqSC3oDmyydZ5po16vZN81GlA68I4hniVF+i2d3dt0IRR3jGOW82EboTo3xmJbA57qqlTN7K0oe8HqckEYH0vTZs3K/nOg1RIjnJy0gez7U4omRMWJI8wGroONvH8MkXReJx9RyMP+abBupR4pRDCKgJcGQqRanUKFi36sL02SRHBJd9w9SKxwQCfR9Qf8xMOJQNlpGHtwyoGbH0LGlUqVBHVIAe39MlDrQbXihtpPaUPKtW20WYeE1+OF4yXdUz/8/MdPjw+wN3xw69/SLlWh/MCJRI3LTC/8KD3upzmjik1x/JbU3Yu/FMCRqONe47rsJpydYqugTcKUWkwoJw8J9UWTfV6iKI8264LdtdQZq1A5RnqhRertJiMc8dTahehVRPszgdkjZwbBf0NitdQuz4jfwKU6aXP1NZRrLFAvazg1BfRU3HwTOL2dplr2T8yCCDm2gJCiuMmfDuxk9BhqR/OqX2im1skBNmGx1sNAdJ56u31/d6JEKywvps1JwHAYFBxSDmlAcuqbNDFYImFFbEtwYC+10l4B6j046CgSm1zSRt/CzP1V4qGoj9O9QQtYSYHoHgr0wPy0RjxT5wrnSsPbEk514M+QGworKqAMfFgv4tOZSz6RZ3Gz23pltS57piSLObmdfDSJoi3Iix8lCZjH6b2ZjowlmP3w92P+MF+LPsTwK8evs+0yp+9fod/9vyHRUMZxNf8HeBbctq693XZ8ZNCfuddFSDfAGZ7E7vSun7jXPjUPxre46qpq3ZE8SP2IFHhNIN6S1uIopVuQNQp46HtRWEEKPu9L9N2gdKepXnQ2tcoKEFIIqZRgWbGZcld7mkIYX9HU2nEiAnHragm7XSVmQ25fnQeMsAl+o1OyOjm+w+PWx5j4MOHB3z4sFIiWyqrY0F8pI/WHtVDORjDJw4MHMeBz5+f8fL6Fndi6byWWpDD40TxrLpcZnLfo5dgKAokZ33tHq52LPUW2qSm9lelJPIbvMdfV50VzKATnVG8xsLrj2Frf1b2p9k3/wAAIABJREFU2mQunJ9Wus+u5Bm5prlHMOejqVwyDfUJDYGglQZVmgMJpNOTxpZZwypvZDv311GHr6iRUqch6rilXvYzi5RSytP9GuFEqXtSRWws9jBGG6dTVuy6y8N1dDWpFPy/Povz0kAjuRw2oZF1I0Tna73e78DRudUnZYV8KEbHCgQx3cySV82rmkqZu8J9UkH4KRzQGxD7MDwNj5IygAe+uLTDlbuiRRkcFegRbA78U9dLcXQCcRrkKw7ccDcGjjljvwd51ZIpLVYUAZlfh5zM6ul452m9xhWLkgsexFH6QdDIkXRX0CL/yczFvAXiuLSVzqbFcfPRehJEJNSQNac5mphWMlKBYeuyzzm9Wd6vyqKxYbr0qq2PSJsRF8z71FwBIPdxRWfMsS4uv411RUGkxB+8gqMgNOaBq1vVrtERTemkKuspauC8x/is8XfpPeepfpvSW3EPKl7KVCriqIl7d9hu8W6JaEpezEzOer67cUP9qzzLf5se2mW1+KL3tztrZxmXDKQ+AamDHJ7OHJkrV5iziCWeT/msp7vKPBRURje08T4htv/B/sGhu8LOTxFYr4ooe9lqrimtYtgVuSW4zr6GY6/ZDa1rrQsUNuGRNvQKKUz3sK0leMMoCvWFA4cf5aCz7VRLVsE2GqFjMp4W73vwCOXLUfYt1r45sXf53JxUcPhtldf5mRF1NwDPtxc8+wvMHF8+fMZ/+PRL/Msf/wTfHZ/iZcPNxxZYOj/fkNOGnTfzacLwm9xQPgyaWeL015vfDIRzH8b2TogbP9puxevyR2G9MkSiu2WRt3JsI3O98/4pMrzWx9SokKHoCSNzuYdri/KJphJBk+V5qSPLSVULD+UdFvQu3DQgebqOGrPaJcXabpPtyObtT62nFSMotnbOMF90LLL25f1e6zDDw4cHPD5+wP3thnG7xWoHTWHeyLYosObxwKwzwAB3jHBPeOjI2+tr0q3m8bKnOVcUIHXWczYJTjmHolTB9ICmA5NQPU0lakxP8ErFcrW1i7TF3JF11QnNdwRTEf0nH+RF58pLMiM0LDmr/QTKbaZDnmhbZLpVOi2ippocePYTIcvFo7GZPb03dtpzDpqe8kp5XWlyiLZiXOmVi3zLAEqXCh00rLfPVRrNUn7oSsPeSqdZOmHWKZ8KGMSr1T
Ea6ZSdEXOyVrbq3p0YtijEy+ZBDJqxAjK4Kk5MNcAQTmZGTAOXZGO/OpgJqVDarIeXNScvZx+VillTOd2L6CfaBZdFAY++eqTcbeRPDKRzYck4PZxXmO9AHcATvHy8/T/tvWvMbtt3F/Qbcz3vu885raEUsMEWBWOjISYIaQpGYhAMAhLrB4IYjRUxfMGIRqPVL0QTEkyMiNEQCReLUS6pII0xaIMY/QLhlihCCU3l0qZQoKU2Qs9+nzWHH8btN+Zaz7vPof+evf1njWTv93nWM9ecY4457vP2gu0meHq6pV4a7nzs846pAxjmWLj2oPHwNoLvfbkexLYB8BxbOjyS7k/SpNCJMQpZ2LFqtginlQSlbJjPgGK5Gy6KOm+JkdfsjIgfJhEzpFG+L1W0iurqjCgDBWQTDF4JwYZqau6TyYSHwg8JIW6mMUWMK2ABsyoEe94tJ268Vz9BZMuxV7XgcNtGBoimSykg57+FMbKC6KsQHfw3Gyu/rJx4D8nbbC1CDzitg9d971RXLn4dQuiRsK2rEYdLSwSzoa+zyKIr0ibYm+oCa34Xb3BfCcHABaJO6c4Av+u+WJLCDxrLJAxsSwbrcVWx/U+8LNP1z6Q+tX3VCdIeGXvq0pfFU2jmI14seteNMzTn3QIptn1VJ+dug2qZ+ImES9Cu8UrBel3TSHtv5XNe0pMmeTDM3DGHBYdht+2g4A0TO3YP8ti3BNRXBg2K3Uf1izuYrF66TNu4kKLxd0XQgvhgcfWDUmJFUuoFr2aOie/8yu9O1fkT3v54fM2nP+n0rAWGDydok/PHPdh6vTPlECE9trMA7NgGOTty9t6yLheNI87xSB3gg0+OjUi/qPZQb7iM4ZmlgdLijIZdf1ZZfHu3LqC37wI3LIlXn30LtNuCrjwB5zhTtwZsVuXRtSG3oP22ZnwyU+fjUq5BzTylYQjcW9BbucdV/XUKRz31m9Dv0vpRn8MpfH7zjE8+sTvYaulTuANk6Il/1mWdCmDqwP3up0Te7410lWEuvJrjwIalOezWDgd9yYP53XEKnidnh6Gx8/KLRG8lFHW1ULOBNaf1ujhWkBa1Gz8cS5qd7E54vBMu5autyfqhc4izYnP2yiEo2nF/Ezd2mvTIQuc6qQycyKAES3eqKsvZeaJXI9kHrWr8e9BT2iEL3I8MtMxSATCHbGwjf1eADgpiR6P6n6qKdF8NMIcfnuJwHuL9jqqasW+bbdKavRHGPdsvfptxdUrY3EIpJTbwqyVmvldsdh5TJeOd/QVEfA9PHshExG8OTPU59R/KD4ggWqvi5HVmnKgtZusUasuy811LGO27DW4cgjRnzPB74kgsiTzzBFltbbROBm1LMtOZMceP36isfV5I7e21WXGpumfof29P6N65IFCMaWKh1kaflbbldxLTp07+5C2SEStttfVDSTT/yKjDN3jsp/NxKVkaHx+I1CNrvVLPVBX3fU+K5ux+ONMKYGzI5IK3VSuFYzRwqDvtp/LIooKqwogNfykF4XeCNyX391kFnjATRRyWN9Cd8m72aoawDQXLS9oVsRMAk7qFc9Cq61Oag9Yq760eTMK5f6n5W/Bw1tuqCNlzy9AMdydfynXzdVbwmpKXcw4nyVLeRSTuumXuiZeiZw5tyFjKh8H0YLwCzaqRaZWcJro2nTY38aVOss0qHyiIRP6By5R4QkRF7YRfZFHkVhkF+NorGcMCmqGYc/pVSkh/rfz8ZLjEKDTSOv+/au14LxJLJXOllbh/lRR0fzVWwAjAa4kA4G88/yD++tMPLrHGET6goE3eXebV11lhlmE4BCP9rVfaD+Ei47oo5kdQ9pWySYTnnEe3oavdYqHXhw8piMHsHByWkHL54HxixqbNu8IzZ6kE/LS+dyEYeNIrddn4YmzJLPdwxSrpsxtMx74chFoGL13I0iJUzxFrumYFa7ExBB+9ecbHn3yEp20DxoY4qUhSpZa6ZiOc9YejMhX3/Y63P/IW95e73bEV9OIgsahlBljEFRs7UQu50mh1Gha9ACS+vYTy6+g/ikZmkJdLnfCBrK2T6WX2h+DwtlKAfvAfSrGaIemzRV1Mw4kmPjva7gwK2MhClO5oqhkmdvKbwPC0ZBqHkOXCqRzzUOxC9dm7GXjSlGHhRn3jelNWi2zctqEZGWFvfXJdgWuNU7yTDnJyOVoj3VGyJ7lsOlqgKtL5JJKZ0yzZJtCXM3JFEgZbkNn0OAgqjpoXRigcPzoeHNG38hYIyjBj+jUKYYBTLol28Vue5qcuKyDeWR3ncj7GYiegmg6JOUmCQYJQNRb99kkuwCK3h6CadGwOg8t2zVZrrlgRVBDFdlCo/kVLFY6CDGKCVsFF7JAaDYquIYM9GVQyw11kBz2d2RPdwLTJ/Y1lNDvnR4KSfo/6k28I4QxetXi96fyg1ah9OZl0SDbtmvgYrpRMAoq2Yhh+Ih/6ReoNgVNazECsa6xEstOHacGZENvbDShG+mCxxL/4r5InKYc8Btm0tPsmi1rcDZePCRo7TbxbYo3H0dvkfWJBi5ADOA9ViFRjIPmtZvWCxzMpkBhnZdF70gJNQxsZmW+pRM1gshUg/Rr6JvZuzYmZsguiW0/LSLyb1cQy3IFarWMY0FlbjgvRN2tj+xA6hsq47ibNhvD0Mih0ZEUU27YhPAHJdwonaNzZZjwWKwokrgEIusgwfGO1D/d5n0XfLQ6IA8GDVBaNV/CLPa8TnTMhnnqIjXMltvKZKFY/doUPJ2h7F6gT50wZWQGAB/MMyieyr4dZo26QUn2lQNqzysCQ+7JksgKdNWrmIJDbOMW4yXv1+2xMD/3w/6uPpCBcyWQD0mrPNfyHznDtCw5LkuwEwcJhNWO1rMO/Le2zgQNiEy87cifWKIwaPINaXW0qk7+vNioe1oEophSfP3qDjz/+CNu2paCmUo+ssu8H6ih1B92yQROfvtzx8rKbox6K35ELh6pTjKgRfBY90eJrcSfsQKP00KyzWml+cKKAFXx3Pr14KPWux9pn0XC8gFzio7qwCb0VdwNpBe1lAHr5vEKhvDxTE3E5dmTgpZuT01lz+ihu/ACBaNtqjNLQhJLWbHM47wAvO1tpq/X66ij6byOGU4KL7ffVMUhe9vvXRKXxXTmEVlk60hl4xBlmORfSaE2suFKrOzhRp5TRzedRPj90nWlLhracaQmIQ07CkMfR7XFoTM7spzE2ZEYmpmpA295ZIVzAeqB4KV4dAqifBMvOScxudWoQt0aw2ZQ1a1miNwuV2omQOnfnw410wMC2SdI47b3a1lwT/3XGq/fy4DDGGDS6e2k3cjOW383pOg+pS1d6hv1MWWu9ji+h65f+x/sS+J31pWabcqWXxnBL0YTkI3CbofKor8m+0ab6HXtu7zmoH24bmzz7bzXuVk8cINXsC9mRlBPmvhJmHxrXQWPVWfxefA159tcX2WN5yARvViPtjswwPE0WBDkz31BAdNSJ7vYn97ctWAePBo8ALIulZSJREWPD+53F9X4eTCn8ZvzeaR9uY9yLbDPXmqd3pq4lEsDbSfshCvFAa
HJwxjqWia5dXwJaK+vCRgfOXI8nc2QJk0oeFt1iyjHpn5ZOXDeSHY1/0xgdt82WPU8RT/pYHbr76bljIFIMScv8zEJWcsWycJBghV8zoMWP/CMEiLt8aV92dbXzlabs8d5r08+y9eQe2/c628EHYdrhMlsUyPFffLhmwVGnjE8qodMPKKlEWyzvJCcBmCVjNb7vhg8iaEtEl4xWQRwP/XqXyimWHJhs4+TV84DN2lufHZXmUqGID0q9Tjn5Vufp+8tvpWNLOXMfKlu64lz9t6dr22HWSVSSl0LpumJoGZjKyBZW0po9BIcLaPtEOMRYLTqPDXo58F1oMjhbBjgc2N7uY8h+UXbOHPwqMIbgK77yY3z85g22sdW+Gi8QAZt9nvR8IqbHY9nVPif2XfEjf/st3r68bf2v+LXwZ31eSr6slea4UWdQCi7/z2xDBcfN3siylZmIJzzcUjwW2UOlcuv437YNYxvYd8W+73m6VLRRh2KXUfaRzQ3dglj6WD0Sv2tGdVaAOmo52OYWXd3op9JVoqG3Y8kMGydo0SeWR0UWO5Y52CQYG55aHnQ2S4pqiQazxqCcOq0tuS2zWv2oyAjJJ4FzmelyEPiQACVvItpYEsRe3WKg/P/maDRd03VAOBbS+lozYFmeTwTjQAtAuf1BzzCDpeO5zVyulHRb3SbvwepJZA8Dx3r/MNNNfB+4Rn94uNcT8gDa5phyRlZCBfd9x9x3jG1gu2217NqFL+QjKeLLMe3i7QlMYNdZS1spqTGGYGJg7nu3jfDMeshUnJLmjDRE8PJyt8viAWy3W15wnHRcyB3BpPCPwZCBk6B4d4HV2Ut7J+mWkQMcdOAaQkdoNpGkDLvlNGpOqPQuxfN24IfA7isLPDSsKpxvdWmvxnBAXZf03lYiRlDE/eyggJ/yaFX0U1ZJHnSpWVjGoibQdyBO8DSyWUJSdNapgiL1hlAwtsoZG5C0lIl94csDtmRQYyYt01gsb/4uJ2iE+5zBV+h7lPMDXu7nWNWa8pL7Yop853SkijHzUQzxhKaNjeAkVFjjeeajNkbn0JZGJm1qPIbzoEDt+mkV3PdpS99dDiZfXTPV9pCBZm6F7GmyTM348kE5SgqxlvszwkI0R+Mb8YFKmWN3wevlhYixVSbGOHjRCG42Y9BewhnJT0+4js30cZwiGXuNdcbqj45AjYk/EkvSlj/g84Ma1zhZ0mjIsDsgY+Yv6Mx24B3j/EEEbasSAWjQ/eHpdP9ZXYfgCosyP6qnszJc8rgp+HGbJPOkE45OwYrfCdoLtvV+zcSQE44urOtSSzaffBQ7KxaeCao9d6vTQ+UXDLkfLehUHIMqyZ/QNl41jJBKtbIojItjtB4DG4YZfVx4tq21577AYc1zjqPi+XbDJ598hOfnZ1cQE5Hd7wF0b5c0oBk0Vez7Hft9x4+8fcHLy26Kw5mFOeSkFsf3yDDS/0u8+xAbr8SwnBmBtd2oc00YcDDRVU3Huk6pUuz7zKXB3TgVlJivSQMgZuyyNdHSy/578IpA8+ALztKqqmeMJfshUd8mGZyJ85D4O+3gA7Y/ZJhAz1tShPuAKK8LHR3c+MWhD8cRIlmJfonf+RNJA0X2K3sRW1gi+TJqFutk1Bvt42Rb7nsEuNVjNmqOVmRBhVBNZyta7sTk7GwdNc3lo8nqZ9HU69Coy3VjOmaE6yIT0fYxiKgEGq8GaOMmRSfWjOsMcKBpTzlksyf3l90PlVCMbcM2xqKnKuBO1Sqa77BUTYXdMyYCxMmOCshtywSd4TK4gaBglg/8Zdgeqvt9x9i2CrqIpka+I8c2wch2gqi9CA/Pw3qIa4OCXaGsup9/jNkbSkJSMCFUzsWGcOZlwlQvif+5HYrfKfFTnI+Y58a0EytDn/GKidLZ9aFkNGQkymnrk5GaeJ7kM4o3pPmbIGc3rKK+ZJOLmyoUSOpKRUsBKpWjV9N+9E4afzefhlcc8JtnNovf83cQs1vUeiRbQP1y1h4YOVMl6RoY3xoLU8shLkTjdM1oHOKk2BlJz4XRi24dP3tds/3WV4XLwGovzmfda7WPH54tgjzLeka/CDf1a5Ei+aNMbymbAFTiIPibk4suO3VYiespajJHh8Yt7G8Xf+bCSFzF3jfC32Vhqq1EiT3At2FJl7CbcYqUwE+u3ve8My78s/Y5Epo03gPqh6DMbj/zHZd1D+5kG7mqYdcdtXXhXPsFfBBBW0Iw8uOfO7+m83P2HOkUHWbcUgGfttLaeTzDRizTyqP1QZxJjssiCx4FbCvuDQdPUdjPdb9MvYukS2Rc1j479RAap+0L9D52s1B4IRzE6NViyBreJg8pQxHoSPud+lrdS+NWWYu4jNEqzaCJFZbGVcOE70L7NUOWp3d6vXkBKBS3pxs+/vhjPD3dsq59Ku5qR2kXrXzG5xCg2vc5J/Z94uW+4+2nb3G/c8a7gtIjtqkHi765j8dKDioYSiUpRzyQreUY5oMzzu6+ENFM6b8e0BWBzfDZWO17G5GTHnYTE8uSsqT6r6Swgx8a71IAHc6pR3TQuFUllsTEK2TWC0LxV72ZHSUrReaV7Wz1kgVIG/VaDYLFsRIpvm2itMqXZpEH2gU5s658oTLj2l0FjX6zDBej2Behfqfwe/aTjmSOLmW3ohdyrC++8Xz6qwbs1OFBkxNDa8U0gkI9/G5ljs5OOFghR8eZZiDvWWvIRJngWdas9k/VZHTGaYJkv8oTdLyWYYnfI0i35eA+m+0OVNw5JmPgaZSj0JINFPQ2BvZHJnbcMI3o6sAA6He+LHzMsnBqYDrtVtvUmkcfa10Jf2LlWc8F+7OeaYWacjuvKLrDM3ZHjoIvFacpSlIO4aTmXkiBO3BVX/YtxivKRrsRUIzwOZiHgn9Kr7XfaC9NJQY0UW2rTlJRkMU6G592/1HpiAr8enVhS7rL05MRTd9yvanzT351PPuR+vF7BHM0++vQLRaljT15InE3X7wzakwa7l6txLU3kdzJzhx1bQZo/puqZgIyyR96brHdjNLhidaVP6Qh6Q05q4SJuSRmFOG3R/KaV3JENcay9jCWwTc1lnqMT5YMrnAdFmVQ4xzJ2iGCbQw7jMn3s8W+f0D90CCpmUCvQ1D3ZyrZXI3Dh9wfFJ3Yp/rhJqUjK2hzjT4rwFNo7W8N/T4ndl+jO31Vjvg45pa4RwGBw4cVtIF05omtPgvMzusIYwwcNW3WADOYvb2D7nnYDInFoQm2Xnry+4MaQ66SJ7Q9631ZHS9yKEC6gwMxV+TJcN3LKWdfTqjmhuKMHqUwKmvVaWAfNRwf1ZZhOS7r680ag2vbT0BUSPE/KnS666Njs/SPHQpJug0BttsTPvnkDZ49YDsL4hVKSoV+ofJz3zHnxMvbF3z69m4BG2h5xcO+M29K9reyhl521dqOG7RmkyJjJcKXJ6MH4RLzS71tXas+UJsNT/wmqcge9Q3hWKD4IOPndBjATEYOg5T/g+J58RfjcAdpDF2zOM3ZCycFoUSrzba5XAuvbJNtHhm8OZegI+jMS0W0z6aE8o+Ae/g9W7WfRFpdVXXM
1lJdVL5SLFK0dbDlOpVpzuVS5GBm/b7UsrmlgRLxk9J7SbfmHJC/csb8CIdk6VPS7aw8OsPSD+kEB35zVh9St/k4a/DL0eHJ/G6Mk/g1BFEvZZH5vXRmFpgpNxEIhgxQsP0uYN50vrbseci8oTCkLmDPWjmhdaCrRKcxPekiQxa6gWjuwqH02+LAFT2OzxZqvw56/Fp9WjrDFbMeOPRXU9+cpyvX5mPs2P4I/c58UNYxaui2tOuiIGsl4Lho6DDJrpYd6fzPqk8QweEyft4o6x30ahYDwJVXr5odT9Jr0+/JLsrzZqmVWxITWWchKhC78m52mleTiw3SMhU566+2Ny2SsjbsFVjzftWsN2Y96zq6gykCf09Q8EDXSc3ay6DLaecUQGNpXx5OVNcGqdcXOoieAjibbSubwHxHt2QtxaPDpvsj+K8katfVWh9C9Vl78X2R3bIDRTyzq6UUavljyRiPwZwK1R37mKbn/CRdkQGMgS06lUJlX8cgvnW5m0MwxgaoXUsyRWx/tQJzo1m0ZcnnBPziNudll8siRwV0+33HvJtPmBZjyDuP+wc+wKCN4Zh1Cf5/NAPVoQd59l7VeR5MtYRHc4rs/fps8PrR/Su8Zo5O8FnsXWXeOq681CL5L7Os9TxObKwAsJR+oqdHmqcTHQqueamkJk/tHBkxNpghIfli1MkDYP8Fs5NWI8UfTZTJ4ZYnsF7adQ7edClKxdPtCW/ePOG2bY5mGR8LBugIdAXymHDSVHPu2Hc7IfL+suOtnxCZVAilp8elkWmQGx012wvjLeBTp4+8F45cKJJQ5HmCIGVsocgT8GqcKbmBarcd86+RmXTtq5XZXBKYTG60PGpZV6OdWv0jaEzRUuY9X03exEL7UphpjsN7IPkg032gX/BWBNpA3UWWKDgNJHBc5CoMbu67Sjz1SCAF3feUJEWcpifR9zXJIotM05jwshjWGSPNYP2l3udfY4cmJL1snSJOjoK3U1sOsFRwqhVTpAP9lUTOgCaCxfMRRGfNLUHln5cT99LByP7o+muWCf5MOWUdTQowZtJaAuRUCqovYwzM3Z2yfZqjtvB3QzG9HSX1aZ6C7R+NZZGGV9wFtjBIMmrwlPVLs8F97vb+NrDdbjU4pHe7sUWN3QphYwTNoamfOPG39LvJajyvASDzct72iX3tqJGtemjDqwmELoHQiJ/xTn+yJorPvnd7afUOtb04/NNarkuW4xZr+/hgHy6M5QsL0aga85aA5cSOPkOlNPY+q8/4uL6y1Q5rB040gcAsANOCdGM01zW3IPUO64b8xTASslGxHLXEnHYytXFAH7DKjqP5I22AWneom7XiKOSOgznm5xwF39OrqrnAOsSe6RdWNe+bPDBi1/FFKz/zn+U36g4+UvJb2vvSaYiyW4megsaJ+tn0tj/X6ImYXaNtPer6h22oQoEd2CEY2O3324ZnbJBtlJ4bdYKwwmf2cim64qaKiQ0QQZykMSHAphj7gIrpVaUgS0RyS0PgGLNsEAEGqUUZuN0U2Hfc73e83HffZ+xbEd4R23wwQZvwqNZTUtCa3x8FbK8GdM22dCauNojpD3WUw5M6LRymE2NyfK+U31nbSnP3WeeJA95+T3UVdZThi+8seRl0oISaBeicrLaf8Oz0KqNDKa7Q3Www1vHM5XwCIC8sVypaS0FCQarW7IRNYYOyXWUkxKWk3mN0Faxku1MgNQsogrENvPnoGW+ebhhj8+UmdXG5/U0vqTLkKhBM7Aqo2kEjc+62HPLtHbvPtqW/Q5k+RdGPnZPaoKp+HDfxm/9nil9SiaWS9A/2zixjF2MfSlF4v6D6Mh60AJSVcepwksc4LW3E2DTq1v/JO3AZ0zp4qd2TFAosbXMZ/+Rx5z/JPvqPXUh9o68VmE5Yiz2I+zMDWnnbNCJkwGLcrOplOVwo7hNrq9GGtz3npFMponjnSp0z+fxsZYHUQJSB6ETnxoOoWSCdZmea3PuXhCb6+Md1L5cuK7ONpdmiH5dBHxE89Cx5Pml/Vjx1Z/ao9FH+XjLWvq+qKaHrK+4TXW3V0WiMUvyeS2Re7StSdiGC7Taw36etDtinnYC2Dmzq21BaSCeED5xrkxRqQZeMpa4sUA52/qLTMsG77cfdtoFtlE1+SMKTHmbJNFbxV6nMqpdXUPrf32FZyzb05C37VLUv+w8btpIlWx85O9OwLsxXDPVQbv3yWUHs9Es558GCSP4twrF8z+0FeXgJ6YVkoBK83DNJNouJ46UquZp1gfSavRQmvxm7k/5Sl6L2+s3bkVB+yoknpQOraIbbdVoug/M6NOjhOrz2Ovd3ZZhqk7gazB334gqpvfHUxXaqbvSAIxRSuZkQRdFRIFlH6Ld4P2a9kL0tbNLGBt1R9XWRjPpiFcaiA5vujOrDQ4sDNSSTgSK+Vy55sdSj9FoWsV30sre5ylD4JT3JoU3vKdRs09s7fmTsuN023J5uuN1uVsCvogj/Y7j9021gAnaDGq33tr1v87gkdSD90wzQYMGyah0GFHRB0EMEIhvGbcP2Yiuv9vsd+1Q6ifIcPpigDaCg4lSplfKIMufK62xddLEteSkn9ZcwhMPcy0UgIO9QnCdYnbwQmPBk3aN+1Rp17tHxvTq2Vxvzr+/xUoHe3qIspT+48MZ1AAAgAElEQVTNWS+UQuLhUip5hJqViEAhS1JxPvQhh2C5q4p/yj7ayyUoFAWFMelzHFSXK4ixDXz80Ru8efO03FXigQt4LDtv2P/mNd33if1+x31O7C82y7ZmhYXHyB9m37Pd1alYHJLI9EBRQRaV1PrLCjOXSDovN7pK8JvTSspwhO62dzRPaSwaUl1BfsSSTsKJDdsSrAjgJ3NS/xdR1LU/De8qZAeL2D6PqG7tK5/SmU1m/2n5D5AHMlj7FUSv+PEglBhWy9sY1d5CtLSP2mV4BREpuyJnBVmxVIcTD3bKhHWPelK+6NJ1Mo3L6N8jpVgzqO0sUiq3OAZLR8VxLf2jSBvIY5M+Cck6yXjysRBlRj17YAbOvjzW+Ym6tDxbc/peeY1bG9sGFTvhETqx78C2DQDDV6sS3gdGJjxgQdo+d0oqhvZbmUlz3PPJ1FwlAFgwObYtxyVr4KTb2pnEcVVKXLy/+Aq7n7D2Uf8e+rW8GCityx+V/u9Cu2ByEhCuuqvqfLTw9/HQHXlswXP5XbgVXeixBpm6fiZEWqBBvXLlJjGO+Xo5FWHH8hl70wfZXvyL076e4Jn91yR6jmOzqdypKJcGC2m8ogJKuDT9AHomQCzPU6drzURrW+0RM0AQ9KsHTtXzOhZSAVMTmbCB57TT6niD4sL1NUUEuWUqYwWHLAg/qFyj7zviEJBS1oJts8M/1LO5j7UgOouc8oT3ZKr7BZoTCQd5cEPc/KCpeHm5A5A8IbLZNrVD0oYMyJx+NYj7277KYHpiOnjGfAGfvcsDn0oPBp9kB0kWFXWvraraKcHbhvl0w363659egw8maFuDGp4FOFfI7GTwO0c9+1o7pV8i2HtkCPpn1k3n7UT9q3PidZncQJdpCQvMjjjqI8k
MMWz1KOmodUklz2aUAlzrY4TyQBcygS2fJ13qOm1q5qxJZ+hNchTZVq7SaEJCSj/KTiwzFuWsVY/KoWuGg+2SAk/Pgo/evMHz81MGEkModQ3PthBukeVJBelHDu/3O172HS8+w8aB0bpuH/T8NaflsPRGeybrcMJSjn3QxEYjxpKzdVTK+2nP+8xR7dHKq4CU30VrjzFv1UQJiQNfOm1qGXDMkJFt9XqiHaW68kqaBnXyrDn8XkjteTt6O1hzvcSLkF4D3CzEpzDER/aZhDu60FtB754rsUwiZYDcHZN+NGpIHcms65ozh71wiWY7z9dvLAcrDZgTmUYs90Kfe6lOrCOUDDdEG51LR3obWtgcdPXJrEn2IssSnnxy0aneJ51CxDnQm9nkYNqs9DaAgS0vjd2nAtghzrOR6a1+Klow7gIzZGCHOQDiy9GWxWpEDsmDUNT3D0MntjF8hi6WVkrSVdbxOEYURcZXbOXZK+fPeRFc6BRtv9Xns7qEPp05kmTZTsl0zpvHUsl1h1Ye0R84kq81+4AwZ25BZynnk8puPKg0+Fv6Xi/ROs6ddNdDZFhRAzgQMz9SoeSdx7QBje1S+eGp9wKKOuY+1KxqHM3ux68j9H3xT9pywFadWEWtO+zV9Kw7UrBbj6T3Tqm8latljpx/Kx7i3lshFfgEeavhQAksT1XVrmyAQDMZTo2eDcHKaGHndfo2BltWKQK/+mrYKYzZP8KC/5zZ06SVNDWtAJ1i2XqE1RyerYB/eXnBnDu2seF2syuIANN9I/py9+sNPPk7VaG7rTaYvqxk3Oz9vNJFfJREMkhF3JkaoyeaNFapOxEF4vykpmtvt1rG/gA+qKDt/AhaPHz2uK7P/vyQGAI7REdDzbNMj+rq7RxFlZMYx8DvcUfP99i5wEr9fhYTrH1vdZ22Rk6S5mJFsHif4XyWKdS1nALT10eHc5lGxTVrLpnz388C5KyXG118OSYFiY1ntGrJ49ObJ3zlR2+wudBY4OCC1fauxbPg1bm4CBP7vttG0323UxNPlSBOCZ9LBNMHPCritjn74CBWoFOv8ho2bTcsaCi8lT9gwXdc+D1VT7cFxvJEy3y5kpo030q+bKCl8Z7/UOao9hGsdt94gPu2uMMKUoT1X0+a2LOg7yExTTgmRgfEe2fKiBK2zcDRASKChVfZQL7isAhRaO039bfcmsWZSdvSe7jOzK7mdW1JnXDnyaoz3B/1KZ4fR/n4eZXt4JP+7HFSa9WVK2aSZdoMfzQtVHhFq5Hz3DC0hEiMd+vqiXxDgE2web2RjFFYADdkgPNXyX5kA7LqAQiGbcwf3FbX2RmwRTeGYMhmy8PRKN7e6/QgI/k57PUZlA3TE4fxqPPqczyP/x/bufq115dNy1pO0I7v4zpPlUbhtI7wGYEEWlcqpmzKEfH2Us3OKNMfQKxdDvaMJZHdZpDM9yjcdQqyz5awqs9FgEUwuI2jMWJmXfj/hGkWUslJ+RKDM144Uj5mOaZP3deNe9234e8aYwOYP+SsENu/cru1+y/RWAZ4QyxQaiTgoJ5CM1aPLNuo8eDdiZFI1LjyJsbJyxwSBymeEzKHB27OA4Rg0/9nM84CIH2joHUgP0FbypdR4TD0HaDtTz5cJzuqpdBVR15SBfb7hG7Wz7HbgSPrxJAdIEY7vbeBTQTYFXO/Y3+xvt82uxbCfJFh8ia6yAaAGWuavL6QV/viLpoJlQL//wnaAuF27LwAwZSfB/qJQxrVv9b08n092emkDEhZyHn9Z8lcPfv9FRbmGatytux7m03Ss3es7jWIfLRUcxUMQOjOlRVNzZceuWbI4iebglul9Sz3r0nxAieV8/AMppeiJgdo7FhQVtUd37dt4PnNMz756A22bbQ2+0luahcuAog9bHPWynnAjnCdE/j07R0vb1/6/jUQnUSa41Fo9YCtukXGeHXy8lSp8JuYGVft4TNkMUOntR+vTeezfRUpeViSKnXfpl9mncqp2mL3JTiJVVIlSogPCOtYrlGOo+bYcL8/D+SIsQGtH9v3Ej+naxjSsnNH2Q/EUIxbw0LEjURV+i99/f6Zo7w+OcX1BGpculvSHSAerZKD6t9i2A+4nMvaETteyrvqvrMe8qLrs4y7lxR55ffjLPbjJeLcCQmhxSmznZGC9H5zfM5JTe2EsoiEhmswkaaLAPiF8tTOetCLAhgbbhtwh13WXaekzYb2sKyLZ7HhdxkxjoLmboU8pmP5Cued2MB3QXMyz6a8aBj6WPenK6e3CpL3D+7ggg3ZoMBHjrzEPuRq9/n03qLdEWdb7qzHehg/PXkunQp2AT0xWfBHtsiDe5T5Kqf0nBd7ciKK+TbZ91AHt2Wt+8yEAHmgkFYrmiWrLSX5qNnl0skr8P2cCJPLS8Lhd2v5k0gUhxYpFXAc3BKz2k+eSyMbEj7uuy3RN7n1qz1St5SKUYkl1lpUz+9lQ7k8UPzX78grqjd+deEW1LYRUH8TN7qdQsSW9N82u5B7r6U2ZddUM3BsWykIi1f9cMYxxkHbA+Nl5XqZ1FH+sT5S2Em4M04PljvpRbNLccE2VO3zZss/N9gZB3kauSqm+tLIuPppu0F8/4QCsPNQnDYugwMDCvMNbb+9n4zuKxzGa/oUH1DQJkt0ucrJa4N9SDCFM/SuNqne8xmy+POaZ3gMgg5Bk3/ii4N7uWYhaTYH7Xn61BTF9GCtTsU5QnH/Mag9sawhkKdkJIUwYq+dKde8A+hY42Mgf0gBzw67ozA4f+ZhgNJnnv5xL6JlKB+xgQjGtuGjj97g44+eXFDHwnd1UMp6FGvw2HD6qeP09uXFl0SWY8QodIN0+kMlc90QtO3zafiPjgnzw+qSiJOH95KFMra/NHAenOQ2sEPqqOoruekG6yivpejYaGa3oy7wsBVRVlOefE7rSZrcNZxKuNP5zVqXcXjIsF02jhM70QbRIRyLoxd3rtBW/+bdKizp2XVFoyz4l1Xm297F0/oft13vrm7C+ttRhxXOrUbq0/F5jFe7oqLptJCLlZCPOqHFu3kSkX8X4DRgWHE+UZ35PV8vHi0Zo+asA80RsecjsMyn5lQIbrfN97rB7/7RdpJxyKYMwZhbNhH7OfLwAjHHwdYJE78cxoZdLyzONY3xWXbzDF4Zlv4TKYYzjdpkXvitBY3lgA5Q6CJlw49SEM+tdPCcDZdn/MUd6wxgJFmq6X7SPRIKFOu4H2fSG1XC3hH/lE4PvKx88Ha2l01qrug4S5zkJAon6VqARDho2JCutTO4SVulnT6wmadG+2Upx6qXZMUx6FyNZnn1H9isWcK3KCzLIffDX7D+xziTgPuJklNqRCpgs89GXm+HbS1iBqdsZ94TiVI/gXt0h2fw2i5vqb8sL9nXvAsv6E10TXUk5mt54IAcx8Ki+6i+PNGLzmhwQSC2XjSRTTxeMShUNPhtlcqu9Y+vWd/DzvrzM5WktWoKjUctoApev+87ZA5stw0bBmQbuIlAZRj/qtb1Mds4rqIXk8toEwrMMZEHqE3j83nfobpDINjfYfg/mKCNoQcnrFhK7hlO/Z
9D4MPf+3vvju8eM9txyaI/pxvk8xnXuDpy/rlfIUAKy/vBjnc4HcEAzbwf6PSok+EETSr3ijUF6d1TD0aXJ9LwKifJv7NWzRq74Sq5l3KW5WQKecWJZi5WeH7zhI+en/D0fIPkFLm3TVntcjzZIeqfp2/af3m54+3bF+z7+UbSg7IJJaZgkkBB7hoZ3tM69XwxAB8BPKhsPI1PU2F3mARfSTf+dccdzbq2BqUbxvhJ2Bymvsoe1i7AhUD+7hoIHkfymLVL2lKQyfTpnMiYvKIAXrMzTVRPLIMT4KGZovHPpTjhMTERsdJ3YRZR0pELwrK8x/XwVM2hw+c0Mb1y0s5Z2fifgix17yT0Ve/ZRD9FN5VEme94z6+lYIX3aK8ocNT1Qa8IXvoyWOlVPCbHgQRZDZNH+pA2Gq/2KR15ooP/zjYgll+LDIzNT3b02bQ1xplQmwWPunXmfjm73yhWEBAJAiuuKHuoRcMSulVNvg7JwmfyJ9x4EjP7DxK3FYegWCd44p3/L+b38X6g6GutSmgdzdnQZdwEoB0tB5E01KQ9E380BssIvZNJRZfmVBWKmCUoW8ljlwbb9AwlWmzvIi1JV14ur9U4YZn2SUKPx/aMRtJ6Q9zfEYFgQzj1qSGcruKfCzTbU6qcRN7Hxm21oC689h9W61j+B1tZVrK1p68vj5TUzWP17XQmX43op7IvFrZOewdIPwyRvHTacPAfIwsuqCWZ0IOKWoHDnRpKbcvrw470SV9thGVa7grc3Q+IFI+PPiUTNN9bJ06Mh5sCJny7/W6qRLXxV9mO1IzNtylP1v0Y1yXNtabPdWy/wpZ1itNJLKilq1Ly9PNNIOope9XGD3barp+HMIDco++ITbXrO0IuBAPbtvmpwcBjr9XggwzaIhCpcV2F7nVoCkRtMOJZDbwpWn7G7xM2p202h2HBnd9l0To6Dg+4qOHgQtx+XsW1ynA/1wD10C4raIklHAsqmj+3IDromvSb05c7hCJeeuaKdfUra/rfexXCf3bs6SPl0sngBnNdnx4fBG+en/DJV7zB5sFaW4YJ9f0d2VBHQeLUIjMIc1e8vLzg07dv7cLsUHQh3Evg6C4AYgX9qaFrSrOU0zksM71p1OvrJMaTHGRvx8dkXdqhnWg9SbrgycFAks0Ls33iPpA7hHBvsgR7g2FsggnzvS5X0W3tiFN5Yhv+UdDaPkiWFq6vGcmDJ1EMXi+umRQlh2FpvKu+qKsvn6qOkUBloHM0ik3w2KyTA1BPadaW+KCh/0AZJ/9mKrFp8GrJx8r0SGWho1SNayznldQ72Rd2fIV5YsXtbASPI9pySE0xLe+sAaLwQukuj+uwnye8OhIsHzHLw1ScmQVRyH5Og44u6Rq6L7Dqr2Xl6aRoXeXRxy1on49W9B7CWZHHiZPCi+Wal7WGro/iZzzdkWRUxcYtGU+wLVci5AmtVjSDi9I1XjvZDHMi7chwo/GOuZ/1j/sEGquOc9pfaOpgEaHl6f0uCl2VNdmk2s/FsyGaQTx1CKp+nLvWWRu8r9sSeV31rWKnCuzhZ0ABxF2mMWu+yoNg0Mm1cY0P0z0DIwFE43S/LQdWwTh1+8inXbfnhEFbjZIyoMiLrTPAij77oRsanOw4o/uwYwzbYiGlKYKOgNSl4dIDxmJk7Tzu/2soS1qVVCsP4k0bRBX02VovtG6RYBlL8fZg7cDJTT94helTSKerZO0HXXCweQcnMurxVqQoeA6c2FiAfJ/4XPJbOAyxgOo2Nm/P+V+rb8n3s9ZFzanYxZJqtiILfvDgpJMjow1Ab+aLxtXpr8EHGbQBjx0C/7FruQP0YXpc1SNLQ6y+/MxjfYZWKwgSnhAAUn7Rxhl0B5mt4mpUlOqTnKnj5W7B2ErO3RrUYkHnzA6f+UEp92PUezQ2LYihcdMFD57pAgCME+3PPT+T50RYM0PFtcoQPL95xscffYRbZqWrRNxLVHsaOaCLYI2M81Ts9xd8+ulbvNzvhSYJdTMAgchEOt9r/8OIdfbwzBrjGn1S7RykqGPYPQIvJzqUcbmBGQhplcvam3J0r4Wd5Woi33M3y6qVuvS7WEKaLxyZJYXRpFixnIrXRP3In2SJ2l/+ZS0Tv68MLstTaX/sczhPNfOrqjkG64mgeXkxYbTuTWVjlEY5yp8JZkmTn2hVeiJosprJ9VNlQEHj6k5CbZg4dL+DZp9XR7sv8g0IHJ3OSnKgkW2VQ1/72DYlh8pCLS21YKb0Jj2hL53uXGORae2NNMyqe8FtJd/rAEaCMtgochY9z59SBHKj4AedJUxNqgE0MxPJmp02vMce3eDh7K9Oc05mtYawE36lg7iTGcxS+vxctjocxyfsxtHKSemteDJCp/gypdAVITspA9FP58tsei5WlMyfKyx3vayfbk6su3ZRby6y4+0BOQbwJVNW69wBvRXP5OqFufgXLt7alKqykcjPvLz1LFpiO7wu/Xzk06zvA0pr4Og5Zzaka9i885pkpz4Vj5gPpXm/pJKvJTtyJknWWhYBq6BvTbR2/ltQJtwtMJ1EAPFTJaMvm9gzYEsaxJhP1ZzFHhL+V40XJ7wAtIQI8ggUKhF7o+K5lm4W7fvEhdpZqJL1qVb2W8Yof4EHiH3U1B1d79o7LutDjmPzQC0DZyNybmmz7InqOGoMJM3PfuTDRVh3dx+d/rDdID6a+8TLp2+B5ydbyQBLGmwurLPRXj15o7W/f6vhEQFA28Bi5tSuMhiYm3PYWWcJPlPQJiJ/AcAPA9gB3FX1G0TkqwH8HgA/FcBfAPDLVfUHxaj1mwD8EgB/C8C/rKp/8t1teEdeRVjTSUzb9xjnKn/aVim/NUB8fY/aZwdeAqkn8nXWDjPWOhNXeD9GKN9XzWnv5vRFOyjeWBWsLn+j2dg8nAEMHryngMRGSwVtNTjmogcLPYB+DJP0ykkpHJ0mrTJLlklEMmD75KNnWkYkYFpmMNaWcJWCtt9CKQvmvuPTty+43/eHghbdqCjcJTV+i7HmfnrZuAiaj3tcZ+iAmM3j2SiiTWS62uDEKBTdQg0Xb0jWXXrRyuT+ZDdmoe+U6o8kBYhP2FhnUIAatrRBRT0Y1wV2PYBilnj0mWsKcjoDNVjszPJWtbcNJO/kAQPEb0Gn4XyYBmHJrinsQs9cJhq0CZoQn9V31gnFQ22mM267pgA7nZlVkcVzFWCEk0BMHwMvVK5apgCmaHgcid5c/qJnT1edd9RzcV8S13pM/FS95cQzPu+wit42G/4Q4Qgs40Ak7k/lRmpWoHsG1rbVFTMotRd4SAQhcNVnL8V9R5HlDcdw87uK9rn2J990h7SWxL28vEB3P1UwnPkxcHvaKHCr8tP5Mve8yfD7KwGZA2PE3lcP5sZIZ7I5wuwMYpAuKFoLlQXQZ5WBtDmiQX+pADPxtr+1xNCJi5IvVfjBiiGnNoA61S5ImBEEhr0U3HM4FXgJMxPy27VHBOAhXvXbuvy3ePa4/1PzTy7xo8DQUPP6JC5Gj36G3kxJRYdl0aDylxwE/nOQ15xTEqHLmPsyZuXu1Eh7UaIX0SN5QhktQ
[... several thousand characters of base64-encoded PNG image data (the deleted notebook output cell) elided ...]\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# show the results\n", - "show_result_pyplot(model, img, result, get_palette('cityscapes'))" + "vis_result = show_result_pyplot(model, img, result)\n", + "plt.imshow(mmcv.bgr2rgb(vis_result))" ] }, { @@ -119,9 +87,9 @@ ], "metadata": { "kernelspec": { - "display_name": "open-mmlab", + "display_name": "Python 3.10.4 ('pt1.11-v2')", "language": "python", - "name": "open-mmlab" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -133,7 +101,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.7" + "version": "3.10.4" }, "pycharm": { "stem_cell": { @@ -143,6 +111,11 @@ }, "source": [] } + }, + "vscode": { + "interpreter": { + "hash": "fdab7187f8cbd4ce42bbf864ddb4c4693e7329271a15a7fa96e4bdb82b9302c9" + } } }, "nbformat": 4, diff --git a/demo/video_demo.py b/demo/video_demo.py new file mode 100644 index 0000000000..3eb326b7af --- /dev/null +++ b/demo/video_demo.py @@ -0,0 +1,115 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser + +import cv2 +from mmengine.model.utils import revert_sync_batchnorm + +from mmseg.apis import inference_model, init_model +from mmseg.apis.inference import show_result_pyplot +from mmseg.utils import register_all_modules + + +def main(): + parser = ArgumentParser() + parser.add_argument('video', help='Video file or webcam id') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--palette', + default='cityscapes', + help='Color palette used for segmentation map') + parser.add_argument( + '--show', action='store_true', help='Whether to show draw result') + parser.add_argument( + '--show-wait-time', default=1, type=int, help='Wait time after imshow') + parser.add_argument( + '--output-file', default=None, type=str, help='Output video file path') + parser.add_argument( + '--output-fourcc', + default='MJPG', + type=str, + help='Fourcc of the output video') + parser.add_argument( + '--output-fps', default=-1, type=int, help='FPS of the output video') + parser.add_argument( + '--output-height', + default=-1, + type=int, + help='Frame height of the output video') + parser.add_argument( + '--output-width', + default=-1, + type=int, + help='Frame width of the output video') + parser.add_argument( + '--opacity', + type=float, + default=0.5, + help='Opacity of painted segmentation map. In (0, 1] range.') + args = parser.parse_args() + + assert args.show or args.output_file, \ + 'At least one output should be enabled.' 
+ + register_all_modules() + + # build the model from a config file and a checkpoint file + model = init_model(args.config, args.checkpoint, device=args.device) + if args.device == 'cpu': + model = revert_sync_batchnorm(model) + + # build input video + if args.video.isdigit(): + args.video = int(args.video) + cap = cv2.VideoCapture(args.video) + assert (cap.isOpened()) + input_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + input_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) + input_fps = cap.get(cv2.CAP_PROP_FPS) + + # init output video + writer = None + output_height = None + output_width = None + if args.output_file is not None: + fourcc = cv2.VideoWriter_fourcc(*args.output_fourcc) + output_fps = args.output_fps if args.output_fps > 0 else input_fps + output_height = args.output_height if args.output_height > 0 else int( + input_height) + output_width = args.output_width if args.output_width > 0 else int( + input_width) + writer = cv2.VideoWriter(args.output_file, fourcc, output_fps, + (output_width, output_height), True) + + # start looping + try: + while True: + flag, frame = cap.read() + if not flag: + break + + # test a single image + result = inference_model(model, frame) + + # blend raw image and prediction + draw_img = show_result_pyplot(model, frame, result) + + if args.show: + cv2.imshow('video_demo', draw_img) + cv2.waitKey(args.show_wait_time) + if writer: + if draw_img.shape[0] != output_height or draw_img.shape[ + 1] != output_width: + draw_img = cv2.resize(draw_img, + (output_width, output_height)) + writer.write(draw_img) + finally: + if writer: + writer.release() + cap.release() + + +if __name__ == '__main__': + main() diff --git a/docker/Dockerfile b/docker/Dockerfile index 700ac15dee..9ee49ab35c 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,6 +1,7 @@ -ARG PYTORCH="1.3" -ARG CUDA="10.1" -ARG CUDNN="7" +ARG PYTORCH="1.11.0" +ARG CUDA="11.3" +ARG CUDNN="8" +ARG MMCV="2.0.0rc3" FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel @@ -8,13 +9,27 @@ ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" -RUN apt-get update && apt-get install -y libglib2.0-0 libsm6 libxrender-dev libxext6 \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* +# To fix GPG key error when running apt-get update +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + +RUN apt-get update && apt-get install -y git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* -# Install mmsegmentation RUN conda clean --all -RUN git clone https://github.com/open-mmlab/mmsegmenation.git /mmsegmentation + +# Install MMCV +ARG PYTORCH +ARG CUDA +ARG MMCV +RUN ["/bin/bash", "-c", "pip install openmim"] +RUN ["/bin/bash", "-c", "mim install mmengine"] +RUN ["/bin/bash", "-c", "mim install mmcv==${MMCV}"] + +# Install MMSegmentation +RUN git clone -b dev-1.x https://github.com/open-mmlab/mmsegmentation.git /mmsegmentation WORKDIR /mmsegmentation ENV FORCE_CUDA="1" +RUN pip install -r requirements.txt RUN pip install --no-cache-dir -e . 
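For reference, the per-frame loop in `demo/video_demo.py` above is a thin wrapper around the single-image inference API; a minimal sketch of that flow is shown below (the config, checkpoint, and image paths are placeholders, not files added by this change):

```python
# Single-image counterpart of the per-frame logic in demo/video_demo.py.
# All paths are placeholders; substitute any valid config/checkpoint pair.
import mmcv

from mmseg.apis import inference_model, init_model
from mmseg.apis.inference import show_result_pyplot
from mmseg.utils import register_all_modules

register_all_modules()

model = init_model('path/to/config.py', 'path/to/checkpoint.pth', device='cuda:0')
img = mmcv.imread('path/to/image.jpg')

result = inference_model(model, img)          # run the segmentor on one frame
vis = show_result_pyplot(model, img, result)  # blend the prediction onto the image
mmcv.imwrite(vis, 'vis.png')
```

The video script simply repeats the last three steps for every decoded frame and passes the blended image to the video writer.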
diff --git a/docker/serve/Dockerfile b/docker/serve/Dockerfile new file mode 100644 index 0000000000..bb150076d8 --- /dev/null +++ b/docker/serve/Dockerfile @@ -0,0 +1,51 @@ +ARG PYTORCH="1.11.0" +ARG CUDA="11.3" +ARG CUDNN="8" +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel + +ARG MMCV="2.0.0rc3" +ARG MMSEG="1.0.0rc2" + +ENV PYTHONUNBUFFERED TRUE + +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ + ca-certificates \ + g++ \ + openjdk-11-jre-headless \ + # MMDet Requirements + ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ + && rm -rf /var/lib/apt/lists/* + +ENV PATH="/opt/conda/bin:$PATH" +RUN export FORCE_CUDA=1 + +# TORCHSEVER +RUN pip install torchserve torch-model-archiver + +# MMLAB +ARG PYTORCH +ARG CUDA +RUN ["/bin/bash", "-c", "pip install openmim"] +RUN ["/bin/bash", "-c", "mim install mmengine"] +RUN ["/bin/bash", "-c", "mim install mmcv==${MMCV}"] +RUN pip install mmsegmentation==${MMSEG} + +RUN useradd -m model-server \ + && mkdir -p /home/model-server/tmp + +COPY entrypoint.sh /usr/local/bin/entrypoint.sh + +RUN chmod +x /usr/local/bin/entrypoint.sh \ + && chown -R model-server /home/model-server + +COPY config.properties /home/model-server/config.properties +RUN mkdir /home/model-server/model-store && chown -R model-server /home/model-server/model-store + +EXPOSE 8080 8081 8082 + +USER model-server +WORKDIR /home/model-server +ENV TEMP=/home/model-server/tmp +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] +CMD ["serve"] diff --git a/docker/serve/config.properties b/docker/serve/config.properties new file mode 100644 index 0000000000..efb9c47e40 --- /dev/null +++ b/docker/serve/config.properties @@ -0,0 +1,5 @@ +inference_address=http://0.0.0.0:8080 +management_address=http://0.0.0.0:8081 +metrics_address=http://0.0.0.0:8082 +model_store=/home/model-server/model-store +load_models=all diff --git a/docker/serve/entrypoint.sh b/docker/serve/entrypoint.sh new file mode 100644 index 0000000000..41ba00b048 --- /dev/null +++ b/docker/serve/entrypoint.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +if [[ "$1" = "serve" ]]; then + shift 1 + torchserve --start --ts-config /home/model-server/config.properties +else + eval "$@" +fi + +# prevent docker exit +tail -f /dev/null diff --git a/docs/api.rst b/docs/api.rst deleted file mode 100644 index 9c14a67564..0000000000 --- a/docs/api.rst +++ /dev/null @@ -1,61 +0,0 @@ -API Reference -============== - -mmseg.apis --------------- -.. automodule:: mmseg.apis - :members: - -mmseg.core --------------- - -seg -^^^^^^^^^^ -.. automodule:: mmseg.core.seg - :members: - -evaluation -^^^^^^^^^^ -.. automodule:: mmseg.core.evaluation - :members: - -utils -^^^^^^^^^^ -.. automodule:: mmseg.core.utils - :members: - -mmseg.datasets --------------- - -datasets -^^^^^^^^^^ -.. automodule:: mmseg.datasets - :members: - -pipelines -^^^^^^^^^^ -.. automodule:: mmseg.datasets.pipelines - :members: - -mmseg.models --------------- - -segmentors -^^^^^^^^^^ -.. automodule:: mmseg.models.segmentors - :members: - -backbones -^^^^^^^^^^ -.. automodule:: mmseg.models.backbones - :members: - -decode_heads -^^^^^^^^^^^^ -.. automodule:: mmseg.models.decode_heads - :members: - -losses -^^^^^^^^^^ -.. automodule:: mmseg.models.losses - :members: diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index 20f2534dec..0000000000 --- a/docs/conf.py +++ /dev/null @@ -1,72 +0,0 @@ -# Configuration file for the Sphinx documentation builder. 
-# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys - -sys.path.insert(0, os.path.abspath('..')) - -# -- Project information ----------------------------------------------------- - -project = 'MMSegmentation' -copyright = '2020-2020, OpenMMLab' -author = 'MMSegmentation Authors' - -# The full version, including alpha/beta/rc tags -with open('../mmseg/VERSION', 'r') as f: - release = f.read().strip() - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.napoleon', - 'sphinx.ext.viewcode', - 'recommonmark', - 'sphinx_markdown_tables', -] - -autodoc_mock_imports = ['matplotlib', 'pycocotools', 'mmseg.version'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -source_suffix = { - '.rst': 'restructuredtext', - '.md': 'markdown', -} - -# The master toctree document. -master_doc = 'index' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'sphinx_rtd_theme' - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] diff --git a/docs/config.md b/docs/config.md deleted file mode 100644 index e07fdfee84..0000000000 --- a/docs/config.md +++ /dev/null @@ -1,365 +0,0 @@ -# Config System -We incorporate modular and inheritance design into our config system, which is convenient to conduct various experiments. -If you wish to inspect the config file, you may run `python tools/print_config.py /PATH/TO/CONFIG` to see the complete config. -You may also pass `--options xxx.yyy=zzz` to see updated config. - -## Config File Structure - -There are 4 basic component types under `config/_base_`, dataset, model, schedule, default_runtime. -Many methods could be easily constructed with one of each like DeepLabV3, PSPNet. -The configs that are composed by components from `_base_` are called _primitive_. - -For all configs under the same folder, it is recommended to have only **one** _primitive_ config. All other configs should inherit from the _primitive_ config. In this way, the maximum of inheritance level is 3. - -For easy understanding, we recommend contributors to inherit from exiting methods. 
-For example, if some modification is made base on DeepLabV3, user may first inherit the basic DeepLabV3 structure by specifying `_base_ = ../deeplabv3/deeplabv3_r50_512x1024_40ki_cityscapes.py`, then modify the necessary fields in the config files. - -If you are building an entirely new method that does not share the structure with any of the existing methods, you may create a folder `xxxnet` under `configs`, - -Please refer to [mmcv](https://mmcv.readthedocs.io/en/latest/utils.html#config) for detailed documentation. - -## Config Name Style - -We follow the below style to name config files. Contributors are advised to follow the same style. - -``` -{model}_{backbone}_[misc]_[gpu x batch_per_gpu]_{resolution}_{schedule}_{dataset} -``` - -`{xxx}` is required field and `[yyy]` is optional. - -- `{model}`: model type like `psp`, `deeplabv3`, etc. -- `{backbone}`: backbone type like `r50` (ResNet-50), `x101` (ResNeXt-101). -- `[misc]`: miscellaneous setting/plugins of model, e.g. `dconv`, `gcb`, `attention`, `mstrain`. -- `[gpu x batch_per_gpu]`: GPUs and samples per GPU, `8x2` is used by default. -- `{schedule}`: training schedule, `20ki` means 20k iterations. -- `{dataset}`: dataset like `cityscapes`, `voc12aug`, `ade`. - -## An Example of PSPNet - -To help the users have a basic idea of a complete config and the modules in a modern semantic segmentation system, -we make brief comments on the config of PSPNet using ResNet50V1c as the following. -For more detailed usage and the corresponding alternative for each modules, please refer to the API documentation. - -```python -norm_cfg = dict(type='SyncBN', requires_grad=True) # Segmentation usually uses SyncBN -model = dict( - type='EncoderDecoder', # Name of segmentor - pretrained='open-mmlab://resnet50_v1c', # The ImageNet pretrained backbone to be loaded - backbone=dict( - type='ResNetV1c', # The type of backbone. Please refer to mmseg/backbone/resnet.py for details. - depth=50, # Depth of backbone. Normally 50, 101 are used. - num_stages=4, # Number of stages of backbone. - out_indices=(0, 1, 2, 3), # The index of output feature maps produced in each stages. - dilations=(1, 1, 2, 4), # The dilation rate of each layer. - strides=(1, 2, 1, 1), # The stride of each layer. - norm_cfg=dict( # The configuration of norm layer. - type='SyncBN', # Type of norm layer. Usually it is SyncBN. - requires_grad=True), # Whether to train the gamma and beta in norm - norm_eval=False, # Whether to freeze the statistics in BN - style='pytorch', # The style of backbone, 'pytorch' means that stride 2 layers are in 3x3 conv, 'caffe' means stride 2 layers are in 1x1 convs. - contract_dilation=True), # When dilation > 1, whether contract first layer of dilation. - decode_head=dict( - type='PSPHead', # Type of decode head. Please refer to mmseg/models/decode_heads for available options. - in_channels=2048, # Input channel of decode head. - in_index=3, # The index of feature map to select. - channels=512, # The intermediate channels of decode head. - pool_scales=(1, 2, 3, 6), # The avg pooling scales of PSPHead. Please refer to paper for details. - drop_out_ratio=0.1, # The dropout ratio before final classification layer. - num_classes=19, # Number of segmentation classs. Usually 19 for cityscapes, 21 for VOC, 150 for ADE20k. - norm_cfg=dict(type='SyncBN', requires_grad=True), # The configuration of norm layer. - align_corners=False, # The align_corners argument for resize in decoding. - loss_decode=dict( # Config of loss function for the decode_head. 
- type='CrossEntropyLoss', # Type of loss used for segmentation. - use_sigmoid=False, # Whether use sigmoid activation for segmentation. - loss_weight=1.0)), # Loss weight of decode head. - auxiliary_head=dict( - type='FCNHead', # Type of auxiliary head. Please refer to mmseg/models/decode_heads for available options. - in_channels=1024, # Input channel of auxiliary head. - in_index=2, # The index of feature map to select. - channels=256, # The intermediate channels of decode head. - num_convs=1, # Number of convs in FCNHead. It is usually 1 in auxiliary head. - concat_input=False, # Whether concat output of convs with input before classification layer. - drop_out_ratio=0.1, # The dropout ratio before final classification layer. - num_classes=19, # Number of segmentation classs. Usually 19 for cityscapes, 21 for VOC, 150 for ADE20k. - norm_cfg=dict(type='SyncBN', requires_grad=True), # The configuration of norm layer. - align_corners=False, # The align_corners argument for resize in decoding. - loss_decode=dict( # Config of loss function for the decode_head. - type='CrossEntropyLoss', # Type of loss used for segmentation. - use_sigmoid=False, # Whether use sigmoid activation for segmentation. - loss_weight=0.4))) # Loss weight of auxiliary head, which is usually 0.4 of decode head. -train_cfg = dict() # train_cfg is just a place holder for now. -test_cfg = dict(mode='whole') # The test mode, options are 'whole' and 'sliding'. 'whole': whole image fully-convolutional test. 'sliding': sliding crop window on the image. -dataset_type = 'CityscapesDataset' # Dataset type, this will be used to define the dataset. -data_root = 'data/cityscapes/' # Root path of data. -img_norm_cfg = dict( # Image normalization config to normalize the input images. - mean=[123.675, 116.28, 103.53], # Mean values used to pre-training the pre-trained backbone models. - std=[58.395, 57.12, 57.375], # Standard variance used to pre-training the pre-trained backbone models. - to_rgb=True) # The channel orders of image used to pre-training the pre-trained backbone models. -crop_size = (512, 1024) # The crop size during training. -train_pipeline = [ # Training pipeline. - dict(type='LoadImageFromFile'), # First pipeline to load images from file path. - dict(type='LoadAnnotations'), # Second pipeline to load annotations for current image. - dict(type='Resize', # Augmentation pipeline that resize the images and their annotations. - img_scale=(2048, 1024), # The largest scale of image. - ratio_range=(0.5, 2.0)), # The augmented scale range as ratio. - dict(type='RandomCrop', # Augmentation pipeline that randomly crop a patch from current image. - crop_size=(512, 1024), # The crop size of patch. - cat_max_ratio=0.75), # The max area ratio that could be occupied by single category. - dict( - type='RandomFlip', # Augmentation pipeline that flip the images and their annotations - flip_ratio=0.5), # The ratio or probability to flip - dict(type='PhotoMetricDistortion'), # Augmentation pipeline that distort current image with several photo metric methods. - dict( - type='Normalize', # Augmentation pipeline that normalize the input images - mean=[123.675, 116.28, 103.53], # These keys are the same of img_norm_cfg since the - std=[58.395, 57.12, 57.375], # keys of img_norm_cfg are used here as arguments - to_rgb=True), - dict(type='Pad', # Augmentation pipeline that pad the image to specified size. - size=(512, 1024), # The output size of padding. - pad_val=0, # The padding value for image. 
- seg_pad_val=255), # The padding value of 'gt_semantic_seg'. - dict(type='DefaultFormatBundle'), # Default format bundle to gather data in the pipeline - dict(type='Collect', # Pipeline that decides which keys in the data should be passed to the segmentor - keys=['img', 'gt_semantic_seg']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), # First pipeline to load images from file path - dict( - type='MultiScaleFlipAug', # An encapsulation that encapsulates the test time augmentations - img_scale=(2048, 1024), # Decides the largest scale for testing, used for the Resize pipeline - flip=False, # Whether to flip images during testing - transforms=[ - dict(type='Resize', # Use resize augmentation - keep_ratio=True), # Whether to keep the ratio between height and width, the img_scale set here will be supressed by the img_scale set above. - dict(type='RandomFlip'), # Thought RandomFlip is added in pipeline, it is not used when flip=False - dict( - type='Normalize', # Normalization config, the values are from img_norm_cfg - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='ImageToTensor', # Convert image to tensor - keys=['img']), - dict(type='Collect', # Collect pipeline that collect necessary keys for testing. - keys=['img']) - ]) -] -data = dict( - samples_per_gpu=2, # Batch size of a single GPU - workers_per_gpu=2, # Worker to pre-fetch data for each single GPU - train=dict( # Train dataset config - type='CityscapesDataset', # Type of dataset, refer to mmseg/datasets/ for details. - data_root='data/cityscapes/', # The root of dataset. - img_dir='leftImg8bit/train', # The image directory of dataset. - ann_dir='gtFine/train', # The annotation directory of dataset. - pipeline=[ # pipeline, this is passed by the train_pipeline created before. 
- dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict( - type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']) - ]), - val=dict( # Validation dataset config - type='CityscapesDataset', - data_root='data/cityscapes/', - img_dir='leftImg8bit/val', - ann_dir='gtFine/val', - pipeline=[ # Pipeline is passed by test_pipeline created before - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) - ]), - test=dict( - type='CityscapesDataset', - data_root='data/cityscapes/', - img_dir='leftImg8bit/val', - ann_dir='gtFine/val', - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) - ])) -log_config = dict( # config to register logger hook - interval=50, # Interval to print the log - hooks=[ - # dict(type='TensorboardLoggerHook') # The Tensorboard logger is also supported - dict(type='TextLoggerHook', by_epoch=False) - ]) -dist_params = dict(backend='nccl') # Parameters to setup distributed training, the port can also be set. -log_level = 'INFO' # The level of logging. -load_from = None # load models as a pre-trained model from a given path. This will not resume training. -resume_from = None # Resume checkpoints from a given path, the training will be resumed from the epoch when the checkpoint's is saved. -workflow = [('train', 1)] # Workflow for runner. [('train', 1)] means there is only one workflow and the workflow named 'train' is executed once. The workflow trains the model by 12 epochs according to the total_epochs. -cudnn_benchmark = True # Whether use cudnn_benchmark to speed up, which is fast for fixed input size. -optimizer = dict( # Config used to build optimizer, support all the optimizers in PyTorch whose arguments are also the same as those in PyTorch - type='SGD', # Type of optimizers, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/optimizer/default_constructor.py#L13 for more details - lr=0.01, # Learning rate of optimizers, see detail usages of the parameters in the documentation of PyTorch - momentum=0.9, # Momentum - weight_decay=0.0005) # Weight decay of SGD -optimizer_config = dict() # Config used to build the optimizer hook, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8 for implementation details. -lr_config = dict( - policy='poly', # The policy of scheduler, also support Step, CosineAnnealing, Cyclic, etc. 
Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9. - power=0.9, # The power of polynomial decay. - min_lr=0.0001, # The minimum learning rate to stable the training. - by_epoch=False) # Whethe count by epoch or not. -total_iters = 40000 # Total number of iterations. -checkpoint_config = dict( # Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation. - by_epoch=False, # Whethe count by epoch or not. - interval=4000) # The save interval. -evaluation = dict( # The config to build the evaluation hook. Please refer to mmseg/core/evaulation/eval_hook.py for details. - interval=4000, # The interval of evaluation. - metric='mIoU') # The evaluation metric. - - -``` - -## FAQ - -### Ignore some fields in the base configs - -Sometimes, you may set `_delete_=True` to ignore some of fields in base configs. -You may refer to [mmcv](https://mmcv.readthedocs.io/en/latest/utils.html#inherit-from-base-config-with-ignored-fields) for simple inllustration. - -In MMSegmentation, for example, to change the backbone of PSPNet with the following config. - -```python -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='MaskRCNN', - pretrained='torchvision://resnet50', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict(...), - auxiliary_head=dict(...)) -``` - -`ResNet` and `HRNet` use different keywords to construct. - -```python -_base_ = '../pspnet/psp_r50_512x1024_40ki_cityscpaes.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w32', - backbone=dict( - _delete_=True, - type='HRNet', - norm_cfg=norm_cfg, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256)))), - decode_head=dict(...), - auxiliary_head=dict(...)) -``` - -The `_delete_=True` would replace all old keys in `backbone` field with new keys new keys. - -### Use intermediate variables in configs - -Some intermediate variables are used in the configs files, like `train_pipeline`/`test_pipeline` in datasets. -It's worth noting that when modifying intermediate variables in the children configs, user need to pass the intermediate variables into corresponding fields again. -For example, we would like to change multi scale strategy to train/test a PSPNet. `train_pipeline`/`test_pipeline` are intermediate variable we would like modify. -```python -_base_ = '../pspnet/psp_r50_512x1024_40ki_cityscapes.py' -crop_size = (512, 1024) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(1.0, 2.0)), # change to [1., 2.] 
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], # change to multi scale testing - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -``` -We first define the new `train_pipeline`/`test_pipeline` and pass them into `data`. diff --git a/docs/Makefile b/docs/en/Makefile similarity index 100% rename from docs/Makefile rename to docs/en/Makefile diff --git a/docs/en/_static/css/readthedocs.css b/docs/en/_static/css/readthedocs.css new file mode 100644 index 0000000000..2e38d0880b --- /dev/null +++ b/docs/en/_static/css/readthedocs.css @@ -0,0 +1,6 @@ +.header-logo { + background-image: url("../images/mmsegmentation.png"); + background-size: 201px 40px; + height: 40px; + width: 201px; +} diff --git a/docs/en/_static/images/mmsegmentation.png b/docs/en/_static/images/mmsegmentation.png new file mode 100644 index 0000000000..009083a9e8 Binary files /dev/null and b/docs/en/_static/images/mmsegmentation.png differ diff --git a/docs/en/advanced_guides/add_dataset.md b/docs/en/advanced_guides/add_dataset.md new file mode 100644 index 0000000000..4149014e64 --- /dev/null +++ b/docs/en/advanced_guides/add_dataset.md @@ -0,0 +1,156 @@ +# Add New Datasets + +## Customize datasets by reorganizing data + +The simplest way is to convert your dataset to organize your data into folders. + +An example of file structure is as followed. + +```none +├── data +│ ├── my_dataset +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ │ ├── xxx{img_suffix} +│ │ │ │ ├── yyy{img_suffix} +│ │ │ │ ├── zzz{img_suffix} +│ │ │ ├── val +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ │ ├── xxx{seg_map_suffix} +│ │ │ │ ├── yyy{seg_map_suffix} +│ │ │ │ ├── zzz{seg_map_suffix} +│ │ │ ├── val + +``` + +A training pair will consist of the files with same suffix in img_dir/ann_dir. + +If `split` argument is given, only part of the files in img_dir/ann_dir will be loaded. +We may specify the prefix of files we would like to be included in the split txt. + +More specifically, for a split txt like following, + +```none +xxx +zzz +``` + +Only +`data/my_dataset/img_dir/train/xxx{img_suffix}`, +`data/my_dataset/img_dir/train/zzz{img_suffix}`, +`data/my_dataset/ann_dir/train/xxx{seg_map_suffix}`, +`data/my_dataset/ann_dir/train/zzz{seg_map_suffix}` will be loaded. + +:::{note} +The annotations are images of shape (H, W), the value pixel should fall in range `[0, num_classes - 1]`. +You may use `'P'` mode of [pillow](https://pillow.readthedocs.io/en/stable/handbook/concepts.html#palette) to create your annotation image with color. +::: + +## Customize datasets by mixing dataset + +MMSegmentation also supports to mix dataset for training. +Currently it supports to concat, repeat and multi-image mix datasets. + +### Repeat dataset + +We use `RepeatDataset` as wrapper to repeat the dataset. 
+For example, suppose the original dataset is `Dataset_A`, to repeat it, the config looks like the following + +```python +dataset_A_train = dict( + type='RepeatDataset', + times=N, + dataset=dict( # This is the original config of Dataset_A + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) +``` + +### Concatenate dataset + +In case the dataset you want to concatenate is different, you can concatenate the dataset configs like the following. + +```python +dataset_A_train = dict() +dataset_B_train = dict() +concatenate_dataset = dict( + type='ConcatDataset', + datasets=[dataset_A_train, dataset_B_train]) +``` + +A more complex example that repeats `Dataset_A` and `Dataset_B` by N and M times, respectively, and then concatenates the repeated datasets is as the following. + +```python +dataset_A_train = dict( + type='RepeatDataset', + times=N, + dataset=dict( + type='Dataset_A', + ... + pipeline=train_pipeline + ) +) +dataset_A_val = dict( + ... + pipeline=test_pipeline +) +dataset_A_test = dict( + ... + pipeline=test_pipeline +) +dataset_B_train = dict( + type='RepeatDataset', + times=M, + dataset=dict( + type='Dataset_B', + ... + pipeline=train_pipeline + ) +) +train_dataloader = dict( + dataset=dict('ConcatDataset', datasets=[dataset_A_train, dataset_B_train])) + +val_dataloader = dict(dataset=dataset_A_val) +test_dataloader = dict(dataset=dataset_A_test) + +``` + +You can refer base dataset [tutorial](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html) from mmengine for more details + +### Multi-image Mix Dataset + +We use `MultiImageMixDataset` as a wrapper to mix images from multiple datasets. +`MultiImageMixDataset` can be used by multiple images mixed data augmentation +like mosaic and mixup. + +An example of using `MultiImageMixDataset` with `Mosaic` data augmentation: + +```python +train_pipeline = [ + dict(type='RandomMosaic', prob=1), + dict(type='Resize', img_scale=(1024, 512), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackSegInputs') +] + +train_dataset = dict( + type='MultiImageMixDataset', + dataset=dict( + classes=classes, + palette=palette, + type=dataset_type, + reduce_zero_label=False, + img_dir=data_root + "images/train", + ann_dir=data_root + "annotations/train", + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + ] + ), + pipeline=train_pipeline +) + +``` diff --git a/docs/en/advanced_guides/add_models.md b/docs/en/advanced_guides/add_models.md new file mode 100644 index 0000000000..1f1969db39 --- /dev/null +++ b/docs/en/advanced_guides/add_models.md @@ -0,0 +1,260 @@ +# Add New Modules + +## Develop new components + +We can customize all the components introduced at [the model documentation](./models.md), such as **backbone**, **head**, **loss function** and **data preprocessor**. + +### Add new backbones + +Here we show how to develop a new backbone with an example of MobileNet. + +1. Create a new file `mmseg/models/backbones/mobilenet.py`. + + ```python + import torch.nn as nn + + from mmseg.registry import MODELS + + + @MODELS.register_module() + class MobileNet(nn.Module): + + def __init__(self, arg1, arg2): + pass + + def forward(self, x): # should return a tuple + pass + + def init_weights(self, pretrained=None): + pass + ``` + +2. Import the module in `mmseg/models/backbones/__init__.py`. + + ```python + from .mobilenet import MobileNet + ``` + +3. Use it in your config file. + + ```python + model = dict( + ... 
+ backbone=dict( + type='MobileNet', + arg1=xxx, + arg2=xxx), + ... + ``` + +### Add new heads + +In MMSegmentation, we provide a [BaseDecodeHead](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/models/decode_heads/decode_head.py#L17) for developing all segmentation heads. +All newly implemented decode heads should be derived from it. +Here we show how to develop a new head with the example of [PSPNet](https://arxiv.org/abs/1612.01105) as the following. + +First, add a new decode head in `mmseg/models/decode_heads/psp_head.py`. +PSPNet implements a decode head for segmentation decode. +To implement a decode head, we need to implement three functions of the new module as the following. + +```python +from mmseg.registry import MODELS + +@MODELS.register_module() +class PSPHead(BaseDecodeHead): + + def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): + super(PSPHead, self).__init__(**kwargs) + + def init_weights(self): + pass + + def forward(self, inputs): + pass +``` + +Next, the users need to add the module in the `mmseg/models/decode_heads/__init__.py`, thus the corresponding registry could find and load them. + +To config file of PSPNet is as the following + +```python +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='pretrain_model/resnet50_v1c_trick-2cccc1ad.pth', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='PSPHead', + in_channels=2048, + in_index=3, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) + +``` + +### Add new loss + +Assume you want to add a new loss as `MyLoss` for segmentation decode. +To add a new loss function, the users need to implement it in `mmseg/models/losses/my_loss.py`. +The decorator `weighted_loss` enables the loss to be weighted for each element. + +```python +import torch +import torch.nn as nn + +from mmseg.registry import MODELS +from .utils import weighted_loss + +@weighted_loss +def my_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + +@MODELS.register_module() +class MyLoss(nn.Module): + + def __init__(self, reduction='mean', loss_weight=1.0): + super(MyLoss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss = self.loss_weight * my_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss +``` + +Then the users need to add it in the `mmseg/models/losses/__init__.py`. + +```python +from .my_loss import MyLoss, my_loss + +``` + +To use it, modify the `loss_xxx` field. +Then you need to modify the `loss_decode` field in the head. +`loss_weight` could be used to balance multiple losses. 
+ +```python +loss_decode=dict(type='MyLoss', loss_weight=1.0)) +``` + +### Add new data preprocessor + +In MMSegmentation 1.x versions, we use [SegDataPreProcessor](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/mmseg/models/data_preprocessor.py#L13) to copy data to the target device and preprocess the data into the model input format as default. Here we show how to develop a new data preprocessor. + +1. Create a new file `mmseg/models/my_datapreprocessor.py`. + + ```python + from mmengine.model import BaseDataPreprocessor + + from mmseg.registry import MODELS + + @MODELS.register_module() + class MyDataPreProcessor(BaseDataPreprocessor): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def forward(self, data: dict, training: bool=False) -> Dict[str, Any]: + # TODO Define the logic for data pre-processing in the forward method + pass + ``` + +2. Import your data preprocessor in `mmseg/models/__init__.py` + + ```python + from .my_datapreprocessor import MyDataPreProcessor + ``` + +3. Use it in your config file. + + ```python + model = dict( + data_preprocessor=dict(type='MyDataPreProcessor) + ... + ) + ``` + +## Develop new segmentors + +The segmentor is an algorithmic architecture in which users can customize their algorithms by adding customized components and defining the logic of algorithm execution. Please refer to [the model document](https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/advanced_guides/models.md) for more details. + +Since the [BaseSegmentor](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/models/segmentors/base.py#L15) in MMSegmentation unifies three modes for a forward process, to develop a new segmentor, users need to overwrite `loss`, `predict` and `_forward` methods corresponding to the `loss`, `predict` and `tensor` modes. + +Here we show how to develop a new segmentor. + +1. Create a new file `mmseg/models/segmentors/my_segmentor.py`. + + ```python + from typing import Dict, Optional, Union + + import torch + + from mmseg.registry import MODELS + from mmseg.models import BaseSegmentor + + @MODELS.register_module() + class MySegmentor(BaseSegmentor): + def __init__(self, **kwargs): + super().__init__(**kwargs) + # TODO users should build components of the network here + + def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + pass + + def predict(self, inputs: Tensor, data_samples: OptSampleList=None) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing.""" + pass + + def _forward(self, + inputs: Tensor, + data_samples: OptSampleList = None) -> Tuple[List[Tensor]]: + """Network forward process. + + Usually includes backbone, neck and head forward without any post- + processing. + """ + pass + ``` + +2. Import your segmentor in `mmseg/models/segmentors/__init__.py`. + + ```python + from .my_segmentor import MySegmentor + ``` + +3. Use it in your config file. + + ```python + model = dict( + type='MySegmentor' + ... + ) + ``` diff --git a/docs/en/advanced_guides/add_transform.md b/docs/en/advanced_guides/add_transform.md new file mode 100644 index 0000000000..69de9d317b --- /dev/null +++ b/docs/en/advanced_guides/add_transform.md @@ -0,0 +1,37 @@ +# Adding New Data Transforms + +1. Write a new pipeline in any file, e.g., `my_pipeline.py`. It takes a dict as input and return a dict. 
+ + ```python + from mmseg.datasets import TRANSFORMS + @TRANSFORMS.register_module() + class MyTransform: + def transform(self, results): + results['dummy'] = True + return results + ``` + +2. Import the new class. + + ```python + from .my_pipeline import MyTransform + ``` + +3. Use it in config files. + + ```python + crop_size = (512, 1024) + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='RandomResize', + scale=(2048, 1024), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='MyTransform'), + dict(type='PackSegInputs'), + ] + ``` diff --git a/docs/en/advanced_guides/customize_runtime.md b/docs/en/advanced_guides/customize_runtime.md new file mode 100644 index 0000000000..f138c226fd --- /dev/null +++ b/docs/en/advanced_guides/customize_runtime.md @@ -0,0 +1,245 @@ +# Customize Runtime Settings + +## Customize optimization settings + +### Customize optimizer supported by Pytorch + +We already support to use all the optimizers implemented by PyTorch, and the only modification is to change the `optimizer` field of config files. +For example, if you want to use `ADAM` (note that the performance could drop a lot), the modification could be as the following. + +```python +optimizer = dict(type='Adam', lr=0.0003, weight_decay=0.0001) +``` + +To modify the learning rate of the model, the users only need to modify the `lr` in the config of optimizer. The users can directly set arguments following the [API doc](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim) of PyTorch. + +### Customize self-implemented optimizer + +#### 1. Define a new optimizer + +A customized optimizer could be defined as following. + +Assume you want to add a optimizer named `MyOptimizer`, which has arguments `a`, `b`, and `c`. +You need to create a new directory named `mmseg/core/optimizer`. +And then implement the new optimizer in a file, e.g., in `mmseg/core/optimizer/my_optimizer.py`: + +```python +from .registry import OPTIMIZERS +from torch.optim import Optimizer + + +@OPTIMIZERS.register_module() +class MyOptimizer(Optimizer): + + def __init__(self, a, b, c) + +``` + +#### 2. Add the optimizer to registry + +To find the above module defined above, this module should be imported into the main namespace at first. There are two options to achieve it. + +- Modify `mmseg/core/optimizer/__init__.py` to import it. + + The newly defined module should be imported in `mmseg/core/optimizer/__init__.py` so that the registry will + find the new module and add it: + +```python +from .my_optimizer import MyOptimizer +``` + +- Use `custom_imports` in the config to manually import it + +```python +custom_imports = dict(imports=['mmseg.core.optimizer.my_optimizer'], allow_failed_imports=False) +``` + +The module `mmseg.core.optimizer.my_optimizer` will be imported at the beginning of the program and the class `MyOptimizer` is then automatically registered. +Note that only the package containing the class `MyOptimizer` should be imported. +`mmseg.core.optimizer.my_optimizer.MyOptimizer` **cannot** be imported directly. + +Actually users can use a totally different file directory structure using this importing method, as long as the module root can be located in `PYTHONPATH`. + +#### 3. Specify the optimizer in the config file + +Then you can use `MyOptimizer` in `optimizer` field of config files. 
+In the configs, the optimizers are defined by the field `optimizer` like the following: + +```python +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +``` + +To use your own optimizer, the field can be changed to + +```python +optimizer = dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value) +``` + +### Customize optimizer constructor + +Some models may have some parameter-specific settings for optimization, e.g. weight decay for BatchNorm layers. +The users can do those fine-grained parameter tuning through customizing optimizer constructor. + +```python +from mmcv.utils import build_from_cfg + +from mmcv.runner.optimizer import OPTIMIZER_BUILDERS, OPTIMIZERS +from mmseg.utils import get_root_logger +from .my_optimizer import MyOptimizer + + +@OPTIMIZER_BUILDERS.register_module() +class MyOptimizerConstructor(object): + + def __init__(self, optim_wrapper_cfg, paramwise_cfg=None): + + def __call__(self, model): + + return my_optimizer + +``` + +The default optimizer constructor is implemented [here](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/optimizer/default_constructor.py#L11), which could also serve as a template for new optimizer constructor. + +### Additional settings + +Tricks not implemented by the optimizer should be implemented through optimizer constructor (e.g., set parameter-wise learning rates) or hooks. We list some common settings that could stabilize the training or accelerate the training. Feel free to create PR, issue for more settings. + +- __Use gradient clip to stabilize training__: + Some models need gradient clip to clip the gradients to stabilize the training process. An example is as below: + + ```python + optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) + ``` + + If your config inherits the base config which already sets the `optimizer_config`, you might need `_delete_=True` to override the unnecessary settings. See the [config documentation](https://mmsegmentation.readthedocs.io/en/latest/config.html) for more details. + +- __Use momentum schedule to accelerate model convergence__: + We support momentum scheduler to modify model's momentum according to learning rate, which could make the model converge in a faster way. + Momentum scheduler is usually used with LR scheduler, for example, the following config is used in 3D detection to accelerate convergence. + For more details, please refer to the implementation of [CyclicLrUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L327) and [CyclicMomentumUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/momentum_updater.py#L130). + + ```python + lr_config = dict( + policy='cyclic', + target_ratio=(10, 1e-4), + cyclic_times=1, + step_ratio_up=0.4, + ) + momentum_config = dict( + policy='cyclic', + target_ratio=(0.85 / 0.95, 1), + cyclic_times=1, + step_ratio_up=0.4, + ) + ``` + +## Customize training schedules + +By default we use step learning rate with 40k/80k schedule, this calls [`PolyLrUpdaterHook`](https://github.com/open-mmlab/mmcv/blob/826d3a7b68596c824fa1e2cb89b6ac274f52179c/mmcv/runner/hooks/lr_updater.py#L196) in MMCV. +We support many other learning rate schedule [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py), such as `CosineAnnealing` and `Poly` schedule. 
Here are some examples:
+
+- Step schedule:
+
+  ```python
+  lr_config = dict(policy='step', step=[9, 10])
+  ```
+
+- CosineAnnealing schedule:
+
+  ```python
+  lr_config = dict(
+      policy='CosineAnnealing',
+      warmup='linear',
+      warmup_iters=1000,
+      warmup_ratio=1.0 / 10,
+      min_lr_ratio=1e-5)
+  ```
+
+## Customize workflow
+
+Workflow is a list of (phase, epochs) pairs to specify the running order and epochs.
+By default it is set to be
+
+```python
+workflow = [('train', 1)]
+```
+
+which means running 1 epoch for training.
+Sometimes users may want to check some metrics (e.g. loss, accuracy) of the model on the validation set.
+In such a case, we can set the workflow as
+
+```python
+[('train', 1), ('val', 1)]
+```
+
+so that 1 epoch for training and 1 epoch for validation will be run iteratively.
+
+:::{note}
+
+1. The parameters of the model will not be updated during a val epoch.
+2. Keyword `total_epochs` in the config only controls the number of training epochs and will not affect the validation workflow.
+3. Workflows `[('train', 1), ('val', 1)]` and `[('train', 1)]` will not change the behavior of `EvalHook` because `EvalHook` is called by `after_train_epoch` and the validation workflow only affects hooks that are called through `after_val_epoch`. Therefore, the only difference between `[('train', 1), ('val', 1)]` and `[('train', 1)]` is that the runner will calculate losses on the validation set after each training epoch.
+
+:::
+
+## Customize hooks
+
+### Use hooks implemented in MMCV
+
+If the hook is already implemented in MMCV, you can directly modify the config to use the hook as below:
+
+```python
+custom_hooks = [
+    dict(type='MyHook', a=a_value, b=b_value, priority='NORMAL')
+]
+```
+
+### Modify default runtime hooks
+
+There are some common hooks that are not registered through `custom_hooks`; they are:
+
+- log_config
+- checkpoint_config
+- evaluation
+- lr_config
+- optimizer_config
+- momentum_config
+
+Among these hooks, only the logger hook has the `VERY_LOW` priority; the others' priority is `NORMAL`.
+The above-mentioned tutorials already cover how to modify `optimizer_config`, `momentum_config`, and `lr_config`.
+Here we reveal what we can do with `log_config`, `checkpoint_config`, and `evaluation`.
+
+#### Checkpoint config
+
+The MMCV runner will use `checkpoint_config` to initialize [`CheckpointHook`](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/hooks/checkpoint.py#L9).
+
+```python
+checkpoint_config = dict(interval=1)
+```
+
+The users could set `max_keep_ckpts` to save only a small number of checkpoints, or decide whether to store the state dict of the optimizer by `save_optimizer`. More details of the arguments are [here](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.CheckpointHook).
+
+#### Log config
+
+The `log_config` wraps multiple logger hooks and enables setting their intervals. Now MMCV supports `WandbLoggerHook`, `MlflowLoggerHook`, and `TensorboardLoggerHook`.
+The detailed usage can be found in the [doc](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.LoggerHook).
+
+```python
+log_config = dict(
+    interval=50,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHook')
+    ])
+```
+
+#### Evaluation config
+
+The config of `evaluation` will be used to initialize the [`EvalHook`](https://github.com/open-mmlab/mmsegmentation/blob/e3f6f655d69b777341aec2fe8829871cc0beadcb/mmseg/core/evaluation/eval_hooks.py#L7).
+Except the key `interval`, other arguments such as `metric` will be passed to `dataset.evaluate()`.
+
+```python
+evaluation = dict(interval=1, metric='mIoU')
+```
diff --git a/docs/en/advanced_guides/data_flow.md b/docs/en/advanced_guides/data_flow.md
new file mode 100644
index 0000000000..59e7ca3294
--- /dev/null
+++ b/docs/en/advanced_guides/data_flow.md
@@ -0,0 +1 @@
+# Data Flow
diff --git a/docs/en/advanced_guides/datasets.md b/docs/en/advanced_guides/datasets.md
new file mode 100644
index 0000000000..157ea3aad8
--- /dev/null
+++ b/docs/en/advanced_guides/datasets.md
@@ -0,0 +1 @@
+# Datasets
diff --git a/docs/en/advanced_guides/engine.md b/docs/en/advanced_guides/engine.md
new file mode 100644
index 0000000000..eaa55b0c8c
--- /dev/null
+++ b/docs/en/advanced_guides/engine.md
@@ -0,0 +1 @@
+# Engine
diff --git a/docs/en/advanced_guides/evaluation.md b/docs/en/advanced_guides/evaluation.md
new file mode 100644
index 0000000000..55728281a9
--- /dev/null
+++ b/docs/en/advanced_guides/evaluation.md
@@ -0,0 +1,158 @@
+# Evaluation
+
+The evaluation procedure is executed in [ValLoop](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/loops.py#L300) and [TestLoop](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/loops.py#L373): users can evaluate model performance during training, or with the test script, using simple settings in the configuration file. The `ValLoop` and `TestLoop` are properties of [Runner](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/runner.py#L59); they will be built the first time they are called. To build the `ValLoop` successfully, the `val_dataloader` and `val_evaluator` must be set when building the `Runner`, since `dataloader` and `evaluator` are required parameters, and the same goes for `TestLoop`. For more information about the Runner's design, please refer to the [documentation](https://github.com/open-mmlab/mmengine/blob/main/docs/en/design/runner.md) of [MMEngine](https://github.com/open-mmlab/mmengine).
+
+*Figure: test_step/val_step dataflow*
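+
+Below is a minimal, illustrative sketch of how these required fields fit together when a `Runner` is built from Python (the `model`, `train_dataloader` and `val_dataloader` objects are assumed to exist already; the values are placeholders, not a complete working setup):
+
+```python
+from mmengine.runner import Runner
+
+runner = Runner(
+    model=model,                      # an already-built model or model config
+    work_dir='./work_dirs/example',   # hypothetical output directory
+    train_dataloader=train_dataloader,
+    optim_wrapper=dict(optimizer=dict(type='SGD', lr=0.01, momentum=0.9)),
+    train_cfg=dict(type='IterBasedTrainLoop', max_iters=40000, val_interval=4000),
+    # without the two fields below, building the ValLoop would fail
+    val_dataloader=val_dataloader,
+    val_evaluator=dict(type='IoUMetric', iou_metrics=['mIoU']),
+    val_cfg=dict(type='ValLoop'))
+runner.train()
+```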
+ +In MMSegmentation, we write the settings of dataloader and metrics in the config files of datasets and the configuration of the evaluation loop in the `schedule_x` config files by default. + +For example, in the ADE20K config file `configs/_base_/dataset/ade20k.py`, on lines 37 to 48, we configured the `val_dataloader`, on line 51, we select `IoUMetric` as the evaluator and set `mIoU` as the metric: + +```python +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/validation', + seg_map_path='annotations/validation'), + pipeline=test_pipeline)) + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +``` + +To be able to evaluate the model during training, for example, we add the evaluation configuration to the file `configs/schedules/schedule_40k.py` on lines 15 to 16: + +```python +train_cfg = dict(type='IterBasedTrainLoop', max_iters=40000, val_interval=4000) +val_cfg = dict(type='ValLoop') +``` + +With the above two settings, MMSegmentation evaluates the **mIoU** metric of the model once every 4000 iterations during the training of 40K iterations. + +If we would like to test the model after training, we need to add the `test_dataloader`, `test_evaluator` and `test_cfg` configs to the config file. + +```python +test_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='images/validation', + seg_map_path='annotations/validation'), + pipeline=test_pipeline)) + +test_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_cfg = dict(type='TestLoop') +``` + +In MMSegmentation, the settings of `test_dataloader` and `test_evaluator` are the same as the `ValLoop`'s dataloader and evaluator by default, we can modify these settings to meet our needs. + +## IoUMetric + +MMSegmentation implements [IoUMetric](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/evaluation/metrics/iou_metric.py) and [CitysMetric](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/evaluation/metrics/citys_metric.py) for evaluating the performance of models, based on the [BaseMetric](https://github.com/open-mmlab/mmengine/blob/main/mmengine/evaluator/metric.py) provided by [MMEngine](https://github.com/open-mmlab/mmengine). Please refer to [the documentation](https://mmengine.readthedocs.io/en/latest/tutorials/evaluation.html) for more details about the unified evaluation interface. + +Here we briefly describe the arguments and the two main methods of `IoUMetric`. + +The constructor of `IoUMetric` has some additional parameters besides the base `collect_device` and `prefix`. + +The arguments of the constructor: + +- ignore_index (int) - Index that will be ignored in evaluation. Default: 255. +- iou_metrics (list\[str\] | str) - Metrics to be calculated, the options includes 'mIoU', 'mDice' and 'mFscore'. +- nan_to_num (int, optional) - If specified, NaN values will be replaced by the numbers defined by the user. Default: None. +- beta (int) - Determines the weight of recall in the combined score. Default: 1. +- collect_device (str) - Device name used for collecting results from different ranks during distributed training. Must be 'cpu' or 'gpu'. Defaults to 'cpu'. 
+- prefix (str, optional) - The prefix that will be added in the metric names to disambiguate homonymous metrics of different evaluators. If the prefix is not provided in the argument, self.default_prefix will be used instead. Defaults to None. + +`IoUMetric` implements the IoU metric calculation, the core two methods of `IoUMetric` are `process` and `compute_metrics`. + +- `process` method processes one batch of data and data_samples. +- `compute_metrics` method computes the metrics from processed results. + +#### IoUMetric.process + +Parameters: + +- data_batch (Any) - A batch of data from the dataloader. +- data_samples (Sequence\[dict\]) - A batch of outputs from the model. + +Returns: + +This method doesn't have returns since the processed results would be stored in `self.results`, which will be used to compute the metrics when all batches have been processed. + +#### IoUMetric.compute_metrics + +Parameters: + +- results (list) - The processed results of each batch. + +Returns: + +- Dict\[str, float\] - The computed metrics. The keys are the names of the metrics, and the values are corresponding results. The key mainly includes **aAcc**, **mIoU**, **mAcc**, **mDice**, **mFscore**, **mPrecision**, **mRecall**. + +## CitysMetric + +`CitysMetric` uses the official [CityscapesScripts](https://github.com/mcordts/cityscapesScripts) provided by Cityscapes to evaluate model performance. + +### Usage + +Before using it, please install the `cityscapesscripts` package first: + +```shell +pip install cityscapesscripts +``` + +Since the `IoUMetric` is used as the default evaluator in MMSegmentation, if you would like to use `CitysMetric`, customizing the config file is required. In your customized config file, you should overwrite the default evaluator as follows. + +```python +val_evaluator = dict(type='CitysMetric', citys_metrics=['cityscapes']) +test_evaluator = val_evaluator +``` + +### Interface + +The arguments of the constructor: + +- ignore_index (int) - Index that will be ignored in evaluation. Default: 255. +- citys_metrics (list\[str\] | str) - Metrics to be evaluated, Default: \['cityscapes'\]. +- to_label_id (bool) - whether convert output to label_id for submission. Default: True. +- suffix (str): The filename prefix of the png files. If the prefix is "somepath/xxx", the png files will be named "somepath/xxx.png". Default: '.format_cityscapes'. +- collect_device (str): Device name used for collecting results from different ranks during distributed training. Must be 'cpu' or 'gpu'. Defaults to 'cpu'. +- prefix (str, optional): The prefix that will be added in the metric names to disambiguate homonymous metrics of different evaluators. If the prefix is not provided in the argument, self.default_prefix will be used instead. Defaults to None. + +#### CitysMetric.process + +This method would draw the masks on images and save the painted images to `work_dir`. + +Parameters: + +- data_batch (Any) - A batch of data from the dataloader. +- data_samples (Sequence\[dict\]) - A batch of outputs from the model. + +Returns: + +This method doesn't have returns, the annotations' path would be stored in `self.results`, which will be used to compute the metrics when all batches have been processed. + +#### CitysMetric.compute_metrics + +This method would call `cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling` tool to calculate metrics. + +Parameters: + +- results (list) - Testing results of the dataset. + +Returns: + +- dict\[str: float\] - Cityscapes evaluation results. 
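+
+As a wrap-up, here is a sketch of how the `IoUMetric` constructor arguments listed earlier map onto an evaluator config; the values below are only examples, not recommended settings:
+
+```python
+val_evaluator = dict(
+    type='IoUMetric',
+    iou_metrics=['mIoU', 'mDice', 'mFscore'],  # any subset of the supported options
+    ignore_index=255,   # label index excluded from evaluation
+    nan_to_num=0,       # replace NaN entries in per-class results with 0
+    beta=1)             # recall weight used when computing mFscore
+test_evaluator = val_evaluator
+```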
diff --git a/docs/en/advanced_guides/index.rst b/docs/en/advanced_guides/index.rst new file mode 100644 index 0000000000..1cae420c1c --- /dev/null +++ b/docs/en/advanced_guides/index.rst @@ -0,0 +1,26 @@ +Basic Concepts +*************** + +.. toctree:: + :maxdepth: 1 + + data_flow.md + structures.md + models.md + datasets.md + transforms.md + evaluation.md + engine.md + training_tricks.md + +Component Customization +************************ + +.. toctree:: + :maxdepth: 1 + + add_modules.md + add_datasets.md + add_transforms.md + add_metrics.md + customize_runtime.md diff --git a/docs/en/advanced_guides/models.md b/docs/en/advanced_guides/models.md new file mode 100644 index 0000000000..8202e95b7c --- /dev/null +++ b/docs/en/advanced_guides/models.md @@ -0,0 +1,179 @@ +# Models + +# Models + +We usually define a neural network in a deep learning task as a model, and this model is the core of an algorithm. [MMEngine](https://github.com/open-mmlab/mmengine) abstracts a unified model [BaseModel](https://github.com/open-mmlab/mmengine/blob/main/mmengine/model/base_model/base_model.py#L16) to standardize the interfaces for training, testing and other processes. All models implemented by MMSegmentation inherit from `BaseModel`, and in MMSegmentation we implemented forward and added some functions for the semantic segmentation algorithm. + +## Common components + +### Segmentor + +In MMSegmentation, we abstract the network architecture as a **Segmentor**, it is a model that contains all components of a network. We have already implemented **EncoderDecoder** and **CascadeEncoderDecoder**, which typically consist of **Data preprocessor**, **Backbone**, **Decode head** and **Auxiliary head**. + +### Data preprocessor + +**Data preprocessor** is the part that copies data to the target device and preprocesses the data into the model input format. + +### Backbone + +**Backbone** is the part that transforms an image to feature maps, such as a **ResNet-50** without the last fully connected layer. + +### Neck + +**Neck** is the part that connects the backbone and heads. It performs some refinements or reconfigurations on the raw feature maps produced by the backbone. An example is **Feature Pyramid Network (FPN)**. + +### Decode Head + +**Decode Head** is the part that transforms the feature maps into a segmentation mask, such as **PSPNet**. + +### Auxiliary head + +**Auxiliary head** is an optional component that transforms the feature maps into segmentation masks which only used for computing auxiliary losses. + +## Basic interfaces + +MMSegmentation wraps `BaseModel` and implements the [BaseSegmentor](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/models/segmentors/base.py#L15) class, which mainly provides the interfaces `forward`, `train_step`, `val_step` and `test_step`. The following will introduce these interfaces in detail. + +### forward + +
+
+*Figure: EncoderDecoder dataflow*
+
+*Figure: CascadeEncoderDecoder dataflow*
+
+The `forward` method returns losses or predictions for training, validation, testing, and a simple inference process.
+
+The method should accept three modes: "tensor", "predict" and "loss":
+
+- "tensor": Forward the whole network and return the tensor or tuple of tensor without any post-processing, same as a common `nn.Module`.
+- "predict": Forward and return the predictions, which are fully processed to a list of `SegDataSample`.
+- "loss": Forward and return a `dict` of losses according to the given inputs and data samples.
+
+**Note:** [SegDataSample](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/structures/seg_data_sample.py) is a data structure interface of MMSegmentation; it is used as an interface between different components. `SegDataSample` implements the abstract data element `mmengine.structures.BaseDataElement`; please refer to [the SegDataSample documentation](https://mmsegmentation.readthedocs.io/en/1.x/advanced_guides/structures.html) and [data element documentation](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/data_element.html) in [MMEngine](https://github.com/open-mmlab/mmengine) for more information.
+
+Note that this method doesn't handle backpropagation or optimizer updating, which are done in the method `train_step`.
+
+Parameters:
+
+- inputs (torch.Tensor) - The input tensor with shape (N, C, ...) in general.
+- data_sample (list\[[SegDataSample](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/structures/seg_data_sample.py)\]) - The seg data samples. It usually includes information such as `metainfo` and `gt_sem_seg`. Defaults to None.
+- mode (str) - Return what kind of value. Defaults to 'tensor'.
+
+Returns:
+
+- `dict` or `list`:
+  - If `mode == "loss"`, return a `dict` of loss tensors used for backward and logging.
+  - If `mode == "predict"`, return a `list` of `SegDataSample`; the inference results will be incrementally added to the `data_sample` parameter passed to the forward method, and each `SegDataSample` contains the following keys:
+    - pred_sem_seg (`PixelData`): Prediction of semantic segmentation.
+    - seg_logits (`PixelData`): Predicted logits of semantic segmentation before normalization.
+  - If `mode == "tensor"`, return a `tensor` or `tuple of tensor` or `dict` of `tensor` for custom use.
+
+### prediction modes
+
+We briefly describe the fields of the model's configuration in [the config documentation](../user_guides/1_config.md); here we elaborate on the `model.test_cfg` field. `model.test_cfg` is used to control forward behavior: the `forward` method in `"predict"` mode can run in two modes:
+
+- `whole_inference`: If `cfg.model.test_cfg.mode == 'whole'`, the model will run inference on the whole image.
+
+  A `whole_inference` mode example config:
+
+  ```python
+  model = dict(
+      type='EncoderDecoder',
+      ...
+      test_cfg=dict(mode='whole')
+  )
+  ```
+
+- `slide_inference`: If `cfg.model.test_cfg.mode == 'slide'`, the model will run inference with a sliding window. **Note:** if you select the `slide` mode, `cfg.model.test_cfg.stride` and `cfg.model.test_cfg.crop_size` should also be specified.
+
+  A `slide_inference` mode example config:
+
+  ```python
+  model = dict(
+      type='EncoderDecoder',
+      ...
+      test_cfg=dict(mode='slide', crop_size=(256, 256), stride=(170, 170))
+  )
+  ```
+
+### train_step
+
+The `train_step` method calls the forward interface of the `loss` mode to get the loss `dict`.
The `BaseModel` class implements the default model training process including preprocessing, model forward propagation, loss calculation, optimization, and back-propagation. + +Parameters: + +- data (dict or tuple or list) - Data sampled from the dataset. In MMSegmentation, the data dict contains `inputs` and `data_samples` two fields. +- optim_wrapper (OptimWrapper) - OptimWrapper instance used to update model parameters. + +**Note:** [OptimWrapper](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/optimizer/optimizer_wrapper.py#L17) provides a common interface for updating parameters, please refer to optimizer wrapper [documentation](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/optim_wrapper.html) in [MMEngine](https://github.com/open-mmlab/mmengine) for more information. + +Returns: + +- Dict\[str, `torch.Tensor`\]: A `dict` of tensor for logging. + +
+
+*Figure: train_step dataflow*
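+
+For reference, here is a minimal sketch of the call pattern an iteration-based loop uses to drive `train_step` (illustrative pseudocode, assuming `model`, `train_dataloader` and `optim_wrapper` are already built; it is not the actual `IterBasedTrainLoop` implementation):
+
+```python
+for data in train_dataloader:
+    # `data` is a dict with the 'inputs' and 'data_samples' fields described above
+    log_vars = model.train_step(data, optim_wrapper=optim_wrapper)
+    # `log_vars` is a dict of scalar tensors (e.g. losses) ready for logging
+    print({key: value.item() for key, value in log_vars.items()})
+```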
+ +### val_step + +The `val_step` method calls the forward interface of the `predict` mode and returns the prediction result, which is further passed to the process interface of the evaluator and the `after_val_iter` interface of the Hook. + +Parameters: + +- data (`dict` or `tuple` or `list`) - Data sampled from the dataset. In MMSegmentation, the data dict contains `inputs` and `data_samples` two fields. + +Returns: + +- `list` - The predictions of given data. + +
+
+*Figure: test_step/val_step dataflow*
+ +### test_step + +The `BaseModel` implements `test_step` the same as `val_step`. + +## Data Preprocessor + +The [SegDataPreProcessor](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/models/data_preprocessor.py#L13) implemented by MMSegmentation inherits from the [BaseDataPreprocessor](https://github.com/open-mmlab/mmengine/blob/main/mmengine/model/base_model/data_preprocessor.py#L18) implemented by [MMEngine](https://github.com/open-mmlab/mmengine) and provides the functions of data preprocessing and copying data to the target device. + +The runner carries the model to the specified device during the construction stage, while the data is carried to the specified device by the [SegDataPreProcessor](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/models/data_preprocessor.py#L13) in `train_step`, `val_step`, and `test_step`, and the processed data is further passed to the model. + +The parameters of the `SegDataPreProcessor` constructor: + +- mean (Sequence\[Number\], optional) - The pixel mean of R, G, B channels. Defaults to None. +- std (Sequence\[Number\], optional) - The pixel standard deviation of R, G, B channels. Defaults to None. +- size (tuple, optional) - Fixed padding size. +- size_divisor (int, optional) - The divisor of padded size. +- pad_val (float, optional) - Padding value. Default: 0. +- seg_pad_val (float, optional) - Padding value of segmentation map. Default: 255. +- bgr_to_rgb (bool) - whether to convert image from BGR to RGB. Defaults to False. +- rgb_to_bgr (bool) - whether to convert image from RGB to RGB. Defaults to False. +- batch_augments (list\[dict\], optional) - Batch-level augmentations. Default to None. + +The data will be processed as follows: + +- Collate and move data to the target device. +- Pad inputs to the input size with defined `pad_val`, and pad seg map with defined `seg_pad_val`. +- Stack inputs to batch_inputs. +- Convert inputs from bgr to rgb if the shape of input is (3, H, W). +- Normalize image with defined std and mean. +- Do batch augmentations like Mixup and Cutmix during training. + +The parameters of the `forward` method: + +- data (dict) - data sampled from dataloader. +- training (bool) - Whether to enable training time augmentation. + +The returns of the `forward` method: + +- Dict: Data in the same format as the model input. diff --git a/docs/en/advanced_guides/structures.md b/docs/en/advanced_guides/structures.md new file mode 100644 index 0000000000..2607242e23 --- /dev/null +++ b/docs/en/advanced_guides/structures.md @@ -0,0 +1,104 @@ +# Structures + +To unify input and output interfaces between different models and modules, OpenMMLab 2.0 MMEngine defines an abstract data structure, +it has implemented basic functions of `Create`, `Read`, `Update`, `Delete`, supported data transferring among different types of devices +and tensor-like or dictionary-like operations such as `.cpu()`, `.cuda()`, `.get()` and `.detach()`. +More details can be found [here](https://github.com/open-mmlab/mmengine/blob/main/docs/en/advanced_tutorials/data_element.md). + +MMSegmentation also follows this interface protocol and defines `SegDataSample` which is used to encapsulate the data of semantic segmentation task. + +## Semantic Segmentation Data SegDataSample + +[SegDataSample](mmseg.structures.SegDataSample) includes three main fields `gt_sem_seg`, `pred_sem_seg` and `seg_logits`, which are used to store the annotation information and prediction results respectively. 
+
+| Field        | Type                      | Description                                 |
+| ------------ | ------------------------- | ------------------------------------------- |
+| gt_sem_seg   | [`PixelData`](#pixeldata) | Annotation information.                     |
+| pred_sem_seg | [`PixelData`](#pixeldata) | The predicted result.                       |
+| seg_logits   | [`PixelData`](#pixeldata) | The raw (non-normalized) predicted result.  |
+
+The following sample code demonstrates the use of `SegDataSample`.
+
+```python
+import torch
+from mmengine.structures import PixelData
+from mmseg.structures import SegDataSample
+
+img_meta = dict(img_shape=(4, 4, 3),
+                pad_shape=(4, 4, 3))
+data_sample = SegDataSample()
+# define gt_segmentations to encapsulate the ground truth data
+gt_segmentations = PixelData(metainfo=img_meta)
+gt_segmentations.data = torch.randint(0, 2, (1, 4, 4))
+
+# add and process property in SegDataSample
+data_sample.gt_sem_seg = gt_segmentations
+assert 'gt_sem_seg' in data_sample
+assert 'data' in data_sample.gt_sem_seg
+assert 'img_shape' in data_sample.gt_sem_seg.metainfo_keys()
+print(data_sample.gt_sem_seg.shape)
+'''
+(4, 4)
+'''
+print(data_sample)
+'''
+<SegDataSample(
+    ...
+) at 0x1c2aae44d60>
+'''
+
+# delete and change property in SegDataSample
+data_sample = SegDataSample()
+gt_segmentations = PixelData(metainfo=img_meta)
+gt_segmentations.data = torch.randint(0, 2, (1, 4, 4))
+data_sample.gt_sem_seg = gt_segmentations
+data_sample.gt_sem_seg.set_metainfo(dict(img_shape=(4, 4, 9), pad_shape=(4, 4, 9)))
+del data_sample.gt_sem_seg.img_shape
+
+# Tensor-like operations
+data_sample = SegDataSample()
+gt_segmentations = PixelData(metainfo=img_meta)
+gt_segmentations.data = torch.randint(0, 2, (1, 4, 4))
+cuda_gt_segmentations = gt_segmentations.cuda()
+cuda_gt_segmentations = gt_segmentations.to('cuda:0')
+cpu_gt_segmentations = cuda_gt_segmentations.cpu()
+cpu_gt_segmentations = cuda_gt_segmentations.to('cpu')
+```
+
+## Customize New Property in SegDataSample
+
+If you want to customize a new property in `SegDataSample`, you may follow the pattern of [SegDataSample](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/structures/seg_data_sample.py) below:
+
+```python
+class SegDataSample(BaseDataElement):
+    ...
+
+    @property
+    def xxx_property(self) -> xxxData:
+        return self._xxx_property
+
+    @xxx_property.setter
+    def xxx_property(self, value: xxxData) -> None:
+        self.set_field(value, '_xxx_property', dtype=xxxData)
+
+    @xxx_property.deleter
+    def xxx_property(self) -> None:
+        del self._xxx_property
+```
+
+Then a new property would be added to `SegDataSample`.
diff --git a/docs/en/advanced_guides/training_tricks.md b/docs/en/advanced_guides/training_tricks.md
new file mode 100644
index 0000000000..6c43230c71
--- /dev/null
+++ b/docs/en/advanced_guides/training_tricks.md
@@ -0,0 +1,90 @@
+# Training Tricks
+
+MMSegmentation supports the following training tricks out of the box.
+
+## Different Learning Rate(LR) for Backbone and Heads
+
+In semantic segmentation, some methods make the LR of the heads larger than that of the backbone to achieve better performance or faster convergence.
+
+In MMSegmentation, you may add the following lines to the config to make the LR of the heads 10 times that of the backbone.
+
+```python
+optimizer=dict(
+    paramwise_cfg = dict(
+        custom_keys={
+            'head': dict(lr_mult=10.)}))
+```
+
+With this modification, the LR of any parameter group with `'head'` in its name will be multiplied by 10.
+You may refer to [MMCV doc](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.DefaultOptimizerConstructor) for further details.
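+
+With the MMEngine-style runner, the same trick is usually expressed through the `optim_wrapper` field. A minimal sketch (assuming the default optimizer wrapper constructor; the optimizer values are only examples) could look like this:
+
+```python
+optim_wrapper = dict(
+    type='OptimWrapper',
+    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005),
+    paramwise_cfg=dict(
+        custom_keys={
+            # parameter groups whose name contains 'head' get 10x the base LR
+            'head': dict(lr_mult=10.)
+        }))
+```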
+ +## Online Hard Example Mining (OHEM) + +We implement pixel sampler [here](https://github.com/open-mmlab/mmsegmentation/tree/master/mmseg/core/seg/sampler) for training sampling. +Here is an example config of training PSPNet with OHEM enabled. + +```python +_base_ = './pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model=dict( + decode_head=dict( + sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=100000)) ) +``` + +In this way, only pixels with confidence score under 0.7 are used to train. And we keep at least 100000 pixels during training. If `thresh` is not specified, pixels of top `min_kept` loss will be selected. + +## Class Balanced Loss + +For dataset that is not balanced in classes distribution, you may change the loss weight of each class. +Here is an example for cityscapes dataset. + +```python +_base_ = './pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +model=dict( + decode_head=dict( + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0, + # DeepLab used this class weight for cityscapes + class_weight=[0.8373, 0.9180, 0.8660, 1.0345, 1.0166, 0.9969, 0.9754, + 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, + 1.0865, 1.0955, 1.0865, 1.1529, 1.0507]))) +``` + +`class_weight` will be passed into `CrossEntropyLoss` as `weight` argument. Please refer to [PyTorch Doc](https://pytorch.org/docs/stable/nn.html?highlight=crossentropy#torch.nn.CrossEntropyLoss) for details. + +## Multiple Losses + +For loss calculation, we support multiple losses training concurrently. Here is an example config of training `unet` on `DRIVE` dataset, whose loss function is `1:3` weighted sum of `CrossEntropyLoss` and `DiceLoss`: + +```python +_base_ = './fcn_unet_s5-d16_64x64_40k_drive.py' +model = dict( + decode_head=dict(loss_decode=[dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0)]), + auxiliary_head=dict(loss_decode=[dict(type='CrossEntropyLoss', loss_name='loss_ce',loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0)]), + ) +``` + +In this way, `loss_weight` and `loss_name` will be weight and name in training log of corresponding loss, respectively. + +Note: If you want this loss item to be included into the backward graph, `loss_` must be the prefix of the name. + +## Ignore specified label index in loss calculation + +In default setting, `avg_non_ignore=False` which means each pixel counts for loss calculation although some of them belong to ignore-index labels. + +For loss calculation, we support ignore index of certain label by `avg_non_ignore` and `ignore_index`. In this way, the average loss would only be calculated in non-ignored labels which may achieve better performance, and here is the [reference](https://github.com/open-mmlab/mmsegmentation/pull/1409). 
Here is an example config of training `unet` on the `Cityscapes` dataset: the loss calculation ignores label 0 (the background), and the loss average is only calculated on non-ignored labels:

+```python
+_base_ = './unet-s5-d16_fcn_4xb4-160k_cityscapes-512x1024.py'
+model = dict(
+    decode_head=dict(
+        ignore_index=0,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0, avg_non_ignore=True)),
+    auxiliary_head=dict(
+        ignore_index=0,
+        loss_decode=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0, avg_non_ignore=True)))
+```
diff --git a/docs/en/advanced_guides/transforms.md b/docs/en/advanced_guides/transforms.md
new file mode 100644
index 0000000000..d42d61a9ef
--- /dev/null
+++ b/docs/en/advanced_guides/transforms.md
@@ -0,0 +1,172 @@
+# Data Transforms
+
+In this tutorial, we introduce the design of the transforms pipeline in MMSegmentation.
+
+The structure of this guide is as follows:
+
+- [Data Transforms](#data-transforms)
+  - [Design of Data pipelines](#design-of-data-pipelines)
+  - [Customization data transformation](#customization-data-transformation)
+
+## Design of Data pipelines
+
+Following typical conventions, we use `Dataset` and `DataLoader` for data loading
+with multiple workers. `Dataset` returns a dict of data items corresponding to
+the arguments of the model's forward method.
+Since the data in semantic segmentation may not be the same size,
+we introduce a new `DataContainer` type in MMCV to help collect and distribute
+data of different sizes.
+See [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py) for more details.
+
+In the 1.x version of MMSegmentation, all data transformations are inherited from [`BaseTransform`](https://github.com/open-mmlab/mmcv/blob/2.x/mmcv/transforms/base.py#L6).
+The input and output types of transformations are both dict. A simple example is as follows:
+
+```python
+>>> from mmseg.datasets.transforms import LoadAnnotations
+>>> transforms = LoadAnnotations()
+>>> img_path = './data/cityscapes/leftImg8bit/train/aachen/aachen_000000_000019_leftImg8bit.png'
+>>> gt_path = './data/cityscapes/gtFine/train/aachen/aachen_000015_000019_gtFine_instanceTrainIds.png'
+>>> results = dict(
+>>>     img_path=img_path,
+>>>     seg_map_path=gt_path,
+>>>     reduce_zero_label=False,
+>>>     seg_fields=[])
+>>> data_dict = transforms(results)
+>>> print(data_dict.keys())
+dict_keys(['img_path', 'seg_map_path', 'reduce_zero_label', 'seg_fields', 'gt_seg_map'])
+```
+
+The data preparation pipeline and the dataset are decomposed. Usually a dataset
+defines how to process the annotations and a data pipeline defines all the steps to prepare a data dict.
+A pipeline consists of a sequence of operations. Each operation takes a dict as input and also outputs a dict for the next transform.
+
+The operations are categorized into data loading, pre-processing, formatting and test-time augmentation.
+
+Here is a pipeline example for PSPNet.
+ +```python +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=(2048, 1024), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 1024), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +``` + +For each operation, we list the related dict fields that are `added`/`updated`/`removed`. +Before pipelines, the information we can directly obtain from the datasets are `img_path` and `seg_map_path`. + +### Data loading + +`LoadImageFromFile`: Load an image from file. + +- add: `img`, `img_shape`, `ori_shape` + +`LoadAnnotations`: Load semantic segmentation maps provided by dataset. + +- add: `seg_fields`, `gt_seg_map` + +### Pre-processing + +`RandomResize`: Random resize image & segmentation map. + +- add: `scale`, `scale_factor`, `keep_ratio` +- update: `img`, `img_shape`, `gt_seg_map` + +`Resize`: Resize image & segmentation map. + +- add: `scale`, `scale_factor`, `keep_ratio` +- update: `img`, `gt_seg_map`, `img_shape` + +`RandomCrop`: Random crop image & segmentation map. + +- update: `img`, `gt_seg_map`, `img_shape`. + +`RandomFlip`: Flip the image & segmentation map. + +- add: `flip`, `flip_direction` +- update: `img`, `gt_seg_map` + +`PhotoMetricDistortion`: Apply photometric distortion to image sequentially, +every transformation is applied with a probability of 0.5. +The position of random contrast is in second or second to last(mode 0 or 1 below, respectively). + +``` +1. random brightness +2. random contrast (mode 0) +3. convert color from BGR to HSV +4. random saturation +5. random hue +6. convert color from HSV to BGR +7. random contrast (mode 1) +``` + +- update: `img` + +### Formatting + +`PackSegInputs`: Pack the inputs data for the semantic segmentation. + +- add: `inputs`, `data_sample` +- remove: keys specified by `meta_keys` (merged into the metainfo of data_sample), all other keys + +## Customization data transformation + +The customized data transformation must inherited from `BaseTransform` and implement `transform` function. +Here we use a simple flipping transformation as example: + +```python +import random +import mmcv +from mmcv.transforms import BaseTransform, TRANSFORMS + +@TRANSFORMS.register_module() +class MyFlip(BaseTransform): + def __init__(self, direction: str): + super().__init__() + self.direction = direction + + def transform(self, results: dict) -> dict: + img = results['img'] + results['img'] = mmcv.imflip(img, direction=self.direction) + return results +``` + +Thus, we can instantiate a `MyFlip` object and use it to process the data dict. + +```python +import numpy as np + +transform = MyFlip(direction='horizontal') +data_dict = {'img': np.random.rand(224, 224, 3)} +data_dict = transform(data_dict) +processed_img = data_dict['img'] +``` + +Or, we can use `MyFlip` transformation in data pipeline in our config file. + +```python +pipeline = [ + ... + dict(type='MyFlip', direction='horizontal'), + ... +] +``` + +Note that if you want to use `MyFlip` in config, you must ensure the file containing `MyFlip` is imported during runtime. 
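+
+One common way to guarantee that import is the `custom_imports` mechanism of the config system; the module path below is a placeholder for wherever the file containing `MyFlip` actually lives on your `PYTHONPATH`:
+
+```python
+custom_imports = dict(imports=['my_flip'], allow_failed_imports=False)
+```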
diff --git a/docs/en/api.rst b/docs/en/api.rst new file mode 100644 index 0000000000..12ec13b2bd --- /dev/null +++ b/docs/en/api.rst @@ -0,0 +1,104 @@ +mmseg.apis +-------------- +.. automodule:: mmseg.apis + :members: + +mmseg.datasets +-------------- + +datasets +^^^^^^^^^^ +.. automodule:: mmseg.datasets + :members: + +transforms +^^^^^^^^^^ +.. automodule:: mmseg.datasets.transforms + :members: + +mmseg.engine +-------------- + +hooks +^^^^^^^^^^ +.. automodule:: mmseg.engine.hooks + :members: + +optimizers +^^^^^^^^^^ +.. automodule:: mmseg.engine.optimizers + :members: + +mmseg.evaluation +----------------- + +metrics +^^^^^^^^^^ +.. automodule:: mmseg.evaluation.metrics + :members: + +mmseg.models +-------------- + +models +^^^^^^^^^^ +.. automodule:: mmseg.models + :members: + +segmentors +^^^^^^^^^^ +.. automodule:: mmseg.models.segmentors + :members: + +backbones +^^^^^^^^^^ +.. automodule:: mmseg.models.backbones + :members: + +decode_heads +^^^^^^^^^^^^ +.. automodule:: mmseg.models.decode_heads + :members: + +losses +^^^^^^^^^^ +.. automodule:: mmseg.models.losses + :members: + +utils +^^^^^^^^^^ +.. automodule:: mmseg.models.utils + :members: + +necks +^^^^^^^^^^ +.. automodule:: mmseg.models.necks + :members: + +mmseg.registry +-------------- +.. automodule:: mmseg.registry + :members: + +mmseg.structures +----------------- + +structures +^^^^^^^^^^ +.. automodule:: mmseg.structures + :members: + +sampler +^^^^^^^^^^ +.. automodule:: mmseg.structures.sampler + :members: + +mmseg.utils +-------------- +.. automodule:: mmseg.utils + :members: + +mmseg.visualization +---------------------- +.. automodule:: mmseg.visualization + :members: diff --git a/docs/en/conf.py b/docs/en/conf.py new file mode 100644 index 0000000000..e20aab14b1 --- /dev/null +++ b/docs/en/conf.py @@ -0,0 +1,133 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import subprocess +import sys + +import pytorch_sphinx_theme + +sys.path.insert(0, os.path.abspath('../../')) + +# -- Project information ----------------------------------------------------- + +project = 'MMSegmentation' +copyright = '2020-2021, OpenMMLab' +author = 'MMSegmentation Authors' +version_file = '../../mmseg/version.py' + + +def get_version(): + with open(version_file) as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +# The full version, including alpha/beta/rc tags +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', + 'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser' +] + +autodoc_mock_imports = [ + 'matplotlib', 'pycocotools', 'mmseg.version', 'mmcv.ops' +] + +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# The master toctree document. +master_doc = 'index' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +# html_theme = 'sphinx_rtd_theme' +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] +html_theme_options = { + 'logo_url': + 'https://mmsegmentation.readthedocs.io/en/latest/', + 'menu': [ + { + 'name': + 'Tutorial', + 'url': + 'https://github.com/open-mmlab/mmsegmentation/blob/master/' + 'demo/MMSegmentation_Tutorial.ipynb' + }, + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmsegmentation' + }, + { + 'name': + 'Upstream', + 'children': [ + { + 'name': 'MMCV', + 'url': 'https://github.com/open-mmlab/mmcv', + 'description': 'Foundational library for computer vision' + }, + ] + }, + ], + # Specify the language of shared menu + 'menu_lang': + 'en' +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] + +# Enable ::: for my_st +myst_enable_extensions = ['colon_fence'] + +language = 'en' + + +def builder_inited_handler(app): + subprocess.run(['./stat.py']) + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) diff --git a/docs/en/get_started.md b/docs/en/get_started.md new file mode 100644 index 0000000000..313501e0d3 --- /dev/null +++ b/docs/en/get_started.md @@ -0,0 +1,203 @@ +# Get started: Install and Run MMSeg + +## Prerequisites + +In this section we demonstrate how to prepare an environment with PyTorch. + +MMSegmentation works on Linux, Windows and macOS. It requires Python 3.6+, CUDA 9.2+ and PyTorch 1.5+. + +**Note:** +If you are experienced with PyTorch and have already installed it, just skip this part and jump to the [next section](##installation). Otherwise, you can follow these steps for the preparation. + +**Step 0.** Download and install Miniconda from the [official website](https://docs.conda.io/en/latest/miniconda.html). + +**Step 1.** Create a conda environment and activate it. + +```shell +conda create --name openmmlab python=3.8 -y +conda activate openmmlab +``` + +**Step 2.** Install PyTorch following [official instructions](https://pytorch.org/get-started/locally/), e.g. 
+ +On GPU platforms: + +```shell +conda install pytorch torchvision -c pytorch +``` + +On CPU platforms: + +```shell +conda install pytorch torchvision cpuonly -c pytorch +``` + +## Installation + +We recommend that users follow our best practices to install MMSegmentation. However, the whole process is highly customizable. See [Customize Installation](#customize-installation) section for more information. + +### Best Practices + +**Step 0.** Install [MMCV](https://github.com/open-mmlab/mmcv) using [MIM](https://github.com/open-mmlab/mim). + +```shell +pip install -U openmim +mim install mmengine +mim install "mmcv>=2.0.0rc1" +``` + +**Step 1.** Install MMSegmentation. + +Case a: If you develop and run mmseg directly, install it from source: + +```shell +git clone -b dev-1.x https://github.com/open-mmlab/mmsegmentation.git +cd mmsegmentation +pip install -v -e . +# '-v' means verbose, or more output +# '-e' means installing a project in editable mode, +# thus any local modifications made to the code will take effect without reinstallation. +``` + +Case b: If you use mmsegmentation as a dependency or third-party package, install it with pip: + +```shell +pip install "mmsegmentation>=1.0.0rc0" +``` + +### Verify the installation + +To verify whether MMSegmentation is installed correctly, we provide some sample codes to run an inference demo. + +**Step 1.** We need to download config and checkpoint files. + +```shell +mim download mmsegmentation --config pspnet_r50-d8_4xb2-40k_cityscapes-512x1024 --dest . +``` + +The downloading will take several seconds or more, depending on your network environment. When it is done, you will find two files `pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py` and `pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth` in your current folder. + +**Step 2.** Verify the inference demo. + +Option (a). If you install mmsegmentation from source, just run the following command. + +```shell +python demo/image_demo.py demo/demo.png configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth --device cuda:0 --out-file result.jpg +``` + +You will see a new image `result.jpg` on your current folder, where segmentation masks are covered on all objects. + +Option (b). If you install mmsegmentation with pip, open you python interpreter and copy&paste the following codes. + +```python +from mmseg.apis import inference_model, init_model, show_result_pyplot +from mmseg.utils import register_all_modules +import mmcv + +register_all_modules() +config_file = 'pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +checkpoint_file = 'pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth' + +# build the model from a config file and a checkpoint file +model = init_model(config_file, checkpoint_file, device='cuda:0') + +# test a single image and show the results +img = 'demo/demo.png' # or img = mmcv.imread(img), which will only load it once +result = inference_model(model, img) +# visualize the results in a new window +show_result_pyplot(model, img, result, show=True) +# or save the visualization results to image files +# you can change the opacity of the painted segmentation map in (0, 1]. 
+show_result_pyplot(model, img, result, show=True, out_file='result.jpg', opacity=0.5)
+# test a video and show the results
+video = mmcv.VideoReader('video.mp4')
+for frame in video:
+    result = inference_model(model, frame)
+    show_result_pyplot(model, frame, result, wait_time=1)
+```
+
+You can modify the code above to test a single image or a video; both of these options can verify that the installation was successful.
+
+### Customize Installation
+
+#### CUDA versions
+
+When installing PyTorch, you need to specify the version of CUDA. If you are not clear on which to choose, follow our recommendations:
+
+- For Ampere-based NVIDIA GPUs, such as GeForce 30 series and NVIDIA A100, CUDA 11 is a must.
+- For older NVIDIA GPUs, CUDA 11 is backward compatible, but CUDA 10.2 offers better compatibility and is more lightweight.
+
+Please make sure the GPU driver satisfies the minimum version requirements. See [this table](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions) for more information.
+
+**Note:**
+Installing CUDA runtime libraries is enough if you follow our best practices, because no CUDA code will be compiled locally. However, if you hope to compile MMCV from source or develop other CUDA operators, you need to install the complete CUDA toolkit from NVIDIA's [website](https://developer.nvidia.com/cuda-downloads), and its version should match the CUDA version of PyTorch, i.e., the specified version of cudatoolkit in the `conda install` command.
+
+#### Install MMCV without MIM
+
+MMCV contains C++ and CUDA extensions, thus depending on PyTorch in a complex way. MIM solves such dependencies automatically and makes the installation easier. However, it is not a must.
+
+To install MMCV with pip instead of MIM, please follow [MMCV installation guides](https://mmcv.readthedocs.io/en/latest/get_started/installation.html). This requires manually specifying a find-url based on the PyTorch version and its CUDA version.
+
+For example, the following command installs mmcv==2.0.0rc1 built for PyTorch 1.10.x and CUDA 11.3.
+
+```shell
+pip install mmcv==2.0.0rc1 -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10/index.html
+```
+
+#### Install on CPU-only platforms
+
+MMSegmentation can be built for a CPU-only environment. In CPU mode you can train (requires MMCV-Lite version >= 2.0.0rc0), test or run inference on a model.
+
+#### Install on Google Colab
+
+[Google Colab](https://research.google.com/) usually has PyTorch installed,
+thus we only need to install MMCV and MMSegmentation with the following commands.
+
+**Step 1.** Install [MMCV](https://github.com/open-mmlab/mmcv) using [MIM](https://github.com/open-mmlab/mim).
+
+```shell
+!pip3 install openmim
+!mim install mmengine
+!mim install "mmcv>=2.0.0rc1"
+```
+
+**Step 2.** Install MMSegmentation from the source.
+
+```shell
+!git clone https://github.com/open-mmlab/mmsegmentation.git
+%cd mmsegmentation
+!git checkout dev-1.x
+!pip install -e .
+```
+
+**Step 3.** Verification.
+
+```python
+import mmseg
+print(mmseg.__version__)
+# Example output: 1.0.0rc0
+```
+
+**Note:**
+Within Jupyter, the exclamation mark `!` is used to call external executables and `%cd` is a [magic command](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd) to change the current working directory of Python.
+
+### Using MMSegmentation with Docker
+
+We provide a [Dockerfile](https://github.com/open-mmlab/mmsegmentation/blob/master/docker/Dockerfile) to build an image.
Ensure that your [docker version](https://docs.docker.com/engine/install/) >=19.03. + +```shell +# build an image with PyTorch 1.11, CUDA 11.3 +# If you prefer other versions, just modified the Dockerfile +docker build -t mmsegmentation docker/ +``` + +Run it with + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmsegmentation/data mmsegmentation +``` + +## Trouble shooting + +If you have some issues during the installation, please first view the [FAQ](faq.md) page. +You may [open an issue](https://github.com/open-mmlab/mmsegmentation/issues/new/choose) on GitHub if no solution is found. diff --git a/docs/en/index.rst b/docs/en/index.rst new file mode 100644 index 0000000000..63cfb924c4 --- /dev/null +++ b/docs/en/index.rst @@ -0,0 +1,59 @@ +Welcome to MMSegmentation's documentation! +=========================================== + +.. toctree:: + :maxdepth: 1 + :caption: Get Started + + overview.md + get_started.md + +.. toctree:: + :maxdepth: 2 + :caption: User Guides + + user_guides/index.rst + +.. toctree:: + :maxdepth: 2 + :caption: Advanced Guides + + advanced_guides/index.rst + +.. toctree:: + :maxdepth: 1 + :caption: Migration + + migration/index.rst + +.. toctree:: + :caption: API Reference + + api.rst + +.. toctree:: + :maxdepth: 1 + :caption: Model Zoo + + model_zoo.md + modelzoo_statistics.md + +.. toctree:: + :maxdepth: 1 + :caption: Notes + + notes/changelog.md + notes/faq.md + +.. toctree:: + :caption: Switch Language + + switch_language.md + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/docs/make.bat b/docs/en/make.bat similarity index 100% rename from docs/make.bat rename to docs/en/make.bat diff --git a/docs/en/migration/index.rst b/docs/en/migration/index.rst new file mode 100644 index 0000000000..2843bdbcfb --- /dev/null +++ b/docs/en/migration/index.rst @@ -0,0 +1,8 @@ +Migration +*************** + +.. toctree:: + :maxdepth: 1 + + interface.md + package.md diff --git a/docs/en/migration/interface.md b/docs/en/migration/interface.md new file mode 100644 index 0000000000..f9a0b86f3d --- /dev/null +++ b/docs/en/migration/interface.md @@ -0,0 +1,441 @@ +# Migration from MMSegmentation 0.x + +## Introduction + +This guide describes the fundamental differences between MMSegmentation 0.x and MMSegmentation 1.x in terms of behaviors and the APIs, and how these all relate to your migration journey. + +## New dependencies + +MMSegmentation 1.x depends on some new packages, you can prepare a new clean environment and install again according to the [installation tutorial](get_started.md). +Or install the below packages manually. + +1. [MMEngine](https://github.com/open-mmlab/mmengine): MMEngine is the core the OpenMMLab 2.0 architecture, and we splited many compentents unrelated to computer vision from MMCV to MMEngine. + +2. [MMCV](https://github.com/open-mmlab/mmcv): The computer vision package of OpenMMLab. This is not a new dependency, but you need to upgrade it to above **2.0.0rc1** version. + +3. [MMClassification](https://github.com/open-mmlab/mmclassification)(Optional): The image classification toolbox and benchmark of OpenMMLab. This is not a new dependency, but you need to upgrade it to above **1.0.0rc0** version. + +## Train launch + +The main improvement of OpenMMLab 2.0 is releasing MMEngine which provides universal and powerful runner for unified interfaces to launch training jobs. 
+ +Compared with MMSeg0.x, MMSeg1.x provides fewer command line arguments in `tools/train.py` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FunctionOriginalNew
Loading pre-trained checkpoint--load_from=$CHECKPOINT--cfg-options load_from=$CHECKPOINT
Resuming Train from specific checkpoint--resume-from=$CHECKPOINT--resume=$CHECKPOINT
Resuming Train from the latest checkpoint--auto-resume--resume='auto'
Whether not to evaluate the checkpoint during training--no-validate--cfg-options val_cfg=None val_dataloader=None val_evaluator=None
Training device assignment--gpu-id=$DEVICE_ID-
Whether or not set different seeds for different ranks--diff-seed--cfg-options randomness.diff_rank_seed=True
Whether to set deterministic options for CUDNN backend--deterministic--cfg-options randomness.deterministic=True
+ +## Configuration file + +### Model settings + +No changes in `model.backbone`, `model.neck`, `model.decode_head` and `model.losses` fields. + +Add `model.data_preprocessor` field to configure the `DataPreProcessor`, including: + +- `mean`(Sequence, optional): The pixel mean of R, G, B channels. Defaults to None. + +- `std`(Sequence, optional): The pixel standard deviation of R, G, B channels. Defaults to None. + +- `size`(Sequence, optional): Fixed padding size. + +- `size_divisor` (int, optional): The divisor of padded size. + +- `seg_pad_val` (float, optional): Padding value of segmentation map. Default: 255. + +- `padding_mode` (str): Type of padding. Default: 'constant'. + + - constant: pads with a constant value, this value is specified with pad_val. + +- `bgr_to_rgb` (bool): whether to convert image from BGR to RGB.Defaults to False. + +- `rgb_to_bgr` (bool): whether to convert image from RGB to RGB. Defaults to False. + +**Note:** +Please refer [models documentation](../advanced_guides/models.md) for more details. + +### Dataset settings + +Changes in **data**: + +The original `data` field is split to `train_dataloader`, `val_dataloader` and `test_dataloader`. This allows us to configure them in fine-grained. For example, you can specify different sampler and batch size during training and test. +The `samples_per_gpu` is renamed to `batch_size`. +The `workers_per_gpu` is renamed to `num_workers`. + + + + + + + + + +
Original + +```python +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict(...), + val=dict(...), + test=dict(...), +) +``` + +
New + +```python +train_dataloader = dict( + batch_size=4, + num_workers=4, + dataset=dict(...), + sampler=dict(type='DefaultSampler', shuffle=True) # necessary +) + +val_dataloader = dict( + batch_size=4, + num_workers=4, + dataset=dict(...), + sampler=dict(type='DefaultSampler', shuffle=False) # necessary +) + +test_dataloader = val_dataloader +``` + +
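+
+As an illustration of the finer-grained control mentioned above, the sketch below uses a different sampler and batch size for training and test; the concrete values and the `InfiniteSampler` choice are only assumptions for demonstration, not part of the original configuration.
+
+```python
+# Hypothetical example: iteration-based training often pairs with an infinite sampler,
+# while evaluation keeps a plain sequential sampler and a smaller batch size.
+train_dataloader = dict(
+    batch_size=4,
+    num_workers=4,
+    dataset=dict(...),
+    sampler=dict(type='InfiniteSampler', shuffle=True))
+
+test_dataloader = dict(
+    batch_size=1,
+    num_workers=2,
+    dataset=dict(...),
+    sampler=dict(type='DefaultSampler', shuffle=False))
+```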
+ +Changes in **pipeline** + +- The original formatting transforms **`ToTensor`**、**`ImageToTensor`**、**`Collect`** are combined as [`PackSegInputs`](mmseg.datasets.transforms.PackSegInputs) +- We don't recommend to do **`Normalize`** and **Pad** in the dataset pipeline. Please remove it from pipelines and set it in the `data_preprocessor` field. +- The original **`Resize`** in MMSeg 1.x has been changed to **`RandomResize`** and the input arguments `img_scale` is renamed to `scale`, and the default value of `keep_ratio` is modified to False. + +**Note:** +We move some work of data transforms to the data preprocessor, like normalization, see [the documentation](package.md) for more details. + + + + + + + + + +
Original + +```python +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2560, 640), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +``` + +
New + +```python +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict( + type='RandomResize', + scale=(2560, 640), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +``` + +
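+
+Since normalization and padding are no longer done in the pipeline, they have to be declared on the model side. The snippet below is a minimal sketch of a `data_preprocessor` carrying the fields listed in the Model settings section above; the mean/std statistics and the padding size are illustrative values, not taken from the original config.
+
+```python
+# A minimal sketch (values are illustrative): what used to be the `Normalize` and `Pad`
+# pipeline transforms now lives in `model.data_preprocessor`.
+data_preprocessor = dict(
+    mean=[123.675, 116.28, 103.53],   # example ImageNet statistics
+    std=[58.395, 57.12, 57.375],
+    bgr_to_rgb=True,
+    size=(512, 512),                  # fixed padding size; illustrative value
+    seg_pad_val=255,
+    padding_mode='constant')
+# This dict is then set as `model.data_preprocessor` in the model settings.
+```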
+ +Changes in **`evaluation`**: + +- The **`evaluation`** field is split to `val_evaluator` and `test_evaluator`. And it won't support `interval` and `save_best` arguments. + The `interval` is moved to `train_cfg.val_interval`, and the `save_best` + is moved to `default_hooks.checkpoint.save_best`. `pre_eval` has been removed. +- `'mIoU'` has been changed to `'IoUMetric'`. + + + + + + + + + +
Original + +```python +evaluation = dict(interval=2000, metric='mIoU', pre_eval=True) +``` + +
New + +```python +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator +``` + +
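+
+The relocated `interval` and `save_best` options described above do not appear in the evaluator itself; a sketch of where they end up is shown below (the 2000-iteration interval, the iteration budget, and the `mIoU` key are example values, not requirements).
+
+```python
+# Example values only: the evaluation interval now lives in `train_cfg`,
+# and best-checkpoint saving is handled by the checkpoint hook.
+train_cfg = dict(type='IterBasedTrainLoop', max_iters=160000, val_interval=2000)
+default_hooks = dict(
+    checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=2000,
+                    save_best='mIoU'))
+```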
+ +### Optimizer and Schedule settings + +Changes in **`optimizer`** and **`optimizer_config`**: + +- Now we use `optim_wrapper` field to specify all configuration about the optimization process. And the + `optimizer` is a sub field of `optim_wrapper` now. +- `paramwise_cfg` is also a sub field of `optim_wrapper`, instead of `optimizer`. +- `optimizer_config` is removed now, and all configurations of it are moved to `optim_wrapper`. +- `grad_clip` is renamed to `clip_grad`. + + + + + + + + + +
Original + +```python +optimizer = dict(type='AdamW', lr=0.0001, weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2)) +``` + +
New + +```python +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0005), + clip_grad=dict(max_norm=1, norm_type=2)) +``` + +
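+
+If the original config also carried a `paramwise_cfg`, it moves under `optim_wrapper` as well; the sketch below shows the idea with hypothetical per-parameter settings (the `custom_keys` entries are examples, not values from the original).
+
+```python
+# Hypothetical example: `paramwise_cfg` sits next to `optimizer` inside `optim_wrapper`.
+optim_wrapper = dict(
+    type='OptimWrapper',
+    optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0005),
+    paramwise_cfg=dict(
+        custom_keys={
+            'head': dict(lr_mult=10.0),
+            'norm': dict(decay_mult=0.0)
+        }),
+    clip_grad=dict(max_norm=1, norm_type=2))
+```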
+ +Changes in **`lr_config`**: + +- The `lr_config` field is removed and we use new `param_scheduler` to replace it. +- The `warmup` related arguments are removed, since we use schedulers combination to implement this + functionality. + +The new schedulers combination mechanism is very flexible, and you can use it to design many kinds of learning +rate / momentum curves. See [the tutorial](TODO) for more details. + + + + + + + + + +
Original + +```python +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) +``` + +
New + +```python +param_scheduler = [ + dict( + type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500), + dict( + type='PolyLR', + power=1.0, + begin=1500, + end=160000, + eta_min=0.0, + by_epoch=False, + ) +] +``` + +
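+
+As an example of that flexibility, the same warmup stage can be combined with a different decay curve; the cosine schedule below is only an illustration (the scheduler names come from MMEngine) and not a replacement for the poly schedule shown above.
+
+```python
+# Illustrative combination: linear warmup for 1500 iterations followed by cosine annealing.
+param_scheduler = [
+    dict(type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
+    dict(
+        type='CosineAnnealingLR',
+        T_max=158500,
+        eta_min=0.0,
+        by_epoch=False,
+        begin=1500,
+        end=160000)
+]
+```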
+ +Changes in **`runner`**: + +Most configuration in the original `runner` field is moved to `train_cfg`, `val_cfg` and `test_cfg`, which +configure the loop in training, validation and test. + + + + + + + + + +
Original + +```python +runner = dict(type='IterBasedRunner', max_iters=20000) +``` + +
New + +```python +# The `val_interval` is the original `evaluation.interval`. +train_cfg = dict(type='IterBasedTrainLoop', max_iters=20000, val_interval=2000) +val_cfg = dict(type='ValLoop') # Use the default validation loop. +test_cfg = dict(type='TestLoop') # Use the default test loop. +``` + +
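+
+For completeness, an epoch-based runner maps onto the loop config in the same way; the sketch below is a hypothetical epoch-based counterpart (the epoch counts are illustrative), not something taken from the original config.
+
+```python
+# Hypothetical epoch-based counterpart of the iteration-based example above.
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100, val_interval=1)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
+```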
+ +In fact, in OpenMMLab 2.0, we introduced `Loop` to control the behaviors in training, validation and test. The functionalities of `Runner` are also changed. You can find more details of [runner tutorial](https://github.com/open-mmlab/mmengine/blob/main/docs/en/design/runner.md) +in [MMEngine](https://github.com/open-mmlab/mmengine/). + +### Runtime settings + +Changes in **`checkpoint_config`** and **`log_config`**: + +The `checkpoint_config` are moved to `default_hooks.checkpoint` and the `log_config` are moved to `default_hooks.logger`. +And we move many hooks settings from the script code to the `default_hooks` field in the runtime configuration. + +```python +default_hooks = dict( + # record the time of every iterations. + timer=dict(type='IterTimerHook'), + + # print log every 50 iterations. + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + + # enable the parameter scheduler. + param_scheduler=dict(type='ParamSchedulerHook'), + + # save checkpoint every 2000 iterations. + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=2000), + + # set sampler seed in distributed environment. + sampler_seed=dict(type='DistSamplerSeedHook'), + + # validation results visualization. + visualization=dict(type='SegVisualizationHook')) +``` + +In addition, we split the original logger to logger and visualizer. The logger is used to record +information and the visualizer is used to show the logger in different backends, like terminal and TensorBoard. + + + + + + + + + +
Original + +```python +log_config = dict( + interval=100, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +``` + +
New + +```python +default_hooks = dict( + ... + logger=dict(type='LoggerHook', interval=100), +) +vis_backends = [dict(type='LocalVisBackend'), + dict(type='TensorboardVisBackend')] +visualizer = dict( + type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer') +``` + +
+ +Changes in **`load_from`** and **`resume_from`**: + +- The `resume_from` is removed. And we use `resume` and `load_from` to replace it. + - If `resume=True` and `load_from` is **not None**, resume training from the checkpoint in `load_from`. + - If `resume=True` and `load_from` is **None**, try to resume from the latest checkpoint in the work directory. + - If `resume=False` and `load_from` is **not None**, only load the checkpoint, not resume training. + - If `resume=False` and `load_from` is **None**, do not load nor resume. + +Changes in **`dist_params`**: The `dist_params` field is a sub field of `env_cfg` now. And there are some new +configurations in the `env_cfg`. + +```python +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) +``` + +Changes in **`workflow`**: `workflow` related functionalities are removed. + +New field **`visualizer`**: The visualizer is a new design in OpenMMLab 2.0 architecture. We use a +visualizer instance in the runner to handle results & log visualization and save to different backends. +See the [visualization tutorial](user_guides/visualization.md) for more details. + +New field **`default_scope`**: The start point to search module for all registries. The `default_scope` in MMSegmentation is `mmseg`. See [the registry tutorial](https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md) for more details. diff --git a/docs/en/migration/package.md b/docs/en/migration/package.md new file mode 100644 index 0000000000..ca24df5887 --- /dev/null +++ b/docs/en/migration/package.md @@ -0,0 +1,114 @@ +# Package structures changes + +This section is included if you are curious about what has changed between MMSeg 0.x and 1.x. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
MMSegmentation 0.xMMSegmentation 1.x
mmseg.apimmseg.api
- mmseg.core+ mmseg.engine
mmseg.datasetsmmseg.datasets
mmseg.modelsmmseg.models
- mmseg.ops+ mmseg.structure
mmseg.utilsmmseg.utils
+ mmseg.evaluation
+ mmseg.registry
+
+## Removed packages
+
+### `mmseg.core`
+
+In OpenMMLab 2.0, the `core` package has been removed. The `hooks` and `optimizers` of `core` have been moved to `mmseg.engine`, and the `evaluation` of `core` is now `mmseg.evaluation`.
+
+### `mmseg.ops`
+
+The `ops` package included `encoding` and `wrappers`, which have been moved to `mmseg.models.utils`.
+
+## Added packages
+
+### `mmseg.engine`
+
+OpenMMLab 2.0 adds MMEngine, a new foundational library for training deep learning models. It serves as the training engine of all OpenMMLab codebases.
+The `engine` package of mmseg contains customized modules for the semantic segmentation task, such as `SegVisualizationHook`, which visualizes segmentation masks.
+
+### `mmseg.structure`
+
+In OpenMMLab 2.0, we designed data structures for computer vision tasks, and in mmseg, we implement `SegDataSample` in the `structure` package.
+
+### `mmseg.evaluation`
+
+We moved all evaluation metrics to `mmseg.evaluation`.
+
+### `mmseg.registry`
+
+We moved the registry implementations for all kinds of modules in MMSegmentation into `mmseg.registry`.
+
+## Modified packages
+
+### `mmseg.apis`
+
+OpenMMLab 2.0 aims to provide unified interfaces for multiple computer vision tasks
+and releases a much stronger [`Runner`](https://github.com/open-mmlab/mmengine/blob/main/docs/en/design/runner.md),
+so MMSeg 1.x removed the modules in `train.py` and `test.py` and renamed `init_segmentor` to `init_model` and `inference_segmentor` to `inference_model`.
+Here are the changes of `mmseg.apis`:
+
+| Function | Changes |
+| :-------------------: | :---------------------------------------------- |
+| `init_segmentor` | Renamed to `init_model` |
+| `inference_segmentor` | Renamed to `inference_model` |
+| `show_result_pyplot` | Implemented based on `SegLocalVisualizer` |
+| `train_model` | Removed, use `runner.train` to train. |
+| `multi_gpu_test` | Removed, use `runner.test` to test. |
+| `single_gpu_test` | Removed, use `runner.test` to test. |
+| `set_random_seed` | Removed, use `mmengine.runner.set_random_seed`. |
+| `init_random_seed` | Removed, use `mmengine.dist.sync_random_seed`. |
+
+### `mmseg.datasets`
+
+OpenMMLab 2.0 defines `BaseDataset` to specify the functionality and interface of datasets, and MMSegmentation 1.x follows this protocol and defines `BaseSegDataset`, which inherits from `BaseDataset`. MMCV 2.x collects general data transforms for multiple tasks, e.g.
classification, detection, and segmentation, so MMSegmentation 1.x uses these data transforms and removes them from `mmseg.datasets`.
+
+| Packages/Modules | Changes |
+| :-------------------: | :------------------------------------------------------------------------------------------ |
+| `mmseg.pipelines` | Renamed to `mmseg.transforms` |
+| `mmseg.sampler` | Moved to `mmengine.dataset.sampler` |
+| `CustomDataset` | Renamed to `BaseSegDataset` and inherited from `BaseDataset` in MMEngine |
+| `DefaultFormatBundle` | Replaced with `PackSegInputs` |
+| `LoadImageFromFile` | Moved to `mmcv.transforms.LoadImageFromFile` |
+| `LoadAnnotations` | Moved to `mmcv.transforms.LoadAnnotations` |
+| `Resize` | Moved to `mmcv.transforms` and split into `Resize`, `RandomResize` and `RandomChoiceResize` |
+| `RandomFlip` | Moved to `mmcv.transforms.RandomFlip` |
+| `Pad` | Moved to `mmcv.transforms.Pad` |
+| `Normalize` | Moved to `mmcv.transforms.Normalize` |
+| `Compose` | Moved to `mmcv.transforms.Compose` |
+| `ImageToTensor` | Moved to `mmcv.transforms.ImageToTensor` |
+
+### `mmseg.models`
+
+`models` has not changed much; it just adds the `encoding` and `wrappers` from the previous `mmseg.ops`.
diff --git a/docs/en/model_zoo.md b/docs/en/model_zoo.md
new file mode 100644
index 0000000000..782a47002f
--- /dev/null
+++ b/docs/en/model_zoo.md
@@ -0,0 +1,186 @@
+# Benchmark and Model Zoo
+
+## Common settings
+
+- We use distributed training with 4 GPUs by default.
+
+- All PyTorch-style pretrained backbones on ImageNet are trained by ourselves, following the same procedure as in the [paper](https://arxiv.org/pdf/1812.01187.pdf).
+  Our ResNet-style backbones are based on the ResNetV1c variant, where the 7x7 conv in the input stem is replaced with three 3x3 convs.
+
+- For consistency across different hardware, we report the GPU memory as the maximum value of `torch.cuda.max_memory_allocated()` for all 4 GPUs with `torch.backends.cudnn.benchmark=False`.
+  Note that this value is usually less than what `nvidia-smi` shows.
+
+- We report the inference time as the total time of network forwarding and post-processing, excluding the data loading time.
+  Results are obtained with the script `tools/benchmark.py`, which computes the average time on 200 images with `torch.backends.cudnn.benchmark=False`.
+
+- There are two inference modes in this framework.
+
+  - `slide` mode: The `test_cfg` will be like `dict(mode='slide', crop_size=(769, 769), stride=(513, 513))`.
+
+    In this mode, multiple patches will be cropped from the input image and passed into the network individually.
+    The crop size and stride between patches are specified by `crop_size` and `stride`.
+    Overlapping areas are merged by averaging.
+
+  - `whole` mode: The `test_cfg` will be like `dict(mode='whole')`.
+
+    In this mode, the whole image will be passed into the network directly.
+
+  By default, we use `slide` inference for models trained with 769x769 inputs and `whole` inference for the rest.
+
+- For an input size of 8x+1 (e.g. 769), `align_corners=True` is adopted as a traditional practice.
+  Otherwise, for an input size of 8x (e.g. 512, 1024), `align_corners=False` is adopted.
+
+## Baselines
+
+### FCN
+
+Please refer to [FCN](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn) for details.
+
+### PSPNet
+
+Please refer to [PSPNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet) for details.
+
+### DeepLabV3
+
+Please refer to [DeepLabV3](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3) for details.
+ +### PSANet + +Please refer to [PSANet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet) for details. + +### DeepLabV3+ + +Please refer to [DeepLabV3+](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus) for details. + +### UPerNet + +Please refer to [UPerNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet) for details. + +### NonLocal Net + +Please refer to [NonLocal Net](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net) for details. + +### EncNet + +Please refer to [EncNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet) for details. + +### CCNet + +Please refer to [CCNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet) for details. + +### DANet + +Please refer to [DANet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet) for details. + +### APCNet + +Please refer to [APCNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet) for details. + +### HRNet + +Please refer to [HRNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet) for details. + +### GCNet + +Please refer to [GCNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet) for details. + +### DMNet + +Please refer to [DMNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet) for details. + +### ANN + +Please refer to [ANN](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann) for details. + +### OCRNet + +Please refer to [OCRNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet) for details. + +### Fast-SCNN + +Please refer to [Fast-SCNN](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastscnn) for details. + +### ResNeSt + +Please refer to [ResNeSt](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest) for details. + +### Semantic FPN + +Please refer to [Semantic FPN](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn) for details. + +### PointRend + +Please refer to [PointRend](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/point_rend) for details. + +### MobileNetV2 + +Please refer to [MobileNetV2](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2) for details. + +### MobileNetV3 + +Please refer to [MobileNetV3](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v3) for details. + +### EMANet + +Please refer to [EMANet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/emanet) for details. + +### DNLNet + +Please refer to [DNLNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet) for details. + +### CGNet + +Please refer to [CGNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/cgnet) for details. + +### Mixed Precision (FP16) Training + +Please refer [Mixed Precision (FP16) Training on BiSeNetV2](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes.py) for details. + +### U-Net + +Please refer to [U-Net](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/README.md) for details. + +### ViT + +Please refer to [ViT](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/vit/README.md) for details. + +### Swin + +Please refer to [Swin](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/README.md) for details. 
+ +### SETR + +Please refer to [SETR](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/README.md) for details. + +## Speed benchmark + +### Hardware + +- 8 NVIDIA Tesla V100 (32G) GPUs +- Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz + +### Software environment + +- Python 3.7 +- PyTorch 1.5 +- CUDA 10.1 +- CUDNN 7.6.03 +- NCCL 2.4.08 + +### Training speed + +For fair comparison, we benchmark all implementations with ResNet-101V1c. +The input size is fixed to 1024x512 with batch size 2. + +The training speed is reported as followed, in terms of second per iter (s/iter). The lower, the better. + +| Implementation | PSPNet (s/iter) | DeepLabV3+ (s/iter) | +| --------------------------------------------------------------------------- | --------------- | ------------------- | +| [MMSegmentation](https://github.com/open-mmlab/mmsegmentation) | **0.83** | **0.85** | +| [SegmenTron](https://github.com/LikeLy-Journey/SegmenTron) | 0.84 | 0.85 | +| [CASILVision](https://github.com/CSAILVision/semantic-segmentation-pytorch) | 1.15 | N/A | +| [vedaseg](https://github.com/Media-Smart/vedaseg) | 0.95 | 1.25 | + +:::{note} +The output stride of DeepLabV3+ is 8. +::: diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md new file mode 100644 index 0000000000..a808f57c6d --- /dev/null +++ b/docs/en/notes/changelog.md @@ -0,0 +1,165 @@ +# Changelog of v1.x + +## v1.0.0rc2(6/12/2022) + +### Highlights + +- Support MaskFormer ([#2215](https://github.com/open-mmlab/mmsegmentation/pull/2215)) +- Support Mask2Former ([#2255](https://github.com/open-mmlab/mmsegmentation/pull/2255)) + +### Features + +- Add ResizeShortestEdge transform ([#2339](https://github.com/open-mmlab/mmsegmentation/pull/2339)) +- Support padding in data pre-processor for model testing([#2290](https://github.com/open-mmlab/mmsegmentation/pull/2290)) +- Fix the problem of post-processing not removing padding ([#2367](https://github.com/open-mmlab/mmsegmentation/pull/2367)) + +### Bug fix + +- Fix links in README ([#2024](https://github.com/open-mmlab/mmsegmentation/pull/2024)) +- Fix swin load state_dict ([#2304](https://github.com/open-mmlab/mmsegmentation/pull/2304)) +- Fix typo of BaseSegDataset docstring ([#2322](https://github.com/open-mmlab/mmsegmentation/pull/2322)) +- Fix the bug in the visualization step ([#2326](https://github.com/open-mmlab/mmsegmentation/pull/2326)) +- Fix ignore class id from -1 to 255 in BaseSegDataset ([#2332](https://github.com/open-mmlab/mmsegmentation/pull/2332)) +- Fix KNet IterativeDecodeHead bug ([#2334](https://github.com/open-mmlab/mmsegmentation/pull/2334)) +- Add input argument for datasets ([#2379](https://github.com/open-mmlab/mmsegmentation/pull/2379)) +- Fix typo in warning on binary classification ([#2382](https://github.com/open-mmlab/mmsegmentation/pull/2382)) + +### Enhancement + +- Fix ci for 1.x ([#2011](https://github.com/open-mmlab/mmsegmentation/pull/2011), [#2019](https://github.com/open-mmlab/mmsegmentation/pull/2019)) +- Fix lint and pre-commit hook ([#2308](https://github.com/open-mmlab/mmsegmentation/pull/2308)) +- Add `data` string in .gitignore file in dev-1.x branch ([#2336](https://github.com/open-mmlab/mmsegmentation/pull/2336)) +- Make scipy as a default dependency in runtime ([#2362](https://github.com/open-mmlab/mmsegmentation/pull/2362)) +- Delete mmcls in runtime.txt ([#2368](https://github.com/open-mmlab/mmsegmentation/pull/2368)) + +### Documentation + +- Update configuration documentation 
([#2048](https://github.com/open-mmlab/mmsegmentation/pull/2048)) +- Update inference documentation ([#2052](https://github.com/open-mmlab/mmsegmentation/pull/2052)) +- Update train test documentation ([#2061](https://github.com/open-mmlab/mmsegmentation/pull/2061)) +- Update get started documentatin ([#2148](https://github.com/open-mmlab/mmsegmentation/pull/2148)) +- Update transforms documentation ([#2088](https://github.com/open-mmlab/mmsegmentation/pull/2088)) +- Add MMEval projects like in README ([#2259](https://github.com/open-mmlab/mmsegmentation/pull/2259)) +- Translate the visualization.md ([#2298](https://github.com/open-mmlab/mmsegmentation/pull/2298)) + +## v1.0.0rc1 (2/11/2022) + +### Highlights + +- Support PoolFormer ([#2191](https://github.com/open-mmlab/mmsegmentation/pull/2191)) +- Add Decathlon dataset ([#2227](https://github.com/open-mmlab/mmsegmentation/pull/2227)) + +### Features + +- Add BioMedical data loading ([#2176](https://github.com/open-mmlab/mmsegmentation/pull/2176)) +- Add LIP dataset ([#2251](https://github.com/open-mmlab/mmsegmentation/pull/2251)) +- Add `GenerateEdge` data transform ([#2210](https://github.com/open-mmlab/mmsegmentation/pull/2210)) + +### Bug fix + +- Fix segmenter-vit-s_fcn config ([#2037](https://github.com/open-mmlab/mmsegmentation/pull/2037)) +- Fix binary segmentation ([#2101](https://github.com/open-mmlab/mmsegmentation/pull/2101)) +- Fix MMSegmentation colab demo ([#2089](https://github.com/open-mmlab/mmsegmentation/pull/2089)) +- Fix ResizeToMultiple transform ([#2185](https://github.com/open-mmlab/mmsegmentation/pull/2185)) +- Use SyncBN in mobilenet_v2 ([#2198](https://github.com/open-mmlab/mmsegmentation/pull/2198)) +- Fix typo in installation ([#2175](https://github.com/open-mmlab/mmsegmentation/pull/2175)) +- Fix typo in visualization.md ([#2116](https://github.com/open-mmlab/mmsegmentation/pull/2116)) + +### Enhancement + +- Add mim extras_requires in setup.py ([#2012](https://github.com/open-mmlab/mmsegmentation/pull/2012)) +- Fix CI ([#2029](https://github.com/open-mmlab/mmsegmentation/pull/2029)) +- Remove ops module ([#2063](https://github.com/open-mmlab/mmsegmentation/pull/2063)) +- Add pyupgrade pre-commit hook ([#2078](https://github.com/open-mmlab/mmsegmentation/pull/2078)) +- Add `out_file` in `add_datasample` of `SegLocalVisualizer` to directly save image ([#2090](https://github.com/open-mmlab/mmsegmentation/pull/2090)) +- Upgrade pre commit hooks ([#2154](https://github.com/open-mmlab/mmsegmentation/pull/2154)) +- Ignore test timm in CI when torch\<1.7 ([#2158](https://github.com/open-mmlab/mmsegmentation/pull/2158)) +- Update requirements ([#2186](https://github.com/open-mmlab/mmsegmentation/pull/2186)) +- Fix Windows platform CI ([#2202](https://github.com/open-mmlab/mmsegmentation/pull/2202)) + +### Documentation + +- Add `Overview` documentation ([#2042](https://github.com/open-mmlab/mmsegmentation/pull/2042)) +- Add `Evaluation` documentation ([#2077](https://github.com/open-mmlab/mmsegmentation/pull/2077)) +- Add `Migration` documentation ([#2066](https://github.com/open-mmlab/mmsegmentation/pull/2066)) +- Add `Structures` documentation ([#2070](https://github.com/open-mmlab/mmsegmentation/pull/2070)) +- Add `Structures` ZN documentation ([#2129](https://github.com/open-mmlab/mmsegmentation/pull/2129)) +- Add `Engine` ZN documentation ([#2157](https://github.com/open-mmlab/mmsegmentation/pull/2157)) +- Update `Prepare datasets` and `Visualization` doc 
([#2054](https://github.com/open-mmlab/mmsegmentation/pull/2054)) +- Update `Models` documentation ([#2160](https://github.com/open-mmlab/mmsegmentation/pull/2160)) +- Update `Add New Modules` documentation ([#2067](https://github.com/open-mmlab/mmsegmentation/pull/2067)) +- Fix the installation commands in get_started.md ([#2174](https://github.com/open-mmlab/mmsegmentation/pull/2174)) +- Add MMYOLO to README.md ([#2220](https://github.com/open-mmlab/mmsegmentation/pull/2220)) + +## v1.0.0rc0 (31/8/2022) + +We are excited to announce the release of MMSegmentation 1.0.0rc0. +MMSeg 1.0.0rc0 is the first version of MMSegmentation 1.x, a part of the OpenMMLab 2.0 projects. +Built upon the new [training engine](https://github.com/open-mmlab/mmengine), +MMSeg 1.x unifies the interfaces of dataset, models, evaluation, and visualization with faster training and testing speed. + +### Highlights + +1. **New engines** MMSeg 1.x is based on [MMEngine](https://github.com/open-mmlab/mmengine), which provides a general and powerful runner that allows more flexible customizations and significantly simplifies the entrypoints of high-level interfaces. + +2. **Unified interfaces** As a part of the OpenMMLab 2.0 projects, MMSeg 1.x unifies and refactors the interfaces and internal logics of train, testing, datasets, models, evaluation, and visualization. All the OpenMMLab 2.0 projects share the same design in those interfaces and logics to allow the emergence of multi-task/modality algorithms. + +3. **Faster speed** We optimize the training and inference speed for common models. + +4. **New features**: + + - Support TverskyLoss function + +5. **More documentation and tutorials**. We add a bunch of documentation and tutorials to help users get started more smoothly. Read it [here](https://mmsegmentation.readthedocs.io/en/1.x/). + +### Breaking Changes + +We briefly list the major breaking changes here. +We will update the [migration guide](../migration.md) to provide complete details and migration instructions. + +#### Training and testing + +- MMSeg 1.x runs on PyTorch>=1.6. We have deprecated the support of PyTorch 1.5 to embrace the mixed precision training and other new features since PyTorch 1.6. Some models can still run on PyTorch 1.5, but the full functionality of MMSeg 1.x is not guaranteed. + +- MMSeg 1.x uses Runner in [MMEngine](https://github.com/open-mmlab/mmengine) rather than that in MMCV. The new Runner implements and unifies the building logic of dataset, model, evaluation, and visualizer. Therefore, MMSeg 1.x no longer maintains the building logics of those modules in `mmseg.train.apis` and `tools/train.py`. Those code have been migrated into [MMEngine](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/runner.py). Please refer to the [migration guide of Runner in MMEngine](https://mmengine.readthedocs.io/en/latest/migration/runner.html) for more details. + +- The Runner in MMEngine also supports testing and validation. The testing scripts are also simplified, which has similar logic as that in training scripts to build the runner. + +- The execution points of hooks in the new Runner have been enriched to allow more flexible customization. Please refer to the [migration guide of Hook in MMEngine](https://mmengine.readthedocs.io/en/latest/migration/hook.html) for more details. + +- Learning rate and momentum scheduling has been migrated from `Hook` to `Parameter Scheduler` in MMEngine. 
Please refer to the [migration guide of Parameter Scheduler in MMEngine](https://mmengine.readthedocs.io/en/latest/migration/param_scheduler.html) for more details. + +#### Configs + +- The [Runner in MMEngine](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/runner.py) uses a different config structures to ease the understanding of the components in runner. Users can read the [config example of mmseg](../user_guides/config.md) or refer to the [migration guide in MMEngine](https://mmengine.readthedocs.io/en/latest/migration/runner.html) for migration details. +- The file names of configs and models are also refactored to follow the new rules unified across OpenMMLab 2.0 projects. Please refer to the [user guides of config](../user_guides/1_config.md) for more details. + +#### Components + +- Dataset +- Data Transforms +- Model +- Evaluation +- Visualization + +### Improvements + +- Support mixed precision training of all the models. However, some models may got Nan results due to some numerical issues. We will update the documentation and list their results (accuracy of failure) of mixed precision training. + +### Bug Fixes + +- Fix several config file errors [#1994](https://github.com/open-mmlab/mmsegmentation/pull/1994) + +### New Features + +1. Support data structures and encapsulating `seg_logits` in data samples, which can be return from models to support more common evaluation metrics. + +### Ongoing changes + +1. Test-time augmentation: which is supported in MMSeg 0.x is not implemented in this version due to limited time slot. We will support it in the following releases with a new and simplified design. + +2. Inference interfaces: a unified inference interfaces will be supported in the future to ease the use of released models. + +3. Interfaces of useful tools that can be used in notebook: more useful tools that implemented in the `tools` directory will have their python interfaces so that they can be used through notebook and in downstream libraries. + +4. Documentation: we will add more design docs, tutorials, and migration guidance so that the community can deep dive into our new design, participate the future development, and smoothly migrate downstream libraries to MMSeg 1.x. 
diff --git a/docs/en/notes/changelog_v0.x.md b/docs/en/notes/changelog_v0.x.md new file mode 100644 index 0000000000..d347a444d8 --- /dev/null +++ b/docs/en/notes/changelog_v0.x.md @@ -0,0 +1,720 @@ +## Changelog + +### V0.24.1 (5/1/2022) + +**Bug Fixes** + +- Fix `LayerDecayOptimizerConstructor` for MAE training ([#1539](https://github.com/open-mmlab/mmsegmentation/pull/1539), [#1540](https://github.com/open-mmlab/mmsegmentation/pull/1540)) + +### V0.24.0 (4/29/2022) + +**Highlights** + +- Support MAE: Masked Autoencoders Are Scalable Vision Learners +- Support Resnet strikes back + +**New Features** + +- Support MAE: Masked Autoencoders Are Scalable Vision Learners ([#1307](https://github.com/open-mmlab/mmsegmentation/pull/1307), [#1523](https://github.com/open-mmlab/mmsegmentation/pull/1523)) +- Support Resnet strikes back ([#1390](https://github.com/open-mmlab/mmsegmentation/pull/1390)) +- Support extra dataloader settings in configs ([#1435](https://github.com/open-mmlab/mmsegmentation/pull/1435)) + +**Bug Fixes** + +- Fix input previous results for the last cascade_decode_head ([#1450](https://github.com/open-mmlab/mmsegmentation/pull/1450)) +- Fix validation loss logging ([#1494](https://github.com/open-mmlab/mmsegmentation/pull/1494)) +- Fix the bug in binary_cross_entropy ([1527](https://github.com/open-mmlab/mmsegmentation/pull/1527)) +- Support single channel prediction for Binary Cross Entropy Loss ([#1454](https://github.com/open-mmlab/mmsegmentation/pull/1454)) +- Fix potential bugs in accuracy.py ([1496](https://github.com/open-mmlab/mmsegmentation/pull/1496)) +- Avoid converting label ids twice by label map during evaluation ([1417](https://github.com/open-mmlab/mmsegmentation/pull/1417)) +- Fix bug about label_map ([1445](https://github.com/open-mmlab/mmsegmentation/pull/1445)) +- Fix image save path bug in Windows ([1423](https://github.com/open-mmlab/mmsegmentation/pull/1423)) +- Fix MMSegmentation Colab demo ([1501](https://github.com/open-mmlab/mmsegmentation/pull/1501), [1452](https://github.com/open-mmlab/mmsegmentation/pull/1452)) +- Migrate azure blob for beit checkpoints ([1503](https://github.com/open-mmlab/mmsegmentation/pull/1503)) +- Fix bug in `tools/analyse_logs.py` caused by wrong plot_iter in some cases ([1428](https://github.com/open-mmlab/mmsegmentation/pull/1428)) + +**Improvements** + +- Merge BEiT and ConvNext's LR decay optimizer constructors ([#1438](https://github.com/open-mmlab/mmsegmentation/pull/1438)) +- Register optimizer constructor with mmseg ([#1456](https://github.com/open-mmlab/mmsegmentation/pull/1456)) +- Refactor transformer encode layer in ViT and BEiT backbone ([#1481](https://github.com/open-mmlab/mmsegmentation/pull/1481)) +- Add `build_pos_embed` and `build_layers` for BEiT ([1517](https://github.com/open-mmlab/mmsegmentation/pull/1517)) +- Add `with_cp` to mit and vit ([1431](https://github.com/open-mmlab/mmsegmentation/pull/1431)) +- Fix inconsistent dtype of `seg_label` in stdc decode ([1463](https://github.com/open-mmlab/mmsegmentation/pull/1463)) +- Delete random seed for training in `dist_train.sh` ([1519](https://github.com/open-mmlab/mmsegmentation/pull/1519)) +- Revise high `workers_per_gpus` in config file ([#1506](https://github.com/open-mmlab/mmsegmentation/pull/1506)) +- Add GPG keys and del mmcv version in Dockerfile ([1534](https://github.com/open-mmlab/mmsegmentation/pull/1534)) +- Update checkpoint for model in deeplabv3plus ([#1487](https://github.com/open-mmlab/mmsegmentation/pull/1487)) +- Add 
`DistSamplerSeedHook` to set epoch number to dataloader when runner is `EpochBasedRunner` ([1449](https://github.com/open-mmlab/mmsegmentation/pull/1449)) +- Provide URLs of Swin Transformer pretrained models ([1389](https://github.com/open-mmlab/mmsegmentation/pull/1389)) +- Updating Dockerfiles From Docker Directory and `get_started.md` to reach latest stable version of Python, PyTorch and MMCV ([1446](https://github.com/open-mmlab/mmsegmentation/pull/1446)) + +**Documentation** + +- Add more clearly statement of CPU training/inference ([1518](https://github.com/open-mmlab/mmsegmentation/pull/1518)) + +**Contributors** + +- @jiangyitong made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1431 +- @kahkeng made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1447 +- @Nourollah made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1446 +- @androbaza made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1452 +- @Yzichen made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1445 +- @whu-pzhang made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1423 +- @panfeng-hover made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1417 +- @Johnson-Wang made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1496 +- @jere357 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1460 +- @mfernezir made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1494 +- @donglixp made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1503 +- @YuanLiuuuuuu made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1307 +- @Dawn-bin made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1527 + +### V0.23.0 (4/1/2022) + +**Highlights** + +- Support BEiT: BERT Pre-Training of Image Transformers +- Support K-Net: Towards Unified Image Segmentation +- Add `avg_non_ignore` of CELoss to support average loss over non-ignored elements +- Support dataset initialization with file client + +**New Features** + +- Support BEiT: BERT Pre-Training of Image Transformers ([#1404](https://github.com/open-mmlab/mmsegmentation/pull/1404)) +- Support K-Net: Towards Unified Image Segmentation ([#1289](https://github.com/open-mmlab/mmsegmentation/pull/1289)) +- Support dataset initialization with file client ([#1402](https://github.com/open-mmlab/mmsegmentation/pull/1402)) +- Add class name function for STARE datasets ([#1376](https://github.com/open-mmlab/mmsegmentation/pull/1376)) +- Support different seeds on different ranks when distributed training ([#1362](https://github.com/open-mmlab/mmsegmentation/pull/1362)) +- Add `nlc2nchw2nlc` and `nchw2nlc2nchw` to simplify tensor with different dimension operation ([#1249](https://github.com/open-mmlab/mmsegmentation/pull/1249)) + +**Improvements** + +- Synchronize random seed for distributed sampler ([#1411](https://github.com/open-mmlab/mmsegmentation/pull/1411)) +- Add script and documentation for multi-machine distributed training ([#1383](https://github.com/open-mmlab/mmsegmentation/pull/1383)) + +**Bug Fixes** + +- Add `avg_non_ignore` of CELoss to support average loss over non-ignored elements ([#1409](https://github.com/open-mmlab/mmsegmentation/pull/1409)) +- Fix some wrong URLs of models or logs in `./configs` 
([#1336](https://github.com/open-mmlab/mmsegmentation/pull/1433)) +- Add title and color theme arguments to plot function in `tools/confusion_matrix.py` ([#1401](https://github.com/open-mmlab/mmsegmentation/pull/1401)) +- Fix outdated link in Colab demo ([#1392](https://github.com/open-mmlab/mmsegmentation/pull/1392)) +- Fix typos ([#1424](https://github.com/open-mmlab/mmsegmentation/pull/1424), [#1405](https://github.com/open-mmlab/mmsegmentation/pull/1405), [#1371](https://github.com/open-mmlab/mmsegmentation/pull/1371), [#1366](https://github.com/open-mmlab/mmsegmentation/pull/1366), [#1363](https://github.com/open-mmlab/mmsegmentation/pull/1363)) + +**Documentation** + +- Add FAQ document ([#1420](https://github.com/open-mmlab/mmsegmentation/pull/1420)) +- Fix the config name style description in official docs([#1414](https://github.com/open-mmlab/mmsegmentation/pull/1414)) + +**Contributors** + +- @kinglintianxia made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1371 +- @CCODING04 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1376 +- @mob5566 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1401 +- @xiongnemo made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1392 +- @Xiangxu-0103 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1405 + +### V0.22.1 (3/9/2022) + +**Bug Fixes** + +- Fix the ZeroDivisionError that all pixels in one image is ignored. ([#1336](https://github.com/open-mmlab/mmsegmentation/pull/1336)) + +**Improvements** + +- Provide URLs of STDC, Segmenter and Twins pretrained models ([#1272](https://github.com/open-mmlab/mmsegmentation/pull/1357)) + +### V0.22 (3/04/2022) + +**Highlights** + +- Support ConvNeXt: A ConvNet for the 2020s. Please use the latest MMClassification (0.21.0) to try it out. +- Support iSAID aerial Dataset. +- Officially Support inference on Windows OS. + +**New Features** + +- Support ConvNeXt: A ConvNet for the 2020s. ([#1216](https://github.com/open-mmlab/mmsegmentation/pull/1216)) +- Support iSAID aerial Dataset. ([#1115](https://github.com/open-mmlab/mmsegmentation/pull/1115) +- Generating and plotting confusion matrix. ([#1301](https://github.com/open-mmlab/mmsegmentation/pull/1301)) + +**Improvements** + +- Refactor 4 decoder heads (ASPP, FCN, PSP, UPer): Split forward function into `_forward_feature` and `cls_seg`. ([#1299](https://github.com/open-mmlab/mmsegmentation/pull/1299)) +- Add `min_size` arg in `Resize` to keep the shape after resize bigger than slide window. ([#1318](https://github.com/open-mmlab/mmsegmentation/pull/1318)) +- Revise pre-commit-hooks. ([#1315](https://github.com/open-mmlab/mmsegmentation/pull/1315)) +- Add win-ci. ([#1296](https://github.com/open-mmlab/mmsegmentation/pull/1296)) + +**Bug Fixes** + +- Fix `mlp_ratio` type in Swin Transformer. ([#1274](https://github.com/open-mmlab/mmsegmentation/pull/1274)) +- Fix path errors in `./demo` . ([#1269](https://github.com/open-mmlab/mmsegmentation/pull/1269)) +- Fix bug in conversion of potsdam. ([#1279](https://github.com/open-mmlab/mmsegmentation/pull/1279)) +- Make accuracy take into account `ignore_index`. ([#1259](https://github.com/open-mmlab/mmsegmentation/pull/1259)) +- Add Pytorch HardSwish assertion in unit test. ([#1294](https://github.com/open-mmlab/mmsegmentation/pull/1294)) +- Fix wrong palette value in vaihingen. 
([#1292](https://github.com/open-mmlab/mmsegmentation/pull/1292)) +- Fix the bug that SETR cannot load pretrain. ([#1293](https://github.com/open-mmlab/mmsegmentation/pull/1293)) +- Update correct `In Collection` in metafile of each configs. ([#1239](https://github.com/open-mmlab/mmsegmentation/pull/1239)) +- Upload completed STDC models. ([#1332](https://github.com/open-mmlab/mmsegmentation/pull/1332)) +- Fix `DNLHead` exports onnx inference difference type Cast error. ([#1161](https://github.com/open-mmlab/mmsegmentation/pull/1332)) + +**Contributors** + +- @JiaYanhao made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1269 +- @andife made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1281 +- @SBCV made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1279 +- @HJoonKwon made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1259 +- @Tsingularity made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1290 +- @Waterman0524 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1115 +- @MeowZheng made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1315 +- @linfangjian01 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1318 + +### V0.21.1 (2/9/2022) + +**Bug Fixes** + +- Fix typos in docs. ([#1263](https://github.com/open-mmlab/mmsegmentation/pull/1263)) +- Fix repeating log by `setup_multi_processes`. ([#1267](https://github.com/open-mmlab/mmsegmentation/pull/1267)) +- Upgrade isort in pre-commit hook. ([#1270](https://github.com/open-mmlab/mmsegmentation/pull/1270)) + +**Improvements** + +- Use MMCV load_state_dict func in ViT/Swin. ([#1272](https://github.com/open-mmlab/mmsegmentation/pull/1272)) +- Add exception for PointRend for support CPU-only. ([#1271](https://github.com/open-mmlab/mmsegmentation/pull/1270)) + +### V0.21 (1/29/2022) + +**Highlights** + +- Officially Support CPUs training and inference, please use the latest MMCV (1.4.4) to try it out. +- Support Segmenter: Transformer for Semantic Segmentation (ICCV'2021). +- Support ISPRS Potsdam and Vaihingen Dataset. +- Add Mosaic transform and `MultiImageMixDataset` class in `dataset_wrappers`. 
+ +**New Features** + +- Support Segmenter: Transformer for Semantic Segmentation (ICCV'2021) ([#955](https://github.com/open-mmlab/mmsegmentation/pull/955)) +- Support ISPRS Potsdam and Vaihingen Dataset ([#1097](https://github.com/open-mmlab/mmsegmentation/pull/1097), [#1171](https://github.com/open-mmlab/mmsegmentation/pull/1171)) +- Add segformer‘s benchmark on cityscapes ([#1155](https://github.com/open-mmlab/mmsegmentation/pull/1155)) +- Add auto resume ([#1172](https://github.com/open-mmlab/mmsegmentation/pull/1172)) +- Add Mosaic transform and `MultiImageMixDataset` class in `dataset_wrappers` ([#1093](https://github.com/open-mmlab/mmsegmentation/pull/1093), [#1105](https://github.com/open-mmlab/mmsegmentation/pull/1105)) +- Add log collector ([#1175](https://github.com/open-mmlab/mmsegmentation/pull/1175)) + +**Improvements** + +- New-style CPU training and inference ([#1251](https://github.com/open-mmlab/mmsegmentation/pull/1251)) +- Add UNet benchmark with multiple losses supervision ([#1143](https://github.com/open-mmlab/mmsegmentation/pull/1143)) + +**Bug Fixes** + +- Fix the model statistics in doc for readthedoc ([#1153](https://github.com/open-mmlab/mmsegmentation/pull/1153)) +- Set random seed for `palette` if not given ([#1152](https://github.com/open-mmlab/mmsegmentation/pull/1152)) +- Add `COCOStuffDataset` in `class_names.py` ([#1222](https://github.com/open-mmlab/mmsegmentation/pull/1222)) +- Fix bug in non-distributed multi-gpu training/testing ([#1247](https://github.com/open-mmlab/mmsegmentation/pull/1247)) +- Delete unnecessary lines of STDCHead ([#1231](https://github.com/open-mmlab/mmsegmentation/pull/1231)) + +**Contributors** + +- @jbwang1997 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1152 +- @BeaverCC made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1206 +- @Echo-minn made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1214 +- @rstrudel made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/955 + +### V0.20.2 (12/15/2021) + +**Bug Fixes** + +- Revise --option to --options to avoid BC-breaking. ([#1140](https://github.com/open-mmlab/mmsegmentation/pull/1140)) + +### V0.20.1 (12/14/2021) + +**Improvements** + +- Change options to cfg-options ([#1129](https://github.com/open-mmlab/mmsegmentation/pull/1129)) + +**Bug Fixes** + +- Fix `` in metafile. 
([#1127](https://github.com/open-mmlab/mmsegmentation/pull/1127)) +- Fix correct `num_classes` of HRNet in `LoveDA` dataset ([#1136](https://github.com/open-mmlab/mmsegmentation/pull/1136)) + +### V0.20 (12/10/2021) + +**Highlights** + +- Support Twins ([#989](https://github.com/open-mmlab/mmsegmentation/pull/989)) +- Support a real-time segmentation model STDC ([#995](https://github.com/open-mmlab/mmsegmentation/pull/995)) +- Support a widely-used segmentation model in lane detection ERFNet ([#960](https://github.com/open-mmlab/mmsegmentation/pull/960)) +- Support A Remote Sensing Land-Cover Dataset LoveDA ([#1028](https://github.com/open-mmlab/mmsegmentation/pull/1028)) +- Support focal loss ([#1024](https://github.com/open-mmlab/mmsegmentation/pull/1024)) + +**New Features** + +- Support Twins ([#989](https://github.com/open-mmlab/mmsegmentation/pull/989)) +- Support a real-time segmentation model STDC ([#995](https://github.com/open-mmlab/mmsegmentation/pull/995)) +- Support a widely-used segmentation model in lane detection ERFNet ([#960](https://github.com/open-mmlab/mmsegmentation/pull/960)) +- Add SETR cityscapes benchmark ([#1087](https://github.com/open-mmlab/mmsegmentation/pull/1087)) +- Add BiSeNetV1 COCO-Stuff 164k benchmark ([#1019](https://github.com/open-mmlab/mmsegmentation/pull/1019)) +- Support focal loss ([#1024](https://github.com/open-mmlab/mmsegmentation/pull/1024)) +- Add Cutout transform ([#1022](https://github.com/open-mmlab/mmsegmentation/pull/1022)) + +**Improvements** + +- Set a random seed when the user does not set a seed ([#1039](https://github.com/open-mmlab/mmsegmentation/pull/1039)) +- Add CircleCI setup ([#1086](https://github.com/open-mmlab/mmsegmentation/pull/1086)) +- Skip CI on ignoring given paths ([#1078](https://github.com/open-mmlab/mmsegmentation/pull/1078)) +- Add abstract and image for every paper ([#1060](https://github.com/open-mmlab/mmsegmentation/pull/1060)) +- Create a symbolic link on windows ([#1090](https://github.com/open-mmlab/mmsegmentation/pull/1090)) +- Support video demo using trained model ([#1014](https://github.com/open-mmlab/mmsegmentation/pull/1014)) + +**Bug Fixes** + +- Fix incorrectly loading init_cfg or pretrained models of several transformer models ([#999](https://github.com/open-mmlab/mmsegmentation/pull/999), [#1069](https://github.com/open-mmlab/mmsegmentation/pull/1069), [#1102](https://github.com/open-mmlab/mmsegmentation/pull/1102)) +- Fix EfficientMultiheadAttention in SegFormer ([#1037](https://github.com/open-mmlab/mmsegmentation/pull/1037)) +- Remove `fp16` folder in `configs` ([#1031](https://github.com/open-mmlab/mmsegmentation/pull/1031)) +- Fix several typos in .yml file (Dice Metric [#1041](https://github.com/open-mmlab/mmsegmentation/pull/1041), ADE20K dataset [#1120](https://github.com/open-mmlab/mmsegmentation/pull/1120), Training Memory (GB) [#1083](https://github.com/open-mmlab/mmsegmentation/pull/1083)) +- Fix test error when using `--show-dir` ([#1091](https://github.com/open-mmlab/mmsegmentation/pull/1091)) +- Fix dist training infinite waiting issue ([#1035](https://github.com/open-mmlab/mmsegmentation/pull/1035)) +- Change the upper version of mmcv to 1.5.0 ([#1096](https://github.com/open-mmlab/mmsegmentation/pull/1096)) +- Fix symlink failure on Windows ([#1038](https://github.com/open-mmlab/mmsegmentation/pull/1038)) +- Cancel previous runs that are not completed ([#1118](https://github.com/open-mmlab/mmsegmentation/pull/1118)) +- Unified links of readthedocs in docs 
([#1119](https://github.com/open-mmlab/mmsegmentation/pull/1119)) + +**Contributors** + +- @Junjue-Wang made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1028 +- @ddebby made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1066 +- @del-zhenwu made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1078 +- @KangBK0120 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1106 +- @zergzzlun made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1091 +- @fingertap made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1035 +- @irvingzhang0512 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1014 +- @littleSunlxy made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/989 +- @lkm2835 +- @RockeyCoss +- @MengzhangLI +- @Junjun2016 +- @xiexinch +- @xvjiarui + +### V0.19 (11/02/2021) + +**Highlights** + +- Support TIMMBackbone wrapper ([#998](https://github.com/open-mmlab/mmsegmentation/pull/998)) +- Support custom hook ([#428](https://github.com/open-mmlab/mmsegmentation/pull/428)) +- Add codespell pre-commit hook ([#920](https://github.com/open-mmlab/mmsegmentation/pull/920)) +- Add FastFCN benchmark on ADE20K ([#972](https://github.com/open-mmlab/mmsegmentation/pull/972)) + +**New Features** + +- Support TIMMBackbone wrapper ([#998](https://github.com/open-mmlab/mmsegmentation/pull/998)) +- Support custom hook ([#428](https://github.com/open-mmlab/mmsegmentation/pull/428)) +- Add FastFCN benchmark on ADE20K ([#972](https://github.com/open-mmlab/mmsegmentation/pull/972)) +- Add codespell pre-commit hook and fix typos ([#920](https://github.com/open-mmlab/mmsegmentation/pull/920)) + +**Improvements** + +- Make inputs & channels smaller in unittests ([#1004](https://github.com/open-mmlab/mmsegmentation/pull/1004)) +- Change `self.loss_decode` back to `dict` in Single Loss situation ([#1002](https://github.com/open-mmlab/mmsegmentation/pull/1002)) + +**Bug Fixes** + +- Fix typo in usage example ([#1003](https://github.com/open-mmlab/mmsegmentation/pull/1003)) +- Add contiguous after permutation in ViT ([#992](https://github.com/open-mmlab/mmsegmentation/pull/992)) +- Fix the invalid link ([#985](https://github.com/open-mmlab/mmsegmentation/pull/985)) +- Fix bug in CI with python 3.9 ([#994](https://github.com/open-mmlab/mmsegmentation/pull/994)) +- Fix bug when loading class name form file in custom dataset ([#923](https://github.com/open-mmlab/mmsegmentation/pull/923)) + +**Contributors** + +- @ShoupingShan made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/923 +- @RockeyCoss made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/954 +- @HarborYuan made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/992 +- @lkm2835 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/1003 +- @gszh made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/428 +- @VVsssssk +- @MengzhangLI +- @Junjun2016 + +### V0.18 (10/07/2021) + +**Highlights** + +- Support three real-time segmentation models (ICNet [#884](https://github.com/open-mmlab/mmsegmentation/pull/884), BiSeNetV1 [#851](https://github.com/open-mmlab/mmsegmentation/pull/851), and BiSeNetV2 [#804](https://github.com/open-mmlab/mmsegmentation/pull/804)) +- Support one efficient 
segmentation model (FastFCN [#885](https://github.com/open-mmlab/mmsegmentation/pull/885)) +- Support one efficient non-local/self-attention based segmentation model (ISANet [#70](https://github.com/open-mmlab/mmsegmentation/pull/70)) +- Support COCO-Stuff 10k and 164k datasets ([#625](https://github.com/open-mmlab/mmsegmentation/pull/625)) +- Support evaluate concated dataset separately ([#833](https://github.com/open-mmlab/mmsegmentation/pull/833)) +- Support loading GT for evaluation from multi-file backend ([#867](https://github.com/open-mmlab/mmsegmentation/pull/867)) + +**New Features** + +- Support three real-time segmentation models (ICNet [#884](https://github.com/open-mmlab/mmsegmentation/pull/884), BiSeNetV1 [#851](https://github.com/open-mmlab/mmsegmentation/pull/851), and BiSeNetV2 [#804](https://github.com/open-mmlab/mmsegmentation/pull/804)) +- Support one efficient segmentation model (FastFCN [#885](https://github.com/open-mmlab/mmsegmentation/pull/885)) +- Support one efficient non-local/self-attention based segmentation model (ISANet [#70](https://github.com/open-mmlab/mmsegmentation/pull/70)) +- Support COCO-Stuff 10k and 164k datasets ([#625](https://github.com/open-mmlab/mmsegmentation/pull/625)) +- Support evaluate concated dataset separately ([#833](https://github.com/open-mmlab/mmsegmentation/pull/833)) + +**Improvements** + +- Support loading GT for evaluation from multi-file backend ([#867](https://github.com/open-mmlab/mmsegmentation/pull/867)) +- Auto-convert SyncBN to BN when training on DP automatly([#772](https://github.com/open-mmlab/mmsegmentation/pull/772)) +- Refactor Swin-Transformer ([#800](https://github.com/open-mmlab/mmsegmentation/pull/800)) + +**Bug Fixes** + +- Update mmcv installation in dockerfile ([#860](https://github.com/open-mmlab/mmsegmentation/pull/860)) +- Fix number of iteration bug when resuming checkpoint in distributed train ([#866](https://github.com/open-mmlab/mmsegmentation/pull/866)) +- Fix parsing parse in val_step ([#906](https://github.com/open-mmlab/mmsegmentation/pull/906)) + +### V0.17 (09/01/2021) + +**Highlights** + +- Support SegFormer +- Support DPT +- Support Dark Zurich and Nighttime Driving datasets +- Support progressive evaluation + +**New Features** + +- Support SegFormer ([#599](https://github.com/open-mmlab/mmsegmentation/pull/599)) +- Support DPT ([#605](https://github.com/open-mmlab/mmsegmentation/pull/605)) +- Support Dark Zurich and Nighttime Driving datasets ([#815](https://github.com/open-mmlab/mmsegmentation/pull/815)) +- Support progressive evaluation ([#709](https://github.com/open-mmlab/mmsegmentation/pull/709)) + +**Improvements** + +- Add multiscale_output interface and unittests for HRNet ([#830](https://github.com/open-mmlab/mmsegmentation/pull/830)) +- Support inherit cityscapes dataset ([#750](https://github.com/open-mmlab/mmsegmentation/pull/750)) +- Fix some typos in README.md ([#824](https://github.com/open-mmlab/mmsegmentation/pull/824)) +- Delete convert function and add instruction to ViT/Swin README.md ([#791](https://github.com/open-mmlab/mmsegmentation/pull/791)) +- Add vit/swin/mit convert weight scripts ([#783](https://github.com/open-mmlab/mmsegmentation/pull/783)) +- Add copyright files ([#796](https://github.com/open-mmlab/mmsegmentation/pull/796)) + +**Bug Fixes** + +- Fix invalid checkpoint link in inference_demo.ipynb ([#814](https://github.com/open-mmlab/mmsegmentation/pull/814)) +- Ensure that items in dataset have the same order across multi machine 
([#780](https://github.com/open-mmlab/mmsegmentation/pull/780)) +- Fix the log error ([#766](https://github.com/open-mmlab/mmsegmentation/pull/766)) + +### V0.16 (08/04/2021) + +**Highlights** + +- Support PyTorch 1.9 +- Support SegFormer backbone MiT +- Support md2yml pre-commit hook +- Support frozen stage for HRNet + +**New Features** + +- Support SegFormer backbone MiT ([#594](https://github.com/open-mmlab/mmsegmentation/pull/594)) +- Support md2yml pre-commit hook ([#732](https://github.com/open-mmlab/mmsegmentation/pull/732)) +- Support mim ([#717](https://github.com/open-mmlab/mmsegmentation/pull/717)) +- Add mmseg2torchserve tool ([#552](https://github.com/open-mmlab/mmsegmentation/pull/552)) + +**Improvements** + +- Support hrnet frozen stage ([#743](https://github.com/open-mmlab/mmsegmentation/pull/743)) +- Add template of reimplementation questions ([#741](https://github.com/open-mmlab/mmsegmentation/pull/741)) +- Output pdf and epub formats for readthedocs ([#742](https://github.com/open-mmlab/mmsegmentation/pull/742)) +- Refine the docstring of ResNet ([#723](https://github.com/open-mmlab/mmsegmentation/pull/723)) +- Replace interpolate with resize ([#731](https://github.com/open-mmlab/mmsegmentation/pull/731)) +- Update resource limit ([#700](https://github.com/open-mmlab/mmsegmentation/pull/700)) +- Update config.md ([#678](https://github.com/open-mmlab/mmsegmentation/pull/678)) + +**Bug Fixes** + +- Fix ATTENTION registry ([#729](https://github.com/open-mmlab/mmsegmentation/pull/729)) +- Fix analyze log script ([#716](https://github.com/open-mmlab/mmsegmentation/pull/716)) +- Fix doc api display ([#725](https://github.com/open-mmlab/mmsegmentation/pull/725)) +- Fix patch_embed and pos_embed mismatch error ([#685](https://github.com/open-mmlab/mmsegmentation/pull/685)) +- Fix efficient test for multi-node ([#707](https://github.com/open-mmlab/mmsegmentation/pull/707)) +- Fix init_cfg in resnet backbone ([#697](https://github.com/open-mmlab/mmsegmentation/pull/697)) +- Fix efficient test bug ([#702](https://github.com/open-mmlab/mmsegmentation/pull/702)) +- Fix url error in config docs ([#680](https://github.com/open-mmlab/mmsegmentation/pull/680)) +- Fix mmcv installation ([#676](https://github.com/open-mmlab/mmsegmentation/pull/676)) +- Fix torch version ([#670](https://github.com/open-mmlab/mmsegmentation/pull/670)) + +**Contributors** + +@sshuair @xiexinch @Junjun2016 @mmeendez8 @xvjiarui @sennnnn @puhsu @BIGWangYuDong @keke1u @daavoo + +### V0.15 (07/04/2021) + +**Highlights** + +- Support ViT, SETR, and Swin-Transformer +- Add Chinese documentation +- Unified parameter initialization + +**Bug Fixes** + +- Fix typo and links ([#608](https://github.com/open-mmlab/mmsegmentation/pull/608)) +- Fix Dockerfile ([#607](https://github.com/open-mmlab/mmsegmentation/pull/607)) +- Fix ViT init ([#609](https://github.com/open-mmlab/mmsegmentation/pull/609)) +- Fix mmcv version compatible table ([#658](https://github.com/open-mmlab/mmsegmentation/pull/658)) +- Fix model links of DMNEt ([#660](https://github.com/open-mmlab/mmsegmentation/pull/660)) + +**New Features** + +- Support loading DeiT weights ([#538](https://github.com/open-mmlab/mmsegmentation/pull/538)) +- Support SETR ([#531](https://github.com/open-mmlab/mmsegmentation/pull/531), [#635](https://github.com/open-mmlab/mmsegmentation/pull/635)) +- Add config and models for ViT backbone with UperHead ([#520](https://github.com/open-mmlab/mmsegmentation/pull/531), [#635](https://github.com/open-mmlab/mmsegmentation/pull/520)) 
+- Support Swin-Transformer ([#511](https://github.com/open-mmlab/mmsegmentation/pull/511)) +- Add higher accuracy FastSCNN ([#606](https://github.com/open-mmlab/mmsegmentation/pull/606)) +- Add Chinese documentation ([#666](https://github.com/open-mmlab/mmsegmentation/pull/666)) + +**Improvements** + +- Unified parameter initialization ([#567](https://github.com/open-mmlab/mmsegmentation/pull/567)) +- Separate CUDA and CPU in github action CI ([#602](https://github.com/open-mmlab/mmsegmentation/pull/602)) +- Support persistent dataloader worker ([#646](https://github.com/open-mmlab/mmsegmentation/pull/646)) +- Update meta file fields ([#661](https://github.com/open-mmlab/mmsegmentation/pull/661), [#664](https://github.com/open-mmlab/mmsegmentation/pull/664)) + +### V0.14 (06/02/2021) + +**Highlights** + +- Support ONNX to TensorRT +- Support MIM + +**Bug Fixes** + +- Fix ONNX to TensorRT verify ([#547](https://github.com/open-mmlab/mmsegmentation/pull/547)) +- Fix save best for EvalHook ([#575](https://github.com/open-mmlab/mmsegmentation/pull/575)) + +**New Features** + +- Support loading DeiT weights ([#538](https://github.com/open-mmlab/mmsegmentation/pull/538)) +- Support ONNX to TensorRT ([#542](https://github.com/open-mmlab/mmsegmentation/pull/542)) +- Support output results for ADE20k ([#544](https://github.com/open-mmlab/mmsegmentation/pull/544)) +- Support MIM ([#549](https://github.com/open-mmlab/mmsegmentation/pull/549)) + +**Improvements** + +- Add option for ViT output shape ([#530](https://github.com/open-mmlab/mmsegmentation/pull/530)) +- Infer batch size using len(result) ([#532](https://github.com/open-mmlab/mmsegmentation/pull/532)) +- Add compatible table between MMSeg and MMCV ([#558](https://github.com/open-mmlab/mmsegmentation/pull/558)) + +### V0.13 (05/05/2021) + +**Highlights** + +- Support Pascal Context Class-59 dataset. +- Support Visual Transformer Backbone. +- Support mFscore metric. 
+ +**Bug Fixes** + +- Fixed Colaboratory tutorial ([#451](https://github.com/open-mmlab/mmsegmentation/pull/451)) +- Fixed mIoU calculation range ([#471](https://github.com/open-mmlab/mmsegmentation/pull/471)) +- Fixed sem_fpn, unet README.md ([#492](https://github.com/open-mmlab/mmsegmentation/pull/492)) +- Fixed `num_classes` in FCN for Pascal Context 60-class dataset ([#488](https://github.com/open-mmlab/mmsegmentation/pull/488)) +- Fixed FP16 inference ([#497](https://github.com/open-mmlab/mmsegmentation/pull/497)) + +**New Features** + +- Support dynamic export and visualize to pytorch2onnx ([#463](https://github.com/open-mmlab/mmsegmentation/pull/463)) +- Support export to torchscript ([#469](https://github.com/open-mmlab/mmsegmentation/pull/469), [#499](https://github.com/open-mmlab/mmsegmentation/pull/499)) +- Support Pascal Context Class-59 dataset ([#459](https://github.com/open-mmlab/mmsegmentation/pull/459)) +- Support Visual Transformer backbone ([#465](https://github.com/open-mmlab/mmsegmentation/pull/465)) +- Support UpSample Neck ([#512](https://github.com/open-mmlab/mmsegmentation/pull/512)) +- Support mFscore metric ([#509](https://github.com/open-mmlab/mmsegmentation/pull/509)) + +**Improvements** + +- Add more CI for PyTorch ([#460](https://github.com/open-mmlab/mmsegmentation/pull/460)) +- Add print model graph args for tools/print_config.py ([#451](https://github.com/open-mmlab/mmsegmentation/pull/451)) +- Add cfg links in modelzoo README.md ([#468](https://github.com/open-mmlab/mmsegmentation/pull/469)) +- Add BaseSegmentor import to segmentors/__init__.py ([#495](https://github.com/open-mmlab/mmsegmentation/pull/495)) +- Add MMOCR, MMGeneration links ([#501](https://github.com/open-mmlab/mmsegmentation/pull/501), [#506](https://github.com/open-mmlab/mmsegmentation/pull/506)) +- Add Chinese QR code ([#506](https://github.com/open-mmlab/mmsegmentation/pull/506)) +- Use MMCV MODEL_REGISTRY ([#515](https://github.com/open-mmlab/mmsegmentation/pull/515)) +- Add ONNX testing tools ([#498](https://github.com/open-mmlab/mmsegmentation/pull/498)) +- Replace data_dict calling 'img' key to support MMDet3D ([#514](https://github.com/open-mmlab/mmsegmentation/pull/514)) +- Support reading class_weight from file in loss function ([#513](https://github.com/open-mmlab/mmsegmentation/pull/513)) +- Make tags as comment ([#505](https://github.com/open-mmlab/mmsegmentation/pull/505)) +- Use MMCV EvalHook ([#438](https://github.com/open-mmlab/mmsegmentation/pull/438)) + +### V0.12 (04/03/2021) + +**Highlights** + +- Support FCN-Dilate 6 model. +- Support Dice Loss. 
+ +**Bug Fixes** + +- Fixed PhotoMetricDistortion Doc ([#388](https://github.com/open-mmlab/mmsegmentation/pull/388)) +- Fixed install scripts ([#399](https://github.com/open-mmlab/mmsegmentation/pull/399)) +- Fixed Dice Loss multi-class ([#417](https://github.com/open-mmlab/mmsegmentation/pull/417)) + +**New Features** + +- Support Dice Loss ([#396](https://github.com/open-mmlab/mmsegmentation/pull/396)) +- Add plot logs tool ([#426](https://github.com/open-mmlab/mmsegmentation/pull/426)) +- Add opacity option to show_result ([#425](https://github.com/open-mmlab/mmsegmentation/pull/425)) +- Speed up mIoU metric ([#430](https://github.com/open-mmlab/mmsegmentation/pull/430)) + +**Improvements** + +- Refactor unittest file structure ([#440](https://github.com/open-mmlab/mmsegmentation/pull/440)) +- Fix typos in the repo ([#449](https://github.com/open-mmlab/mmsegmentation/pull/449)) +- Include class-level metrics in the log ([#445](https://github.com/open-mmlab/mmsegmentation/pull/445)) + +### V0.11 (02/02/2021) + +**Highlights** + +- Support memory efficient test, add more UNet models. + +**Bug Fixes** + +- Fixed TTA resize scale ([#334](https://github.com/open-mmlab/mmsegmentation/pull/334)) +- Fixed CI for pip 20.3 ([#307](https://github.com/open-mmlab/mmsegmentation/pull/307)) +- Fixed ADE20k test ([#359](https://github.com/open-mmlab/mmsegmentation/pull/359)) + +**New Features** + +- Support memory efficient test ([#330](https://github.com/open-mmlab/mmsegmentation/pull/330)) +- Add more UNet benchmarks ([#324](https://github.com/open-mmlab/mmsegmentation/pull/324)) +- Support Lovasz Loss ([#351](https://github.com/open-mmlab/mmsegmentation/pull/351)) + +**Improvements** + +- Move train_cfg/test_cfg inside model ([#341](https://github.com/open-mmlab/mmsegmentation/pull/341)) + +### V0.10 (01/01/2021) + +**Highlights** + +- Support MobileNetV3, DMNet, APCNet. Add models of ResNet18V1b, ResNet18V1c, ResNet50V1b. + +**Bug Fixes** + +- Fixed CPU TTA ([#276](https://github.com/open-mmlab/mmsegmentation/pull/276)) +- Fixed CI for pip 20.3 ([#307](https://github.com/open-mmlab/mmsegmentation/pull/307)) + +**New Features** + +- Add ResNet18V1b, ResNet18V1c, ResNet50V1b, ResNet101V1b models ([#316](https://github.com/open-mmlab/mmsegmentation/pull/316)) +- Support MobileNetV3 ([#268](https://github.com/open-mmlab/mmsegmentation/pull/268)) +- Add 4 retinal vessel segmentation benchmark ([#315](https://github.com/open-mmlab/mmsegmentation/pull/315)) +- Support DMNet ([#313](https://github.com/open-mmlab/mmsegmentation/pull/313)) +- Support APCNet ([#299](https://github.com/open-mmlab/mmsegmentation/pull/299)) + +**Improvements** + +- Refactor Documentation page ([#311](https://github.com/open-mmlab/mmsegmentation/pull/311)) +- Support resize data augmentation according to original image size ([#291](https://github.com/open-mmlab/mmsegmentation/pull/291)) + +### V0.9 (30/11/2020) + +**Highlights** + +- Support 4 medical dataset, UNet and CGNet. 
+ +**New Features** + +- Support RandomRotate transform ([#215](https://github.com/open-mmlab/mmsegmentation/pull/215), [#260](https://github.com/open-mmlab/mmsegmentation/pull/260)) +- Support RGB2Gray transform ([#227](https://github.com/open-mmlab/mmsegmentation/pull/227)) +- Support Rerange transform ([#228](https://github.com/open-mmlab/mmsegmentation/pull/228)) +- Support ignore_index for BCE loss ([#210](https://github.com/open-mmlab/mmsegmentation/pull/210)) +- Add modelzoo statistics ([#263](https://github.com/open-mmlab/mmsegmentation/pull/263)) +- Support Dice evaluation metric ([#225](https://github.com/open-mmlab/mmsegmentation/pull/225)) +- Support Adjust Gamma transform ([#232](https://github.com/open-mmlab/mmsegmentation/pull/232)) +- Support CLAHE transform ([#229](https://github.com/open-mmlab/mmsegmentation/pull/229)) + +**Bug Fixes** + +- Fixed detail API link ([#267](https://github.com/open-mmlab/mmsegmentation/pull/267)) + +### V0.8 (03/11/2020) + +**Highlights** + +- Support 4 medical dataset, UNet and CGNet. + +**New Features** + +- Support customize runner ([#118](https://github.com/open-mmlab/mmsegmentation/pull/118)) +- Support UNet ([#161](https://github.com/open-mmlab/mmsegmentation/pull/162)) +- Support CHASE_DB1, DRIVE, STARE, HRD ([#203](https://github.com/open-mmlab/mmsegmentation/pull/203)) +- Support CGNet ([#223](https://github.com/open-mmlab/mmsegmentation/pull/223)) + +### V0.7 (07/10/2020) + +**Highlights** + +- Support Pascal Context dataset and customizing class dataset. + +**Bug Fixes** + +- Fixed CPU inference ([#153](https://github.com/open-mmlab/mmsegmentation/pull/153)) + +**New Features** + +- Add DeepLab OS16 models ([#154](https://github.com/open-mmlab/mmsegmentation/pull/154)) +- Support Pascal Context dataset ([#133](https://github.com/open-mmlab/mmsegmentation/pull/133)) +- Support customizing dataset classes ([#71](https://github.com/open-mmlab/mmsegmentation/pull/71)) +- Support customizing dataset palette ([#157](https://github.com/open-mmlab/mmsegmentation/pull/157)) + +**Improvements** + +- Support 4D tensor output in ONNX ([#150](https://github.com/open-mmlab/mmsegmentation/pull/150)) +- Remove redundancies in ONNX export ([#160](https://github.com/open-mmlab/mmsegmentation/pull/160)) +- Migrate to MMCV DepthwiseSeparableConv ([#158](https://github.com/open-mmlab/mmsegmentation/pull/158)) +- Migrate to MMCV collect_env ([#137](https://github.com/open-mmlab/mmsegmentation/pull/137)) +- Use img_prefix and seg_prefix for loading ([#153](https://github.com/open-mmlab/mmsegmentation/pull/153)) + +### V0.6 (10/09/2020) + +**Highlights** + +- Support new methods i.e. MobileNetV2, EMANet, DNL, PointRend, Semantic FPN, Fast-SCNN, ResNeSt. 
+ +**Bug Fixes** + +- Fixed sliding inference ONNX export ([#90](https://github.com/open-mmlab/mmsegmentation/pull/90)) + +**New Features** + +- Support MobileNet v2 ([#86](https://github.com/open-mmlab/mmsegmentation/pull/86)) +- Support EMANet ([#34](https://github.com/open-mmlab/mmsegmentation/pull/34)) +- Support DNL ([#37](https://github.com/open-mmlab/mmsegmentation/pull/37)) +- Support PointRend ([#109](https://github.com/open-mmlab/mmsegmentation/pull/109)) +- Support Semantic FPN ([#94](https://github.com/open-mmlab/mmsegmentation/pull/94)) +- Support Fast-SCNN ([#58](https://github.com/open-mmlab/mmsegmentation/pull/58)) +- Support ResNeSt backbone ([#47](https://github.com/open-mmlab/mmsegmentation/pull/47)) +- Support ONNX export (experimental) ([#12](https://github.com/open-mmlab/mmsegmentation/pull/12)) + +**Improvements** + +- Support Upsample in ONNX ([#100](https://github.com/open-mmlab/mmsegmentation/pull/100)) +- Support Windows install (experimental) ([#75](https://github.com/open-mmlab/mmsegmentation/pull/75)) +- Add more OCRNet results ([#20](https://github.com/open-mmlab/mmsegmentation/pull/20)) +- Add PyTorch 1.6 CI ([#64](https://github.com/open-mmlab/mmsegmentation/pull/64)) +- Get version and githash automatically ([#55](https://github.com/open-mmlab/mmsegmentation/pull/55)) + +### v0.5.1 (11/08/2020) + +**Highlights** + +- Support FP16 and more generalized OHEM + +**Bug Fixes** + +- Fixed Pascal VOC conversion script (#19) +- Fixed OHEM weight assign bug (#54) +- Fixed palette type when palette is not given (#27) + +**New Features** + +- Support FP16 (#21) +- Generalized OHEM (#54) + +**Improvements** + +- Add load-from flag (#33) +- Fixed training tricks doc about different learning rates of model (#26) diff --git a/docs/en/notes/faq.md b/docs/en/notes/faq.md new file mode 100644 index 0000000000..4868d138ac --- /dev/null +++ b/docs/en/notes/faq.md @@ -0,0 +1,39 @@ +# Frequently Asked Questions (FAQ) + +We list some common troubles faced by many users and their corresponding solutions here. Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. If the contents here do not cover your issue, please create an issue using the [provided templates](https://github.com/open-mmlab/mmsegmentation/blob/master/.github/ISSUE_TEMPLATE/error-report.md/) and make sure you fill in all required information in the template. + +## Installation + +The compatible MMSegmentation and MMCV versions are as below. Please install the correct version of MMCV to avoid installation issues. 
+ +| MMSegmentation version | MMCV version | MMClassification (optional) version | MMDetection (optional) version | +| :--------------------: | :-------------------------: | :---------------------------------: | :----------------------------: | +| 1.0.0rc2 | mmcv >= 2.0.0rc3 | mmcls>=1.0.0rc0 | mmdet>=3.0.0rc4 | +| 1.0.0rc1 | mmcv >= 2.0.0rc1 | mmcls>=1.0.0rc0 | Not required | +| 1.0.0rc0 | mmcv >= 2.0.0rc1 | mmcls>=1.0.0rc0 | Not required | +| master | mmcv-full>=1.4.4, \<=1.6.0 | mmcls>=0.20.1, \<=1.0.0 | Not required | +| 0.24.1 | mmcv-full>=1.4.4, \<=1.6.0 | mmcls>=0.20.1, \<=1.0.0 | Not required | +| 0.23.0 | mmcv-full>=1.4.4, \<=1.6.0 | mmcls>=0.20.1, \<=1.0.0 | Not required | +| 0.22.0 | mmcv-full>=1.4.4, \<=1.6.0 | mmcls>=0.20.1, \<=1.0.0 | Not required | +| 0.21.1 | mmcv-full>=1.4.4, \<=1.6.0 | Not required | Not required | +| 0.20.2 | mmcv-full>=1.3.13, \<=1.6.0 | Not required | Not required | +| 0.19.0 | mmcv-full>=1.3.13, \<1.3.17 | Not required | Not required | +| 0.18.0 | mmcv-full>=1.3.13, \<1.3.17 | Not required | Not required | +| 0.17.0 | mmcv-full>=1.3.7, \<1.3.17 | Not required | Not required | +| 0.16.0 | mmcv-full>=1.3.7, \<1.3.17 | Not required | Not required | +| 0.15.0 | mmcv-full>=1.3.7, \<1.3.17 | Not required | Not required | +| 0.14.1 | mmcv-full>=1.3.7, \<1.3.17 | Not required | Not required | +| 0.14.0 | mmcv-full>=1.3.1, \<1.3.2 | Not required | Not required | +| 0.13.0 | mmcv-full>=1.3.1, \<1.3.2 | Not required | Not required | +| 0.12.0 | mmcv-full>=1.1.4, \<1.3.2 | Not required | Not required | +| 0.11.0 | mmcv-full>=1.1.4, \<1.3.0 | Not required | Not required | +| 0.10.0 | mmcv-full>=1.1.4, \<1.3.0 | Not required | Not required | +| 0.9.0 | mmcv-full>=1.1.4, \<1.3.0 | Not required | Not required | +| 0.8.0 | mmcv-full>=1.1.4, \<1.2.0 | Not required | Not required | +| 0.7.0 | mmcv-full>=1.1.2, \<1.2.0 | Not required | Not required | +| 0.6.0 | mmcv-full>=1.1.2, \<1.2.0 | Not required | Not required | + +## How to know the number of GPUs needed to train the model + +- Infer from the name of the config file of the model. You can refer to the `Config Name Style` part of [Learn about Configs](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/tutorials/config.md). For example, for config file with name `segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py`, `8xb1` means training the model corresponding to it needs 8 GPUs, and the batch size of each GPU is 1. +- Infer from the log file. Open the log file of the model and search `nGPU` in the file. The number of figures following `nGPU` is the number of GPUs needed to train the model. For instance, searching for `nGPU` in the log file yields the record `nGPU 0,1,2,3,4,5,6,7`, which indicates that eight GPUs are needed to train the model. diff --git a/docs/en/overview.md b/docs/en/overview.md new file mode 100644 index 0000000000..399f343fd4 --- /dev/null +++ b/docs/en/overview.md @@ -0,0 +1,85 @@ +# Overview + +This chapter introduces you to the framework of MMSegmentation, and the basic conception of semantic segmentation. It also provides links to detailed tutorials about MMSegmentation. + +## What is semantic segmentation? + +Semantic segmentation is the task of clustering parts of an image together that belong to the same object class. +It is a form of pixel-level prediction because each pixel in an image is classified according to a category. 
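+
+In other words, for an input image of shape `(H, W, 3)` the prediction is an `(H, W)` map of integer class indices. A toy sketch (the class IDs below are made up and not tied to any particular dataset):
+
+```python
+import numpy as np
+
+# A tiny 2x3 "image" segmented into three hypothetical classes:
+# 0 = road, 1 = car, 2 = sky
+seg_map = np.array([[2, 2, 2],
+                    [0, 1, 0]])
+print(seg_map.shape)  # (2, 3): one class index per pixel
+```
+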
+Some example benchmarks for this task are [Cityscapes](https://www.cityscapes-dataset.com/benchmarks/), [PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/) and [ADE20K](https://groups.csail.mit.edu/vision/datasets/ADE20K/).
+Models are usually evaluated with the Mean Intersection-Over-Union (Mean IoU) and Pixel Accuracy metrics.
+
+## What is MMSegmentation?
+
+MMSegmentation is a toolbox that provides a framework for unified implementation and evaluation of semantic segmentation methods,
+and contains high-quality implementations of popular semantic segmentation methods and datasets.
+
+MMSeg consists of 7 main parts including apis, structures, datasets, models, engine, evaluation and visualization.
+
+- **apis** provides high-level APIs for model inference.
+
+- **structures** provides the segmentation data structure `SegDataSample`.
+
+- **datasets** supports various datasets for semantic segmentation.
+
+  - **transforms** contains a lot of useful data augmentation transforms.
+
+- **models** is the most vital part for segmentors and contains different components of a segmentor.
+
+  - **segmentors** defines all of the segmentation model classes.
+  - **data_preprocessors** preprocesses the input data of the model.
+  - **backbones** contains various backbone networks that transform an image to feature maps.
+  - **necks** contains various neck components that connect the backbone and heads.
+  - **decode_heads** contains various head components that take feature maps as input and predict segmentation results.
+  - **losses** contains various loss functions.
+
+- **engine** is the part for runtime components that extends the functionality of [MMEngine](https://github.com/open-mmlab/mmengine).
+
+  - **optimizers** provides optimizers and optimizer wrappers.
+  - **hooks** provides various hooks of the runner.
+
+- **evaluation** provides different metrics for evaluating model performance.
+
+- **visualization** is for visualizing segmentation results.
+
+## How to use this documentation
+
+Here is a detailed step-by-step guide to learn more about MMSegmentation:
+
+1. For installation instructions, please see [get_started](getting_started.md).
+
+2. For beginners, MMSegmentation is the best place to start the journey of semantic segmentation,
+   as there are many SOTA and classic segmentation [models](model_zoo.md),
+   and it is easy to carry out a segmentation task by plugging together building blocks and convenient high-level APIs.
+   Refer to the tutorials below for the basic usage of MMSegmentation:
+
+   - [Config](user_guides/1_config.md)
+   - [Dataset Preparation](user_guides/2_dataset_prepare.md)
+   - [Inference](user_guides/3_inference.md)
+   - [Train and Test](user_guides/4_train_test.md)
+
+3. If you would like to learn about the fundamental classes and features that make MMSegmentation work,
+   please refer to the tutorials below to dive deeper:
+
+   - [Data flow](advanced_guides/data_flow.md)
+   - [Structures](advanced_guides/structures.md)
+   - [Models](advanced_guides/models.md)
+   - [Datasets](advanced_guides/datasets.md)
+   - [Evaluation](advanced_guides/evaluation.md)
+
+4. MMSegmentation also provides tutorials for customization and advanced research;
+   please refer to the guides below to build your own segmentation project:
+
+   - [Add new models](advanced_guides/add_models.md)
+   - [Add new datasets](advanced_guides/add_dataset.md)
+   - [Add new transforms](advanced_guides/add_transform.md)
+   - [Customize runtime](advanced_guides/customize_runtime.md)
+
+5. 
If you are more familiar with MMSegmentation v0.x, there is documentation about migration from MMSegmentation v0.x to v1.x + + - [migration](migration/index.rst) + +## References + +- https://paperswithcode.com/task/semantic-segmentation/codeless#task-home diff --git a/docs/en/stat.py b/docs/en/stat.py new file mode 100755 index 0000000000..c458ee3c1e --- /dev/null +++ b/docs/en/stat.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# Copyright (c) OpenMMLab. All rights reserved. +import functools as func +import glob +import os.path as osp +import re + +import numpy as np + +url_prefix = 'https://github.com/open-mmlab/mmsegmentation/blob/master/' + +files = sorted(glob.glob('../../configs/*/README.md')) + +stats = [] +titles = [] +num_ckpts = 0 + +for f in files: + url = osp.dirname(f.replace('../../', url_prefix)) + + with open(f) as content_file: + content = content_file.read() + + title = content.split('\n')[0].replace('#', '').strip() + ckpts = { + x.lower().strip() + for x in re.findall(r'https?://download.*\.pth', content) + if 'mmsegmentation' in x + } + if len(ckpts) == 0: + continue + + _papertype = [ + x for x in re.findall(r'', content) + ] + assert len(_papertype) > 0 + papertype = _papertype[0] + + paper = {(papertype, title)} + + titles.append(title) + num_ckpts += len(ckpts) + statsmsg = f""" +\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts) +""" + stats.append((paper, ckpts, statsmsg)) + +allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats]) +msglist = '\n'.join(x for _, _, x in stats) + +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +# Model Zoo Statistics + +* Number of papers: {len(set(titles))} +{countstr} + +* Number of checkpoints: {num_ckpts} +{msglist} +""" + +with open('modelzoo_statistics.md', 'w') as f: + f.write(modelzoo) diff --git a/docs/en/switch_language.md b/docs/en/switch_language.md new file mode 100644 index 0000000000..80e30dc3ae --- /dev/null +++ b/docs/en/switch_language.md @@ -0,0 +1,3 @@ +## English + +## 简体中文 diff --git a/docs/en/user_guides/1_config.md b/docs/en/user_guides/1_config.md new file mode 100644 index 0000000000..54c1f36363 --- /dev/null +++ b/docs/en/user_guides/1_config.md @@ -0,0 +1,588 @@ +# Tutorial 1: Learn about Configs + +We incorporate modular and inheritance design into our config system, which is convenient to conduct various experiments. +If you wish to inspect the config file, you may run `python tools/misc/print_config.py /PATH/TO/CONFIG` to see the complete config. +You may also pass `--cfg-options xxx.yyy=zzz` to see updated config. + +## Config File Structure + +There are 4 basic component types under `config/_base_`, datasets, models, schedules, default_runtime. +Many methods could be easily constructed with one of each like DeepLabV3, PSPNet. +The configs that are composed by components from `_base_` are called _primitive_. + +For all configs under the same folder, it is recommended to have only **one** _primitive_ config. All other configs should inherit from the _primitive_ config. In this way, the maximum of inheritance level is 3. + +For easy understanding, we recommend contributors to inherit from existing methods. 
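+
+A derived config typically just sets `_base_` and overrides a handful of fields, as in this minimal sketch (the override values here are purely illustrative); the concrete DeepLabV3 case is spelled out below:
+
+```python
+_base_ = '../deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-512x1024.py'
+
+# Override only what differs from the base config.
+model = dict(
+    decode_head=dict(num_classes=2),
+    auxiliary_head=dict(num_classes=2))
+```
+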
+For example, if some modification is based on DeepLabV3, users may first inherit the basic DeepLabV3 structure by specifying `_base_ = '../deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-512x1024.py'`, then modify the necessary fields in the config files.
+
+If you are building an entirely new method that does not share the structure with any of the existing methods, you may create a folder `xxxnet` under `configs`.
+
+Please refer to [mmengine](https://mmengine.readthedocs.io/en/latest/tutorials/config.html) for detailed documentation.
+
+## Config Name Style
+
+We follow the style below to name config files. Contributors are advised to follow the same style.
+
+```text
+{algorithm name}_{model component names [component1]_[component2]_[...]}_{training settings}_{training dataset information}_{testing dataset information}
+```
+
+The file name is divided into five parts. All parts and components are connected with `_` and words of each part or component should be connected with `-`.
+
+- `{algorithm name}`: The name of the algorithm, such as `deeplabv3`, `pspnet`, etc.
+- `{model component names}`: Names of the components used in the algorithm such as backbone, head, etc. For example, `r50-d8` means using a ResNet50 backbone whose output feature map is downsampled 8 times relative to the input.
+- `{training settings}`: Information of training settings such as batch size, augmentations, loss, learning rate scheduler, and epochs/iterations. For example: `4xb4-ce-linearlr-40K` means using 4-gpus x 4-images-per-gpu, CrossEntropy loss, Linear learning rate scheduler, and training for 40K iterations.
+  Some abbreviations:
+  - `{gpu x batch_per_gpu}`: GPUs and samples per GPU. `bN` indicates N batch size per GPU. E.g. `8xb2` is the short form of 8-gpus x 2-images-per-gpu. And `4xb4` is used by default if not mentioned.
+  - `{schedule}`: training schedule, options are `20k`, `40k`, etc. `20k` and `40k` mean 20000 iterations and 40000 iterations respectively.
+- `{training dataset information}`: Training dataset names like `cityscapes`, `ade20k`, etc., and input resolutions. For example: `cityscapes-768x768` means training on the `cityscapes` dataset with an input shape of `768x768`.
+- `{testing dataset information}` (optional): Testing dataset name for models trained on one dataset but tested on another. If not mentioned, it means the model was trained and tested on the same dataset type.
+
+## An Example of PSPNet
+
+To help the users have a basic idea of a complete config and the modules in a modern semantic segmentation system,
+we make brief comments on the config of PSPNet using ResNet50V1c as follows.
+For more detailed usage and the corresponding alternatives for each module, please refer to the API documentation.
+
+```python
+_base_ = [
+    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
+    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
+]  # base config files on which this new config is built.
+crop_size = (512, 1024)
+data_preprocessor = dict(size=crop_size)
+model = dict(data_preprocessor=data_preprocessor)
+```
+
+`_base_/models/pspnet_r50-d8.py` is a basic model cfg file for PSPNet using ResNet50V1c.
+
+```python
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)  # Segmentation usually uses SyncBN
+data_preprocessor = dict(  # The config of data preprocessor, usually includes image normalization and augmentation.
+    type='SegDataPreProcessor',  # The type of data preprocessor. 
+ mean=[123.675, 116.28, 103.53], # Mean values used for normalizing the input images. + std=[58.395, 57.12, 57.375], # Standard variance used for normalizing the input images. + bgr_to_rgb=True, # Whether to convert image from BGR to RGB. + pad_val=0, # Padding value of image. + seg_pad_val=255) # Padding value of segmentation map. +model = dict( + type='EncoderDecoder', # Name of segmentor + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', # The ImageNet pretrained backbone to be loaded + backbone=dict( + type='ResNetV1c', # The type of backbone. Please refer to mmseg/models/backbones/resnet.py for details. + depth=50, # Depth of backbone. Normally 50, 101 are used. + num_stages=4, # Number of stages of backbone. + out_indices=(0, 1, 2, 3), # The index of output feature maps produced in each stages. + dilations=(1, 1, 2, 4), # The dilation rate of each layer. + strides=(1, 2, 1, 1), # The stride of each layer. + norm_cfg=norm_cfg, # The configuration of norm layer. + norm_eval=False, # Whether to freeze the statistics in BN + style='pytorch', # The style of backbone, 'pytorch' means that stride 2 layers are in 3x3 conv, 'caffe' means stride 2 layers are in 1x1 convs. + contract_dilation=True), # When dilation > 1, whether contract first layer of dilation. + decode_head=dict( + type='PSPHead', # Type of decode head. Please refer to mmseg/models/decode_heads for available options. + in_channels=2048, # Input channel of decode head. + in_index=3, # The index of feature map to select. + channels=512, # The intermediate channels of decode head. + pool_scales=(1, 2, 3, 6), # The avg pooling scales of PSPHead. Please refer to paper for details. + dropout_ratio=0.1, # The dropout ratio before final classification layer. + num_classes=19, # Number of segmentation class. Usually 19 for cityscapes, 21 for VOC, 150 for ADE20k. + norm_cfg=norm_cfg, # The configuration of norm layer. + align_corners=False, # The align_corners argument for resize in decoding. + loss_decode=dict( # Config of loss function for the decode_head. + type='CrossEntropyLoss', # Type of loss used for segmentation. + use_sigmoid=False, # Whether use sigmoid activation for segmentation. + loss_weight=1.0)), # Loss weight of decode_head. + auxiliary_head=dict( + type='FCNHead', # Type of auxiliary head. Please refer to mmseg/models/decode_heads for available options. + in_channels=1024, # Input channel of auxiliary head. + in_index=2, # The index of feature map to select. + channels=256, # The intermediate channels of decode head. + num_convs=1, # Number of convs in FCNHead. It is usually 1 in auxiliary head. + concat_input=False, # Whether concat output of convs with input before classification layer. + dropout_ratio=0.1, # The dropout ratio before final classification layer. + num_classes=19, # Number of segmentation class. Usually 19 for cityscapes, 21 for VOC, 150 for ADE20k. + norm_cfg=norm_cfg, # The configuration of norm layer. + align_corners=False, # The align_corners argument for resize in decoding. + loss_decode=dict( # Config of loss function for the auxiliary_head. + type='CrossEntropyLoss', # Type of loss used for segmentation. + use_sigmoid=False, # Whether use sigmoid activation for segmentation. + loss_weight=0.4)), # Loss weight of auxiliary_head. + # model training and testing settings + train_cfg=dict(), # train_cfg is just a place holder for now. + test_cfg=dict(mode='whole')) # The test mode, options are 'whole' and 'slide'. 'whole': whole image fully-convolutional test. 
'slide': sliding crop window on the image. +``` + +`_base_/datasets/cityscapes.py` is the configuration file of the dataset + +```python +# dataset settings +dataset_type = 'CityscapesDataset' # Dataset type, this will be used to define the dataset. +data_root = 'data/cityscapes/' # Root path of data. +crop_size = (512, 1024) # The crop size during training. +train_pipeline = [ # Training pipeline. + dict(type='LoadImageFromFile'), # First pipeline to load images from file path. + dict(type='LoadAnnotations'), # Second pipeline to load annotations for current image. + dict(type='RandomResize', # Augmentation pipeline that resize the images and their annotations. + scale=(2048, 1024), # The scale of image. + ratio_range=(0.5, 2.0), # The augmented scale range as ratio. + keep_ratio=True), # Whether to keep the aspect ratio when resizing the image. + dict(type='RandomCrop', # Augmentation pipeline that randomly crop a patch from current image. + crop_size=crop_size, # The crop size of patch. + cat_max_ratio=0.75), # The max area ratio that could be occupied by single category. + dict(type='RandomFlip', # Augmentation pipeline that flip the images and their annotations + prob=0.5), # The ratio or probability to flip + dict(type='PhotoMetricDistortion'), # Augmentation pipeline that distort current image with several photo metric methods. + dict(type='PackSegInputs') # Pack the inputs data for the semantic segmentation. +] +test_pipeline = [ + dict(type='LoadImageFromFile'), # First pipeline to load images from file path + dict(type='Resize', # Use resize augmentation + scale=(2048, 1024), # Images scales for resizing. + keep_ratio=True), # Whether to keep the aspect ratio when resizing the image. + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), # Load annotations for semantic segmentation provided by dataset. + dict(type='PackSegInputs') # Pack the inputs data for the semantic segmentation. +] +train_dataloader = dict( # Train dataloader config + batch_size=2, # Batch size of a single GPU + num_workers=2, # Worker to pre-fetch data for each single GPU + persistent_workers=True, # Shut down the worker processes after an epoch end, which can accelerate training speed. + sampler=dict(type='InfiniteSampler', shuffle=True), # Randomly shuffle during training. + dataset=dict( # Train dataset config + type=dataset_type, # Type of dataset, refer to mmseg/datasets/ for details. + data_root=data_root, # The root of dataset. + data_prefix=dict( + img_path='leftImg8bit/train', seg_map_path='gtFine/train'), # Prefix for training data. + pipeline=train_pipeline)) # Processing pipeline. This is passed by the train_pipeline created before. +val_dataloader = dict( + batch_size=1, # Batch size of a single GPU + num_workers=4, # Worker to pre-fetch data for each single GPU + persistent_workers=True, # Shut down the worker processes after an epoch end, which can accelerate testing speed. + sampler=dict(type='DefaultSampler', shuffle=False), # Not shuffle during validation and testing. + dataset=dict( # Test dataset config + type=dataset_type, # Type of dataset, refer to mmseg/datasets/ for details. + data_root=data_root, # The root of dataset. + data_prefix=dict( + img_path='leftImg8bit/val', seg_map_path='gtFine/val'), # Prefix for testing data. + pipeline=test_pipeline)) # Processing pipeline. This is passed by the test_pipeline created before. +test_dataloader = val_dataloader +# The metric to measure the accuracy. 
Here, we use IoUMetric. +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator +``` + +`_base_/schedules/schedule_40k.py` + +```python +# optimizer +optimizer = dict(type='SGD', # Type of optimizers, refer to https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/optimizer/default_constructor.py for more details + lr=0.01, # Learning rate of optimizers, see detail usages of the parameters in the documentation of PyTorch + momentum=0.9, # Momentum + weight_decay=0.0005) # Weight decay of SGD +optim_wrapper = dict(type='OptimWrapper', # Optimizer wrapper provides a common interface for updating parameters. + optimizer=optimizer, # Optimizer used to update model parameters. + clip_grad=None) # If ``clip_grad`` is not None, it will be the arguments of ``torch.nn.utils.clip_grad``. +# learning policy +param_scheduler = [ + dict( + type='PolyLR', # The policy of scheduler, also support Step, CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/scheduler/lr_scheduler.py + eta_min=1e-4, # Minimum learning rate at the end of scheduling. + power=0.9, # The power of polynomial decay. + begin=0, # Step at which to start updating the parameters. + end=40000, # Step at which to stop updating the parameters. + by_epoch=False) # Whether count by epoch or not. +] +# training schedule for 40k iteration +train_cfg = dict(type='IterBasedTrainLoop', max_iters=40000, val_interval=4000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +# default hooks +default_hooks = dict( + timer=dict(type='IterTimerHook'), # Log the time spent during iteration. + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), # Collect and write logs from different components of ``Runner``. + param_scheduler=dict(type='ParamSchedulerHook'), # update some hyper-parameters in optimizer, e.g., learning rate. + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=4000), # Save checkpoints periodically. + sampler_seed=dict(type='DistSamplerSeedHook')) # Data-loading sampler for distributed training. +``` + +in `_base_/default_runtime.py` + +```python +# Set the default scope of the registry to mmseg. +default_scope = 'mmseg' +# environment +env_cfg = dict( + cudnn_benchmark=True, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) +log_level = 'INFO' +log_processor = dict(by_epoch=False) +load_from = None # Load checkpoint from file. +resume = False # Whether to resume from existed model. 
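+# Note: `load_from` only loads model weights. With `resume=True`, MMEngine additionally restores
+# the optimizer state and iteration count, resuming from `load_from` if it is set, otherwise from
+# the latest checkpoint found in the work directory.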
+``` + +These are all the configs for training and testing PSPNet, to load and parse them, we can use [Config](https://mmengine.readthedocs.io/en/latest/tutorials/config.html) implemented in [MMEngine](https://github.com/open-mmlab/mmengine) + +```python +from mmengine.config import Config + +cfg = Config.fromfile('configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py') +print(cfg.train_dataloader) +``` + +```shell +{'batch_size': 2, + 'num_workers': 2, + 'persistent_workers': True, + 'sampler': {'type': 'InfiniteSampler', 'shuffle': True}, + 'dataset': {'type': 'CityscapesDataset', + 'data_root': 'data/cityscapes/', + 'data_prefix': {'img_path': 'leftImg8bit/train', + 'seg_map_path': 'gtFine/train'}, + 'pipeline': [{'type': 'LoadImageFromFile'}, + {'type': 'LoadAnnotations'}, + {'type': 'RandomResize', + 'scale': (2048, 1024), + 'ratio_range': (0.5, 2.0), + 'keep_ratio': True}, + {'type': 'RandomCrop', 'crop_size': (512, 1024), 'cat_max_ratio': 0.75}, + {'type': 'RandomFlip', 'prob': 0.5}, + {'type': 'PhotoMetricDistortion'}, + {'type': 'PackSegInputs'}]}} +``` + +`cfg` is an instance of `mmengine.config.Config`, its interface is the same as a dict object and also allows access config values as attributes. See [config tutorial](https://mmengine.readthedocs.io/en/latest/tutorials/config.html) in [MMEngine](https://github.com/open-mmlab/mmengine) for more information. + +## FAQ + +### Ignore some fields in the base configs + +Sometimes, you may set `_delete_=True` to ignore some of the fields in base configs. +See [config tutorial](https://mmengine.readthedocs.io/en/latest/tutorials/config.html) in [MMEngine](https://github.com/open-mmlab/mmengine) for simple illustration. + +In MMSegmentation, for example, if you would like to modify the backbone of PSPNet with the following config file `pspnet.py`: + +```python +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='torchvision://resnet50', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='PSPHead', + in_channels=2048, + in_index=3, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) +``` + +Load and parse the config file `pspnet.py` in the code as follows: + +```python +from mmengine.config import Config + +cfg = Config.fromfile('pspnet.py') +print(cfg.model) +``` + +```shell +{'type': 'EncoderDecoder', + 'pretrained': 'torchvision://resnet50', + 'backbone': {'type': 'ResNetV1c', + 'depth': 50, + 'num_stages': 4, + 'out_indices': (0, 1, 2, 3), + 'dilations': (1, 1, 2, 4), + 'strides': (1, 2, 1, 1), + 'norm_cfg': {'type': 'SyncBN', 'requires_grad': True}, + 'norm_eval': False, + 'style': 'pytorch', + 'contract_dilation': True}, + 'decode_head': {'type': 'PSPHead', + 'in_channels': 2048, + 'in_index': 3, + 'channels': 512, + 'pool_scales': (1, 2, 3, 6), + 'dropout_ratio': 0.1, + 'num_classes': 19, + 'norm_cfg': {'type': 'SyncBN', 'requires_grad': True}, + 'align_corners': False, + 'loss_decode': {'type': 'CrossEntropyLoss', + 'use_sigmoid': False, + 'loss_weight': 1.0}}} +``` + +`ResNet` and `HRNet` use different keywords to construct, write a new config file `hrnet.py` as follows: + +```python +_base_ = 'pspnet.py' 
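+# `_delete_=True` inside `backbone` below replaces the whole `backbone` dict from the base config
+# instead of merging into it, so ResNet-specific keys such as `depth` do not leak into the HRNet config.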
+norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w32', + backbone=dict( + _delete_=True, + type='HRNet', + norm_cfg=norm_cfg, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))))) +``` + +Load and parse the config file `hrnet.py` in the code as follows: + +```python +from mmengine.config import Config +cfg = Config.fromfile('hrnet.py') +print(cfg.model) +``` + +```shell +{'type': 'EncoderDecoder', + 'pretrained': 'open-mmlab://msra/hrnetv2_w32', + 'backbone': {'type': 'HRNet', + 'norm_cfg': {'type': 'SyncBN', 'requires_grad': True}, + 'extra': {'stage1': {'num_modules': 1, + 'num_branches': 1, + 'block': 'BOTTLENECK', + 'num_blocks': (4,), + 'num_channels': (64,)}, + 'stage2': {'num_modules': 1, + 'num_branches': 2, + 'block': 'BASIC', + 'num_blocks': (4, 4), + 'num_channels': (32, 64)}, + 'stage3': {'num_modules': 4, + 'num_branches': 3, + 'block': 'BASIC', + 'num_blocks': (4, 4, 4), + 'num_channels': (32, 64, 128)}, + 'stage4': {'num_modules': 3, + 'num_branches': 4, + 'block': 'BASIC', + 'num_blocks': (4, 4, 4, 4), + 'num_channels': (32, 64, 128, 256)}}}, + 'decode_head': {'type': 'PSPHead', + 'in_channels': 2048, + 'in_index': 3, + 'channels': 512, + 'pool_scales': (1, 2, 3, 6), + 'dropout_ratio': 0.1, + 'num_classes': 19, + 'norm_cfg': {'type': 'SyncBN', 'requires_grad': True}, + 'align_corners': False, + 'loss_decode': {'type': 'CrossEntropyLoss', + 'use_sigmoid': False, + 'loss_weight': 1.0}}} +``` + +The `_delete_=True` would replace all old keys in `backbone` field with new keys. + +### Use intermediate variables in configs + +Some intermediate variables are used in the configs files, like `train_pipeline`/`test_pipeline` in datasets. +It's worth noting that when modifying intermediate variables in the children configs, user need to pass the intermediate variables into corresponding fields again. +For example, we would like to change multi scale strategy to train/test a PSPNet. `train_pipeline`/`test_pipeline` are intermediate variable we would like to modify. 
+
+```python
+_base_ = '../pspnet/pspnet_r50-d8_4xb4-40k_cityscapes-512x1024.py'
+dataset_type = 'CityscapesDataset'
+data_root = 'data/cityscapes/'
+crop_size = (512, 1024)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations'),
+    dict(type='RandomResize',
+         scale=(2048, 1024),
+         ratio_range=(1., 2.),
+         keep_ratio=True),
+    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+    dict(type='RandomFlip', prob=0.5),
+    dict(type='PhotoMetricDistortion'),
+    dict(type='PackSegInputs'),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='Resize',
+         scale=(2048, 1024),
+         keep_ratio=True),
+    dict(type='LoadAnnotations'),
+    dict(type='PackSegInputs')
+]
+train_dataset = dict(
+    type=dataset_type,
+    data_root=data_root,
+    data_prefix=dict(
+        img_path='leftImg8bit/train', seg_map_path='gtFine/train'),
+    pipeline=train_pipeline)
+test_dataset = dict(
+    type=dataset_type,
+    data_root=data_root,
+    data_prefix=dict(
+        img_path='leftImg8bit/val', seg_map_path='gtFine/val'),
+    pipeline=test_pipeline)
+train_dataloader = dict(dataset=train_dataset)
+val_dataloader = dict(dataset=test_dataset)
+test_dataloader = val_dataloader
+```
+
+We first define the new `train_pipeline`/`test_pipeline` and pass them into `dataset`.
+
+Similarly, if we would like to switch from `SyncBN` to `BN` or `MMSyncBN`, we need to substitute every `norm_cfg` in the config.
+
+```python
+_base_ = '../pspnet/pspnet_r50-d8_4xb4-40k_cityscapes-512x1024.py'
+norm_cfg = dict(type='BN', requires_grad=True)
+model = dict(
+    backbone=dict(norm_cfg=norm_cfg),
+    decode_head=dict(norm_cfg=norm_cfg),
+    auxiliary_head=dict(norm_cfg=norm_cfg))
+```
+
+## Modify config through script arguments
+
+In the [training script](https://github.com/open-mmlab/mmsegmentation/blob/1.x/tools/train.py) and the [testing script](https://github.com/open-mmlab/mmsegmentation/blob/1.x/tools/test.py), we support the script argument `--cfg-options`, which lets users override some settings in the used config; key-value pairs in `xxx=yyy` format will be merged into the config file.
+
+For example, this is a simplified script `demo_script.py`:
+
+```python
+import argparse
+
+from mmengine.config import Config, DictAction
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Script Example')
+    parser.add_argument('config', help='train config file path')
+    parser.add_argument(
+        '--cfg-options',
+        nargs='+',
+        action=DictAction,
+        help='override some settings in the used config, the key-value pair '
+        'in xxx=yyy format will be merged into config file. If the value to '
+        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
+        'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + print(cfg) + +if __name__ == '__main__': + main() +``` + +A example config file `demo_config.py` as follows: + +```python +backbone = dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_eval=False, + style='pytorch', + contract_dilation=True) +``` + +Run `demo_script.py`: + +```shell +python demo_script.py demo_config.py +``` + +```shell +Config (path: demo_config.py): {'backbone': {'type': 'ResNetV1c', 'depth': 50, 'num_stages': 4, 'out_indices': (0, 1, 2, 3), 'dilations': (1, 1, 2, 4), 'strides': (1, 2, 1, 1), 'norm_eval': False, 'style': 'pytorch', 'contract_dilation': True}} +``` + +Modify config through script arguments: + +```shell +python demo_script.py demo_config.py --cfg-options backbone.depth=101 +``` + +```shell +Config (path: demo_config.py): {'backbone': {'type': 'ResNetV1c', 'depth': 101, 'num_stages': 4, 'out_indices': (0, 1, 2, 3), 'dilations': (1, 1, 2, 4), 'strides': (1, 2, 1, 1), 'norm_eval': False, 'style': 'pytorch', 'contract_dilation': True}} +``` + +- Update values of list/tuples. + + If the value to be updated is a list or a tuple. For example, the config file `demo_config.py` sets `strides=(1, 2, 1, 1)` in `backbone`. + If you want to change this key, you may specify in two ways: + + 1. `--cfg-options backbone.strides="(1, 1, 1, 1)"`. Note that the quotation mark " is necessary to support list/tuple data types. + + ```shell + python demo_script.py demo_config.py --cfg-options backbone.strides="(1, 1, 1, 1)" + ``` + + ```shell + Config (path: demo_config.py): {'backbone': {'type': 'ResNetV1c', 'depth': 50, 'num_stages': 4, 'out_indices': (0, 1, 2, 3), 'dilations': (1, 1, 2, 4), 'strides': (1, 1, 1, 1), 'norm_eval': False, 'style': 'pytorch', 'contract_dilation': True}} + ``` + + 2. `--cfg-options backbone.strides=1,1,1,1`. Note that **NO** white space is allowed in the specified value. + In addition, if the original type is tuple, it will be automatically converted to list after this way. + + ```shell + python demo_script.py demo_config.py --cfg-options backbone.strides=1,1,1,1 + ``` + + ```shell + Config (path: demo_config.py): {'backbone': {'type': 'ResNetV1c', 'depth': 50, 'num_stages': 4, 'out_indices': (0, 1, 2, 3), 'dilations': (1, 1, 2, 4), 'strides': [1, 1, 1, 1], 'norm_eval': False, 'style': 'pytorch', 'contract_dilation': True}} + ``` + +```{note} + This modification method only supports modifying configuration items of string, int, float, boolean, None, list and tuple types. + More specifically, for list and tuple types, the elements inside them must also be one of the above seven types. +``` diff --git a/docs/en/user_guides/2_dataset_prepare.md b/docs/en/user_guides/2_dataset_prepare.md new file mode 100644 index 0000000000..5c24119ed7 --- /dev/null +++ b/docs/en/user_guides/2_dataset_prepare.md @@ -0,0 +1,500 @@ +## Prepare datasets + +It is recommended to symlink the dataset root to `$MMSEGMENTATION/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. 
+ +```none +mmsegmentation +├── mmseg +├── tools +├── configs +├── data +│ ├── cityscapes +│ │ ├── leftImg8bit +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── gtFine +│ │ │ ├── train +│ │ │ ├── val +│ ├── VOCdevkit +│ │ ├── VOC2012 +│ │ │ ├── JPEGImages +│ │ │ ├── SegmentationClass +│ │ │ ├── ImageSets +│ │ │ │ ├── Segmentation +│ │ ├── VOC2010 +│ │ │ ├── JPEGImages +│ │ │ ├── SegmentationClassContext +│ │ │ ├── ImageSets +│ │ │ │ ├── SegmentationContext +│ │ │ │ │ ├── train.txt +│ │ │ │ │ ├── val.txt +│ │ │ ├── trainval_merged.json +│ │ ├── VOCaug +│ │ │ ├── dataset +│ │ │ │ ├── cls +│ ├── ade +│ │ ├── ADEChallengeData2016 +│ │ │ ├── annotations +│ │ │ │ ├── training +│ │ │ │ ├── validation +│ │ │ ├── images +│ │ │ │ ├── training +│ │ │ │ ├── validation +│ ├── coco_stuff10k +│ │ ├── images +│ │ │ ├── train2014 +│ │ │ ├── test2014 +│ │ ├── annotations +│ │ │ ├── train2014 +│ │ │ ├── test2014 +│ │ ├── imagesLists +│ │ │ ├── train.txt +│ │ │ ├── test.txt +│ │ │ ├── all.txt +│ ├── coco_stuff164k +│ │ ├── images +│ │ │ ├── train2017 +│ │ │ ├── val2017 +│ │ ├── annotations +│ │ │ ├── train2017 +│ │ │ ├── val2017 +│ ├── CHASE_DB1 +│ │ ├── images +│ │ │ ├── training +│ │ │ ├── validation +│ │ ├── annotations +│ │ │ ├── training +│ │ │ ├── validation +│ ├── DRIVE +│ │ ├── images +│ │ │ ├── training +│ │ │ ├── validation +│ │ ├── annotations +│ │ │ ├── training +│ │ │ ├── validation +│ ├── HRF +│ │ ├── images +│ │ │ ├── training +│ │ │ ├── validation +│ │ ├── annotations +│ │ │ ├── training +│ │ │ ├── validation +│ ├── STARE +│ │ ├── images +│ │ │ ├── training +│ │ │ ├── validation +│ │ ├── annotations +│ │ │ ├── training +│ │ │ ├── validation +| ├── dark_zurich +| │   ├── gps +| │   │   ├── val +| │   │   └── val_ref +| │   ├── gt +| │   │   └── val +| │   ├── LICENSE.txt +| │   ├── lists_file_names +| │   │   ├── val_filenames.txt +| │   │   └── val_ref_filenames.txt +| │   ├── README.md +| │   └── rgb_anon +| │   | ├── val +| │   | └── val_ref +| ├── NighttimeDrivingTest +| | ├── gtCoarse_daytime_trainvaltest +| | │   └── test +| | │   └── night +| | └── leftImg8bit +| | | └── test +| | | └── night +│ ├── loveDA +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ ├── val +│ │ │ ├── test +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ ├── val +│ ├── potsdam +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ ├── val +│ ├── vaihingen +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ ├── val +│ ├── iSAID +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ ├── val +│ │ │ ├── test +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ ├── val +│ ├── synapse +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ ├── val +``` + +### Cityscapes + +The data could be found [here](https://www.cityscapes-dataset.com/downloads/) after registration. + +By convention, `**labelTrainIds.png` are used for cityscapes training. +We provided a [scripts](https://github.com/open-mmlab/mmsegmentation/blob/1.x/tools/dataset_converters/cityscapes.py) based on [cityscapesscripts](https://github.com/mcordts/cityscapesScripts) +to generate `**labelTrainIds.png`. + +```shell +# --nproc means 8 process for conversion, which could be omitted as well. +python tools/dataset_converters/cityscapes.py data/cityscapes --nproc 8 +``` + +### Pascal VOC + +Pascal VOC 2012 could be downloaded from [here](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar). 
+Beside, most recent works on Pascal VOC dataset usually exploit extra augmentation data, which could be found [here](http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz). + +If you would like to use augmented VOC dataset, please run following command to convert augmentation annotations into proper format. + +```shell +# --nproc means 8 process for conversion, which could be omitted as well. +python tools/dataset_converters/voc_aug.py data/VOCdevkit data/VOCdevkit/VOCaug --nproc 8 +``` + +Please refer to [concat dataset](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/en/advanced_guides/datasets.md) for details about how to concatenate them and train them together. + +### ADE20K + +The training and validation set of ADE20K could be download from this [link](http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip). +We may also download test set from [here](http://data.csail.mit.edu/places/ADEchallenge/release_test.zip). + +### Pascal Context + +The training and validation set of Pascal Context could be download from [here](http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar). You may also download test set from [here](http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2010test.tar) after registration. + +To split the training and validation set from original dataset, you may download trainval_merged.json from [here](https://codalabuser.blob.core.windows.net/public/trainval_merged.json). + +If you would like to use Pascal Context dataset, please install [Detail](https://github.com/zhanghang1989/detail-api) and then run the following command to convert annotations into proper format. + +```shell +python tools/dataset_converters/pascal_context.py data/VOCdevkit data/VOCdevkit/VOC2010/trainval_merged.json +``` + +### COCO Stuff 10k + +The data could be downloaded [here](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/cocostuff-10k-v1.1.zip) by wget. + +For COCO Stuff 10k dataset, please run the following commands to download and convert the dataset. + +```shell +# download +mkdir coco_stuff10k && cd coco_stuff10k +wget http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/cocostuff-10k-v1.1.zip + +# unzip +unzip cocostuff-10k-v1.1.zip + +# --nproc means 8 process for conversion, which could be omitted as well. +python tools/dataset_converters/coco_stuff10k.py /path/to/coco_stuff10k --nproc 8 +``` + +By convention, mask labels in `/path/to/coco_stuff164k/annotations/*2014/*_labelTrainIds.png` are used for COCO Stuff 10k training and testing. + +### COCO Stuff 164k + +For COCO Stuff 164k dataset, please run the following commands to download and convert the augmented dataset. + +```shell +# download +mkdir coco_stuff164k && cd coco_stuff164k +wget http://images.cocodataset.org/zips/train2017.zip +wget http://images.cocodataset.org/zips/val2017.zip +wget http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip + +# unzip +unzip train2017.zip -d images/ +unzip val2017.zip -d images/ +unzip stuffthingmaps_trainval2017.zip -d annotations/ + +# --nproc means 8 process for conversion, which could be omitted as well. +python tools/dataset_converters/coco_stuff164k.py /path/to/coco_stuff164k --nproc 8 +``` + +By convention, mask labels in `/path/to/coco_stuff164k/annotations/*2017/*_labelTrainIds.png` are used for COCO Stuff 164k training and testing. 
+ +The details of this dataset could be found at [here](https://github.com/nightrome/cocostuff#downloads). + +### CHASE DB1 + +The training and validation set of CHASE DB1 could be download from [here](https://staffnet.kingston.ac.uk/~ku15565/CHASE_DB1/assets/CHASEDB1.zip). + +To convert CHASE DB1 dataset to MMSegmentation format, you should run the following command: + +```shell +python tools/dataset_converters/chase_db1.py /path/to/CHASEDB1.zip +``` + +The script will make directory structure automatically. + +### DRIVE + +The training and validation set of DRIVE could be download from [here](https://drive.grand-challenge.org/). Before that, you should register an account. Currently '1st_manual' is not provided officially. + +To convert DRIVE dataset to MMSegmentation format, you should run the following command: + +```shell +python tools/dataset_converters/drive.py /path/to/training.zip /path/to/test.zip +``` + +The script will make directory structure automatically. + +### HRF + +First, download [healthy.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/healthy.zip), [glaucoma.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/glaucoma.zip), [diabetic_retinopathy.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/diabetic_retinopathy.zip), [healthy_manualsegm.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/healthy_manualsegm.zip), [glaucoma_manualsegm.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/glaucoma_manualsegm.zip) and [diabetic_retinopathy_manualsegm.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/diabetic_retinopathy_manualsegm.zip). + +To convert HRF dataset to MMSegmentation format, you should run the following command: + +```shell +python tools/dataset_converters/hrf.py /path/to/healthy.zip /path/to/healthy_manualsegm.zip /path/to/glaucoma.zip /path/to/glaucoma_manualsegm.zip /path/to/diabetic_retinopathy.zip /path/to/diabetic_retinopathy_manualsegm.zip +``` + +The script will make directory structure automatically. + +### STARE + +First, download [stare-images.tar](http://cecas.clemson.edu/~ahoover/stare/probing/stare-images.tar), [labels-ah.tar](http://cecas.clemson.edu/~ahoover/stare/probing/labels-ah.tar) and [labels-vk.tar](http://cecas.clemson.edu/~ahoover/stare/probing/labels-vk.tar). + +To convert STARE dataset to MMSegmentation format, you should run the following command: + +```shell +python tools/dataset_converters/stare.py /path/to/stare-images.tar /path/to/labels-ah.tar /path/to/labels-vk.tar +``` + +The script will make directory structure automatically. + +### Dark Zurich + +Since we only support test models on this dataset, you may only download [the validation set](https://data.vision.ee.ethz.ch/csakarid/shared/GCMA_UIoU/Dark_Zurich_val_anon.zip). + +### Nighttime Driving + +Since we only support test models on this dataset, you may only download [the test set](http://data.vision.ee.ethz.ch/daid/NighttimeDriving/NighttimeDrivingTest.zip). + +### LoveDA + +The data could be downloaded from Google Drive [here](https://drive.google.com/drive/folders/1ibYV0qwn4yuuh068Rnc-w4tPi0U0c-ti?usp=sharing). 
+ +Or it can be downloaded from [zenodo](https://zenodo.org/record/5706578#.YZvN7SYRXdF), you should run the following command: + +```shell +# Download Train.zip +wget https://zenodo.org/record/5706578/files/Train.zip +# Download Val.zip +wget https://zenodo.org/record/5706578/files/Val.zip +# Download Test.zip +wget https://zenodo.org/record/5706578/files/Test.zip +``` + +For LoveDA dataset, please run the following command to download and re-organize the dataset. + +```shell +python tools/dataset_converters/loveda.py /path/to/loveDA +``` + +Using trained model to predict test set of LoveDA and submit it to server can be found [here](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/en/user_guides/3_inference.md). + +More details about LoveDA can be found [here](https://github.com/Junjue-Wang/LoveDA). + +### ISPRS Potsdam + +The [Potsdam](https://www2.isprs.org/commissions/comm2/wg4/benchmark/2d-sem-label-potsdam/) +dataset is for urban semantic segmentation used in the 2D Semantic Labeling Contest - Potsdam. + +The dataset can be requested at the challenge [homepage](https://www2.isprs.org/commissions/comm2/wg4/benchmark/data-request-form/). +The '2_Ortho_RGB.zip' and '5_Labels_all_noBoundary.zip' are required. + +For Potsdam dataset, please run the following command to download and re-organize the dataset. + +```shell +python tools/dataset_converters/potsdam.py /path/to/potsdam +``` + +In our default setting, it will generate 3456 images for training and 2016 images for validation. + +### ISPRS Vaihingen + +The [Vaihingen](https://www2.isprs.org/commissions/comm2/wg4/benchmark/2d-sem-label-vaihingen/) +dataset is for urban semantic segmentation used in the 2D Semantic Labeling Contest - Vaihingen. + +The dataset can be requested at the challenge [homepage](https://www2.isprs.org/commissions/comm2/wg4/benchmark/data-request-form/). +The 'ISPRS_semantic_labeling_Vaihingen.zip' and 'ISPRS_semantic_labeling_Vaihingen_ground_truth_eroded_COMPLETE.zip' are required. + +For Vaihingen dataset, please run the following command to download and re-organize the dataset. + +```shell +python tools/dataset_converters/vaihingen.py /path/to/vaihingen +``` + +In our default setting (`clip_size` =512, `stride_size`=256), it will generate 344 images for training and 398 images for validation. + +### iSAID + +The data images could be download from [DOTA-v1.0](https://captain-whu.github.io/DOTA/dataset.html) (train/val/test) + +The data annotations could be download from [iSAID](https://captain-whu.github.io/iSAID/dataset.html) (train/val) + +The dataset is a Large-scale Dataset for Instance Segmentation (also have segmantic segmentation) in Aerial Images. + +You may need to follow the following structure for dataset preparation after downloading iSAID dataset. + +```none +├── data +│ ├── iSAID +│ │ ├── train +│ │ │ ├── images +│ │ │ │ ├── part1.zip +│ │ │ │ ├── part2.zip +│ │ │ │ ├── part3.zip +│ │ │ ├── Semantic_masks +│ │ │ │ ├── images.zip +│ │ ├── val +│ │ │ ├── images +│ │ │ │ ├── part1.zip +│ │ │ ├── Semantic_masks +│ │ │ │ ├── images.zip +│ │ ├── test +│ │ │ ├── images +│ │ │ │ ├── part1.zip +│ │ │ │ ├── part2.zip +``` + +```shell +python tools/dataset_converters/isaid.py /path/to/iSAID +``` + +In our default setting (`patch_width`=896, `patch_height`=896, `overlap_area`=384), it will generate 33978 images for training and 11644 images for validation. + +## LIP(Look Into Person) dataset + +This dataset could be download from [this page](https://lip.sysuhcp.com/overview.php). 
+ +Please run the following commands to unzip dataset. + +```shell +unzip LIP.zip +cd LIP +unzip TrainVal_images.zip +unzip TrainVal_parsing_annotations.zip +cd TrainVal_parsing_annotations +unzip TrainVal_parsing_annotations.zip +mv train_segmentations ../ +mv val_segmentations ../ +cd .. +``` + +The contents of LIP datasets include: + +```none +├── data +│ ├── LIP +│ │ ├── train_images +│   │ │ ├── 1000_1234574.jpg +│   │ │ ├── ... +│ │ ├── train_segmentations +│   │ │ ├── 1000_1234574.png +│   │ │ ├── ... +│ │ ├── val_images +│   │ │ ├── 100034_483681.jpg +│   │ │ ├── ... +│ │ ├── val_segmentations +│   │ │ ├── 100034_483681.png +│   │ │ ├── ... +``` + +## Synapse dataset + +This dataset could be download from [this page](https://www.synapse.org/#!Synapse:syn3193805/wiki/) + +Please run the following command to prepare the dataset. + +```shell +unzip RawData.zip +cd ./RawData/Training +``` + +Then create train.txt and val.txt to split dataset. + +According to TransUnet, the following is the data set division. + +train.txt + +```none +img0005.nii.gz +img0006.nii.gz +img0007.nii.gz +img0009.nii.gz +img0010.nii.gz +img0021.nii.gz +img0023.nii.gz +img0024.nii.gz +img0026.nii.gz +img0027.nii.gz +img0028.nii.gz +img0030.nii.gz +img0031.nii.gz +img0033.nii.gz +img0034.nii.gz +img0037.nii.gz +img0039.nii.gz +img0040.nii.gz +``` + +val.txt + +```none +img0008.nii.gz +img0022.nii.gz +img0038.nii.gz +img0036.nii.gz +img0032.nii.gz +img0002.nii.gz +img0029.nii.gz +img0003.nii.gz +img0001.nii.gz +img0004.nii.gz +img0025.nii.gz +img0035.nii.gz +``` + +The contents of synapse datasets include: + +```none +├── Training +│ ├── img +│ │ ├── img0001.nii.gz +│ │ ├── img0002.nii.gz +│ │ ├── ... +│ ├── label +│ │ ├── label0001.nii.gz +│ │ ├── label0002.nii.gz +│ │ ├── ... +│ ├── train.txt +│ ├── val.txt +``` + +Then, use this command to convert synapse dataset. + +```shell +python tools/dataset_converters/synapse.py --dataset-path /path/to/synapse +``` diff --git a/docs/en/user_guides/3_inference.md b/docs/en/user_guides/3_inference.md new file mode 100644 index 0000000000..6b6f6f7f3b --- /dev/null +++ b/docs/en/user_guides/3_inference.md @@ -0,0 +1,143 @@ +# Tutorial 3: Inference with existing models + +MMSegmentation provides pre-trained models for semantic segmentation in [Model Zoo](../model_zoo.md), and supports multiple standard datasets, including Cityscapes, ADE20K, etc. +This note will show how to use existing models to inference on given images. +As for how to test existing models on standard datasets, please see this [guide](./4_train_test.md) + +## Inference API + +MMSegmentation provides several interfaces for users to easily use pre-trained models for inference. + +- [mmseg.apis.init_model](#mmsegapisinit_model) +- [mmseg.apis.inference_model](#mmsegapisinference_model) +- [mmseg.apis.show_result_pyplot](#mmsegapisshow_result_pyplot) + +### mmseg.apis.init_model + +Initialize a segmentor from config file. + +Parameters: + +- config (str, `Path`, or `mmengine.Config`) - Config file path or the config object. +- checkpoint (str, optional) - Checkpoint path. If left as None, the model will not load any weights. +- device (str, optional) - CPU/CUDA device option. Default 'cuda:0'. +- cfg_options (dict, optional) - Options to override some settings in the used config. + +Returns: + +- nn.Module: The constructed segmentor. 
+ +Example: + +```python +from mmseg.apis import init_model +from mmseg.utils import register_all_modules + +config_path = 'configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +checkpoint_path = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth' + +# register all modules in mmseg into the registries +register_all_modules() + +# initialize model without checkpoint +model = init_model(config_path) + +# init model and load checkpoint +model = init_model(config_path, checkpoint_path) + +# init model and load checkpoint on CPU +model = init_model(config_path, checkpoint_path, 'cpu') +``` + +### mmseg.apis.inference_model + +Inference image(s) with the segmentor. + +Parameters: + +- model (nn.Module) - The loaded segmentor +- imgs (str, np.ndarray, or list\[str/np.ndarray\]) - Either image files or loaded images + +Returns: + +- `SegDataSample` or list\[`SegDataSample`\]: If imgs is a list or tuple, the same length list type results will be returned, otherwise return the segmentation results directly. + +**Note:** [SegDataSample](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/structures/seg_data_sample.py) is a data structure interface of MMSegmentation, it is used as interfaces between different components. `SegDataSample` implement the abstract data element `mmengine.structures.BaseDataElement`, please refer to data element [documentation](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/data_element.html) in [MMEngine](https://github.com/open-mmlab/mmengine) for more information. + +The attributes in `SegDataSample` are divided into several parts: + +- `gt_sem_seg` (`PixelData`) - Ground truth of semantic segmentation. +- `pred_sem_seg` (`PixelData`) - Prediction of semantic segmentation. +- `seg_logits` (`PixelData`) - Predicted logits of semantic segmentation. + +**Note** [PixelData](https://github.com/open-mmlab/mmengine/blob/main/mmengine/structures/pixel_data.py) is the data structure for pixel-level annotations or predictions, please refer to PixelData [documentation](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/data_element.html) in [MMEngine](https://github.com/open-mmlab/mmengine) for more information. + +Example: + +```python +from mmseg.apis import init_model, inference_model +from mmseg.utils import register_all_modules + +config_path = 'configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +checkpoint_path = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth' +img_path = 'demo/demo.png' + +# register all modules in mmseg into the registries +register_all_modules() + +model = init_model(config_path, checkpoint_path) +result = inference_model(model, img_path) +``` + +### mmseg.apis.show_result_pyplot + +Visualize the segmentation results on the image. + +Parameters: + +- model (nn.Module) - The loaded segmentor. +- img (str or np.ndarray) - Image filename or loaded image. +- result (`SegDataSample`) - The prediction SegDataSample result. +- opacity (float) - Opacity of painted segmentation map. Default `0.5`, must be in `(0, 1]` range. +- title (str) - The title of pyplot figure. Default is ''. +- draw_gt (bool) - Whether to draw GT SegDataSample. Default to `True`. +- draw_pred (draws_pred) - Whether to draw Prediction SegDataSample. Default to `True`. +- wait_time (float) - The interval of show (s), 0 is the special value that means "forever". Default to `0`. +- show (bool) - Whether to display the drawn image. Default to `True`. 
+- save_dir (str, optional) - Save file dir for all storage backends. If it is `None`, the backend storage will not save any data.
+- out_file (str, optional) - Path to output file. Default to `None`.
+
+Returns:
+
+- np.ndarray: the drawn image, whose channels are in RGB order.
+
+Example:
+
+```python
+from mmseg.apis import init_model, inference_model, show_result_pyplot
+from mmseg.utils import register_all_modules
+
+config_path = 'configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py'
+checkpoint_path = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'
+img_path = 'demo/demo.png'
+
+# register all modules in mmseg into the registries
+register_all_modules()
+
+# build the model from a config file and a checkpoint file
+model = init_model(config_path, checkpoint_path, device='cuda:0')
+
+# inference on given image
+result = inference_model(model, img_path)
+
+# display the segmentation result
+vis_image = show_result_pyplot(model, img_path, result)
+
+# save the visualization result; the output image will be found at `work_dirs/result.png`
+vis_image = show_result_pyplot(model, img_path, result, out_file='work_dirs/result.png')
+
+# modify the display time of images; note that 0 is the special value that means "forever"
+vis_image = show_result_pyplot(model, img_path, result, wait_time=5)
+```
+
+**Note:** If your current device doesn't have a graphical user interface, it is recommended to set `show` to `False` and specify `out_file` or `save_dir` to save the results. If you would like to display the result in a window, no special settings are required. diff --git a/docs/en/user_guides/4_train_test.md b/docs/en/user_guides/4_train_test.md new file mode 100644 index 0000000000..2fee1ac23b --- /dev/null +++ b/docs/en/user_guides/4_train_test.md @@ -0,0 +1,219 @@ +# Tutorial 4: Train and test with existing models
+
+MMSegmentation supports training and testing models on a variety of devices, described below for single-GPU, distributed, and cluster training and testing, respectively. Through this tutorial, you will learn how to train and test using the scripts provided by MMSegmentation.
+
+## Training and testing on a single GPU
+
+### Training on a single GPU
+
+We provide `tools/train.py` to launch training jobs on a single GPU.
+The basic usage is as follows.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+This tool accepts several optional arguments, including:
+
+- `--work-dir ${WORK_DIR}`: Override the working directory.
+- `--amp`: Use auto mixed precision training.
+- `--resume`: Resume from the latest checkpoint in the work_dir automatically.
+- `--cfg-options ${OVERRIDE_CONFIGS}`: Override some settings in the used config; key-value pairs in xxx=yyy format will be merged into the config file.
+  For example, '--cfg-options model.encoder.in_channels=6'. Please see this [guide](./1_config.md#Modify-config-through-script-arguments) for more details.
+
+Below are the optional arguments for launching distributed jobs:
+
+- `--launcher`: The launcher for distributed job initialization. Allowed choices are `none`, `pytorch`, `slurm`, `mpi`. In particular, if set to `none`, the job will run in non-distributed mode.
+- `--local_rank`: ID for local rank. If not specified, it will be set to 0.
+
+**Note:** Difference between the argument `--resume` and the field `load_from` in the config file:
+
+`--resume` only determines whether to resume from the latest checkpoint in the work_dir.
 It is usually used for resuming a training process that was interrupted accidentally.
+
+`load_from` specifies the checkpoint to be loaded, and the training iteration starts from 0. It is usually used for fine-tuning.
+
+If you would like to resume training from a specific checkpoint, you can use:
+
+```shell
+python tools/train.py ${CONFIG_FILE} --resume --cfg-options load_from=${CHECKPOINT}
+```
+
+**Training on CPU**: The process of training on the CPU is the same as single-GPU training on a machine without GPUs. If the machine has GPUs but you do not want to use them, simply disable them before launching the training process.
+
+```shell
+export CUDA_VISIBLE_DEVICES=-1
+```
+
+And then run the script [above](#training-on-a-single-gpu).
+
+### Testing on a single GPU
+
+We provide `tools/test.py` to launch testing jobs on a single GPU.
+The basic usage is as follows.
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+This tool accepts several optional arguments, including:
+
+- `--work-dir`: If specified, results will be saved in this directory. If not specified, the results will be automatically saved to `work_dirs/{CONFIG_NAME}`.
+- `--show`: Show prediction results at runtime, available when `--show-dir` is not specified.
+- `--show-dir`: Directory where painted images will be saved. If specified, the visualized segmentation mask will be saved to `work_dir/timestamp/show_dir`.
+- `--wait-time`: The interval of show (s), which takes effect when `--show` is activated. Defaults to 2.
+- `--cfg-options`: If specified, the key-value pair in xxx=yyy format will be merged into the config file.
+
+**Testing on CPU**: The process of testing on the CPU is the same as single-GPU testing on a machine without GPUs. If the machine has GPUs but you do not want to use them, simply disable them before launching the testing process.
+
+```shell
+export CUDA_VISIBLE_DEVICES=-1
+```
+
+And then run the script [above](#testing-on-a-single-gpu).
+
+## Training and testing on multiple GPUs and multiple machines
+
+### Training on multiple GPUs
+
+OpenMMLab 2.0 implements **distributed** training with `MMDistributedDataParallel`.
+We provide `tools/dist_train.sh` to launch training on multiple GPUs.
+
+The basic usage is as follows:
+
+```shell
+sh tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [optional arguments]
+```
+
+Optional arguments remain the same as stated [above](#training-on-a-single-gpu), with an additional argument to specify the number of GPUs.
+
+An example:
+
+```shell
+# checkpoints and logs saved in WORK_DIR=work_dirs/pspnet_r50-d8_4xb4-80k_ade20k-512x512/
+# If work_dir is not set, it will be generated automatically.
+sh tools/dist_train.sh configs/pspnet/pspnet_r50-d8_4xb4-80k_ade20k-512x512.py 8 --work-dir work_dirs/pspnet_r50-d8_4xb4-80k_ade20k-512x512
+```
+
+**Note**: During training, checkpoints and logs are saved in the same folder structure as the config file under `work_dirs/`. A custom work directory is not recommended since evaluation scripts infer work directories from the config file name. If you want to save your weights somewhere else, please use a symlink, for example:
+
+```shell
+ln -s ${YOUR_WORK_DIRS} ${MMSEG}/work_dirs
+```
+
+### Testing on multiple GPUs
+
+We provide `tools/dist_test.sh` to launch testing on multiple GPUs.
+The basic usage is as follows.
+ +```shell +sh tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [optional arguments] +``` + +Optional arguments remain the same as stated [above](#testing-on-a-single-gpu) and have additional arguments to specify the number of GPUs. + +An example: + +```shell +./tools/dist_test.sh configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py \ + checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth 4 +``` + +### Launch multiple jobs on a single machine + +If you launch multiple jobs on a single machine, e.g., 2 jobs of 4-GPU training on a machine with 8 GPUs, you need to specify different ports (29500 by default) for each job to avoid communication conflict. Otherwise, there will be an error message saying `RuntimeError: Address already in use`. +If you use `dist_train.sh` to launch training jobs, you can set the port in commands with the environment variable `PORT`. + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 sh tools/dist_train.sh ${CONFIG_FILE} 4 +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 sh tools/dist_train.sh ${CONFIG_FILE} 4 +``` + +### Training with multiple machines + +MMSegmentation relies on `torch.distributed` package for distributed training. +Thus, as a basic usage, one can launch distributed training via PyTorch's [launch utility](https://pytorch.org/docs/stable/distributed.html#launch-utility). + +If you launch with multiple machines simply connected with ethernet, you can simply run the following commands: +On the first machine: + +```shell +NNODES=2 NODE_RANK=0 PORT=${MASTER_PORT} MASTER_ADDR=${MASTER_ADDR} sh tools/dist_train.sh ${CONFIG_FILE} ${GPUS} +``` + +On the second machine: + +```shell +NNODES=2 NODE_RANK=1 PORT=${MASTER_PORT} MASTER_ADDR=${MASTER_ADDR} sh tools/dist_train.sh ${CONFIG_FILE} ${GPUS} +``` + +Usually, it is slow if you do not have high-speed networking like InfiniBand. + +## Manage jobs with Slurm + +[Slurm](https://slurm.schedmd.com/) is a good job scheduling system for computing clusters. + +### Training on a cluster with Slurm + +On a cluster managed by Slurm, you can use `slurm_train.sh` to spawn training jobs. It supports both single-node and multi-node training. + +The basic usage is as follows: + +```shell +[GPUS=${GPUS}] sh tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} [optional arguments] +``` + +Below is an example of using 4 GPUs to train PSPNet on a Slurm partition named _dev_, and set the work-dir to some shared file systems. + +```shell +GPUS=4 sh tools/slurm_train.sh dev pspnet configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py --work-dir work_dir/pspnet +``` + +You can check [the source code](../../../tools/slurm_train.sh) to review full arguments and environment variables. + +### Testing on a cluster with Slurm + +Similar to the training task, MMSegmentation provides `slurm_test.sh` to launch testing jobs. + +The basic usage is as follows: + +```shell +[GPUS=${GPUS}] sh tools/slurm_test.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +You can check [the source code](../../../tools/slurm_test.sh) to review full arguments and environment variables. + +**Note:** When using Slurm, the port option needs to be set in one of the following ways: + +1. Set the port through `--cfg-options`. This is more recommended since it does not change the original configs. 
+
+   ```shell
+   GPUS=4 GPUS_PER_NODE=4 sh tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} --cfg-options env_cfg.dist_cfg.port=29500
+   GPUS=4 GPUS_PER_NODE=4 sh tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} --cfg-options env_cfg.dist_cfg.port=29501
+   ```
+
+2. Modify the config files to set different communication ports.
+   In `config1.py`:
+
+   ```python
+   env_cfg = dict(dist_cfg=dict(backend='nccl', port=29500))
+   ```
+
+   In `config2.py`:
+
+   ```python
+   env_cfg = dict(dist_cfg=dict(backend='nccl', port=29501))
+   ```
+
+   Then you can launch two jobs with config1.py and config2.py.
+
+   ```shell
+   CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 sh tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR}
+   CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 sh tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR}
+   ```
+
+3. Set the port in the command using the environment variable `MASTER_PORT`:
+
+   ```shell
+   CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 MASTER_PORT=29500 sh tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR}
+   CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 MASTER_PORT=29501 sh tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR}
+   ```
 diff --git a/docs/en/user_guides/deployment.md b/docs/en/user_guides/deployment.md new file mode 100644 index 0000000000..036db997da --- /dev/null +++ b/docs/en/user_guides/deployment.md @@ -0,0 +1,184 @@ +# Deployment
+
+> ## [Try the new MMDeploy to deploy your model](https://mmdeploy.readthedocs.io/)
+
+## Convert to ONNX (experimental)
+
+We provide a script to convert a model to [ONNX](https://github.com/onnx/onnx) format. The converted model can be visualized by tools like [Netron](https://github.com/lutzroeder/netron). Besides, we also support comparing the output results between the PyTorch and ONNX models.
+
+```bash
+python tools/pytorch2onnx.py \
+    ${CONFIG_FILE} \
+    --checkpoint ${CHECKPOINT_FILE} \
+    --output-file ${ONNX_FILE} \
+    --input-img ${INPUT_IMG} \
+    --shape ${INPUT_SHAPE} \
+    --rescale-shape ${RESCALE_SHAPE} \
+    --show \
+    --verify \
+    --dynamic-export \
+    --cfg-options \
+      model.test_cfg.mode="whole"
+```
+
+Description of arguments:
+
+- `config` : The path of a model config file.
+- `--checkpoint` : The path of a model checkpoint file.
+- `--output-file`: The path of the output ONNX model. If not specified, it will be set to `tmp.onnx`.
+- `--input-img` : The path of an input image for conversion and visualization.
+- `--shape`: The height and width of the input tensor to the model. If not specified, it will be set to the `img_scale` of the test pipeline.
+- `--rescale-shape`: Rescale shape of the output; set this value to avoid OOM. It only works in `slide` mode.
+- `--show`: Determines whether to print the architecture of the exported model. If not specified, it will be set to `False`.
+- `--verify`: Determines whether to verify the correctness of an exported model. If not specified, it will be set to `False`.
+- `--dynamic-export`: Determines whether to export the ONNX model with dynamic input and output shapes. If not specified, it will be set to `False`.
+- `--cfg-options`: Update config options.
+
+:::{note}
+This tool is still experimental. Some customized operators are not supported for now.
+:::
+
+### Evaluate ONNX model
+
+We provide `tools/deploy_test.py` to evaluate ONNX models with different backends.
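+
+Before running the full evaluation below, it can be useful to quickly confirm that the exported ONNX file loads and produces outputs of the expected shape. The following is a minimal sketch, not part of the MMSegmentation tooling; it assumes `onnxruntime` is installed (see the prerequisites below) and that `tmp.onnx` was exported with a static `1x3x512x1024` input, so adjust the file name and shape to your own export.
+
+```python
+import numpy as np
+import onnxruntime as ort
+
+# Hypothetical file name and input shape; change them to match your exported model.
+onnx_file = 'tmp.onnx'
+dummy_input = np.random.rand(1, 3, 512, 1024).astype(np.float32)
+
+session = ort.InferenceSession(onnx_file, providers=['CPUExecutionProvider'])
+input_name = session.get_inputs()[0].name
+outputs = session.run(None, {input_name: dummy_input})
+
+# For a segmentation model, the first output is typically the predicted map or logits.
+print([out.shape for out in outputs])
+```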
+ +### Prerequisite + +- Install onnx and onnxruntime-gpu + + ```shell + pip install onnx onnxruntime-gpu + ``` + +- Install TensorRT following [how-to-build-tensorrt-plugins-in-mmcv](https://mmcv.readthedocs.io/en/latest/tensorrt_plugin.html#how-to-build-tensorrt-plugins-in-mmcv)(optional) + +### Usage + +```bash +python tools/deploy_test.py \ + ${CONFIG_FILE} \ + ${MODEL_FILE} \ + ${BACKEND} \ + --out ${OUTPUT_FILE} \ + --eval ${EVALUATION_METRICS} \ + --show \ + --show-dir ${SHOW_DIRECTORY} \ + --cfg-options ${CFG_OPTIONS} \ + --eval-options ${EVALUATION_OPTIONS} \ + --opacity ${OPACITY} \ +``` + +Description of all arguments + +- `config`: The path of a model config file. +- `model`: The path of a converted model file. +- `backend`: Backend of the inference, options: `onnxruntime`, `tensorrt`. +- `--out`: The path of output result file in pickle format. +- `--format-only` : Format the output results without perform evaluation. It is useful when you want to format the result to a specific format and submit it to the test server. If not specified, it will be set to `False`. Note that this argument is **mutually exclusive** with `--eval`. +- `--eval`: Evaluation metrics, which depends on the dataset, e.g., "mIoU" for generic datasets, and "cityscapes" for Cityscapes. Note that this argument is **mutually exclusive** with `--format-only`. +- `--show`: Show results flag. +- `--show-dir`: Directory where painted images will be saved +- `--cfg-options`: Override some settings in the used config file, the key-value pair in `xxx=yyy` format will be merged into config file. +- `--eval-options`: Custom options for evaluation, the key-value pair in `xxx=yyy` format will be kwargs for `dataset.evaluate()` function +- `--opacity`: Opacity of painted segmentation map. In (0, 1\] range. + +### Results and Models + +| Model | Config | Dataset | Metric | PyTorch | ONNXRuntime | TensorRT-fp32 | TensorRT-fp16 | +| :--------: | :---------------------------------------------: | :--------: | :----: | :-----: | :---------: | :-----------: | :-----------: | +| FCN | fcn_r50-d8_512x1024_40k_cityscapes.py | cityscapes | mIoU | 72.2 | 72.2 | 72.2 | 72.2 | +| PSPNet | pspnet_r50-d8_512x1024_40k_cityscapes.py | cityscapes | mIoU | 77.8 | 77.8 | 77.8 | 77.8 | +| deeplabv3 | deeplabv3_r50-d8_512x1024_40k_cityscapes.py | cityscapes | mIoU | 79.0 | 79.0 | 79.0 | 79.0 | +| deeplabv3+ | deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py | cityscapes | mIoU | 79.6 | 79.5 | 79.5 | 79.5 | +| PSPNet | pspnet_r50-d8_769x769_40k_cityscapes.py | cityscapes | mIoU | 78.2 | 78.1 | | | +| deeplabv3 | deeplabv3_r50-d8_769x769_40k_cityscapes.py | cityscapes | mIoU | 78.5 | 78.3 | | | +| deeplabv3+ | deeplabv3plus_r50-d8_769x769_40k_cityscapes.py | cityscapes | mIoU | 78.9 | 78.7 | | | + +:::{note} +TensorRT is only available on configs with `whole mode`. +::: + +## Convert to TorchScript (experimental) + +We also provide a script to convert model to [TorchScript](https://pytorch.org/docs/stable/jit.html) format. You can use the pytorch C++ API [LibTorch](https://pytorch.org/docs/stable/cpp_index.html) inference the trained model. The converted model could be visualized by tools like [Netron](https://github.com/lutzroeder/netron). Besides, we also support comparing the output results between PyTorch and TorchScript model. 
+ +```shell +python tools/pytorch2torchscript.py \ + ${CONFIG_FILE} \ + --checkpoint ${CHECKPOINT_FILE} \ + --output-file ${ONNX_FILE} + --shape ${INPUT_SHAPE} + --verify \ + --show +``` + +Description of arguments: + +- `config` : The path of a pytorch model config file. +- `--checkpoint` : The path of a pytorch model checkpoint file. +- `--output-file`: The path of output TorchScript model. If not specified, it will be set to `tmp.pt`. +- `--input-img` : The path of an input image for conversion and visualize. +- `--shape`: The height and width of input tensor to the model. If not specified, it will be set to `512 512`. +- `--show`: Determines whether to print the traced graph of the exported model. If not specified, it will be set to `False`. +- `--verify`: Determines whether to verify the correctness of an exported model. If not specified, it will be set to `False`. + +:::{note} +It's only support PyTorch>=1.8.0 for now. +::: + +:::{note} +This tool is still experimental. Some customized operators are not supported for now. +::: + +Examples: + +- Convert the cityscapes PSPNet pytorch model. + + ```shell + python tools/pytorch2torchscript.py configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \ + --checkpoint checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \ + --output-file checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pt \ + --shape 512 1024 + ``` + +## Convert to TensorRT (experimental) + +A script to convert [ONNX](https://github.com/onnx/onnx) model to [TensorRT](https://developer.nvidia.com/tensorrt) format. + +Prerequisite + +- install `mmcv-full` with ONNXRuntime custom ops and TensorRT plugins follow [ONNXRuntime in mmcv](https://mmcv.readthedocs.io/en/latest/deployment/onnxruntime_op.html) and [TensorRT plugin in mmcv](https://github.com/open-mmlab/mmcv/blob/master/docs/en/deployment/tensorrt_plugin.md). +- Use [pytorch2onnx](#convert-to-onnx-experimental) to convert the model from PyTorch to ONNX. + +Usage + +```bash +python ${MMSEG_PATH}/tools/onnx2tensorrt.py \ + ${CFG_PATH} \ + ${ONNX_PATH} \ + --trt-file ${OUTPUT_TRT_PATH} \ + --min-shape ${MIN_SHAPE} \ + --max-shape ${MAX_SHAPE} \ + --input-img ${INPUT_IMG} \ + --show \ + --verify +``` + +Description of all arguments + +- `config` : Config file of the model. +- `model` : Path to the input ONNX model. +- `--trt-file` : Path to the output TensorRT engine. +- `--max-shape` : Maximum shape of model input. +- `--min-shape` : Minimum shape of model input. +- `--fp16` : Enable fp16 model conversion. +- `--workspace-size` : Max workspace size in GiB. +- `--input-img` : Image for visualize. +- `--show` : Enable result visualize. +- `--dataset` : Palette provider, `CityscapesDataset` as default. +- `--verify` : Verify the outputs of ONNXRuntime and TensorRT. +- `--verbose` : Whether to verbose logging messages while creating TensorRT engine. Defaults to False. + +:::{note} +Only tested on whole mode. +::: diff --git a/docs/en/user_guides/index.rst b/docs/en/user_guides/index.rst new file mode 100644 index 0000000000..9e7d365925 --- /dev/null +++ b/docs/en/user_guides/index.rst @@ -0,0 +1,20 @@ +Train & Test +************** + +.. toctree:: + :maxdepth: 1 + + 1_config.md + 2_dataset_prepare.md + 3_inference.md + 4_train_test.md + +Useful Tools +************* + +.. 
toctree:: + :maxdepth: 2 + + visualization.md + useful_tools.md + deployment.md diff --git a/docs/en/user_guides/useful_tools.md b/docs/en/user_guides/useful_tools.md new file mode 100644 index 0000000000..128397b804 --- /dev/null +++ b/docs/en/user_guides/useful_tools.md @@ -0,0 +1,245 @@ +# Useful Tools + +Apart from training/testing scripts, We provide lots of useful tools under the +`tools/` directory. + +## Analysis Tools + +### Plot training logs + +`tools/analyze_logs.py` plots loss/mIoU curves given a training log file. `pip install seaborn` first to install the dependency. + +```shell +python tools/analysis_tools/analyze_logs.py xxx.json [--keys ${KEYS}] [--legend ${LEGEND}] [--backend ${BACKEND}] [--style ${STYLE}] [--out ${OUT_FILE}] +``` + +Examples: + +- Plot the mIoU, mAcc, aAcc metrics. + + ```shell + python tools/analysis_tools/analyze_logs.py log.json --keys mIoU mAcc aAcc --legend mIoU mAcc aAcc + ``` + +- Plot loss metric. + + ```shell + python tools/analysis_tools/analyze_logs.py log.json --keys loss --legend loss + ``` + +### Confusion Matrix (experimental) + +In order to generate and plot a `nxn` confusion matrix where `n` is the number of classes, you can follow the steps: + +#### 1.Generate a prediction result in pkl format using `test.py` + +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${PATH_TO_RESULT_FILE}] +``` + +Example: + +```shell +python tools/test.py \ +configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-512x1024.py \ +checkpoint/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth \ +--out result/pred_result.pkl +``` + +#### 2. Use `confusion_matrix.py` to generate and plot a confusion matrix + +```shell +python tools/confusion_matrix.py ${CONFIG_FILE} ${PATH_TO_RESULT_FILE} ${SAVE_DIR} --show +``` + +Description of arguments: + +- `config`: Path to the test config file. +- `prediction_path`: Path to the prediction .pkl result. +- `save_dir`: Directory where confusion matrix will be saved. +- `--show`: Enable result visualize. +- `--color-theme`: Theme of the matrix color map. +- `--cfg_options`: Custom options to replace the config file. + +Example: + +```shell +python tools/confusion_matrix.py \ +configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py \ +result/pred_result.pkl \ +result/confusion_matrix \ +--show +``` + +### Get the FLOPs and params (experimental) + +We provide a script adapted from [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) to compute the FLOPs and params of a given model. + +```shell +python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] +``` + +You will get the result like this. + +```none +============================== +Input shape: (3, 2048, 1024) +Flops: 1429.68 GMac +Params: 48.98 M +============================== +``` + +:::{note} +This tool is still experimental and we do not guarantee that the number is correct. You may well use the result for simple comparisons, but double check it before you adopt it in technical reports or papers. +::: + +(1) FLOPs are related to the input shape while parameters are not. The default input shape is (1, 3, 1280, 800). +(2) Some operators are not counted into FLOPs like GN and custom operators. + +## Miscellaneous + +### Publish a model + +Before you upload a model to AWS, you may want to +(1) convert model weights to CPU tensors, (2) delete the optimizer states and +(3) compute the hash of the checkpoint file and append the hash id to the filename. 
+ +```shell +python tools/misc/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME} +``` + +E.g., + +```shell +python tools/publish_model.py work_dirs/pspnet/latest.pth psp_r50_512x1024_40k_cityscapes.pth +``` + +The final output filename will be `psp_r50_512x1024_40k_cityscapes-{hash id}.pth`. + +### Print the entire config + +`tools/misc/print_config.py` prints the whole config verbatim, expanding all its +imports. + +```shell +python tools/misc/print_config.py \ + ${CONFIG} \ + --graph \ + --cfg-options ${OPTIONS [OPTIONS...]} \ +``` + +Description of arguments: + +- `config` : The path of a pytorch model config file. +- `--graph` : Determines whether to print the models graph. +- `--cfg-options`: Custom options to replace the config file. + +## Model conversion + +`tools/model_converters/` provide several scripts to convert pretrain models released by other repos to MMSegmentation style. + +### ViT Swin MiT Transformer Models + +- ViT + + `tools/model_converters/vit2mmseg.py` convert keys in timm pretrained vit models to MMSegmentation style. + + ```shell + python tools/model_converters/vit2mmseg.py ${SRC} ${DST} + ``` + +- Swin + + `tools/model_converters/swin2mmseg.py` convert keys in official pretrained swin models to MMSegmentation style. + + ```shell + python tools/model_converters/swin2mmseg.py ${SRC} ${DST} + ``` + +- SegFormer + + `tools/model_converters/mit2mmseg.py` convert keys in official pretrained mit models to MMSegmentation style. + + ```shell + python tools/model_converters/mit2mmseg.py ${SRC} ${DST} + ``` + +## Model Serving + +In order to serve an `MMSegmentation` model with [`TorchServe`](https://pytorch.org/serve/), you can follow the steps: + +### 1. Convert model from MMSegmentation to TorchServe + +```shell +python tools/torchserve/mmseg2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ +--output-folder ${MODEL_STORE} \ +--model-name ${MODEL_NAME} +``` + +:::{note} +${MODEL_STORE} needs to be an absolute path to a folder. +::: + +### 2. Build `mmseg-serve` docker image + +```shell +docker build -t mmseg-serve:latest docker/serve/ +``` + +### 3. Run `mmseg-serve` + +Check the official docs for [running TorchServe with docker](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment). + +In order to run in GPU, you need to install [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). You can omit the `--gpus` argument in order to run in CPU. + +Example: + +```shell +docker run --rm \ +--cpus 8 \ +--gpus device=0 \ +-p8080:8080 -p8081:8081 -p8082:8082 \ +--mount type=bind,source=$MODEL_STORE,target=/home/model-server/model-store \ +mmseg-serve:latest +``` + +[Read the docs](https://github.com/pytorch/serve/blob/072f5d088cce9bb64b2a18af065886c9b01b317b/docs/rest_api.md) about the Inference (8080), Management (8081) and Metrics (8082) APIs + +### 4. Test deployment + +```shell +curl -O https://raw.githubusercontent.com/open-mmlab/mmsegmentation/master/resources/3dogs.jpg +curl http://127.0.0.1:8080/predictions/${MODEL_NAME} -T 3dogs.jpg -o 3dogs_mask.png +``` + +The response will be a ".png" mask. + +You can visualize the output as follows: + +```python +import matplotlib.pyplot as plt +import mmcv +plt.imshow(mmcv.imread("3dogs_mask.png", "grayscale")) +plt.show() +``` + +You should see something similar to: + +![3dogs_mask](../../resources/3dogs_mask.png) + +And you can use `test_torchserve.py` to compare result of torchserve and pytorch, and visualize them. 
+ +```shell +python tools/torchserve/test_torchserve.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} +[--inference-addr ${INFERENCE_ADDR}] [--result-image ${RESULT_IMAGE}] [--device ${DEVICE}] +``` + +Example: + +```shell +python tools/torchserve/test_torchserve.py \ +demo/demo.png \ +configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py \ +checkpoint/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth \ +fcn +``` diff --git a/docs/en/user_guides/visualization.md b/docs/en/user_guides/visualization.md new file mode 100644 index 0000000000..e7c3359cc9 --- /dev/null +++ b/docs/en/user_guides/visualization.md @@ -0,0 +1,174 @@ +# Visualization + +MMSegmentation 1.x provides convenient ways for monitoring training status or visualizing data and model predictions. + +## Training status Monitor + +MMSegmentation 1.x uses TensorBoard to monitor training status. + +### TensorBoard Configuration + +Install TensorBoard following [official instructions](https://www.tensorflow.org/install) e.g. + +```shell +pip install tensorboardX +pip install future tensorboard +``` + +Add `TensorboardVisBackend` in `vis_backend` of `visualizer` in `default_runtime.py` config file: + +```python +vis_backends = [dict(type='LocalVisBackend'), + dict(type='TensorboardVisBackend')] +visualizer = dict( + type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer') +``` + +### Examining scalars in TensorBoard + +Launch training experiment e.g. + +```shell +python tools/train.py configs/pspnet/pspnet_r50-d8_4xb4-80k_ade20k-512x512.py --work-dir work_dir/test_visual +``` + +Find the `vis_data` path of `work_dir` after starting training, for example, the vis_data path of this particular test is as follows: + +```shell +work_dirs/test_visual/20220810_115248/vis_data +``` + +The scalar file in vis_data path includes learning rate, losses and data_time etc, also record metrics results and you can refer [logging tutorial](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/logging.html) in MMEngine to log custom data. The tensorboard visualization results are executed with the following command: + +```shell +tensorboard --logdir work_dirs/test_visual/20220810_115248/vis_data +``` + +## Data and Results visualization + +### Visualizer Data Samples during Model Testing or Validation + +MMSegmentation provides `SegVisualizationHook` which is a [hook](https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/hook.md) working to visualize ground truth and prediction of segmentation during model testing and evaluation. Its configuration is in `default_hooks`, please see [Runner tutorial](https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/runner.md) for more details. + +For example, In `_base_/schedules/schedule_20k.py`, modify the `SegVisualizationHook` configuration, set `draw` to `True` to enable the storage of network inference results, `interval` indicates the sampling interval of the prediction results, and when set to 1, each inference result of the network will be saved. 
 `interval` is set to 50 by default:
+
+```python
+default_hooks = dict(
+    timer=dict(type='IterTimerHook'),
+    logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False),
+    param_scheduler=dict(type='ParamSchedulerHook'),
+    checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=2000),
+    sampler_seed=dict(type='DistSamplerSeedHook'),
+    visualization=dict(type='SegVisualizationHook', draw=True, interval=1))
+```
+
+After launching a training experiment, the visualization results are stored in a local folder during the validation loop; likewise, when a model is evaluated on a dataset, the prediction results are stored locally.
+The stored results of the local visualization are kept in `vis_image` under `$WORK_DIRS/vis_data`, e.g.:
+
+```shell
+work_dirs/test_visual/20220810_115248/vis_data/vis_image
+```
+
+In addition, if `TensorboardVisBackend` is added to `vis_backends`, as [above](#tensorboard-configuration),
+we can also run the following command to view them in TensorBoard:
+
+```shell
+tensorboard --logdir work_dirs/test_visual/20220810_115248/vis_data
+```
+
+### Visualize a Single Data Sample
+
+If you want to visualize a single data sample, we suggest using `SegLocalVisualizer`.
+
+`SegLocalVisualizer` is a child class of `Visualizer` in MMEngine, tailored for MMSegmentation visualization. For more details about `Visualizer`, please refer to the [visualization tutorial](https://github.com/open-mmlab/mmengine/blob/main/docs/en/advanced_tutorials/visualization.md) in MMEngine.
+
+Here is an example of `SegLocalVisualizer`; first, you may download the example data below with the following commands:
+
+ +
+ +```shell +wget https://user-images.githubusercontent.com/24582831/189833109-eddad58f-f777-4fc0-b98a-6bd429143b06.png --output-document aachen_000000_000019_leftImg8bit.png +wget https://user-images.githubusercontent.com/24582831/189833143-15f60f8a-4d1e-4cbb-a6e7-5e2233869fac.png --output-document aachen_000000_000019_gtFine_labelTrainIds.png +``` + +Then you can find their local path and use the scripts below to visualize: + +```python +import mmcv +import os.path as osp +import torch +# `PixelData` is data structure for pixel-level annotations or predictions defined in MMEngine. +# Please refer to below tutorial file of data structures in MMEngine: +# https://github.com/open-mmlab/mmengine/tree/main/docs/en/advanced_tutorials/data_element.md + +from mmengine.structures import PixelData + +# `SegDataSample` is data structure interface between different components +# defined in MMSegmentation, it includes ground truth, prediction and +# predicted logits of semantic segmentation. +# Please refer to below tutorial file of `SegDataSample` for more details: +# https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/advanced_guides/structures.md + +from mmseg.structures import SegDataSample +from mmseg.visualization import SegLocalVisualizer + +out_file = 'out_file_cityscapes' +save_dir = './work_dirs' + +image = mmcv.imread( + osp.join( + osp.dirname(__file__), + './aachen_000000_000019_leftImg8bit.png' + ), + 'color') +sem_seg = mmcv.imread( + osp.join( + osp.dirname(__file__), + './aachen_000000_000019_gtFine_labelTrainIds.png' # noqa + ), + 'unchanged') +sem_seg = torch.from_numpy(sem_seg) +gt_sem_seg_data = dict(data=sem_seg) +gt_sem_seg = PixelData(**gt_sem_seg_data) +data_sample = SegDataSample() +data_sample.gt_sem_seg = gt_sem_seg + +seg_local_visualizer = SegLocalVisualizer( + vis_backends=[dict(type='LocalVisBackend')], + save_dir=save_dir) + +# The meta information of dataset usually includes `classes` for class names and +# `palette` for visualization color of each foreground. +# All class names and palettes are defined in the file: +# https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/utils/class_names.py + +seg_local_visualizer.dataset_meta = dict( + classes=('road', 'sidewalk', 'building', 'wall', 'fence', + 'pole', 'traffic light', 'traffic sign', + 'vegetation', 'terrain', 'sky', 'person', 'rider', + 'car', 'truck', 'bus', 'train', 'motorcycle', + 'bicycle'), + palette=[[128, 64, 128], [244, 35, 232], [70, 70, 70], + [102, 102, 156], [190, 153, 153], [153, 153, 153], + [250, 170, 30], [220, 220, 0], [107, 142, 35], + [152, 251, 152], [70, 130, 180], [220, 20, 60], + [255, 0, 0], [0, 0, 142], [0, 0, 70], + [0, 60, 100], [0, 80, 100], [0, 0, 230], + [119, 11, 32]]) +# When `show=True`, the results would be shown directly, +# else if `show=False`, the results would be saved in local directory folder. +seg_local_visualizer.add_datasample(out_file, image, + data_sample, show=False) +``` + +Then the visualization result of image with its corresponding ground truth could be found in `./work_dirs/vis_data/vis_image/` whose name is `out_file_cityscapes_0.png`: + +
+ +
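+
+The same visualizer can also overlay a model prediction. The snippet below is only an illustrative continuation of the Python script above (it reuses its `image`, `sem_seg`, `gt_sem_seg`, and `seg_local_visualizer` objects, and simply treats the ground-truth mask as a stand-in prediction):
+
+```python
+# Illustrative only: reuse the ground-truth mask as a fake prediction so the
+# snippet stays short; in practice `pred_sem_seg` would come from model inference.
+pred_sample = SegDataSample()
+pred_sample.gt_sem_seg = gt_sem_seg
+pred_sample.pred_sem_seg = PixelData(data=sem_seg)
+
+# `draw_gt` / `draw_pred` control which masks are rendered onto the image.
+seg_local_visualizer.add_datasample('out_file_cityscapes_pred', image,
+                                    pred_sample, draw_gt=False, show=False)
+```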
+ +If you would like to know more visualization usage, you can refer to [visualization tutorial](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/visualization.html) in MMEngine. diff --git a/docs/getting_started.md b/docs/getting_started.md deleted file mode 100644 index a5ad9b888d..0000000000 --- a/docs/getting_started.md +++ /dev/null @@ -1,332 +0,0 @@ -# Getting Started - -This page provides basic tutorials about the usage of MMSegmentation. -For installation instructions, please see [install.md](install.md). - -## Prepare datasets - -It is recommended to symlink the dataset root to `$MMSEGMENTATION/data`. -If your folder structure is different, you may need to change the corresponding paths in config files. - -``` -mmsegmentation -├── mmseg -├── tools -├── configs -├── data -│ ├── cityscapes -│ │ ├── leftImg8bit -│ │ │ ├── train -│ │ │ ├── val -│ │ ├── gtFine -│ │ │ ├── train -│ │ │ ├── val -│ ├── VOCdevkit -│ │ ├── VOC2012 -│ │ │ ├── JPEGImages -│ │ │ ├── SegmentationClass -│ │ │ ├── ImageSets -│ │ │ │ ├── Segmentation -│ │ ├── VOCaug -│ │ │ ├── dataset -│ │ │ │ ├── cls -│ ├── ade -│ │ ├── ADEChallengeData2016 -│ │ │ ├── annotations -│ │ │ │ ├── training -│ │ │ │ ├── validation -│ │ │ ├── images -│ │ │ │ ├── training -│ │ │ │ ├── validation - -``` - -### Cityscapes -The data could be found [here](https://www.cityscapes-dataset.com/downloads/) after registration. - -By convention, `**labelTrainIds.png` are used for cityscapes training. -We provided a [scripts](../tools/convert_datasets/cityscapes.py) based on [cityscapesscripts](https://github.com/mcordts/cityscapesScripts) -to generate `**labelTrainIds.png`. -```shell -# --nproc means 8 process for conversion, which could be omitted as well. -python tools/convert_datasets/cityscapes.py data/cityscapes --nproc 8 -``` - -### Pascal VOC -Pascal VOC 2012 could be downloaded from [here](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar). -Beside, most recent works on Pascal VOC dataset usually exploit extra augmentation data, which could be found [here](http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz). - -If you would like to use augmented VOC dataset, please run following command to convert augmentation annotations into proper format. -```shell -# --nproc means 8 process for conversion, which could be omitted as well. -python tools/convert_datasets/voc_aug.py data/VOCdevkit data/VOCdevkit/VOCaug --nproc 8 -``` - -Please refer to [concat dataset](tutorials/new_dataset.md#concatenate-dataset) for details about how to concatenate them and train them together. - - -### ADE20K -The training and validation set of ADE20K could be download from this [link](http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip). -We may also download test set from [here](http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip). - -## Inference with pretrained models - -We provide testing scripts to evaluate a whole dataset (Cityscapes, PASCAL VOC, ADE20k, etc.), -and also some high-level apis for easier integration to other projects. - -### Test a dataset - -- single GPU -- single node multiple GPU -- multiple node - -You can use the following commands to test a dataset. 
- -```shell -# single-gpu testing -python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] [--show] - -# multi-gpu testing -./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] -``` - -Optional arguments: -- `RESULT_FILE`: Filename of the output results in pickle format. If not specified, the results will not be saved to a file. -- `EVAL_METRICS`: Items to be evaluated on the results. Allowed values depend on the dataset, e.g., `mIoU` is available for all dataset. Cityscapes could be evaluated by `cityscapes` as well as standard `mIoU` metrics. -- `--show`: If specified, segmentation results will be plotted on the images and shown in a new window. It is only applicable to single GPU testing and used for debugging and visualization. Please make sure that GUI is available in your environment, otherwise you may encounter the error like `cannot connect to X server`. -- `--show-dir`: If specified, segmentation results will be plotted on the images and saved to the specified directory. It is only applicable to single GPU testing and used for debugging and visualization. You do NOT need a GUI available in your environment for using this option. - - -Examples: - -Assume that you have already downloaded the checkpoints to the directory `checkpoints/`. - -1. Test PSPNet and visualize the results. Press any key for the next image. - - ```shell - python tools/test.py configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \ - checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \ - --show - ``` - -2. Test PSPNet and save the painted images for latter visualization. - - ```shell - python tools/test.py configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \ - checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \ - --show-dir psp_r50_512x1024_40ki_cityscapes_results - ``` - -3. Test PSPNet on PASCAL VOC (without saving the test results) and evaluate the mIoU. - - ```shell - python tools/test.py configs/pspnet/pspnet_r50-d8_512x1024_20k_voc12aug.py \ - checkpoints/pspnet_r50-d8_512x1024_20k_voc12aug_20200605_003338-c57ef100.pth \ - --eval mAP - ``` - -4. Test PSPNet with 8 GPUs, and evaluate the standard mIoU and cityscapes metric. - - ```shell - ./tools/dist_test.sh configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \ - checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \ - 8 --out results.pkl --eval mIoU cityscapes - ``` - -5. Test PSPNet on cityscapes test split with 8 GPUs, and generate the png files to be submit to the official evaluation server. - - ```shell - ./tools/dist_test.sh configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \ - checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \ - 8 --format-only --options "imgfile_prefix=./pspnet_test_results" - ``` - -You will get png files under `./pspnet_test_results` directory. - - -### Image demo - -We provide a demo script to test a single image. 
- -```shell -python demo/image_demo.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${DEVICE_NAME}] [--palette-thr ${PALETTE}] -``` - -Examples: - -```shell -python demo/image_demo.py demo/demo.jpg configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \ - checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth --device cuda:0 --palette cityscapes -``` - - -### High-level APIs for testing images - -Here is an example of building the model and test given images. - -```python -from mmseg.apis import inference_segmentor, init_segmentor -import mmcv - -config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py' -checkpoint_file = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth' - -# build the model from a config file and a checkpoint file -model = init_segmentor(config_file, checkpoint_file, device='cuda:0') - -# test a single image and show the results -img = 'test.jpg' # or img = mmcv.imread(img), which will only load it once -result = inference_segmentor(model, img) -# visualize the results in a new window -model.show_result(img, result, show=True) -# or save the visualization results to image files -model.show_result(img, result, out_file='result.jpg') - -# test a video and show the results -video = mmcv.VideoReader('video.mp4') -for frame in video: - result = inference_segmentor(model, frame) - model.show_result(frame, result, wait_time=1) -``` - -A notebook demo can be found in [demo/inference_demo.ipynb](../demo/inference_demo.ipynb). - - -## Train a model - -MMSegmentation implements distributed training and non-distributed training, -which uses `MMDistributedDataParallel` and `MMDataParallel` respectively. - -All outputs (log files and checkpoints) will be saved to the working directory, -which is specified by `work_dir` in the config file. - -By default we evaluate the model on the validation set after some iterations, you can change the evaluation interval by adding the interval argument in the training config. -```python -evaluation = dict(interval=4000) # This evaluate the model per 4000 iterations. -``` - -**\*Important\***: The default learning rate in config files is for 8 GPUs and 1 img/gpu (batch size = 8x1 = 8). -Equivalently, you may also use 4 GPUs and 2 imgs/gpu since all models using cross-GPU SyncBN. - -### Train with a single GPU - -```shell -python tools/train.py ${CONFIG_FILE} [optional arguments] -``` - -If you want to specify the working directory in the command, you can add an argument `--work_dir ${YOUR_WORK_DIR}`. - -### Train with multiple GPUs - -```shell -./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [optional arguments] -``` - -Optional arguments are: - -- `--no-validate` (**not suggested**): By default, the codebase will perform evaluation at every k iterations during the training. To disable this behavior, use `--no-validate`. -- `--work-dir ${WORK_DIR}`: Override the working directory specified in the config file. -- `--resume-from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file. - -Difference between `resume-from` and `load-from`: -`resume-from` loads both the model weights and optimizer status, and the iteration number is also inherited from the specified checkpoint. It is usually used for resuming the training process that is interrupted accidentally. -`load-from` only loads the model weights and the training iteration starts from 0. It is usually used for finetuning. 
- -### Train with multiple machines - -If you run MMSegmentation on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_train.sh`. (This script also supports single machine training.) - -```shell -[GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} -``` - -Here is an example of using 16 GPUs to train PSPNet on the dev partition. - -```shell -GPUS=16 ./tools/slurm_train.sh dev pspr50 configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py /nfs/xxxx/psp_r50_512x1024_40ki_cityscapes -``` - -You can check [slurm_train.sh](../tools/slurm_train.sh) for full arguments and environment variables. - -If you just have multiple machines connected with Ethernet, you can refer to the -PyTorch [launch utility](https://pytorch.org/docs/stable/distributed_deprecated.html#launch-utility). -It is usually slow if you do not have high-speed networking like InfiniBand. - -### Launch multiple jobs on a single machine - -If you launch multiple jobs on a single machine, e.g., 2 jobs of 4-GPU training on a machine with 8 GPUs, -you need to specify different ports (29500 by default) for each job to avoid communication conflicts. - -If you use `dist_train.sh` to launch training jobs, you can set the port in the commands. - -```shell -CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4 -CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4 -``` - -If you launch training jobs with Slurm, you need to modify the config files (usually the 6th line from the bottom in config files) to set different communication ports. - -In `config1.py`, -```python -dist_params = dict(backend='nccl', port=29500) -``` - -In `config2.py`, -```python -dist_params = dict(backend='nccl', port=29501) -``` - -Then you can launch two jobs with `config1.py` and `config2.py`. - -```shell -CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} -CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} -``` - -Or you could specify the port with `--options dist_params.port=29501`. - -## Useful tools - -We provide lots of useful tools under the `tools/` directory. - -### Get the FLOPs and params (experimental) - -We provide a script adapted from [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) to compute the FLOPs and params of a given model. - -```shell -python tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] -``` - -You will get a result like this. - -``` -============================== -Input shape: (3, 2048, 1024) -Flops: 1429.68 GMac -Params: 48.98 M -============================== -``` - -**Note**: This tool is still experimental and we do not guarantee that the number is correct. You may use the result for simple comparisons, but double-check it before you adopt it in technical reports or papers. - -(1) FLOPs are related to the input shape while parameters are not. The default input shape is (3, 2048, 1024). -(2) Some operators, such as GN and custom operators, are not counted in FLOPs. -You can add support for new operators by modifying [`mmseg/utils/flops_counter.py`](../mmseg/utils/flops_counter.py). - -### Publish a model - -Before you upload a model to AWS, you may want to -(1) convert the model weights to CPU tensors, (2) delete the optimizer states, and -(3) compute the hash of the checkpoint file and append the hash id to the filename.
- -```shell -python tools/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME} -``` - -E.g., - -```shell -python tools/publish_model.py work_dirs/pspnet/latest.pth psp_r50_hszhao_200ep.pth -``` - -The final output filename will be `psp_r50_hszhao_200ep-{hash id}.pth`. diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index caa6677249..0000000000 --- a/docs/index.rst +++ /dev/null @@ -1,28 +0,0 @@ -Welcome to MMSegmentation's documentation! -========================================== - -.. toctree:: - :maxdepth: 2 - - install.md - getting_started.md - config.md - model_zoo.md - -.. toctree:: - :maxdepth: 2 - :caption: Tutorials - - tutorials/index.rst - -.. toctree:: - :caption: API Reference - - api.rst - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/docs/install.md b/docs/install.md deleted file mode 100644 index 5d6a2d9bc5..0000000000 --- a/docs/install.md +++ /dev/null @@ -1,89 +0,0 @@ -## Installation - -### Requirements - -- Linux (Windows is not officially supported) -- Python 3.6+ -- PyTorch 1.3 or higher -- [mmcv](https://github.com/open-mmlab/mmcv) - -### Install mmsegmentation - -a. Create a conda virtual environment and activate it. - -```shell -conda create -n open-mmlab python=3.7 -y -conda activate open-mmlab -``` - -b. Install PyTorch and torchvision following the [official instructions](https://pytorch.org/). -Here we use PyTorch 1.5.0 and CUDA 10.1. -You may also switch to another version by specifying the version number. - -```shell -conda install pytorch=1.5.0 torchvision cudatoolkit=10.1 -c pytorch -``` - -c. Clone the mmsegmentation repository. - -```shell -git clone http://github.com/open-mmlab/mmsegmentation -cd mmsegmentation -``` - -d. Install [MMCV](https://mmcv.readthedocs.io/en/latest/). -Either *mmcv* or *mmcv-full* is compatible with MMSegmentation, but for methods like CCNet and PSANet, the CUDA ops in *mmcv-full* are required. - -The pre-built *mmcv-full* can be installed by running (available versions can be found [here](https://mmcv.readthedocs.io/en/latest/#install-with-pip)): -``` -pip install mmcv-full==latest+torch1.5.0+cu101 -f https://openmmlab.oss-accelerate.aliyuncs.com/mmcv/dist/index.html -``` - -Optionally, you could also install the lite version by running: -``` -pip install mmcv -``` -or build the full version from source: -``` -pip install mmcv-full -``` - -e. Install build requirements and then install MMSegmentation. - -```shell -pip install -r requirements/build.txt # or "pip install -r requirements.txt" for everything. -pip install -e . # or "python setup.py develop" -``` - -Note: - -1. The git commit id will be written to the version number in step *e*, e.g. 0.5.0+c415a2e. The version will also be saved in trained models. -It is recommended that you run step *e* each time you pull some updates from GitHub. If C++/CUDA code is modified, then this step is compulsory. - -2. Following the above instructions, mmsegmentation is installed in `dev` mode, so any local modifications made to the code will take effect without the need to reinstall it (unless you submit some commits and want to update the version number). - -3. If you would like to use `opencv-python-headless` instead of `opencv-python`, -you can install it before installing MMCV. - -4. Some dependencies are optional. Simply running `pip install -e .` will only install the minimum runtime requirements.
-To use optional dependencies like `cityscapesscripts`, either install them manually with `pip install -r requirements/optional.txt` or specify the desired extras when calling `pip` (e.g. `pip install -e .[optional]`). Valid keys for the extras field are: `all`, `tests`, `build`, and `optional`. - - -### A from-scratch setup script - -Here is a full script for setting up mmsegmentation with conda and linking the dataset path (supposing that your dataset path is $DATA_ROOT). - -```shell -conda create -n open-mmlab python=3.7 -y -conda activate open-mmlab - -conda install pytorch=1.5.0 torchvision cudatoolkit=10.1 -c pytorch -git clone http://github.com/open-mmlab/mmsegmentation -cd mmsegmentation -pip install mmcv-full==latest+torch1.5.0+cu101 -f https://openmmlab.oss-accelerate.aliyuncs.com/mmcv/dist/index.html -pip install -r requirements/build.txt -pip install -e . - -mkdir data -ln -s $DATA_ROOT data -``` diff --git a/docs/model_zoo.json b/docs/model_zoo.json deleted file mode 100644 index cc14cce043..0000000000 --- a/docs/model_zoo.json +++ /dev/null @@ -1,2724 +0,0 @@ -{ - "ccnet": { - "voc12aug": [ - [ - [ - "CCNet", - "R-50-D8", - "512x512", - 20000, - "6.0", - 20.446969644812683, - 76.168, - 77.51245728562927, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212-fad81784.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212.log.json)" - ], - [ - "CCNet", - "R-101-D8", - "512x512", - 20000, - "9.5", - 13.637111132708073, - 77.274, - 79.02193536016937, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212-0007b61d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212.log.json)" - ], - [ - "CCNet", - "R-50-D8", - "512x512", - 40000, - "-", - "-", - 75.96300000000001, - 77.03666314173265, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127-c2a15f02.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127.log.json)" - ], - [ - "CCNet", - "R-101-D8", - "512x512", - 40000, - "-", - "-", - 77.86800000000001, - 78.90226783309761, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127-c30da577.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127.log.json)" - ] - ] - ], - "cityscapes": [ - [ - [ - "CCNet", - "R-50-D8", - "512x1024", - 40000, - "6.0", - 3.321448861645321, - 77.757, - 78.87281569371032, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517-4123f401.pth) |
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517.log.json)" - ], - [ - "CCNet", - "R-101-D8", - "512x1024", - 40000, - "9.5", - 2.3057084889880533, - 76.346, - 78.19477535704155, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540-a3b84ba6.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540.log.json)" - ], - [ - "CCNet", - "R-50-D8", - "769x769", - 40000, - "6.8", - 1.4297640908184566, - 78.461, - 79.9288478571096, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125-76d11884.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125.log.json)" - ], - [ - "CCNet", - "R-101-D8", - "769x769", - 40000, - "10.7", - 1.0054480750692631, - 76.941, - 78.62346948358564, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428-4f57c8d0.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428.log.json)" - ], - [ - "CCNet", - "R-50-D8", - "512x1024", - 80000, - "-", - "-", - 79.035, - 80.1605485551008, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421-869a3423.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421.log.json)" - ], - [ - "CCNet", - "R-101-D8", - "512x1024", - 80000, - "-", - "-", - 78.86800000000001, - 79.89770560760813, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935-ffae8917.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935.log.json)" - ], - [ - "CCNet", - "R-50-D8", - "769x769", - 80000, - "-", - "-", - 79.295, - 81.07581708289482, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421-73eed8ca.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421.log.json)" - ], - [ - "CCNet", - "R-101-D8", - "769x769", - 80000, - "-", - "-", - 79.449, - 80.65765062513057, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502-ad3cd481.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502.log.json)" - ] - ] - ], - "ade20k": [ - [ - [ - "CCNet", - "R-50-D8", - "512x512", - 80000, - "8.8", - 20.889847025344185, - 41.776, - 42.980388602332184, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848-aa37f61e.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848.log.json)" - ], - [ - "CCNet", - "R-101-D8", - "512x512", - 80000, - "12.2", - 14.108705519350595, - 43.972, - 45.13437368692854, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848-1f4929a3.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848.log.json)" - ], - [ - "CCNet", - "R-50-D8", - "512x512", - 160000, - "-", - "-", - 42.079, - 43.131354987778764, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435-7c97193b.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435.log.json)" - ], - [ - "CCNet", - "R-101-D8", - "512x512", - 160000, - "-", - "-", - 43.706, - 45.043400185988624, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644-e849e007.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644.log.json)" - ] - ] - ] - }, - "ocrnet": { - "cityscapes": [ - [ - [ - "OCRNet", - "HRNetV2p-W18-Small", - "512x1024", - 40000, - "3.5", - 10.452887853499684, - 74.30099999999999, - 75.94532264911325, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304-fa2436c2.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W18", - "512x1024", - 40000, - "4.7", - 7.504321415510909, - 77.71900000000001, - 79.49233034088692, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320-401c5bdd.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W48", - "512x1024", - 40000, - "8.0", - 4.215373853142414, - 80.58, - 81.79213277409706, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336-55b32491.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W18-Small", - "512x1024", - 80000, - "-", - "-", - 77.157, - 78.66157171766707, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735-55979e63.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W18", - "512x1024", - 80000, - "-", - "-", - 78.568, - 80.45534029123633, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521-c2e1dd4a.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W48", - "512x1024", - 80000, - "-", - "-", - 80.704, - 81.87462053536443, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752-9076bcdf.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W18-Small", - "512x1024", - 160000, - "-", - "-", - 78.448, - 79.9684406563932, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005-f4a7af28.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W18", - "512x1024", - 160000, - "-", - "-", - 79.473, - 80.91408916940453, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001-b9172d0c.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W48", - "512x1024", - 160000, - "-", - "-", - 81.34599999999999, - 82.69728960882979, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037-dfbf1b0c.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037.log.json)" - ] - ] - ], - "voc12aug": [ - [ - [ - "OCRNet", - "HRNetV2p-W18-Small", - "512x512", - 20000, - "3.5", - 31.554844022107428, - 71.7, - 73.83921653423745, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913-02b04fcb.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W18", - "512x512", - 20000, - "4.7", - 19.90720967998522, - 74.749, - 77.1105042314631, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932-8954cbb7.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W48", - "512x512", - 20000, - "8.1", - 17.82942134961672, - 77.72, - 79.87183377075576, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932-9e82080a.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W18-Small", - "512x512", - 40000, - "-", - "-", - 72.761, - 74.6014601681293, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025-42b587ac.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W18", - "512x512", - 40000, - "-", - "-", - 74.982, - 77.39817842813225, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958-714302be.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W48", - "512x512", - 40000, - "-", - "-", - 77.143, - 79.70754598517257, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958-255bc5ce.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958.log.json)" - ] - ] - ], - "ade20k": [ - [ - [ - "OCRNet", - "HRNetV2p-W18-Small", - "512x512", - 80000, - "6.7", - 28.980094398974657, - 35.056, - 35.797050387137105, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600-e80b62af.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W18", - "512x512", - 80000, - "7.9", - 18.928971854245283, - 37.789, - 39.155377232744, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157-d173d83b.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W48", - "512x512", - 80000, - "11.2", - 16.991178423144667, - 43.0, - 44.299600723103225, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518-d168c2d1.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W18-Small", - "512x512", - 160000, - "-", - "-", - 37.191, - 38.40331034259458, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505-8e913058.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W18", - "512x512", - 160000, - "-", - "-", - 39.322, - 40.80220494656125, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940-d8fcd9d1.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940.log.json)" - ], - [ - "OCRNet", - "HRNetV2p-W48", - "512x512", - 160000, - "-", - "-", - 43.254, - 44.87655360616251, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705-a073726d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705.log.json)" - ] - ] - ] - }, - "fcn": { - "ade20k": [ - [ - [ - "FCN", - "R-50-D8", - "512x512", - 80000, - "8.5", - 23.4864501408415, - 35.94, - 37.93716647334422, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_512x512_80k_ade20k/fcn_r50-d8_512x512_80k_ade20k_20200614_144016-f8ac5082.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_512x512_80k_ade20k/fcn_r50-d8_512x512_80k_ade20k_20200614_144016.log.json)" - ], - [ - "FCN", - "R-101-D8", - "512x512", - 80000, - "12.0", - 14.780950192500319, - 39.614, - 40.827367113582405, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_512x512_80k_ade20k/fcn_r101-d8_512x512_80k_ade20k_20200615_014143-bc1809f7.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_512x512_80k_ade20k/fcn_r101-d8_512x512_80k_ade20k_20200615_014143.log.json)" - ], - [ - "FCN", - "R-50-D8", - "512x512", - 160000, - "-", - "-", - 36.105, - 38.078772436420934, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_512x512_160k_ade20k/fcn_r50-d8_512x512_160k_ade20k_20200615_100713-4edbc3b4.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_512x512_160k_ade20k/fcn_r50-d8_512x512_160k_ade20k_20200615_100713.log.json)" - ], - [ - "FCN", - "R-101-D8", - "512x512", 
- 160000, - "-", - "-", - 39.914, - 41.39843118160508, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_512x512_160k_ade20k/fcn_r101-d8_512x512_160k_ade20k_20200615_105816-fd192bd5.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_512x512_160k_ade20k/fcn_r101-d8_512x512_160k_ade20k_20200615_105816.log.json)" - ] - ] - ], - "cityscapes": [ - [ - [ - "FCN", - "R-50-D8", - "512x1024", - 40000, - "5.7", - 4.169686275718568, - 72.246, - 73.35990418338677, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608.log.json)" - ], - [ - "FCN", - "R-101-D8", - "512x1024", - 40000, - "9.2", - 2.6579467518998623, - 75.44999999999999, - 76.58052712300109, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_512x1024_40k_cityscapes/fcn_r101-d8_512x1024_40k_cityscapes_20200604_181852-a883d3a1.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_512x1024_40k_cityscapes/fcn_r101-d8_512x1024_40k_cityscapes_20200604_181852.log.json)" - ], - [ - "FCN", - "R-50-D8", - "769x769", - 40000, - "6.5", - 1.796476680257555, - 71.47099999999999, - 72.53595679063739, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_769x769_40k_cityscapes/fcn_r50-d8_769x769_40k_cityscapes_20200606_113104-977b5d02.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_769x769_40k_cityscapes/fcn_r50-d8_769x769_40k_cityscapes_20200606_113104.log.json)" - ], - [ - "FCN", - "R-101-D8", - "769x769", - 40000, - "10.4", - 1.1858280952855258, - 73.929, - 75.13723386002961, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_769x769_40k_cityscapes/fcn_r101-d8_769x769_40k_cityscapes_20200606_113208-7d4ab69c.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_769x769_40k_cityscapes/fcn_r101-d8_769x769_40k_cityscapes_20200606_113208.log.json)" - ], - [ - "FCN", - "R-50-D8", - "512x1024", - 80000, - "-", - "-", - 73.61, - 74.23620409061135, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_512x1024_80k_cityscapes/fcn_r50-d8_512x1024_80k_cityscapes_20200606_113019-03aa804d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_512x1024_80k_cityscapes/fcn_r50-d8_512x1024_80k_cityscapes_20200606_113019.log.json)" - ], - [ - "FCN", - "R-101-D8", - "512x1024", - 80000, - "-", - "-", - 75.13300000000001, - 75.93619310604196, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_512x1024_80k_cityscapes/fcn_r101-d8_512x1024_80k_cityscapes_20200606_113038-3fb937eb.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_512x1024_80k_cityscapes/fcn_r101-d8_512x1024_80k_cityscapes_20200606_113038.log.json)" - ], - [ - "FCN", - "R-50-D8", - "769x769", - 80000, - "-", - "-", - 72.643, - 73.31626041581089, - 
"[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_769x769_80k_cityscapes/fcn_r50-d8_769x769_80k_cityscapes_20200606_195749-f5caeabc.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_769x769_80k_cityscapes/fcn_r50-d8_769x769_80k_cityscapes_20200606_195749.log.json)" - ], - [ - "FCN", - "R-101-D8", - "769x769", - 80000, - "-", - "-", - 75.519, - 76.60857360886911, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_769x769_80k_cityscapes/fcn_r101-d8_769x769_80k_cityscapes_20200606_214354-45cbac68.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_769x769_80k_cityscapes/fcn_r101-d8_769x769_80k_cityscapes_20200606_214354.log.json)" - ] - ] - ], - "voc12aug": [ - [ - [ - "FCN", - "R-50-D8", - "512x512", - 20000, - "5.7", - 23.28063707693325, - 67.085, - 69.9383574378687, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_512x512_20k_voc12aug/fcn_r50-d8_512x512_20k_voc12aug_20200617_010715-52dc5306.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_512x512_20k_voc12aug/fcn_r50-d8_512x512_20k_voc12aug_20200617_010715.log.json)" - ], - [ - "FCN", - "R-101-D8", - "512x512", - 20000, - "9.2", - 14.80917380811037, - 71.16, - 73.56633022724682, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_512x512_20k_voc12aug/fcn_r101-d8_512x512_20k_voc12aug_20200617_010842-0bb4e798.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_512x512_20k_voc12aug/fcn_r101-d8_512x512_20k_voc12aug_20200617_010842.log.json)" - ], - [ - "FCN", - "R-50-D8", - "512x512", - 40000, - "-", - "-", - 66.971, - 69.03918575643368, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_512x512_40k_voc12aug/fcn_r50-d8_512x512_40k_voc12aug_20200613_161222-5e2dbf40.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r50-d8_512x512_40k_voc12aug/fcn_r50-d8_512x512_40k_voc12aug_20200613_161222.log.json)" - ], - [ - "FCN", - "R-101-D8", - "512x512", - 40000, - "-", - "-", - 69.90899999999999, - 72.3822015171163, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_512x512_40k_voc12aug/fcn_r101-d8_512x512_40k_voc12aug_20200613_161240-4c8bcefd.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/fcn/fcn_r101-d8_512x512_40k_voc12aug/fcn_r101-d8_512x512_40k_voc12aug_20200613_161240.log.json)" - ] - ] - ] - }, - "upernet": { - "cityscapes": [ - [ - [ - "UPerNet", - "R-50", - "512x1024", - 40000, - "6.4", - 4.250181636943019, - 77.096, - 78.3708722046974, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_512x1024_40k_cityscapes/upernet_r50_512x1024_40k_cityscapes_20200605_094827-aa54cb54.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_512x1024_40k_cityscapes/upernet_r50_512x1024_40k_cityscapes_20200605_094827.log.json)" - ], - [ - "UPerNet", - "R-101", - "512x1024", - 40000, - "7.4", - 3.7930291329191848, - 78.689, - 80.10934950511658, - 
"[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_512x1024_40k_cityscapes/upernet_r101_512x1024_40k_cityscapes_20200605_094933-ebce3b10.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_512x1024_40k_cityscapes/upernet_r101_512x1024_40k_cityscapes_20200605_094933.log.json)" - ], - [ - "UPerNet", - "R-50", - "769x769", - 40000, - "7.2", - 1.7640659185483825, - 77.97699999999999, - 79.70262909350413, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_769x769_40k_cityscapes/upernet_r50_769x769_40k_cityscapes_20200530_033048-92d21539.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_769x769_40k_cityscapes/upernet_r50_769x769_40k_cityscapes_20200530_033048.log.json)" - ], - [ - "UPerNet", - "R-101", - "769x769", - 40000, - "8.4", - 1.5620856953198976, - 79.03099999999999, - 80.76684306267266, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_769x769_40k_cityscapes/upernet_r101_769x769_40k_cityscapes_20200530_040819-83c95d01.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_769x769_40k_cityscapes/upernet_r101_769x769_40k_cityscapes_20200530_040819.log.json)" - ], - [ - "UPerNet", - "R-50", - "512x1024", - 80000, - "-", - "-", - 78.193, - 79.18874361591651, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_512x1024_80k_cityscapes/upernet_r50_512x1024_80k_cityscapes_20200607_052207-848beca8.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_512x1024_80k_cityscapes/upernet_r50_512x1024_80k_cityscapes_20200607_052207.log.json)" - ], - [ - "UPerNet", - "R-101", - "512x1024", - 80000, - "-", - "-", - 79.396, - 80.45737464738971, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_512x1024_80k_cityscapes/upernet_r101_512x1024_80k_cityscapes_20200607_002403-f05f2345.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_512x1024_80k_cityscapes/upernet_r101_512x1024_80k_cityscapes_20200607_002403.log.json)" - ], - [ - "UPerNet", - "R-50", - "769x769", - 80000, - "-", - "-", - 79.389, - 80.91628431360874, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_769x769_80k_cityscapes/upernet_r50_769x769_80k_cityscapes_20200607_005107-82ae7d15.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_769x769_80k_cityscapes/upernet_r50_769x769_80k_cityscapes_20200607_005107.log.json)" - ], - [ - "UPerNet", - "R-101", - "769x769", - 80000, - "-", - "-", - 80.096, - 81.49282902181865, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_769x769_80k_cityscapes/upernet_r101_769x769_80k_cityscapes_20200607_001014-082fc334.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_769x769_80k_cityscapes/upernet_r101_769x769_80k_cityscapes_20200607_001014.log.json)" - ] - ] - ], - "voc12aug": [ - [ - [ - "UPerNet", - "R-50", - "512x512", - 20000, - "6.4", - 23.173912855179744, - 74.823, - 76.3452508971145, - 
"[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_512x512_20k_voc12aug/upernet_r50_512x512_20k_voc12aug_20200617_165330-5b5890a7.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_512x512_20k_voc12aug/upernet_r50_512x512_20k_voc12aug_20200617_165330.log.json)" - ], - [ - "UPerNet", - "R-101", - "512x512", - 20000, - "7.5", - 19.980025806149488, - 77.096, - 78.28805607216208, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_512x512_20k_voc12aug/upernet_r101_512x512_20k_voc12aug_20200617_165629-f14e7f27.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_512x512_20k_voc12aug/upernet_r101_512x512_20k_voc12aug_20200617_165629.log.json)" - ], - [ - "UPerNet", - "R-50", - "512x512", - 40000, - "-", - "-", - 75.921, - 77.43852589851066, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_512x512_40k_voc12aug/upernet_r50_512x512_40k_voc12aug_20200613_162257-ca9bcc6b.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_512x512_40k_voc12aug/upernet_r50_512x512_40k_voc12aug_20200613_162257.log.json)" - ], - [ - "UPerNet", - "R-101", - "512x512", - 40000, - "-", - "-", - 77.432, - 78.55592324577675, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_512x512_40k_voc12aug/upernet_r101_512x512_40k_voc12aug_20200613_163549-e26476ac.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_512x512_40k_voc12aug/upernet_r101_512x512_40k_voc12aug_20200613_163549.log.json)" - ] - ] - ], - "ade20k": [ - [ - [ - "UPerNet", - "R-50", - "512x512", - 80000, - "8.1", - 23.404934213597443, - 40.704, - 41.80915610272295, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_512x512_80k_ade20k/upernet_r50_512x512_80k_ade20k_20200614_144127-ecc8377b.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_512x512_80k_ade20k/upernet_r50_512x512_80k_ade20k_20200614_144127.log.json)" - ], - [ - "UPerNet", - "R-101", - "512x512", - 80000, - "9.1", - 20.336544682582634, - 42.91, - 43.95794963214672, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_512x512_80k_ade20k/upernet_r101_512x512_80k_ade20k_20200614_185117-32e4db94.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_512x512_80k_ade20k/upernet_r101_512x512_80k_ade20k_20200614_185117.log.json)" - ], - [ - "UPerNet", - "R-50", - "512x512", - 160000, - "-", - "-", - 42.05, - 42.784926632807014, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_512x512_160k_ade20k/upernet_r50_512x512_160k_ade20k_20200615_184328-8534de8d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r50_512x512_160k_ade20k/upernet_r50_512x512_160k_ade20k_20200615_184328.log.json)" - ], - [ - "UPerNet", - "R-101", - "512x512", - 160000, - "-", - "-", - 43.824999999999996, - 44.84822175137515, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_512x512_160k_ade20k/upernet_r101_512x512_160k_ade20k_20200615_161951-91b32684.pth) 
| [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/upernet/upernet_r101_512x512_160k_ade20k/upernet_r101_512x512_160k_ade20k_20200615_161951.log.json)" - ] - ] - ] - }, - "encnet": { - "ade20k": [ - [ - [ - "encnet", - "R-50-D8", - "512x512", - 80000, - "10.1", - 22.81234693333879, - 39.53, - 41.174465044693534, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k_20200622_042412-44b46b04.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k-20200622_042412.log.json)" - ], - [ - "encnet", - "R-101-D8", - "512x512", - 80000, - "13.6", - 14.8713593833497, - 42.108000000000004, - 43.60970109562513, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k_20200622_101128-dd35e237.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k-20200622_101128.log.json)" - ], - [ - "encnet", - "R-50-D8", - "512x512", - 160000, - "-", - "-", - 40.096, - 41.71317203062112, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k_20200622_101059-b2db95e0.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k-20200622_101059.log.json)" - ], - [ - "encnet", - "R-101-D8", - "512x512", - 160000, - "-", - "-", - 42.61, - 44.01125617918497, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k_20200622_073348-7989641f.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k-20200622_073348.log.json)" - ] - ] - ], - "cityscapes": [ - [ - [ - "encnet", - "R-50-D8", - "512x1024", - 40000, - "8.6", - 4.579766763724604, - 75.672, - 77.08129779577173, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes_20200621_220958-68638a47.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes-20200621_220958.log.json)" - ], - [ - "encnet", - "R-101-D8", - "512x1024", - 40000, - "12.1", - 2.6579084094229293, - 75.81099999999999, - 77.20722229497062, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes_20200621_220933-35e0a3e8.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes-20200621_220933.log.json)" - ], - [ - "encnet", - "R-50-D8", - "769x769", - 40000, - "9.8", - 1.815523577456311, - 76.244, - 77.8544931191627, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes_20200621_220958-3bcd2884.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes-20200621_220958.log.json)" - ], - [ - "encnet", - "R-101-D8", - "769x769", - 40000, - "13.7", - 1.2586694727171592, - 74.248, - 76.2517491915298, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes_20200621_220933-2fafed55.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes-20200621_220933.log.json)" - ], - [ - "encnet", - "R-50-D8", - "512x1024", - 80000, - "-", - "-", - 77.93900000000001, - 79.12604738206694, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes_20200622_003554-fc5c5624.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes-20200622_003554.log.json)" - ], - [ - "encnet", - "R-101-D8", - "512x1024", - 80000, - "-", - "-", - 78.55499999999999, - 79.46808306901366, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes_20200622_003555-1de64bec.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes-20200622_003555.log.json)" - ], - [ - "encnet", - "R-50-D8", - "769x769", - 80000, - "-", - "-", - 77.444, - 78.71857972971966, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes_20200622_003554-55096dcb.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes-20200622_003554.log.json)" - ], - [ - "encnet", - "R-101-D8", - "769x769", - 80000, - "-", - "-", - 76.099, - 76.97183988185541, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes_20200622_003555-470ef79d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes-20200622_003555.log.json)" - ] - ] - ] - }, - "psanet": { - "voc12aug": [ - [ - [ - "PSANet", - "R-50-D8", - "512x512", - 20000, - "6.9", - 18.243332440478824, - 76.393, - 77.3406686160825, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_512x512_20k_voc12aug/psanet_r50-d8_512x512_20k_voc12aug_20200617_102413-2f1bbaa1.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_512x512_20k_voc12aug/psanet_r50-d8_512x512_20k_voc12aug_20200617_102413.log.json)" - ], - [ - "PSANet", - "R-101-D8", - "512x512", - 20000, - "10.4", - 12.62519076615176, - 77.90700000000001, - 79.30352112599553, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_512x512_20k_voc12aug/psanet_r101-d8_512x512_20k_voc12aug_20200617_110624-946fef11.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_512x512_20k_voc12aug/psanet_r101-d8_512x512_20k_voc12aug_20200617_110624.log.json)" - ], - [ - "PSANet", - "R-50-D8", - "512x512", - 40000, - "-", - "-", - 76.305, - 77.35064085992029, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_512x512_40k_voc12aug/psanet_r50-d8_512x512_40k_voc12aug_20200613_161946-f596afb5.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_512x512_40k_voc12aug/psanet_r50-d8_512x512_40k_voc12aug_20200613_161946.log.json)" - ], - [ - "PSANet", - "R-101-D8", - "512x512", - 40000, - "-", - "-", - 77.73400000000001, - 79.0523901742458, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_512x512_40k_voc12aug/psanet_r101-d8_512x512_40k_voc12aug_20200613_161946-1f560f9e.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_512x512_40k_voc12aug/psanet_r101-d8_512x512_40k_voc12aug_20200613_161946.log.json)" - ] - ] - ], - "cityscapes": [ - [ - [ - "PSANet", - "R-50-D8", - "512x1024", - 40000, - "7.0", - 3.1667705694500796, - 77.628, - 79.0422496865149, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_512x1024_40k_cityscapes/psanet_r50-d8_512x1024_40k_cityscapes_20200606_103117-99fac37c.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_512x1024_40k_cityscapes/psanet_r50-d8_512x1024_40k_cityscapes_20200606_103117.log.json)" - ], - [ - "PSANet", - "R-101-D8", - "512x1024", - 40000, - "10.5", - 2.2037021448307477, - 79.13900000000001, - 80.19378817152979, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_512x1024_40k_cityscapes/psanet_r101-d8_512x1024_40k_cityscapes_20200606_001418-27b9cfa7.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_512x1024_40k_cityscapes/psanet_r101-d8_512x1024_40k_cityscapes_20200606_001418.log.json)" - ], - [ - "PSANet", - "R-50-D8", - "769x769", - 40000, - "7.9", - 1.4027260879051224, - 77.991, - 79.63690034167004, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_769x769_40k_cityscapes/psanet_r50-d8_769x769_40k_cityscapes_20200530_033717-d5365506.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_769x769_40k_cityscapes/psanet_r50-d8_769x769_40k_cityscapes_20200530_033717.log.json)" - ], - [ - "PSANet", - "R-101-D8", - "769x769", - 40000, - "11.9", - 0.9842023985382289, - 78.432, - 80.26050149532226, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_769x769_40k_cityscapes/psanet_r101-d8_769x769_40k_cityscapes_20200530_035107-997da1e6.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_769x769_40k_cityscapes/psanet_r101-d8_769x769_40k_cityscapes_20200530_035107.log.json)" - ], - [ - "PSANet", - "R-50-D8", - "512x1024", - 80000, - "-", - "-", - 77.239, - 78.69336591221833, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_512x1024_80k_cityscapes/psanet_r50-d8_512x1024_80k_cityscapes_20200606_161842-ab60a24f.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_512x1024_80k_cityscapes/psanet_r50-d8_512x1024_80k_cityscapes_20200606_161842.log.json)" - ], - [ - "PSANet", - "R-101-D8", - "512x1024", - 80000, - "-", - "-", - 79.315, - 80.53461181920574, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_512x1024_80k_cityscapes/psanet_r101-d8_512x1024_80k_cityscapes_20200606_161823-0f73a169.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_512x1024_80k_cityscapes/psanet_r101-d8_512x1024_80k_cityscapes_20200606_161823.log.json)" - ], - [ - "PSANet", - "R-50-D8", - "769x769", - 80000, - "-", - "-", - 79.315, - 80.91271206157141, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_769x769_80k_cityscapes/psanet_r50-d8_769x769_80k_cityscapes_20200606_225134-fe42f49e.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_769x769_80k_cityscapes/psanet_r50-d8_769x769_80k_cityscapes_20200606_225134.log.json)" - ], - [ - "PSANet", - "R-101-D8", - "769x769", - 80000, - "-", - "-", - 79.687, - 80.88962393764702, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_769x769_80k_cityscapes/psanet_r101-d8_769x769_80k_cityscapes_20200606_214550-7665827b.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_769x769_80k_cityscapes/psanet_r101-d8_769x769_80k_cityscapes_20200606_214550.log.json)" - ] - ] - ], - "ade20k": [ - [ - [ - "PSANet", - "R-50-D8", - "512x512", - 80000, - "9.0", - 18.906812073042055, - 41.141, - 41.91306881085375, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_512x512_80k_ade20k/psanet_r50-d8_512x512_80k_ade20k_20200614_144141-835e4b97.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_512x512_80k_ade20k/psanet_r50-d8_512x512_80k_ade20k_20200614_144141.log.json)" - ], - [ - "PSANet", - "R-101-D8", - "512x512", - 80000, - "12.5", - 13.12564520230877, - 43.797999999999995, - 44.751736929040355, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_512x512_80k_ade20k/psanet_r101-d8_512x512_80k_ade20k_20200614_185117-1fab60d4.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_512x512_80k_ade20k/psanet_r101-d8_512x512_80k_ade20k_20200614_185117.log.json)" - ], - [ - "PSANet", - "R-50-D8", - "512x512", - 160000, - "-", - "-", - 41.668, - 42.950020936188984, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_512x512_160k_ade20k/psanet_r50-d8_512x512_160k_ade20k_20200615_161258-148077dd.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r50-d8_512x512_160k_ade20k/psanet_r50-d8_512x512_160k_ade20k_20200615_161258.log.json)" - ], - [ - "PSANet", - "R-101-D8", - "512x512", - 160000, - "-", - "-", - 43.742999999999995, - 45.37610124877713, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_512x512_160k_ade20k/psanet_r101-d8_512x512_160k_ade20k_20200615_161537-dbfa564c.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/psanet/psanet_r101-d8_512x512_160k_ade20k/psanet_r101-d8_512x512_160k_ade20k_20200615_161537.log.json)" - ] - ] - ] - }, - "danet": { - "voc12aug": [ - [ - [ - "DANet", - "R-50-D8", - "512x512", - 20000, - "6.5", - 20.943311686542472, - 74.455, - 75.68810367906634, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026-9e9e3ab3.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026.log.json)" - ], - [ - "DANet", - "R-101-D8", - "512x512", - 20000, - "9.9", - 13.758801929101844, - 76.024, - 77.22605579984322, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026-d48d23b2.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026.log.json)" - ], - [ - "DANet", - "R-50-D8", - "512x512", - 40000, - "-", - "-", - 76.371, - 77.29119104649632, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526-426e3a64.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526.log.json)" - ], - [ - "DANet", - "R-101-D8", - "512x512", - 40000, - "-", - "-", - 76.508, - 77.31718399039389, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031-788e232a.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031.log.json)" - ] - ] - ], - "ade20k": [ - [ - [ - "DANet", - "R-50-D8", - "512x512", - 80000, - "11.5", - 21.197953173726543, - 41.662, - 42.90219783063448, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125-edb18e08.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125.log.json)" - ], - [ - "DANet", - "R-101-D8", - "512x512", - 80000, - "15.0", - 14.176784169645225, - 43.645, - 45.19098849554861, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126-d0357c73.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126.log.json)" - ], - [ - "DANet", - "R-50-D8", - "512x512", - 160000, - "-", - "-", - 42.449999999999996, - 43.251880532863545, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340-9cb35dcd.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340.log.json)" - ], - [ - "DANet", - "R-101-D8", - "512x512", - 160000, - "-", - "-", - 44.171, - 45.016860694179314, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348-23bf12f9.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348.log.json)" - ] - ] - ], - "cityscapes": [ - [ - [ - "DANet", - "R-50-D8", - "512x1024", - 40000, - "7.4", - 2.655504792992914, - 78.741, - "-", - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324-c0dbfa5f.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324.log.json)" - ], - [ - "DANet", - "R-101-D8", - "512x1024", - 40000, - "10.9", - 1.9939886829099438, - 80.521, - "-", - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831-c57a7157.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831.log.json)" - ], - [ - "DANet", - "R-50-D8", - "769x769", - 40000, - "8.8", - 1.5557926799730137, - 78.88, - 80.61866776927825, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703-76681c60.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703.log.json)" - ], - [ - "DANet", - "R-101-D8", - "769x769", - 40000, - "12.8", - 1.0655867297959223, - 79.88199999999999, - 81.46525733416875, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717-dcb7fd4e.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717.log.json)" - ], - [ - "DANet", - "R-50-D8", - "512x1024", - 80000, - "-", - "-", - 79.336, - "-", - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029-2bfa2293.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029.log.json)" - ], - [ - "DANet", - "R-101-D8", - "512x1024", - 80000, - "-", - "-", - 80.413, - "-", - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918-955e6350.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918.log.json)" - ], - [ - "DANet", - "R-50-D8", - "769x769", - 80000, - "-", - "-", - 79.274, - 80.96441839831498, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954-495689b4.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954.log.json)" - ], - [ - "DANet", - "R-101-D8", - "769x769", - 80000, - "-", - "-", - 80.471, - 82.020171090948, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918-f3a929e7.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918.log.json)" - ] - ] - ] - }, - "hrnet": { - "voc12aug": [ - [ - [ - "FCN", - "HRNetV2p-W18-Small", - "512x512", - 20000, - "1.8", - 43.364505532130885, - 65.201, - 68.55284135943813, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20200617_224503-56e36088.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20200617_224503.log.json)" - ], - [ - "FCN", - "HRNetV2p-W18", - "512x512", - 20000, - "2.9", - 23.482760884011036, - 72.303, - 74.70589725240711, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503-488d45f7.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503.log.json)" - ], - [ - "FCN", - "HRNetV2p-W48", - "512x512", - 20000, - "6.2", - 22.047745500601465, - 75.87, - 78.57597654496765, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419-89de05cd.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419.log.json)" - ], - [ - "FCN", - "HRNetV2p-W18-Small", - "512x512", - 40000, - "-", - "-", - 66.61200000000001, - 70.0031319918366, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648-4f8d6e7f.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648.log.json)" - ], - [ - "FCN", - "HRNetV2p-W18", - "512x512", - 40000, - "-", - "-", - 72.904, - 75.58601750093821, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401-1b4b76cd.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401.log.json)" - ], - [ - "FCN", - "HRNetV2p-W48", - "512x512", - 40000, - "-", - "-", - 76.237, - 78.48754167864209, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111-1b0f18bc.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111.log.json)" - ] - ] - ], - "cityscapes": [ - [ - [ - "FCN", - "HRNetV2p-W18-Small", - "512x1024", - 40000, - "1.7", - 23.74297838183743, - 73.859, - 75.90997145624684, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216-93db27d0.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216.log.json)" - ], - [ - "FCN", - "HRNetV2p-W18", - "512x1024", - 40000, - "2.9", - 12.96853348364565, - 77.188, - 78.91665724639267, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216-f196fb4e.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216.log.json)" - ], - [ - "FCN", - "HRNetV2p-W48", - "512x1024", - 40000, - "6.2", - 6.421700443191522, - 78.483, - 79.69458922303686, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240-a989b146.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240.log.json)" - ], - [ - "FCN", - "HRNetV2p-W18-Small", - "512x1024", - 80000, - "-", - "-", - 75.306, - 77.47890927385332, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700-1462b75d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700.log.json)" - ], - [ - "FCN", - "HRNetV2p-W18", - "512x1024", - 80000, - "-", - "-", - 78.64999999999999, - 80.35059171130018, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255-4e7b345e.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255.log.json)" - ], - [ - "FCN", - "HRNetV2p-W48", - "512x1024", - 80000, - "-", - "-", - 79.928, - 80.71977327982115, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606-58ea95d6.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606.log.json)" - ], - [ - "FCN", - "HRNetV2p-W18-Small", - "512x1024", - 160000, - "-", - "-", - 76.312, - 78.31118288010825, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901.log.json)" - ], - [ - "FCN", - "HRNetV2p-W18", - "512x1024", - 160000, - "-", - "-", - 78.797, - 80.74329822797024, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822-221e4a4f.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822.log.json)" - ], - [ - "FCN", - "HRNetV2p-W48", - "512x1024", - 160000, - "-", - "-", - 80.651, - 81.92482068666172, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946.log.json)" - ] - ] - ], - "ade20k": [ - [ - [ - "FCN", - "HRNetV2p-W18-Small", - "512x512", - 80000, - "3.8", - 38.65539699852906, - 31.384, - 32.452806656988855, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345-77fc814a.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345.log.json)" - ], - [ - "FCN", - "HRNetV2p-W18", - "512x512", - 80000, - "4.9", - 22.569194335083992, - 35.515, - 36.804822066348805, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20200614_185145-66f20cb7.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20200614_185145.log.json)" - ], - [ - "FCN", - "HRNetV2p-W48", - "512x512", - 80000, - "8.2", - 21.234892591194093, - 41.897, - 43.26888294374513, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946-7ba5258d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946.log.json)" - ], - [ - "FCN", - "HRNetV2p-W18-Small", - "512x512", - 160000, - "-", - "-", - 32.995000000000005, - 34.547964211800654, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20200614_214413-870f65ac.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20200614_214413.log.json)" - ], - [ - "FCN", - 
"HRNetV2p-W18", - "512x512", - 160000, - "-", - "-", - 36.786, - 38.58485577782462, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426-ca961836.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426.log.json)" - ], - [ - "FCN", - "HRNetV2p-W48", - "512x512", - 160000, - "-", - "-", - 42.018, - 43.86047333076445, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407.log.json)" - ] - ] - ] - }, - "ann": { - "ade20k": [ - [ - [ - "ANN", - "R-50-D8", - "512x512", - 80000, - "9.1", - 21.01150654479224, - 41.008, - 42.299370248011755, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818-26f75e11.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818.log.json)" - ], - [ - "ANN", - "R-101-D8", - "512x512", - 80000, - "12.5", - 14.116439500308603, - 42.939, - 44.180116688803125, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818-c0153543.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818.log.json)" - ], - [ - "ANN", - "R-50-D8", - "512x512", - 160000, - "-", - "-", - 41.744, - 42.61939537832803, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733-892247bc.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733.log.json)" - ], - [ - "ANN", - "R-101-D8", - "512x512", - 160000, - "-", - "-", - 42.936, - 44.05749393457835, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733-955eb1ec.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733.log.json)" - ] - ] - ], - "cityscapes": [ - [ - [ - "ANN", - "R-50-D8", - "512x1024", - 40000, - "6.0", - 3.7066015347562153, - 77.402, - 78.56666466963291, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211-049fc292.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211.log.json)" - ], - [ - "ANN", - "R-101-D8", - "512x1024", - 40000, - "9.5", - 2.5468121299522504, - 76.553, - 78.85000230335912, - 
"[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243-adf6eece.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243.log.json)" - ], - [ - "ANN", - "R-50-D8", - "769x769", - 40000, - "6.8", - 1.6951337367703907, - 78.89399999999999, - 80.45833256780746, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712-2b46b04d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712.log.json)" - ], - [ - "ANN", - "R-101-D8", - "769x769", - 40000, - "10.7", - 1.1484480822281227, - 79.325, - 80.94411938511638, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720-059bff28.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720.log.json)" - ], - [ - "ANN", - "R-50-D8", - "512x1024", - 80000, - "-", - "-", - 77.345, - 78.65222072634322, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911-5a9ad545.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911.log.json)" - ], - [ - "ANN", - "R-101-D8", - "512x1024", - 80000, - "-", - "-", - 77.137, - 78.81361594500169, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728-aceccc6e.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728.log.json)" - ], - [ - "ANN", - "R-50-D8", - "769x769", - 80000, - "-", - "-", - 78.879, - 80.5665089108356, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426-cc7ff323.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426.log.json)" - ], - [ - "ANN", - "R-101-D8", - "769x769", - 80000, - "-", - "-", - 78.803, - 80.34287446616453, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713-a9d4be8d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713.log.json)" - ] - ] - ], - "voc12aug": [ - [ - [ - "ANN", - "R-50-D8", - "512x512", - 20000, - "6.0", - 20.919551932584206, - 74.86, - 76.12674212435266, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246-dfcb1c62.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246.log.json)" - ], - [ - "ANN", - "R-101-D8", - "512x512", - 20000, - "9.5", - 13.944150769190673, - 77.47, - 78.69711736662727, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246-2fad0042.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246.log.json)" - ], - [ - "ANN", - "R-50-D8", - "512x512", - 40000, - "-", - "-", - 76.556, - 77.5139251733015, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314-b5dac322.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314.log.json)" - ], - [ - "ANN", - "R-101-D8", - "512x512", - 40000, - "-", - "-", - 76.69500000000001, - 78.05579776330663, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314-bd205bbe.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314.log.json)" - ] - ] - ] - }, - "pspnet": { - "ade20k": [ - [ - [ - "PSPNet", - "R-50-D8", - "512x512", - 80000, - "8.5", - 23.526579373672153, - 41.134, - 41.941132390638955, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128-15a8b914.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128.log.json)" - ], - [ - "PSPNet", - "R-101-D8", - "512x512", - 80000, - "12.0", - 15.301938618847755, - 43.57, - 44.354975719492394, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423-b6e782f0.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423.log.json)" - ], - [ - "PSPNet", - "R-50-D8", - "512x512", - 160000, - "-", - "-", - 42.477, - 43.441892719742064, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358.log.json)" - ], - [ - "PSPNet", - "R-101-D8", - "512x512", - 160000, - "-", - "-", - 44.39, - 45.34825070704653, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650.log.json)" 
- ] - ] - ], - "cityscapes": [ - [ - [ - "PSPNet", - "R-50-D8", - "512x1024", - 40000, - "6.1", - 4.072768293326251, - 77.848, - 79.18377782829393, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338.log.json)" - ], - [ - "PSPNet", - "R-101-D8", - "512x1024", - 40000, - "9.6", - 2.6817753401497195, - 78.34, - 79.74414521564499, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751.log.json)" - ], - [ - "PSPNet", - "R-50-D8", - "769x769", - 40000, - "6.9", - 1.7590560538055864, - 78.262, - 79.88301952959716, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725-86638686.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725.log.json)" - ], - [ - "PSPNet", - "R-101-D8", - "769x769", - 40000, - "10.9", - 1.1539857289832562, - 79.082, - 80.2847015735947, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753-61c6f5be.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753.log.json)" - ], - [ - "PSPNet", - "R-50-D8", - "512x1024", - 80000, - "-", - "-", - 78.55199999999999, - 79.79089188640063, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131.log.json)" - ], - [ - "PSPNet", - "R-101-D8", - "512x1024", - 80000, - "-", - "-", - 79.756, - 81.01164255858869, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211.log.json)" - ], - [ - "PSPNet", - "R-50-D8", - "769x769", - 80000, - "-", - "-", - 79.58800000000001, - 80.68588581173638, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121-5ccf03dd.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121.log.json)" - ], - 
[ - "PSPNet", - "R-101-D8", - "769x769", - 80000, - "-", - "-", - 79.77499999999999, - 81.05734239329955, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055-dba412fa.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055.log.json)" - ] - ] - ], - "voc12aug": [ - [ - [ - "PSPNet", - "R-50-D8", - "512x512", - 20000, - "6.1", - 23.594295286990285, - 76.778, - 77.61449930304435, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958-ed5dfbd9.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958.log.json)" - ], - [ - "PSPNet", - "R-101-D8", - "512x512", - 20000, - "9.6", - 15.016859227435978, - 78.472, - 79.24767235924098, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003-4aef3c9a.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003.log.json)" - ], - [ - "PSPNet", - "R-50-D8", - "512x512", - 40000, - "-", - "-", - 77.294, - 78.48376581837772, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222-ae9c1b8c.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222.log.json)" - ], - [ - "PSPNet", - "R-101-D8", - "512x512", - 40000, - "-", - "-", - 78.524, - 79.56722327765866, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222-bc933b18.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222.log.json)" - ] - ] - ] - }, - "deeplabv3": { - "voc12aug": [ - [ - [ - "DeepLabV3", - "R-50-D8", - "512x512", - 20000, - "6.1", - 13.882586968538902, - 76.17, - 77.42428903363798, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906-596905ef.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906.log.json)" - ], - [ - "DeepLabV3", - "R-101-D8", - "512x512", - 20000, - "9.6", - 9.81331369081087, - 78.704, - 79.9523799897917, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932-8d13832f.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932.log.json)" 
- ], - [ - "DeepLabV3", - "R-50-D8", - "512x512", - 40000, - "-", - "-", - 77.676, - 78.78389817782097, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546-2ae96e7e.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546.log.json)" - ], - [ - "DeepLabV3", - "R-101-D8", - "512x512", - 40000, - "-", - "-", - 77.923, - 79.17787250140825, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432-0017d784.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432.log.json)" - ] - ] - ], - "cityscapes": [ - [ - [ - "DeepLabV3", - "R-50-D8", - "512x1024", - 40000, - "6.1", - 2.57047659861635, - 79.091, - 80.4506523590434, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449-acadc2f8.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449.log.json)" - ], - [ - "DeepLabV3", - "R-101-D8", - "512x1024", - 40000, - "9.6", - 1.9222440928636317, - 77.121, - 79.61407891260694, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241-7fd3f799.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241.log.json)" - ], - [ - "DeepLabV3", - "R-50-D8", - "769x769", - 40000, - "6.9", - 1.1119590479409436, - 78.581, - 79.89433614719104, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723-7eda553c.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723.log.json)" - ], - [ - "DeepLabV3", - "R-101-D8", - "769x769", - 40000, - "10.9", - 0.832582701195375, - 79.27300000000001, - 80.11177730128428, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809-c64f889f.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809.log.json)" - ], - [ - "DeepLabV3", - "R-50-D8", - "512x1024", - 80000, - "-", - "-", - 79.31700000000001, - 80.56867900987751, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404-b92cfdd4.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404.log.json)" - ], - [ - "DeepLabV3", - "R-101-D8", - "512x1024", - 80000, - "-", - "-", - 80.19500000000001, - 81.21365141510776, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503-9e428899.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503.log.json)" - ], - [ - "DeepLabV3", - "R-50-D8", - "769x769", - 80000, - "-", - "-", - 79.893, - 81.0599984851973, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338-788d6228.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338.log.json)" - ], - [ - "DeepLabV3", - "R-101-D8", - "769x769", - 80000, - "-", - "-", - 79.668, - 80.81226045958836, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353-60e95418.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353.log.json)" - ] - ] - ], - "ade20k": [ - [ - [ - "DeepLabV3", - "R-50-D8", - "512x512", - 80000, - "8.9", - 14.763588319372595, - 42.422, - 43.27846378978279, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028-0bb3f844.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028.log.json)" - ], - [ - "DeepLabV3", - "R-101-D8", - "512x512", - 80000, - "12.4", - 10.144087811258307, - 44.080999999999996, - 45.19313139034226, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256-d89c7fa4.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256.log.json)" - ], - [ - "DeepLabV3", - "R-50-D8", - "512x512", - 160000, - "-", - "-", - 42.657000000000004, - 44.08872105809725, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227-5d0ee427.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227.log.json)" - ], - [ - "DeepLabV3", - "R-101-D8", - "512x512", - 160000, - "-", - "-", - 44.999, - 46.65804362786369, - 
"[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816-b1f72b3b.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816.log.json)" - ] - ] - ] - }, - "nonlocal_net": { - "ade20k": [ - [ - [ - "NonLocal", - "R-50-D8", - "512x512", - 80000, - "9.1", - 21.37048896225747, - 40.752, - 42.053557458158075, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801-5ae0aa33.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801.log.json)" - ], - [ - "NonLocal", - "R-101-D8", - "512x512", - 80000, - "12.6", - 13.965079302337752, - 42.896, - 44.26894963193766, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758-24105919.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758.log.json)" - ], - [ - "NonLocal", - "R-50-D8", - "512x512", - 160000, - "-", - "-", - 42.028, - 43.03561642742581, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410-baef45e3.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410.log.json)" - ], - [ - "NonLocal", - "R-101-D8", - "512x512", - 160000, - "-", - "-", - 43.361, - 44.82970596168541, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20200616_003422-affd0f8d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20200616_003422.log.json)" - ] - ] - ], - "voc12aug": [ - [ - [ - "NonLocal", - "R-50-D8", - "512x512", - 20000, - "6.4", - 21.213895119736144, - 76.199, - 77.11520756528137, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613-07f2a57c.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613.log.json)" - ], - [ - "NonLocal", - "R-101-D8", - "512x512", - 20000, - "9.8", - 14.009331593316489, - 78.146, - 78.86433067761453, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615-948c68ab.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615.log.json)" - ], - [ - "NonLocal", - 
"R-50-D8", - "512x512", - 40000, - "-", - "-", - 76.646, - 77.47020448125416, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028-0139d4a9.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028.log.json)" - ], - [ - "NonLocal", - "R-101-D8", - "512x512", - 40000, - "-", - "-", - 78.27300000000001, - 79.11788410243086, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028-7e5ff470.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028.log.json)" - ] - ] - ], - "cityscapes": [ - [ - [ - "NonLocal", - "R-50-D8", - "512x1024", - 40000, - "7.4", - 2.7236662742933437, - 78.237, - "-", - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748-c75e81e3.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748.log.json)" - ], - [ - "NonLocal", - "R-101-D8", - "512x1024", - 40000, - "10.9", - 1.9480966751075284, - 78.657, - "-", - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748-d63729fa.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748.log.json)" - ], - [ - "NonLocal", - "R-50-D8", - "769x769", - 40000, - "8.9", - 1.5246259413816563, - 78.327, - 79.92096670245425, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243-82ef6749.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243.log.json)" - ], - [ - "NonLocal", - "R-101-D8", - "769x769", - 40000, - "12.8", - 1.0477751460724616, - 78.569, - 80.29003703614515, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348-8fe9a9dc.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348.log.json)" - ], - [ - "NonLocal", - "R-50-D8", - "512x1024", - 80000, - "-", - "-", - 78.009, - "-", - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518-d6839fae.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518.log.json)" - ], - [ - "NonLocal", - "R-101-D8", - "512x1024", - 80000, - "-", - "-", - 78.93, - "-", - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411-32700183.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411.log.json)" - ], - [ - "NonLocal", - "R-50-D8", - "769x769", - 80000, - "-", - "-", - 79.052, - 80.67913947439877, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506-1f9792f6.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506.log.json)" - ], - [ - "NonLocal", - "R-101-D8", - "769x769", - 80000, - "-", - "-", - 79.4, - 80.85278857807543, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428-0e1fa4f9.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428.log.json)" - ] - ] - ] - }, - "gcnet": { - "voc12aug": [ - [ - [ - "GCNet", - "R-50-D8", - "512x512", - 20000, - "5.8", - 23.350259534912006, - 76.42099999999999, - 77.50740243914798, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_512x512_20k_voc12aug/gcnet_r50-d8_512x512_20k_voc12aug_20200617_165701-3cbfdab1.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_512x512_20k_voc12aug/gcnet_r50-d8_512x512_20k_voc12aug_20200617_165701.log.json)" - ], - [ - "GCNet", - "R-101-D8", - "512x512", - 20000, - "9.2", - 14.799942609024914, - 77.40700000000001, - 78.56005567821165, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_512x512_20k_voc12aug/gcnet_r101-d8_512x512_20k_voc12aug_20200617_165713-6c720aa9.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_512x512_20k_voc12aug/gcnet_r101-d8_512x512_20k_voc12aug_20200617_165713.log.json)" - ], - [ - "GCNet", - "R-50-D8", - "512x512", - 40000, - "-", - "-", - 76.238, - 77.63464439678829, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_512x512_40k_voc12aug/gcnet_r50-d8_512x512_40k_voc12aug_20200613_195105-9797336d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_512x512_40k_voc12aug/gcnet_r50-d8_512x512_40k_voc12aug_20200613_195105.log.json)" - ], - [ - "GCNet", - "R-101-D8", - "512x512", - 40000, - "-", - "-", - 77.84299999999999, - 78.59489046439079, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_512x512_40k_voc12aug/gcnet_r101-d8_512x512_40k_voc12aug_20200613_185806-1e38208d.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_512x512_40k_voc12aug/gcnet_r101-d8_512x512_40k_voc12aug_20200613_185806.log.json)" - ] - ] - ], - "ade20k": [ - [ - [ - "GCNet", - "R-50-D8", - "512x512", - 80000, - "8.5", - 23.37990361060126, - 41.465999999999994, - 42.853494172834885, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_512x512_80k_ade20k/gcnet_r50-d8_512x512_80k_ade20k_20200614_185146-91a6da41.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_512x512_80k_ade20k/gcnet_r50-d8_512x512_80k_ade20k_20200614_185146.log.json)" - ], - [ - "GCNet", - "R-101-D8", - "512x512", - 80000, - "12.0", - 15.198333955746829, - 42.824, - 44.54431618918491, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_512x512_80k_ade20k/gcnet_r101-d8_512x512_80k_ade20k_20200615_020811-c3fcb6dd.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_512x512_80k_ade20k/gcnet_r101-d8_512x512_80k_ade20k_20200615_020811.log.json)" - ], - [ - "GCNet", - "R-50-D8", - "512x512", - 160000, - "-", - "-", - 42.367, - 43.51941132800723, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_512x512_160k_ade20k/gcnet_r50-d8_512x512_160k_ade20k_20200615_224122-d95f3e1f.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_512x512_160k_ade20k/gcnet_r50-d8_512x512_160k_ade20k_20200615_224122.log.json)" - ], - [ - "GCNet", - "R-101-D8", - "512x512", - 160000, - "-", - "-", - 43.686, - 45.21077897100608, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_512x512_160k_ade20k/gcnet_r101-d8_512x512_160k_ade20k_20200615_225406-615528d7.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_512x512_160k_ade20k/gcnet_r101-d8_512x512_160k_ade20k_20200615_225406.log.json)" - ] - ] - ], - "cityscapes": [ - [ - [ - "GCNet", - "R-50-D8", - "512x1024", - 40000, - "5.8", - 3.9294375140356674, - 77.691, - 78.55901060780846, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes/gcnet_r50-d8_512x1024_40k_cityscapes_20200618_074436-4b0fd17b.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes/gcnet_r50-d8_512x1024_40k_cityscapes_20200618_074436.log.json)" - ], - [ - "GCNet", - "R-101-D8", - "512x1024", - 40000, - "9.2", - 2.613929250881175, - 78.276, - 79.34154953801408, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes/gcnet_r101-d8_512x1024_40k_cityscapes_20200618_074436-5e62567f.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes/gcnet_r101-d8_512x1024_40k_cityscapes_20200618_074436.log.json)" - ], - [ - "GCNet", - "R-50-D8", - "769x769", - 40000, - "6.5", - 1.6665314351879814, - 78.117, - 80.08636386919896, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_769x769_40k_cityscapes/gcnet_r50-d8_769x769_40k_cityscapes_20200618_182814-a26f4471.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_769x769_40k_cityscapes/gcnet_r50-d8_769x769_40k_cityscapes_20200618_182814.log.json)" - ], - [ - "GCNet", - "R-101-D8", - "769x769", - 40000, - "10.5", - 1.130548704280006, - 78.949, - 80.70740508232963, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_769x769_40k_cityscapes/gcnet_r101-d8_769x769_40k_cityscapes_20200619_092550-ca4f0a84.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_769x769_40k_cityscapes/gcnet_r101-d8_769x769_40k_cityscapes_20200619_092550.log.json)" - ], - [ - "GCNet", - "R-50-D8", - "512x1024", - 80000, - "-", - "-", - 78.481, - 80.00715692663934, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes/gcnet_r50-d8_512x1024_80k_cityscapes_20200618_074450-ef8f069b.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes/gcnet_r50-d8_512x1024_80k_cityscapes_20200618_074450.log.json)" - ], - [ - "GCNet", - "R-101-D8", - "512x1024", - 80000, - "-", - "-", - 79.02900000000001, - 79.8389342161561, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes/gcnet_r101-d8_512x1024_80k_cityscapes_20200618_074450-778ebf69.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes/gcnet_r101-d8_512x1024_80k_cityscapes_20200618_074450.log.json)" - ], - [ - "GCNet", - "R-50-D8", - "769x769", - 80000, - "-", - "-", - 78.682, - 80.66434566958863, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_769x769_80k_cityscapes/gcnet_r50-d8_769x769_80k_cityscapes_20200619_092516-4839565b.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r50-d8_769x769_80k_cityscapes/gcnet_r50-d8_769x769_80k_cityscapes_20200619_092516.log.json)" - ], - [ - "GCNet", - "R-101-D8", - "769x769", - 80000, - "-", - "-", - 79.184, - 80.70740508232963, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_769x769_80k_cityscapes/gcnet_r101-d8_769x769_80k_cityscapes_20200619_092628-8e043423.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/gcnet/gcnet_r101-d8_769x769_80k_cityscapes/gcnet_r101-d8_769x769_80k_cityscapes_20200619_092628.log.json)" - ] - ] - ] - }, - "deeplabv3plus": { - "ade20k": [ - [ - [ - "DeepLabV3+", - "R-50-D8", - "512x512", - 80000, - "10.6", - 21.009967570414005, - 42.725, - 43.750872665309245, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k/deeplabv3plus_r50-d8_512x512_80k_ade20k_20200614_185028-bf1400d8.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k/deeplabv3plus_r50-d8_512x512_80k_ade20k_20200614_185028.log.json)" - ], - [ - "DeepLabV3+", - "R-101-D8", - "512x512", - 80000, - "14.1", - 14.156578683381744, - 44.604, - 46.057602920856496, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139-d5730af7.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139.log.json)" - ], - [ - "DeepLabV3+", - "R-50-D8", - "512x512", - 160000, - "-", - "-", - 43.952999999999996, - 44.9257356479825, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k/deeplabv3plus_r50-d8_512x512_160k_ade20k_20200615_124504-6135c7e0.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k/deeplabv3plus_r50-d8_512x512_160k_ade20k_20200615_124504.log.json)" - ], - [ - "DeepLabV3+", - "R-101-D8", - "512x512", - 160000, - "-", - "-", - 45.467999999999996, - 46.35142741219229, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k/deeplabv3plus_r101-d8_512x512_160k_ade20k_20200615_123232-38ed86bb.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k/deeplabv3plus_r101-d8_512x512_160k_ade20k_20200615_123232.log.json)" - ] - ] - ], - "voc12aug": [ - [ - [ - "DeepLabV3+", - "R-50-D8", - "512x512", - 20000, - "7.6", - 20.995826216517777, - 75.932, - 77.49501357998696, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug/deeplabv3plus_r50-d8_512x512_20k_voc12aug_20200617_102323-aad58ef1.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug/deeplabv3plus_r50-d8_512x512_20k_voc12aug_20200617_102323.log.json)" - ], - [ - "DeepLabV3+", - "R-101-D8", - "512x512", - 20000, - "11.0", - 13.877644753051397, - 77.216, - 78.59404066425819, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug/deeplabv3plus_r101-d8_512x512_20k_voc12aug_20200617_102345-c7ff3d56.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug/deeplabv3plus_r101-d8_512x512_20k_voc12aug_20200617_102345.log.json)" - ], - [ - "DeepLabV3+", - "R-50-D8", - "512x512", - 40000, - "-", - "-", - 76.80799999999999, - 77.56956435172417, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug/deeplabv3plus_r50-d8_512x512_40k_voc12aug_20200613_161759-e1b43aa9.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug/deeplabv3plus_r50-d8_512x512_40k_voc12aug_20200613_161759.log.json)" - ], - [ - "DeepLabV3+", - "R-101-D8", - "512x512", - 40000, - "-", - "-", - 78.618, - 79.5312727643948, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug/deeplabv3plus_r101-d8_512x512_40k_voc12aug_20200613_205333-faf03387.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug/deeplabv3plus_r101-d8_512x512_40k_voc12aug_20200613_205333.log.json)" - ] - ] - ], - "cityscapes": [ - [ - [ - "DeepLabV3+", - "R-50-D8", - "512x1024", - 40000, - "7.5", - 
3.937852781596224, - 79.606, - 81.0126987140963, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610-d222ffcd.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610.log.json)" - ], - [ - "DeepLabV3+", - "R-101-D8", - "512x1024", - 40000, - "11.0", - 2.6029196398088135, - 80.208, - 81.81580429286755, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614-3769eecf.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614.log.json)" - ], - [ - "DeepLabV3+", - "R-50-D8", - "769x769", - 40000, - "8.5", - 1.7219797309503193, - 78.972, - 80.46092552803746, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143-1dcb0e3c.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143.log.json)" - ], - [ - "DeepLabV3+", - "R-101-D8", - "769x769", - 40000, - "12.5", - 1.1546806682489152, - 79.461, - 80.5005593465169, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes/deeplabv3plus_r101-d8_769x769_40k_cityscapes_20200606_114304-ff414b9e.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes/deeplabv3plus_r101-d8_769x769_40k_cityscapes_20200606_114304.log.json)" - ], - [ - "DeepLabV3+", - "R-50-D8", - "512x1024", - 80000, - "-", - "-", - 80.08800000000001, - 81.13450865498024, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049.log.json)" - ], - [ - "DeepLabV3+", - "R-101-D8", - "512x1024", - 80000, - "-", - "-", - 80.972, - 82.02915734982798, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143.log.json)" - ], - [ - "DeepLabV3+", - "R-50-D8", - "769x769", - 80000, - "-", - "-", - 79.827, - 81.47591334418544, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth) | 
[log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233.log.json)" - ], - [ - "DeepLabV3+", - "R-101-D8", - "769x769", - 80000, - "-", - "-", - 80.97999999999999, - 82.17610990719812, - "[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmsegmentation/models/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405.log.json)" - ] - ] - ] - } -} diff --git a/docs/model_zoo.md b/docs/model_zoo.md deleted file mode 100644 index 8f39928e76..0000000000 --- a/docs/model_zoo.md +++ /dev/null @@ -1,107 +0,0 @@ -# Benchmark and Model Zoo - -## Common settings - -* We use distributed training with 4 GPUs by default. -* All pytorch-style pretrained backbones on ImageNet are train by ourselves, with the same procedure in the [paper](https://arxiv.org/pdf/1812.01187.pdf). -Our ResNet style backbone are based on ResNetV1c variant, where the 7x7 conv in the input stem is replaced with three 3x3 convs. -* For the consistency across different hardwares, we report the GPU memory as the maximum value of `torch.cuda.max_memory_allocated()` for all 4 GPUs with `torch.backends.cudnn.benchmark=False`. - Note that this value is usually less than what `nvidia-smi` shows. -* We report the inference time as the total time of network forwarding and post-processing, excluding the data loading time. -Results are obtained with the script `tools/benchmark.py` which computes the average time on 200 images with `torch.backends.cudnn.benchmark=False`. -* There are two inference modes in this framework. - * `slide` mode: The `test_cfg` will be like `dict(mode='slide', crop_size=(769, 769), stride=(513, 513))`. - - In this mode, multiple patches will be cropped from input image, passed into network individually. - The crop size and stride between patches are specified by `crop_size` and `stride`. - The overlapping area will be merged by average - * `whole` mode: The `test_cfg` will be like `dict(mode='whole')`. - - In this mode, the whole imaged will be passed into network directly. -* For input size of 8x+1 (e.g. 769), `align_corner=True` is adopted as a traditional practice. -Otherwise, for input size of 8x (e.g. 512, 1024), `align_corner=False` is adopted. - -## Baselines - -### FCN - -Please refer to [FCN](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fcn) for details. - -### PSPNet - -Please refer to [PSPNet](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/pspnet) for details. - -### DeepLabV3 - -Please refer to [DeepLabV3](https://github.com/open-mmlab/mmsegmentatio/tree/master/configs/deeplabv3) for details. - -### PSANet - -Please refer to [PSANet](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/psanet) for details. - -### DeepLabV3+ - -Please refer to [DeepLabV3+](https://github.com/open-mmlab/mmsegmentatio/tree/master/configs/deeplabv3plus) for details. - -### UPerNet - -Please refer to [UPerNet](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/upernet) for details. - -### NonLocal Net - -Please refer to [NonLocal Net](https://github.com/open-mmlab/mmsegmentatio/tree/master/configs/nlnet) for details. 
- -### CCNet - -Please refer to [CCNet](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/ccnet) for details. - -### DANet - -Please refer to [DANet](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/danet) for details. - -### HRNet - -Please refer to [HRNet](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/hrnet) for details. - -### GCNet - -Please refer to [GCNet](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/gcnet) for details. - -### ANN - -Please refer to [ANN](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/ann) for details. - -### OCRNet - -Please refer to [OCRNet](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/ocrnet) for details. - -## Speed benchmark - -### Hardware - -- 8 NVIDIA Tesla V100 (32G) GPUs -- Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz - -### Software environment - -- Python 3.7 -- PyTorch 1.5 -- CUDA 10.1 -- CUDNN 7.6.03 -- NCCL 2.4.08 - -### Training speed - -For fair comparison, we benchmark all implementations with ResNet-101V1c. -The input size is fixed to 1024x512 with batch size 2. - -The training speed is reported as followed, in terms of second per iter (s/iter). The lower, the better. - -| Implementation | PSPNet (s/iter) | DeepLabV3+ (s/iter) | -|----------------|-----------------|---------------------| -| [MMSegmentation](https://github.com/open-mmlab/mmsegmentation) | **0.83** | **0.85** | -| [SegmenTron](https://github.com/LikeLy-Journey/SegmenTron) | 0.84 | 0.85 | -| [CASILVision](https://github.com/CSAILVision/semantic-segmentation-pytorch) | 1.15 | N/A | -| [vedaseg](https://github.com/Media-Smart/vedaseg) | 0.95 | 1.25 | - -Note: The output stride of DeepLabV3+ is 8. diff --git a/docs/tutorials/data_pipeline.md b/docs/tutorials/data_pipeline.md deleted file mode 100644 index 825260d32c..0000000000 --- a/docs/tutorials/data_pipeline.md +++ /dev/null @@ -1,156 +0,0 @@ -# 2. Custom Data Pipelines - -## Design of Data pipelines - -Following typical conventions, we use `Dataset` and `DataLoader` for data loading -with multiple workers. `Dataset` returns a dict of data items corresponding -the arguments of models' forward method. -Since the data in semantic segmentation may not be the same size, -we introduce a new `DataContainer` type in MMCV to help collect and distribute -data of different size. -See [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py) for more details. - -The data preparation pipeline and the dataset is decomposed. Usually a dataset -defines how to process the annotations and a data pipeline defines all the steps to prepare a data dict. -A pipeline consists of a sequence of operations. Each operation takes a dict as input and also output a dict for the next transform. - -The operations are categorized into data loading, pre-processing, formatting and test-time augmentation. - -Here is an pipeline example for PSPNet. 
- -```python -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (512, 1024) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -``` - -For each operation, we list the related dict fields that are added/updated/removed. - -### Data loading - -`LoadImageFromFile` -- add: img, img_shape, ori_shape - -`LoadAnnotations` -- add: gt_semantic_seg, seg_fields - -### Pre-processing - -`Resize` -- add: scale, scale_idx, pad_shape, scale_factor, keep_ratio -- update: img, img_shape, *seg_fields - -`RandomFlip` -- add: flip -- update: img, *seg_fields - -`Pad` -- add: pad_fixed_size, pad_size_divisor -- update: img, pad_shape, *seg_fields - -`RandomCrop` -- update: img, pad_shape, *seg_fields - -`Normalize` -- add: img_norm_cfg -- update: img - -`SegRescale` -- update: gt_semantic_seg - -`PhotoMetricDistortion` -- update: img - -### Formatting - -`ToTensor` -- update: specified by `keys`. - -`ImageToTensor` -- update: specified by `keys`. - -`Transpose` -- update: specified by `keys`. - -`ToDataContainer` -- update: specified by `fields`. - -`DefaultFormatBundle` -- update: img, gt_semantic_seg - -`Collect` -- add: img_meta (the keys of img_meta is specified by `meta_keys`) -- remove: all other keys except for those specified by `keys` - -### Test time augmentation - -`MultiScaleFlipAug` - -## Extend and use custom pipelines - -1. Write a new pipeline in any file, e.g., `my_pipeline.py`. It takes a dict as input and return a dict. - - ```python - from mmseg.datasets import PIPELINES - - @PIPELINES.register_module() - class MyTransform: - - def __call__(self, results): - results['dummy'] = True - return results - ``` - -2. Import the new class. - - ```python - from .my_pipeline import MyTransform - ``` - -3. Use it in config files. - - ```python - img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - crop_size = (512, 1024) - train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='MyTransform'), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), - ] - ``` diff --git a/docs/tutorials/index.rst b/docs/tutorials/index.rst deleted file mode 100644 index 778191bb43..0000000000 --- a/docs/tutorials/index.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. 
toctree:: - :maxdepth: 2 - - new_dataset.md - data_pipeline.md - new_modules.md diff --git a/docs/tutorials/new_dataset.md b/docs/tutorials/new_dataset.md deleted file mode 100644 index 0ad1019e0e..0000000000 --- a/docs/tutorials/new_dataset.md +++ /dev/null @@ -1,161 +0,0 @@ -# 1. Adding New Dataset - -## Customize datasets by reorganizing data - -The simplest way is to convert your dataset to organize your data into folders. - -An example of file structure is as followed. -``` -├── data -│ ├── my_dataset -│ │ ├── img_dir -│ │ │ ├── train -│ │ │ │ ├── xxx{img_suffix} -│ │ │ │ ├── yyy{img_suffix} -│ │ │ │ ├── zzz{img_suffix} -│ │ │ ├── val -│ │ ├── ann_dir -│ │ │ ├── train -│ │ │ │ ├── xxx{seg_map_suffix} -│ │ │ │ ├── yyy{seg_map_suffix} -│ │ │ │ ├── zzz{seg_map_suffix} -│ │ │ ├── val - -``` -A training pair will consist of the files with same suffix in img_dir/ann_dir. - -If `split` argument is given, only part of the files in img_dir/ann_dir will be loaded. -We may specify the prefix of files we would like to be included in the split txt. - -More specifically, for a split txt like following, -``` -xxx -zzz -``` -Only -`data/my_dataset/img_dir/train/xxx{img_suffix}`, -`data/my_dataset/img_dir/train/zzz{img_suffix}`, -`data/my_dataset/ann_dir/train/xxx{seg_map_suffix}`, -`data/my_dataset/ann_dir/train/zzz{seg_map_suffix}` will be loaded. - -## Customize datasets by mixing dataset - -MMSegmentation also supports to mix dataset for training. -Currently it supports to concat and repeat datasets. - -### Repeat dataset - -We use `RepeatDataset` as wrapper to repeat the dataset. -For example, suppose the original dataset is `Dataset_A`, to repeat it, the config looks like the following -```python -dataset_A_train = dict( - type='RepeatDataset', - times=N, - dataset=dict( # This is the original config of Dataset_A - type='Dataset_A', - ... - pipeline=train_pipeline - ) - ) -``` - -### Concatenate dataset - -There 2 ways to concatenate the dataset. - -1. If the datasets you want to concatenate are in the same type with different annotation files, - you can concatenate the dataset configs like the following. - - 1. You may concatenate two `ann_dir`. - ```python - dataset_A_train = dict( - type='Dataset_A', - img_dir = 'img_dir', - ann_dir = ['anno_dir_1', 'anno_dir_2'], - pipeline=train_pipeline - ) - ``` - 2. You may concatenate two `split`. - - ```python - dataset_A_train = dict( - type='Dataset_A', - img_dir = 'img_dir', - ann_dir = 'anno_dir', - split = ['split_1.txt', 'split_2.txt'], - pipeline=train_pipeline - ) - ``` - 3. You may concatenate two `ann_dir` and `split` simultaneously. - - ```python - dataset_A_train = dict( - type='Dataset_A', - img_dir = 'img_dir', - ann_dir = ['anno_dir_1', 'anno_dir_2'], - split = ['split_1.txt', 'split_2.txt'], - pipeline=train_pipeline - ) - ``` - In this case, `ann_dir_1` and `ann_dir_2` are corresponding to `split_1.txt` and `split_2.txt`. - -2. In case the dataset you want to concatenate is different, you can concatenate the dataset configs like the following. - - ```python - dataset_A_train = dict() - dataset_B_train = dict() - - data = dict( - imgs_per_gpu=2, - workers_per_gpu=2, - train = [ - dataset_A_train, - dataset_B_train - ], - val = dataset_A_val, - test = dataset_A_test - ) - ``` - - -A more complex example that repeats `Dataset_A` and `Dataset_B` by N and M times, respectively, and then concatenates the repeated datasets is as the following. 
- -```python -dataset_A_train = dict( - type='RepeatDataset', - times=N, - dataset=dict( - type='Dataset_A', - ... - pipeline=train_pipeline - ) -) -dataset_A_val = dict( - ... - pipeline=test_pipeline -) -dataset_A_test = dict( - ... - pipeline=test_pipeline -) -dataset_B_train = dict( - type='RepeatDataset', - times=M, - dataset=dict( - type='Dataset_B', - ... - pipeline=train_pipeline - ) -) -data = dict( - imgs_per_gpu=2, - workers_per_gpu=2, - train = [ - dataset_A_train, - dataset_B_train - ], - val = dataset_A_val, - test = dataset_A_test -) - -``` diff --git a/docs/tutorials/new_modules.md b/docs/tutorials/new_modules.md deleted file mode 100644 index 5940880907..0000000000 --- a/docs/tutorials/new_modules.md +++ /dev/null @@ -1,234 +0,0 @@ -# 3. Adding New Modules - -## Customize optimizer - -Assume you want to add a optimizer named as `MyOptimizer`, which has arguments `a`, `b`, and `c`. -You need to first implement the new optimizer in a file, e.g., in `mmseg/core/optimizer/my_optimizer.py`: - -```python -from mmcv.runner import OPTIMIZERS -from torch.optim import Optimizer - - -@OPTIMIZERS.register_module -class MyOptimizer(Optimizer): - - def __init__(self, a, b, c) - -``` - -Then add this module in `mmseg/core/optimizer/__init__.py` thus the registry will -find the new module and add it: - -```python -from .my_optimizer import MyOptimizer -``` - -Then you can use `MyOptimizer` in `optimizer` field of config files. -In the configs, the optimizers are defined by the field `optimizer` like the following: - -```python -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -``` - -To use your own optimizer, the field can be changed as - -```python -optimizer = dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value) -``` - -We already support to use all the optimizers implemented by PyTorch, and the only modification is to change the `optimizer` field of config files. -For example, if you want to use `ADAM`, though the performance will drop a lot, the modification could be as the following. - -```python -optimizer = dict(type='Adam', lr=0.0003, weight_decay=0.0001) -``` - -The users can directly set arguments following the [API doc](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim) of PyTorch. - -## Customize optimizer constructor - -Some models may have some parameter-specific settings for optimization, e.g. weight decay for BatchNoarm layers. -The users can do those fine-grained parameter tuning through customizing optimizer constructor. - -``` -from mmcv.utils import build_from_cfg - -from mmcv.runner import OPTIMIZER_BUILDERS -from .cocktail_optimizer import CocktailOptimizer - - -@OPTIMIZER_BUILDERS.register_module -class CocktailOptimizerConstructor(object): - - def __init__(self, optimizer_cfg, paramwise_cfg=None): - - def __call__(self, model): - - return my_optimizer - -``` - -## Develop new components - -There are mainly 2 types of components in MMSegmentation. - -- backbone: usually stacks of convolutional network to extract feature maps, e.g., ResNet, HRNet. -- head: the component for semantic segmentation map decoding. - -### Add new backbones - -Here we show how to develop new components with an example of MobileNet. - -1. Create a new file `mmseg/models/backbones/mobilenet.py`. 
- -```python -import torch.nn as nn - -from ..registry import BACKBONES - - -@BACKBONES.register_module -class MobileNet(nn.Module): - - def __init__(self, arg1, arg2): - pass - - def forward(self, x): # should return a tuple - pass - - def init_weights(self, pretrained=None): - pass -``` - -2. Import the module in `mmseg/models/backbones/__init__.py`. - -```python -from .mobilenet import MobileNet -``` - -3. Use it in your config file. - -```python -model = dict( - ... - backbone=dict( - type='MobileNet', - arg1=xxx, - arg2=xxx), - ... -``` - -### Add new heads - -In MMSegmentation, we provide a base [BaseDecodeHead](../../mmseg/models/decode_heads/decode_head.py) for all segmentation head. -All newly implemented decode heads should be derived from it. -Here we show how to develop a new head with the example of [PSPNet](https://arxiv.org/abs/1612.01105) as the following. - -First, add a new decode head in `mmseg/models/decode_heads/psp_head.py`. -PSPNet implements a decode head for segmentation decode. -To implement a decode head, basically we need to implement three functions of the new module as the following. - -```python -@HEADS.register_module() -class PSPHead(BaseDecodeHead): - - def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): - super(PSPHead, self).__init__(**kwargs) - - def init_weights(self): - - def forward(self, inputs): - -``` - -Next, the users need to add the module in the `mmseg/models/decode_heads/__init__.py` thus the corresponding registry could find and load them. - -To config file of PSPNet is as the following - -```python -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='pretrain_model/resnet50_v1c_trick-2cccc1ad.pth', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='PSPHead', - in_channels=2048, - in_index=3, - channels=512, - pool_scales=(1, 2, 3, 6), - drop_out_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) - -``` - -### Add new loss - -Assume you want to add a new loss as `MyLoss` for segmentation decode. -To add a new loss function, the users need implement it in `mmseg/models/losses/my_loss.py`. -The decorator `weighted_loss` enable the loss to be weighted for each element. - -```python -import torch -import torch.nn as nn - -from ..builder import LOSSES -from .utils import weighted_loss - -@weighted_loss -def my_loss(pred, target): - assert pred.size() == target.size() and target.numel() > 0 - loss = torch.abs(pred - target) - return loss - -@LOSSES.register_module -class MyLoss(nn.Module): - - def __init__(self, reduction='mean', loss_weight=1.0): - super(MyLoss, self).__init__() - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None): - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - loss = self.loss_weight * my_loss( - pred, target, weight, reduction=reduction, avg_factor=avg_factor) - return loss -``` - -Then the users need to add it in the `mmseg/models/losses/__init__.py`. - -```python -from .my_loss import MyLoss, my_loss - -``` - -To use it, modify the `loss_xxx` field. 
-Then you need to modify the `loss_decode` field in the head. -`loss_weight` could be used to balance multiple losses. - -```python -loss_decode=dict(type='MyLoss', loss_weight=1.0)) -``` diff --git a/docs/tutorials/training_tricks.md b/docs/tutorials/training_tricks.md deleted file mode 100644 index 5ff4b18a70..0000000000 --- a/docs/tutorials/training_tricks.md +++ /dev/null @@ -1,28 +0,0 @@ -# 4. Training Tricks - -MMSegmentation support following training tricks out of box. - -## Different Learning Rate(LR) for Backbone and Heads - -In semantic segmentation, some methods make the LR of heads larger than backbone to achieve better performance or faster convergence. - -In MMSegmentation, you may add following lines to config to make the LR of heads 10 times of backbone. -```python -optimizer_config=dict( - paramwise_cfg = dict( - custom_keys={ - 'head': dict(lr_mult=10.)})) -``` -With this modification, the LR of any parameter group with `'head'` in name will be multiplied by 10. -You may refer to [MMCV doc](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.DefaultOptimizerConstructor) for further details. - -## Online Hard Example Mining (OHEM) -We implement pixel sampler [here](https://github.com/open-mmlab/mmsegmentation/tree/master/mmseg/core/seg/sampler) for training sampling. -Here is an example config of training PSPNet with OHEM enabled. -```python -_base_ = './pspnet_r50-d8_512x1024_40k_cityscapes.py' -model=dict( - decode_head=dict( - sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=100000)) ) -``` -In this way, only pixels with confidence score under 0.7 are used to train. And we keep at least 100000 pixels during training. diff --git a/docs/zh_cn/Makefile b/docs/zh_cn/Makefile new file mode 100644 index 0000000000..d4bb2cbb9e --- /dev/null +++ b/docs/zh_cn/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/zh_cn/_static/css/readthedocs.css b/docs/zh_cn/_static/css/readthedocs.css new file mode 100644 index 0000000000..2e38d0880b --- /dev/null +++ b/docs/zh_cn/_static/css/readthedocs.css @@ -0,0 +1,6 @@ +.header-logo { + background-image: url("../images/mmsegmentation.png"); + background-size: 201px 40px; + height: 40px; + width: 201px; +} diff --git a/docs/zh_cn/_static/images/mmsegmentation.png b/docs/zh_cn/_static/images/mmsegmentation.png new file mode 100644 index 0000000000..009083a9e8 Binary files /dev/null and b/docs/zh_cn/_static/images/mmsegmentation.png differ diff --git a/docs/zh_cn/advanced_guides/add_datasets.md b/docs/zh_cn/advanced_guides/add_datasets.md new file mode 100644 index 0000000000..512df8b983 --- /dev/null +++ b/docs/zh_cn/advanced_guides/add_datasets.md @@ -0,0 +1,209 @@ +# 自定义数据集(待更新) + +## 通过重新组织数据来定制数据集 + +最简单的方法是将您的数据集进行转化,并组织成文件夹的形式。 + +如下的文件结构就是一个例子。 + +```none +├── data +│ ├── my_dataset +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ │ ├── xxx{img_suffix} +│ │ │ │ ├── yyy{img_suffix} +│ │ │ │ ├── zzz{img_suffix} +│ │ │ ├── val +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ │ ├── xxx{seg_map_suffix} +│ │ │ │ ├── yyy{seg_map_suffix} +│ │ │ │ ├── zzz{seg_map_suffix} +│ │ │ ├── val + +``` + +一个训练对将由 img_dir/ann_dir 里同样首缀的文件组成。 + +如果给定 `split` 参数,只有部分在 img_dir/ann_dir 里的文件会被加载。 +我们可以对被包括在 split 文本里的文件指定前缀。 + +除此以外,一个 split 文本如下所示: + +```none +xxx +zzz +``` + +只有 + +`data/my_dataset/img_dir/train/xxx{img_suffix}`, +`data/my_dataset/img_dir/train/zzz{img_suffix}`, +`data/my_dataset/ann_dir/train/xxx{seg_map_suffix}`, +`data/my_dataset/ann_dir/train/zzz{seg_map_suffix}` 将被加载。 + +注意:标注是跟图像同样的形状 (H, W),其中的像素值的范围是 `[0, num_classes - 1]`。 +您也可以使用 [pillow](https://pillow.readthedocs.io/en/stable/handbook/concepts.html#palette) 的 `'P'` 模式去创建包含颜色的标注。 + +## 通过混合数据去定制数据集 + +MMSegmentation 同样支持混合数据集去训练。 +当前它支持拼接 (concat), 重复 (repeat) 和多图混合 (multi-image mix)数据集。 + +### 重复数据集 + +我们使用 `RepeatDataset` 作为包装 (wrapper) 去重复数据集。 +例如,假设原始数据集是 `Dataset_A`,为了重复它,配置文件如下: + +```python +dataset_A_train = dict( + type='RepeatDataset', + times=N, + dataset=dict( # 这是 Dataset_A 数据集的原始配置 + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) +``` + +### 拼接数据集 + +有2种方式去拼接数据集。 + +1. 如果您想拼接的数据集是同样的类型,但有不同的标注文件, + 您可以按如下操作去拼接数据集的配置文件: + + 1. 您也许可以拼接两个标注文件夹 `ann_dir` + + ```python + dataset_A_train = dict( + type='Dataset_A', + img_dir = 'img_dir', + ann_dir = ['anno_dir_1', 'anno_dir_2'], + pipeline=train_pipeline + ) + ``` + + 2. 您也可以去拼接两个 `split` 文件列表 + + ```python + dataset_A_train = dict( + type='Dataset_A', + img_dir = 'img_dir', + ann_dir = 'anno_dir', + split = ['split_1.txt', 'split_2.txt'], + pipeline=train_pipeline + ) + ``` + + 3. 您也可以同时拼接 `ann_dir` 文件夹和 `split` 文件列表 + + ```python + dataset_A_train = dict( + type='Dataset_A', + img_dir = 'img_dir', + ann_dir = ['anno_dir_1', 'anno_dir_2'], + split = ['split_1.txt', 'split_2.txt'], + pipeline=train_pipeline + ) + ``` + + 在这样的情况下, `ann_dir_1` 和 `ann_dir_2` 分别对应于 `split_1.txt` 和 `split_2.txt` + +2. 如果您想拼接不同的数据集,您可以如下去拼接数据集的配置文件: + + ```python + dataset_A_train = dict() + dataset_B_train = dict() + + data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train = [ + dataset_A_train, + dataset_B_train + ], + val = dataset_A_val, + test = dataset_A_test + ) + ``` + +一个更复杂的例子如下:分别重复 `Dataset_A` 和 `Dataset_B` N 次和 M 次,然后再去拼接重复后的数据集 + +```python +dataset_A_train = dict( + type='RepeatDataset', + times=N, + dataset=dict( + type='Dataset_A', + ... 
+ pipeline=train_pipeline + ) +) +dataset_A_val = dict( + ... + pipeline=test_pipeline +) +dataset_A_test = dict( + ... + pipeline=test_pipeline +) +dataset_B_train = dict( + type='RepeatDataset', + times=M, + dataset=dict( + type='Dataset_B', + ... + pipeline=train_pipeline + ) +) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train = [ + dataset_A_train, + dataset_B_train + ], + val = dataset_A_val, + test = dataset_A_test +) + +``` + +### 多图混合集 + +我们使用 `MultiImageMixDataset` 作为包装(wrapper)去混合多个数据集的图片。 +`MultiImageMixDataset`可以被类似mosaic和mixup的多图混合数据増广使用。 + +`MultiImageMixDataset`与`Mosaic`数据増广一起使用的例子: + +```python +train_pipeline = [ + dict(type='RandomMosaic', prob=1), + dict(type='Resize', img_scale=(1024, 512), keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] + +train_dataset = dict( + type='MultiImageMixDataset', + dataset=dict( + classes=classes, + palette=palette, + type=dataset_type, + reduce_zero_label=False, + img_dir=data_root + "images/train", + ann_dir=data_root + "annotations/train", + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + ] + ), + pipeline=train_pipeline +) + +``` diff --git a/docs/zh_cn/advanced_guides/add_metric.md b/docs/zh_cn/advanced_guides/add_metric.md new file mode 100644 index 0000000000..dfd94487d1 --- /dev/null +++ b/docs/zh_cn/advanced_guides/add_metric.md @@ -0,0 +1 @@ +# 添加评测指标 diff --git a/docs/zh_cn/advanced_guides/add_modules.md b/docs/zh_cn/advanced_guides/add_modules.md new file mode 100644 index 0000000000..e20dadd242 --- /dev/null +++ b/docs/zh_cn/advanced_guides/add_modules.md @@ -0,0 +1,230 @@ +# 自定义模型(待更新) + +## 自定义优化器 (optimizer) + +假设您想增加一个新的叫 `MyOptimizer` 的优化器,它的参数分别为 `a`, `b`, 和 `c`。 +您首先需要在一个文件里实现这个新的优化器,例如在 `mmseg/core/optimizer/my_optimizer.py` 里面: + +```python +from mmcv.runner import OPTIMIZERS +from torch.optim import Optimizer + + +@OPTIMIZERS.register_module +class MyOptimizer(Optimizer): + + def __init__(self, a, b, c) + +``` + +然后增加这个模块到 `mmseg/core/optimizer/__init__.py` 里面,这样注册器 (registry) 将会发现这个新的模块并添加它: + +```python +from .my_optimizer import MyOptimizer +``` + +之后您可以在配置文件的 `optimizer` 域里使用 `MyOptimizer`, +如下所示,在配置文件里,优化器被 `optimizer` 域所定义: + +```python +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +``` + +为了使用您自己的优化器,域可以被修改为: + +```python +optimizer = dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value) +``` + +我们已经支持了 PyTorch 自带的全部优化器,唯一修改的地方是在配置文件里的 `optimizer` 域。例如,如果您想使用 `ADAM`,尽管数值表现会掉点,还是可以如下修改: + +```python +optimizer = dict(type='Adam', lr=0.0003, weight_decay=0.0001) +``` + +使用者可以直接按照 PyTorch [文档教程](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim) 去设置参数。 + +## 定制优化器的构造器 (optimizer constructor) + +对于优化,一些模型可能会有一些特别定义的参数,例如批归一化 (BatchNorm) 层里面的权重衰减 (weight decay)。 +使用者可以通过定制优化器的构造器来微调这些细粒度的优化器参数。 + +```python +from mmcv.utils import build_from_cfg + +from mmcv.runner import OPTIMIZER_BUILDERS +from .cocktail_optimizer import CocktailOptimizer + + +@OPTIMIZER_BUILDERS.register_module +class CocktailOptimizerConstructor(object): + + def __init__(self, optim_wrapper_cfg, paramwise_cfg=None): + + def __call__(self, model): + + return my_optimizer + +``` + +## 开发和增加新的组件(Module) + +MMSegmentation 里主要有2种组件: + +- 主干网络 (backbone): 通常是卷积网络的堆叠,来做特征提取,例如 ResNet, HRNet +- 解码头 (decoder head): 用于语义分割图的解码的组件(得到分割结果) + +### 添加新的主干网络 + +这里我们以 MobileNet 为例,展示如何增加新的主干组件: + +1. 
创建一个新的文件 `mmseg/models/backbones/mobilenet.py` + +```python +import torch.nn as nn + +from ..registry import BACKBONES + + +@BACKBONES.register_module +class MobileNet(nn.Module): + + def __init__(self, arg1, arg2): + pass + + def forward(self, x): # should return a tuple + pass + + def init_weights(self, pretrained=None): + pass +``` + +2. 在 `mmseg/models/backbones/__init__.py` 里面导入模块 + +```python +from .mobilenet import MobileNet +``` + +3. 在您的配置文件里使用它 + +```python +model = dict( + ... + backbone=dict( + type='MobileNet', + arg1=xxx, + arg2=xxx), + ... +``` + +### 增加新的解码头 (decoder head)组件 + +在 MMSegmentation 里面,对于所有的分割头,我们提供一个基类解码头 [BaseDecodeHead](https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/decode_heads/decode_head.py) 。 +所有新建的解码头都应该继承它。这里我们以 [PSPNet](https://arxiv.org/abs/1612.01105) 为例, +展示如何开发和增加一个新的解码头组件: + +首先,在 `mmseg/models/decode_heads/psp_head.py` 里添加一个新的解码头。 +PSPNet 中实现了一个语义分割的解码头。为了实现一个解码头,我们只需要在新构造的解码头中实现如下的3个函数: + +```python +@HEADS.register_module() +class PSPHead(BaseDecodeHead): + + def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): + super(PSPHead, self).__init__(**kwargs) + + def init_weights(self): + + def forward(self, inputs): + +``` + +接着,使用者需要在 `mmseg/models/decode_heads/__init__.py` 里面添加这个模块,这样对应的注册器 (registry) 可以查找并加载它们。 + +PSPNet的配置文件如下所示: + +```python +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='pretrain_model/resnet50_v1c_trick-2cccc1ad.pth', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='PSPHead', + in_channels=2048, + in_index=3, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) + +``` + +### 增加新的损失函数 + +假设您想添加一个新的损失函数 `MyLoss` 到语义分割解码器里。 +为了添加一个新的损失函数,使用者需要在 `mmseg/models/losses/my_loss.py` 里面去实现它。 +`weighted_loss` 可以对计算损失时的每个样本做加权。 + +```python +import torch +import torch.nn as nn + +from ..builder import LOSSES +from .utils import weighted_loss + +@weighted_loss +def my_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + +@LOSSES.register_module +class MyLoss(nn.Module): + + def __init__(self, reduction='mean', loss_weight=1.0): + super(MyLoss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss = self.loss_weight * my_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss +``` + +然后使用者需要在 `mmseg/models/losses/__init__.py` 里面添加它: + +```python +from .my_loss import MyLoss, my_loss + +``` + +为了使用它,修改 `loss_xxx` 域。之后您需要在解码头组件里修改 `loss_decode` 域。 +`loss_weight` 可以被用来对不同的损失函数做加权。 + +```python +loss_decode=dict(type='MyLoss', loss_weight=1.0)) +``` diff --git a/docs/zh_cn/advanced_guides/add_transforms.md b/docs/zh_cn/advanced_guides/add_transforms.md new file mode 100644 index 0000000000..2fa55f0c05 --- /dev/null +++ b/docs/zh_cn/advanced_guides/add_transforms.md @@ -0,0 +1,166 @@ +# 自定义数据流程(待更新) + +## 数据流程的设计 + 
+按照通常的惯例,我们使用 `Dataset` 和 `DataLoader` 做多线程的数据加载。`Dataset` 返回一个数据内容的字典,里面对应于模型前传方法的各个参数。 +因为在语义分割中,输入的图像数据具有不同的大小,我们在 MMCV 里引入一个新的 `DataContainer` 类别去帮助收集和分发不同大小的输入数据。 + +更多细节,请查看[这里](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py) 。 + +数据的准备流程和数据集是解耦的。通常一个数据集定义了如何处理标注数据(annotations)信息,而一个数据流程定义了准备一个数据字典的所有步骤。一个流程包括了一系列操作,每个操作里都把一个字典作为输入,然后再输出一个新的字典给下一个变换操作。 + +这些操作可分为数据加载 (data loading),预处理 (pre-processing),格式变化 (formatting) 和测试时数据增强 (test-time augmentation)。 + +下面的例子就是 PSPNet 的一个流程: + +```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +``` + +对于每个操作,我们列出它添加、更新、移除的相关字典域 (dict fields): + +### 数据加载 Data loading + +`LoadImageFromFile` + +- 增加: img, img_shape, ori_shape + +`LoadAnnotations` + +- 增加: gt_semantic_seg, seg_fields + +### 预处理 Pre-processing + +`Resize` + +- 增加: scale, scale_idx, pad_shape, scale_factor, keep_ratio +- 更新: img, img_shape, \*seg_fields + +`RandomFlip` + +- 增加: flip +- 更新: img, \*seg_fields + +`Pad` + +- 增加: pad_fixed_size, pad_size_divisor +- 更新: img, pad_shape, \*seg_fields + +`RandomCrop` + +- 更新: img, pad_shape, \*seg_fields + +`Normalize` + +- 增加: img_norm_cfg +- 更新: img + +`SegRescale` + +- 更新: gt_semantic_seg + +`PhotoMetricDistortion` + +- 更新: img + +### 格式 Formatting + +`ToTensor` + +- 更新: 由 `keys` 指定 + +`ImageToTensor` + +- 更新: 由 `keys` 指定 + +`Transpose` + +- 更新: 由 `keys` 指定 + +`ToDataContainer` + +- 更新: 由 `keys` 指定 + +`DefaultFormatBundle` + +- 更新: img, gt_semantic_seg + +`Collect` + +- 增加: img_meta (the keys of img_meta is specified by `meta_keys`) +- 移除: all other keys except for those specified by `keys` + +### 测试时数据增强 Test time augmentation + +`MultiScaleFlipAug` + +## 拓展和使用自定义的流程 + +1. 在任何一个文件里写一个新的流程,例如 `my_pipeline.py`,它以一个字典作为输入并且输出一个字典 + + ```python + from mmseg.datasets import PIPELINES + + @PIPELINES.register_module() + class MyTransform: + + def __call__(self, results): + results['dummy'] = True + return results + ``` + +2. 导入一个新类 + + ```python + from .my_pipeline import MyTransform + ``` + +3. 
在配置文件里使用它 + + ```python + img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + crop_size = (512, 1024) + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='MyTransform'), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), + ] + ``` diff --git a/docs/zh_cn/advanced_guides/customize_runtime.md b/docs/zh_cn/advanced_guides/customize_runtime.md new file mode 100644 index 0000000000..1afd95a9a6 --- /dev/null +++ b/docs/zh_cn/advanced_guides/customize_runtime.md @@ -0,0 +1,248 @@ +# 自定义运行设定(待更新) + +## 自定义优化设定 + +### 自定义 PyTorch 支持的优化器 + +我们已经支持 PyTorch 自带的所有优化器,唯一需要修改的地方是在配置文件里的 `optimizer` 域里面。 +例如,如果您想使用 `ADAM` (注意如下操作可能会让模型表现下降),可以使用如下修改: + +```python +optimizer = dict(type='Adam', lr=0.0003, weight_decay=0.0001) +``` + +为了修改模型的学习率,使用者仅需要修改配置文件里 optimizer 的 `lr` 即可。 +使用者可以参照 PyTorch 的 [API 文档](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim) +直接设置参数。 + +### 自定义自己实现的优化器 + +#### 1. 定义一个新的优化器 + +一个自定义的优化器可以按照如下去定义: + +假如您想增加一个叫做 `MyOptimizer` 的优化器,它的参数分别有 `a`, `b`, 和 `c`。 +您需要创建一个叫 `mmseg/core/optimizer` 的新文件夹。 +然后再在文件,即 `mmseg/core/optimizer/my_optimizer.py` 里面去实现这个新优化器: + +```python +from .registry import OPTIMIZERS +from torch.optim import Optimizer + + +@OPTIMIZERS.register_module() +class MyOptimizer(Optimizer): + + def __init__(self, a, b, c) + +``` + +#### 2. 增加优化器到注册表 (registry) + +为了让上述定义的模块被框架发现,首先这个模块应该被导入到主命名空间 (main namespace) 里。 +有两种方式可以实现它。 + +- 修改 `mmseg/core/optimizer/__init__.py` 来导入它 + + 新的被定义的模块应该被导入到 `mmseg/core/optimizer/__init__.py` 这样注册表将会发现新的模块并添加它 + +```python +from .my_optimizer import MyOptimizer +``` + +- 在配置文件里使用 `custom_imports` 去手动导入它 + +```python +custom_imports = dict(imports=['mmseg.core.optimizer.my_optimizer'], allow_failed_imports=False) +``` + +`mmseg.core.optimizer.my_optimizer` 模块将会在程序运行的开始被导入,并且 `MyOptimizer` 类将会自动注册。 +需要注意只有包含 `MyOptimizer` 类的包 (package) 应当被导入。 +而 `mmseg.core.optimizer.my_optimizer.MyOptimizer` **不能** 被直接导入。 + +事实上,使用者完全可以用另一个按这样导入方法的文件夹结构,只要模块的根路径已经被添加到 `PYTHONPATH` 里面。 + +#### 3. 
在配置文件里定义优化器 + +之后您可以在配置文件的 `optimizer` 域里面使用 `MyOptimizer` +在配置文件里,优化器被定义在 `optimizer` 域里,如下所示: + +```python +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +``` + +为了使用您自己的优化器,这个域可以被改成: + +```python +optimizer = dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value) +``` + +### 自定义优化器的构造器 (constructor) + +有些模型可能需要在优化器里有一些特别参数的设置,例如 批归一化层 (BatchNorm layers) 的 权重衰减 (weight decay)。 +使用者可以通过自定义优化器的构造器去微调这些细粒度参数。 + +```python +from mmcv.utils import build_from_cfg + +from mmcv.runner.optimizer import OPTIMIZER_BUILDERS, OPTIMIZERS +from mmseg.utils import get_root_logger +from .my_optimizer import MyOptimizer + + +@OPTIMIZER_BUILDERS.register_module() +class MyOptimizerConstructor(object): + + def __init__(self, optim_wrapper_cfg, paramwise_cfg=None): + + def __call__(self, model): + + return my_optimizer + +``` + +默认的优化器构造器的实现可以参照 [这里](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/optimizer/default_constructor.py#L11) ,它也可以被用作新的优化器构造器的模板。 + +### 额外的设置 + +优化器没有实现的一些技巧应该通过优化器构造器 (optimizer constructor) 或者钩子 (hook) 去实现,如设置基于参数的学习率 (parameter-wise learning rates)。我们列出一些常见的设置,它们可以稳定或加速模型的训练。 +如果您有更多的设置,欢迎在 PR 和 issue 里面提交。 + +- __使用梯度截断 (gradient clip) 去稳定训练__: + + 一些模型需要梯度截断去稳定训练过程,如下所示 + + ```python + optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) + ``` + + 如果您的配置继承自已经设置了 `optimizer_config` 的基础配置 (base config),您可能需要 `_delete_=True` 来重写那些不需要的设置。更多细节请参照 [配置文件文档](https://mmsegmentation.readthedocs.io/en/latest/config.html) 。 + +- __使用动量计划表 (momentum schedule) 去加速模型收敛__: + + 我们支持动量计划表去让模型基于学习率修改动量,这样可能让模型收敛地更快。 + 动量计划表经常和学习率计划表 (LR scheduler) 一起使用,例如如下配置文件就在 3D 检测里经常使用以加速收敛。 + 更多细节请参考 [CyclicLrUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L327) 和 [CyclicMomentumUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/momentum_updater.py#L130) 的实现。 + + ```python + lr_config = dict( + policy='cyclic', + target_ratio=(10, 1e-4), + cyclic_times=1, + step_ratio_up=0.4, + ) + momentum_config = dict( + policy='cyclic', + target_ratio=(0.85 / 0.95, 1), + cyclic_times=1, + step_ratio_up=0.4, + ) + ``` + +## 自定义训练计划表 + +我们根据默认的训练迭代步数 40k/80k 来设置学习率,这在 MMCV 里叫做 [`PolyLrUpdaterHook`](https://github.com/open-mmlab/mmcv/blob/826d3a7b68596c824fa1e2cb89b6ac274f52179c/mmcv/runner/hooks/lr_updater.py#L196) 。 +我们也支持许多其他的学习率计划表:[这里](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py) ,例如 `CosineAnnealing` 和 `Poly` 计划表。下面是一些例子: + +- 步计划表 Step schedule: + + ```python + lr_config = dict(policy='step', step=[9, 10]) + ``` + +- 余弦退火计划表 ConsineAnnealing schedule: + + ```python + lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=1000, + warmup_ratio=1.0 / 10, + min_lr_ratio=1e-5) + ``` + +## 自定义工作流 (workflow) + +工作流是一个专门定义运行顺序和轮数 (running order and epochs) 的列表 (phase, epochs)。 +默认情况下它设置成: + +```python +workflow = [('train', 1)] +``` + +意思是训练是跑 1 个 epoch。有时候使用者可能想检查模型在验证集上的一些指标(如 损失 loss,精确性 accuracy),我们可以这样设置工作流: + +```python +[('train', 1), ('val', 1)] +``` + +于是 1 个 epoch 训练,1 个 epoch 验证将交替运行。 + +**注意**: + +1. 模型的参数在验证的阶段不会被自动更新 +2. 配置文件里的关键词 `total_epochs` 仅控制训练的 epochs 数目,而不会影响验证时的工作流 +3. 
工作流 `[('train', 1), ('val', 1)]` 和 `[('train', 1)]` 将不会改变 `EvalHook` 的行为,因为 `EvalHook` 被 `after_train_epoch` + 调用而且验证的工作流仅仅影响通过调用 `after_val_epoch` 的钩子 (hooks)。因此, `[('train', 1), ('val', 1)]` 和 `[('train', 1)]` + 的区别仅在于 runner 将在每次训练 epoch 结束后计算在验证集上的损失 + +## 自定义钩 (hooks) + +### 使用 MMCV 实现的钩子 (hooks) + +如果钩子已经在 MMCV 里被实现,如下所示,您可以直接修改配置文件来使用钩子: + +```python +custom_hooks = [ + dict(type='MyHook', a=a_value, b=b_value, priority='NORMAL') +] +``` + +### 修改默认的运行时间钩子 (runtime hooks) + +以下的常用的钩子没有被 `custom_hooks` 注册: + +- log_config +- checkpoint_config +- evaluation +- lr_config +- optimizer_config +- momentum_config + +在这些钩子里,只有 logger hook 有 `VERY_LOW` 优先级,其他的优先级都是 `NORMAL`。 +上述提及的教程已经包括了如何修改 `optimizer_config`,`momentum_config` 和 `lr_config`。 +这里我们展示我们如何处理 `log_config`, `checkpoint_config` 和 `evaluation`。 + +#### 检查点配置文件 (Checkpoint config) + +MMCV runner 将使用 `checkpoint_config` 去初始化 [`CheckpointHook`](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/hooks/checkpoint.py#L9). + +```python +checkpoint_config = dict(interval=1) +``` + +使用者可以设置 `max_keep_ckpts` 来仅保存一小部分检查点或者通过 `save_optimizer` 来决定是否保存优化器的状态字典 (state dict of optimizer)。 更多使用参数的细节请参考 [这里](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.CheckpointHook) 。 + +#### 日志配置文件 (Log config) + +`log_config` 包裹了许多日志钩 (logger hooks) 而且能去设置间隔 (intervals)。现在 MMCV 支持 `WandbLoggerHook`, `MlflowLoggerHook` 和 `TensorboardLoggerHook`。 +详细的使用请参照 [文档](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.LoggerHook) 。 + +```python +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +``` + +#### 评估配置文件 (Evaluation config) + +`evaluation` 的配置文件将被用来初始化 [`EvalHook`](https://github.com/open-mmlab/mmsegmentation/blob/e3f6f655d69b777341aec2fe8829871cc0beadcb/mmseg/core/evaluation/eval_hooks.py#L7) 。 +除了 `interval` 键,其他的像 `metric` 这样的参数将被传递给 `dataset.evaluate()` 。 + +```python +evaluation = dict(interval=1, metric='mIoU') +``` diff --git a/docs/zh_cn/advanced_guides/data_flow.md b/docs/zh_cn/advanced_guides/data_flow.md new file mode 100644 index 0000000000..960b4e6586 --- /dev/null +++ b/docs/zh_cn/advanced_guides/data_flow.md @@ -0,0 +1 @@ +# 数据流 diff --git a/docs/zh_cn/advanced_guides/datasets.md b/docs/zh_cn/advanced_guides/datasets.md new file mode 100644 index 0000000000..06a75e54bd --- /dev/null +++ b/docs/zh_cn/advanced_guides/datasets.md @@ -0,0 +1,362 @@ +# 数据集 + +在 MMSegmentation 算法库中, 所有 Dataset 类的功能有两个: 加载[预处理](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/en/user_guides/2_dataset_prepare.md) 之后的数据集的信息, 和将数据送入[数据集变换流水线](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/mmseg/datasets/basesegdataset.py#L141) 中, 进行[数据变换操作](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/zh_cn/advanced_guides/transforms.md). 加载的数据集信息包括两类: 元信息 (meta information), 数据集本身的信息, 例如数据集总共的类别, 和它们对应调色盘信息: 数据信息 (data information) 是指每组数据中图片和对应标签的路径. 下文中介绍了 MMSegmentation 1.x 中数据集的常用接口, 和 mmseg 数据集基类中数据信息加载与修改数据集类别的逻辑, 以及数据集与数据变换流水线 (pipeline) 的关系. + +## 常用接口 + +以 Cityscapes 为例, 介绍数据集常用接口. 如需运行以下示例, 请在当前工作目录下的 `data` 目录下载并[预处理](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/en/user_guides/2_dataset_prepare.md#cityscapes) Cityscapes 数据集. 
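+预处理完成后, `data` 目录下的 Cityscapes 数据集大致按照如下结构组织(仅作示意, 假设采用默认的预处理方式, 目录与下文示例中 `data_prefix` 的路径相对应):
+
+```
+├── data
+│   ├── cityscapes
+│   │   ├── leftImg8bit
+│   │   │   ├── train
+│   │   │   ├── val
+│   │   ├── gtFine
+│   │   │   ├── train
+│   │   │   ├── val
+```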
+ +实例化 Cityscapes 训练数据集: + +```python +from mmseg.datasets import CityscapesDataset +from mmseg.utils import register_all_modules +register_all_modules() + +data_root = 'data/cityscapes/' +data_prefix=dict(img_path='leftImg8bit/train', seg_map_path='gtFine/train') +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PackSegInputs') +] + +dataset = CityscapesDataset(data_root=data_root, data_prefix=data_prefix, test_mode=False, pipeline=train_pipeline) +``` + +查看训练数据集长度: + +```python +print(len(dataset)) + +2975 +``` + +获取数据信息, 数据信息的类型是一个字典, 包括 `'img_path'` 字段的存放图片的路径和 `'seg_map_path'` 字段存放分割标注的路径, 以及标签重映射的字段 `'label_map'` 和 `'reduce_zero_label'`(主要功能在下文中介绍), 还有存放已加载标签字段 `'seg_fields'`, 和当前样本的索引字段 `'sample_idx'`. + +```python +# 获取数据集中第一组样本的数据信息 +print(dataset.get_data_info(0)) + +{'img_path': 'data/cityscapes/leftImg8bit/train/aachen/aachen_000000_000019_leftImg8bit.png', + 'seg_map_path': 'data/cityscapes/gtFine/train/aachen/aachen_000000_000019_gtFine_labelTrainIds.png', + 'label_map': None, + 'reduce_zero_label': False, + 'seg_fields': [], + 'sample_idx': 0} +``` + +获取数据集元信息, MMSegmentation 的数据集元信息的类型同样是一个字典, 包括 `'classes'` 字段存放数据集类别, `'palette'` 存放数据集类别对应的可视化时调色盘的颜色, 以及标签重映射的字段 `'label_map'` 和 `'reduce_zero_label'`. + +```python +print(dataset.metainfo) + +{'classes': ('road', + 'sidewalk', + 'building', + 'wall', + 'fence', + 'pole', + 'traffic light', + 'traffic sign', + 'vegetation', + 'terrain', + 'sky', + 'person', + 'rider', + 'car', + 'truck', + 'bus', + 'train', + 'motorcycle', + 'bicycle'), + 'palette': [[128, 64, 128], + [244, 35, 232], + [70, 70, 70], + [102, 102, 156], + [190, 153, 153], + [153, 153, 153], + [250, 170, 30], + [220, 220, 0], + [107, 142, 35], + [152, 251, 152], + [70, 130, 180], + [220, 20, 60], + [255, 0, 0], + [0, 0, 142], + [0, 0, 70], + [0, 60, 100], + [0, 80, 100], + [0, 0, 230], + [119, 11, 32]], + 'label_map': None, + 'reduce_zero_label': False} +``` + +数据集 `__getitem__` 方法的返回值, 是经过数据增强的样本数据的输出, 同样也是一个字典, 包括两个字段, `'inputs'` 字段是当前样本经过数据增强操作的图像, 类型为 torch.Tensor, `'data_samples'` 字段存放的数据类型是 MMSegmentation 1.x 新添加的数据结构 [`Segdatasample`](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/zh_cn/advanced_guides/structures.md), 其中`gt_sem_seg` 字段是经过数据增强的标签数据. 
+ +```python +print(dataset[0]) + +{'inputs': tensor([[[131, 130, 130, ..., 23, 23, 23], + [132, 132, 132, ..., 23, 22, 23], + [134, 133, 133, ..., 23, 23, 23], + ..., + [ 66, 67, 67, ..., 71, 71, 71], + [ 66, 67, 66, ..., 68, 68, 68], + [ 67, 67, 66, ..., 70, 70, 70]], + + [[143, 143, 142, ..., 28, 28, 29], + [145, 145, 145, ..., 28, 28, 29], + [145, 145, 145, ..., 27, 28, 29], + ..., + [ 75, 75, 76, ..., 80, 81, 81], + [ 75, 76, 75, ..., 80, 80, 80], + [ 77, 76, 76, ..., 82, 82, 82]], + + [[126, 125, 126, ..., 21, 21, 22], + [127, 127, 128, ..., 21, 21, 22], + [127, 127, 126, ..., 21, 21, 22], + ..., + [ 63, 63, 64, ..., 69, 69, 70], + [ 64, 65, 64, ..., 69, 69, 69], + [ 65, 66, 66, ..., 72, 71, 71]]], dtype=torch.uint8), + 'data_samples': + _gt_sem_seg: + )} +``` + +## BaseSegDataset + +由于 MMSegmentation 中的所有数据集的基本功能均包括加载[预处理](https://mmsegmentation.readthedocs.io/en/dev-1.x/advanced_guides/models.html#id2) 之后的数据集的信息, 和将数据送入数据集变换流水线中, 因此在 MMSegmentation 中将其中的共同接口抽象成 [`BaseSegDataset`](https://mmsegmentation.readthedocs.io/en/dev-1.x/api.html?highlight=BaseSegDataset#mmseg.datasets.BaseSegDataset),它继承自 [MMEngine 的 `BaseDataset`](https://github.com/open-mmlab/mmengine/blob/main/docs/en/advanced_tutorials/basedataset.md), 遵循 OpenMMLab 数据集初始化统一流程, 支持高效的内部数据存储格式, 支持数据集拼接、数据集重复采样等功能. +在 MMSegmentation BaseSegDataset 中重新定义了**数据信息加载方法**(`load_data_list`)和并新增了 `get_label_map` 方法用来**修改数据集的类别信息**. + +### 数据信息加载 + +数据信息加载的内容是样本数据的图片路径和标签路径, 具体实现在 MMSegmentation 的 BaseSegDataset 的 [`load_data_list`](https://github.com/open-mmlab/mmsegmentation/blob/163277bfe0fa8fefb63ee5137917fafada1b301c/mmseg/datasets/basesegdataset.py#L231) 中. +主要有两种获取图片和标签的路径方法, 如果当数据集目录按以下目录结构组织, [`load_data_list`](https://github.com/open-mmlab/mmsegmentation/blob/163277bfe0fa8fefb63ee5137917fafada1b301c/mmseg/datasets/basesegdataset.py#L231)) 会根据数据路径和后缀来解析. + +``` +├── data +│ ├── my_dataset +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ │ ├── xxx{img_suffix} +│ │ │ │ ├── yyy{img_suffix} +│ │ │ ├── val +│ │ │ │ ├── zzz{img_suffix} +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ │ ├── xxx{seg_map_suffix} +│ │ │ │ ├── yyy{seg_map_suffix} +│ │ │ ├── val +│ │ │ │ ├── zzz{seg_map_suffix} +``` + +例如 ADE20k 数据集结构如下所示: + +``` +├── ade +│ ├── ADEChallengeData2016 +│ │ ├── annotations +│ │ │ ├── training +│ │ │ │ ├── ADE_train_00000001.png +│ │ │ │ ├── ... +│ │ │ │── validation +│ │ │ │ ├── ADE_val_00000001.png +│ │ │ │ ├── ... +│ │ ├── images +│ │ │ ├── training +│ │ │ │ ├── ADE_train_00000001.jpg +│ │ │ │ ├── ... +│ │ │ ├── validation +│ │ │ │ ├── ADE_val_00000001.jpg +│ │ │ │ ├── ... +``` + +实例化 ADE20k 数据集时,输入图片和标签的路径和后缀: + +```python +from mmseg.datasets import ADE20KDataset + +ADE20KDataset(data_root = 'data/ade/ADEChallengeData2016', + data_prefix=dict(img_path='images/training', seg_map_path='annotations/training'), + img_suffix='.jpg', + seg_map_suffix='.png', + reduce_zero_label=True) +``` + +如果数据集有标注文件, 实例化数据集时会根据输入的数据集标注文件加载数据信息. 例如, PascalContext 数据集实例, 输入标注文件的内容为: + +```python +2008_000008 +... +``` + +实例化时需要定义 `ann_file` + +```python +PascalContextDataset(data_root='data/VOCdevkit/VOC2010/', + data_prefix=dict(img_path='JPEGImages', seg_map_path='SegmentationClassContext'), + ann_file='ImageSets/SegmentationContext/train.txt') +``` + +### 数据集类别修改 + +- 通过输入 metainfo 修改 + `BaseSegDataset` 的子类元信息在数据集实现时定义为类变量,例如 Cityscapes 的 `METAINFO` 变量: + +```python +class CityscapesDataset(BaseSegDataset): + """Cityscapes dataset. 
+ + The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is + fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset. + """ + METAINFO = dict( + classes=('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', + 'traffic light', 'traffic sign', 'vegetation', 'terrain', + 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', + 'motorcycle', 'bicycle'), + palette=[[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], + [190, 153, 153], [153, 153, 153], [250, 170, + 30], [220, 220, 0], + [107, 142, 35], [152, 251, 152], [70, 130, 180], + [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], + [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]]) + +``` + +这里的 `'classes'` 中定义了 Cityscapes 数据集标签中的类别名, 如果训练时只关注几个交通工具类别, **忽略其他类别**, +在实例化 Cityscapes 数据集时通过定义 `metainfo` 输入参数的 classes 的字段来修改数据集的元信息: + +```python +from mmseg.datasets import CityscapesDataset + +data_root = 'data/cityscapes/' +data_prefix=dict(img_path='leftImg8bit/train', seg_map_path='gtFine/train') +# metainfo 中只保留以下 classes +metainfo=dict(classes=( 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle')) +dataset = CityscapesDataset(data_root=data_root, data_prefix=data_prefix, metainfo=metainfo) + +print(dataset.metainfo) + +{'classes': ('car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle'), + 'palette': [[0, 0, 142], + [0, 0, 70], + [0, 60, 100], + [0, 80, 100], + [0, 0, 230], + [119, 11, 32], + [128, 64, 128], + [244, 35, 232], + [70, 70, 70], + [102, 102, 156], + [190, 153, 153], + [153, 153, 153], + [250, 170, 30], + [220, 220, 0], + [107, 142, 35], + [152, 251, 152], + [70, 130, 180], + [220, 20, 60], + [255, 0, 0]], + # 类别索引为 255 的像素,在计算损失时会被忽略 + 'label_map': {0: 255, + 1: 255, + 2: 255, + 3: 255, + 4: 255, + 5: 255, + 6: 255, + 7: 255, + 8: 255, + 9: 255, + 10: 255, + 11: 255, + 12: 255, + 13: 0, + 14: 1, + 15: 2, + 16: 3, + 17: 4, + 18: 5}, + 'reduce_zero_label': False} +``` + +可以看到, 数据集元信息的类别和默认 Cityscapes 不同. 并且, 定义了标签重映射的字段 `label_map` 用来修改每个分割掩膜上的像素的类别索引, 分割标签类别会根据 `label_map`, 将类别重映射, [具体实现](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/mmseg/datasets/basesegdataset.py#L151): + +```python +gt_semantic_seg_copy = gt_semantic_seg.copy() +for old_id, new_id in results['label_map'].items(): + gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id +``` + +- 通过 `reduce_zero_label` 修改 + 对于常见的忽略 0 号标签的场景, `BaseSegDataset` 的子类中可以用 `reduce_zero_label` 输入参数来控制。`reduce_zero_label` (默认为 `False`) + 用来控制是否将标签 0 忽略, 当该参数为 `True` 时(最常见的应用是 ADE20k 数据集), 对分割标签中第 0 个类别对应的类别索引改为 255 (MMSegmentation 模型中计算损失时, 默认忽略 255), 其他类别对应的类别索引减一: + +```python +gt_semantic_seg[gt_semantic_seg == 0] = 255 +gt_semantic_seg = gt_semantic_seg - 1 +gt_semantic_seg[gt_semantic_seg == 254] = 255 +``` + +## 数据集与数据变换流水线 + +在常用接口的例子中可以看到, 输入的参数中定义了数据变换流水线参数 `pipeline`, 数据集 `__getitem__` 方法返回经过数据变换的值. 
+当数据集输入参数没有定义 pipeline, 返回值和 `get_data_info` 方法返回值相同, 例如: + +```python +from mmseg.datasets import CityscapesDataset + +data_root = 'data/cityscapes/' +data_prefix=dict(img_path='leftImg8bit/train', seg_map_path='gtFine/train') +dataset = CityscapesDataset(data_root=data_root, data_prefix=data_prefix, test_mode=False) + +print(dataset[0]) + +{'img_path': 'data/cityscapes/leftImg8bit/train/aachen/aachen_000000_000019_leftImg8bit.png', + 'seg_map_path': 'data/cityscapes/gtFine/train/aachen/aachen_000000_000019_gtFine_labelTrainIds.png', + 'label_map': None, + 'reduce_zero_label': False, + 'seg_fields': [], + 'sample_idx': 0} +``` diff --git a/docs/zh_cn/advanced_guides/engine.md b/docs/zh_cn/advanced_guides/engine.md new file mode 100644 index 0000000000..f67a787597 --- /dev/null +++ b/docs/zh_cn/advanced_guides/engine.md @@ -0,0 +1,163 @@ +# 训练引擎 + +## 钩子 (Hook) + +### 介绍 + +OpenMMLab 将模型训练和测试过程抽象为 `Runner`, 插入钩子可以实现在 `Runner` 中不同的训练和测试节点 (例如 "每个训练 iter 前后", "每个验证 iter 前后" 等不同阶段) 所需要的相应功能. 更多钩子机制的介绍可以参考[这里](https://www.calltutors.com/blog/what-is-hook). + +`Runner` 中所使用的钩子分为两类: + +- 默认钩子 (default hooks) + +它们实现了训练时所必需的功能,在配置文件中用 `default_hooks` 定义传给 `Runner`, `Runner` 通过 [`register_default_hooks`](https://github.com/open-mmlab/mmengine/blob/090104df21acd05a8aadae5a0d743a7da3314f6f/mmengine/runner/runner.py#L1780) 方法注册. +钩子有对应的优先级, 优先级越高, 越早被执行器调用. 如果优先级一样, 被调用的顺序和钩子注册的顺序一致. +不建议用户修改默认钩子的优先级,可以参考 [mmengine hooks 文档](https://github.com/open-mmlab/mmengine/blob/main/docs/zh_cn/tutorials/hook.md) 了解钩子优先级的定义. +下面是 MMSegmentation 中所用到的默认钩子: + +| 钩子 | 用法 | 优先级 | +| :-----------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------: | :---------------: | +| [IterTimerHook](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/iter_timer_hook.py) | 记录 iteration 花费的时间. | NORMAL (50) | +| [LoggerHook](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/logger_hook.py) | 从 `Runner` 里不同的组件中收集日志记录,并将其输出到终端, JSON 文件,tensorboard,wandb 等下游. | BELOW_NORMAL (60) | +| [ParamSchedulerHook](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/param_scheduler_hook.py) | 更新优化器里面的一些超参数,例如学习率的动量. | LOW (70) | +| [CheckpointHook](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/checkpoint_hook.py) | 规律性地保存 checkpoint 文件. | VERY_LOW (90) | +| [DistSamplerSeedHook](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/sampler_seed_hook.py) | 确保分布式采样器 shuffle 是打开的. | NORMAL (50) | +| [SegVisualizationHook](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/mmseg/visualization/local_visualizer.py) | 可视化验证和测试过程里的预测结果. | NORMAL (50) | + +它们在配置文件中的配置为: + +```python +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=32000), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='SegVisualizationHook')) +``` + +以上默认钩子除 `SegVisualizationHook` 外都是在 MMEngine 中所实现, `SegVisualizationHook` 是在 MMSegmentation 里被实现的钩子, 之后会专门介绍. + +- 自定义钩子 (custom hooks) + +自定义钩子在配置通过 `custom_hooks` 定义, `Runner` 通过 [`register_custom_hooks`](https://github.com/open-mmlab/mmengine/blob/090104df21acd05a8aadae5a0d743a7da3314f6f/mmengine/runner/runner.py#L1852) 方法注册. +自定义钩子优先级需要在配置文件里设置, 如果没有设置, 则会被默认设置为 `NORMAL`. 
下面是部分 MMEngine 中实现的自定义钩子: + +| 钩子 | 用法 | +| :----------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------: | +| [EMAHook](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/ema_hook.py) | 在模型训练时使用指数滑动平均 (Exponential Moving Average, EMA). | +| [EmptyCacheHook](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/empty_cache_hook.py) | 在训练时释放所有没有被缓存占用的 GPU 显存. | +| [SyncBuffersHook](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/sync_buffer_hook.py) | 在每个训练 Epoch 结束时同步模型 buffer 里的参数例如 BN 里的 `running_mean` 和 `running_var`. | + +以下是 `EMAHook` 的用例, 配置文件中, 将已经实现的自定义钩子的配置作为 `custom_hooks` 列表中的成员. + +```python +custom_hooks = [ + dict(type='EMAHook', start_iters=500, priority='NORMAL') +] +``` + +### SegVisualizationHook + +MMSegmentation 实现了 [`SegVisualizationHook`](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/mmseg/engine/hooks/visualization_hook.py#L17), 用来在验证和测试时可视化预测结果. +`SegVisualizationHook` 重写了基类 `Hook` 中的 `_after_iter` 方法, 在验证或测试时, 根据指定的迭代次数间隔调用 `visualizer` 的 `add_datasample` 方法绘制语义分割结果,具体实现如下: + +```python +... +@HOOKS.register_module() +class SegVisualizationHook(Hook): +... + def _after_iter(self, + runner: Runner, + batch_idx: int, + data_batch: dict, + outputs: Sequence[SegDataSample], + mode: str = 'val') -> None: +... + # 如果是训练阶段或者 self.draw 为 False 则直接跳出 + if self.draw is False or mode == 'train': + return +... + if self.every_n_inner_iters(batch_idx, self.interval): + for output in outputs: + img_path = output.img_path + img_bytes = self.file_client.get(img_path) + img = mmcv.imfrombytes(img_bytes, channel_order='rgb') + window_name = f'{mode}_{osp.basename(img_path)}' + + self._visualizer.add_datasample( + window_name, + img, + data_sample=output, + show=self.show, + wait_time=self.wait_time, + step=runner.iter) + +``` + +关于可视化更多的细节可以查看[这里](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/en/user_guides/visualization.md). + +## 优化器 + +### 优化器封装 + +OpenMMLab 2.0 设计了优化器封装, 它支持不同的训练策略, 包括混合精度训练、梯度累加和梯度截断等, 用户可以根据需求选择合适的训练策略. +优化器封装还定义了一套标准的参数更新流程, 用户可以基于这一套流程, 在同一套代码里, 实现不同训练策略的切换. 如果想了解更多, 可以参考 [MMEngine 优化器封装文档](https://github.com/open-mmlab/mmengine/blob/main/docs/zh_cn/tutorials/optim_wrapper.md). + +MMSegmenetation 训练模型也是使用优化器封装来优化参数, 以下是 MMSegmentation 中常用的使用方法: + +#### 配置 PyTorch 支持的优化器 + +OpenMMLab 2.0 支持 PyTorch 原生所有优化器, 参考[这里](https://github.com/open-mmlab/mmengine/blob/main/docs/zh_cn/tutorials/optim_wrapper.md#%E7%AE%80%E5%8D%95%E9%85%8D%E7%BD%AE). +在配置文件中设置训练时 `Runner` 所使用的优化器, 需要定义 `optim_wrapper`, 例如配置使用 SGD 优化器: + +```python +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005), + clip_grad=None) +``` + +#### `paramwise_cfg` 参数 + +在模型训练中, 如果想在优化器里为不同参数设置优化策略, 例如设置不同的学习率、权重衰减, 可以通过设置 `paramwise_cfg` 来实现. 
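+
+下面先给出一个通用的示意配置 (其中键名与倍率仅作演示): 通过 `custom_keys`, 参数名中包含 `'head'` 的参数学习率会乘以 10, 参数名中包含 `'norm'` 的参数则不做权重衰减:
+
+```python
+optim_wrapper = dict(
+    type='OptimWrapper',
+    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005),
+    paramwise_cfg=dict(
+        custom_keys={
+            # 参数名中包含 'head' 的参数: 学习率乘以 10
+            'head': dict(lr_mult=10.),
+            # 参数名中包含 'norm' 的参数: 关闭权重衰减
+            'norm': dict(decay_mult=0.)
+        }))
+```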
+
+例如, 在使用 ViT 作为模型骨干网络进行训练时, 优化器中设置了权重衰减 (weight decay), 但对 position embedding, layer normalization 和 class token 参数需要关掉 weight decay, `optim_wrapper` 的配置[如下](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/configs/vit/vit_vit-b16-ln_mln_upernet_8xb2-160k_ade20k-512x512.py#L15-L27):
+
+```python
+optimizer = dict(
+    type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01)
+optim_wrapper = dict(
+    _delete_=True,
+    type='OptimWrapper',
+    optimizer=optimizer,
+    paramwise_cfg=dict(
+        custom_keys={
+            'pos_embed': dict(decay_mult=0.),
+            'cls_token': dict(decay_mult=0.),
+            'norm': dict(decay_mult=0.)
+        }))
+```
+
+其中 `decay_mult` 指的是对应参数的权重衰减的系数. 关于更多 `paramwise_cfg` 的使用可以参考 [MMEngine 文档](https://github.com/open-mmlab/mmengine/blob/main/docs/zh_cn/tutorials/optim_wrapper.md).
+
+### 优化器封装构造器
+
+默认的优化器封装构造器 [`DefaultOptimWrapperConstructor`](https://github.com/open-mmlab/mmengine/blob/376251961da47ea8254ab808ae5c51e1430f18dc/mmengine/optim/optimizer/default_constructor.py#L19) 根据输入的 `optim_wrapper` 和 `optim_wrapper` 中定义的 `paramwise_cfg` 来构建训练中使用的优化器. 当 [`DefaultOptimWrapperConstructor`](https://github.com/open-mmlab/mmengine/blob/376251961da47ea8254ab808ae5c51e1430f18dc/mmengine/optim/optimizer/default_constructor.py#L19) 功能不能满足需求时, 可以自定义优化器封装构造器来实现超参数的配置.
+
+MMSegmentation 中实现了 [`LearningRateDecayOptimizerConstructor`](https://github.com/open-mmlab/mmsegmentation/blob/b21df463d47447f33c28d9a4f46136ad64d34a40/mmseg/engine/optimizers/layer_decay_optimizer_constructor.py#L104), 在训练以 ConvNeXt, BEiT 和 MAE 为骨干网络的模型时, 可以让骨干网络的模型参数的学习率按照定义的衰减比例 (`decay_rate`) 逐层递减, 在配置文件中的配置如下:
+
+```python
+optim_wrapper = dict(
+    _delete_=True,
+    type='AmpOptimWrapper',
+    optimizer=dict(
+        type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05),
+    paramwise_cfg={
+        'decay_rate': 0.9,
+        'decay_type': 'stage_wise',
+        'num_layers': 12
+    },
+    constructor='LearningRateDecayOptimizerConstructor',
+    loss_scale='dynamic')
+```
diff --git a/docs/zh_cn/advanced_guides/evaluation.md b/docs/zh_cn/advanced_guides/evaluation.md
new file mode 100644
index 0000000000..d07fcf1048
--- /dev/null
+++ b/docs/zh_cn/advanced_guides/evaluation.md
@@ -0,0 +1 @@
+# 模型评测
diff --git a/docs/zh_cn/advanced_guides/index.rst b/docs/zh_cn/advanced_guides/index.rst
new file mode 100644
index 0000000000..b706742603
--- /dev/null
+++ b/docs/zh_cn/advanced_guides/index.rst
@@ -0,0 +1,26 @@
+基本概念
+***************
+
+.. toctree::
+   :maxdepth: 1
+
+   data_flow.md
+   structures.md
+   models.md
+   datasets.md
+   transforms.md
+   evaluation.md
+   engine.md
+   training_tricks.md
+
+自定义组件
+************************
+
+.. 
toctree:: + :maxdepth: 1 + + add_modules.md + add_datasets.md + add_transforms.md + add_metrics.md + customize_runtime.md diff --git a/docs/zh_cn/advanced_guides/models.md b/docs/zh_cn/advanced_guides/models.md new file mode 100644 index 0000000000..bebf4ef44f --- /dev/null +++ b/docs/zh_cn/advanced_guides/models.md @@ -0,0 +1 @@ +# 模型 diff --git a/docs/zh_cn/advanced_guides/structures.md b/docs/zh_cn/advanced_guides/structures.md new file mode 100644 index 0000000000..958e011a7b --- /dev/null +++ b/docs/zh_cn/advanced_guides/structures.md @@ -0,0 +1,102 @@ +# 数据结构 + +为了统一模型和各功能模块之间的输入和输出的接口, 在 OpenMMLab 2.0 MMEngine 中定义了一套抽象数据结构, 实现了基础的增/删/查/改功能, 支持不同设备间的数据迁移, 也支持了如 +`.cpu()`, `.cuda()`, `.get()` 和 `.detach()` 的类字典和张量的操作。具体可以参考 [MMEngine 文档](https://github.com/open-mmlab/mmengine/blob/main/docs/en/advanced_tutorials/data_element.md)。 + +同样的, MMSegmentation 亦遵循了 OpenMMLab 2.0 各模块间的接口协议, 定义了 `SegDataSample` 用来封装语义分割任务所需要的数据。 + +## 语义分割数据 SegDataSample + +[SegDataSample](mmseg.structures.SegDataSample) 包括了三个主要数据字段 `gt_sem_seg`, `pred_sem_seg` 和 `seg_logits`, 分别用来存放标注信息, 预测结果和预测的未归一化前的 logits 值。 + +| 字段 | 类型 | 描述 | +| -------------- | ------------------------- | ------------------------------- | +| gt_sem_seg | [`PixelData`](#pixeldata) | 图像标注信息. | +| pred_instances | [`PixelData`](#pixeldata) | 图像预测结果. | +| seg_logits | [`PixelData`](#pixeldata) | 模型预测未归一化前的 logits 值. | + +以下示例代码展示了 `SegDataSample` 的使用方法: + +```python +import torch +from mmengine.structures import PixelData +from mmseg.structures import SegDataSample + +img_meta = dict(img_shape=(4, 4, 3), + pad_shape=(4, 4, 3)) +data_sample = SegDataSample() +# 定义 gt_segmentations 用于封装模型的输出信息 +gt_segmentations = PixelData(metainfo=img_meta) +gt_segmentations.data = torch.randint(0, 2, (1, 4, 4)) + +# 增加和处理 SegDataSample 中的属性 +data_sample.gt_sem_seg = gt_segmentations +assert 'gt_sem_seg' in data_sample +assert 'data' in data_sample.gt_sem_seg +assert 'img_shape' in data_sample.gt_sem_seg.metainfo_keys() +print(data_sample.gt_sem_seg.shape) +''' +(4, 4) +''' +print(data_sample) +''' + +) at 0x1c2aae44d60> +''' + +# 删除和修改 SegDataSample 中的属性 +data_sample = SegDataSample() +gt_segmentations = PixelData(metainfo=img_meta) +gt_segmentations.data = torch.randint(0, 2, (1, 4, 4)) +data_sample.gt_sem_seg = gt_segmentations +data_sample.gt_sem_seg.set_metainfo(dict(img_shape=(4,4,9), pad_shape=(4,4,9))) +del data_sample.gt_sem_seg.img_shape + +# 类张量的操作 +data_sample = SegDataSample() +gt_segmentations = PixelData(metainfo=img_meta) +gt_segmentations.data = torch.randint(0, 2, (1, 4, 4)) +cuda_gt_segmentations = gt_segmentations.cuda() +cuda_gt_segmentations = gt_segmentations.to('cuda:0') +cpu_gt_segmentations = cuda_gt_segmentations.cpu() +cpu_gt_segmentations = cuda_gt_segmentations.to('cpu') +``` + +## 在 SegDataSample 中自定义新的属性 + +如果你想在 `SegDataSample` 中自定义新的属性,你可以参考下面的 [SegDataSample](https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/structures/seg_data_sample.py) 示例: + +```python +class SegDataSample(BaseDataElement): + ... 
+ + @property + def xxx_property(self) -> xxxData: + return self._xxx_property + + @xxx_property.setter + def xxx_property(self, value: xxxData) -> None: + self.set_field(value, '_xxx_property', dtype=xxxData) + + @xxx_property.deleter + def xxx_property(self) -> None: + del self._xxx_property +``` + +这样一个新的属性 `xxx_property` 就将被增加到 `SegDataSample` 里面了。 diff --git a/docs/zh_cn/advanced_guides/training_tricks.md b/docs/zh_cn/advanced_guides/training_tricks.md new file mode 100644 index 0000000000..a33c0ea9cf --- /dev/null +++ b/docs/zh_cn/advanced_guides/training_tricks.md @@ -0,0 +1,95 @@ +# 训练技巧(待更新) + +MMSegmentation 支持如下训练技巧: + +## 主干网络和解码头组件使用不同的学习率 (Learning Rate, LR) + +在语义分割里,一些方法会让解码头组件的学习率大于主干网络的学习率,这样可以获得更好的表现或更快的收敛。 + +在 MMSegmentation 里面,您也可以在配置文件里添加如下行来让解码头组件的学习率是主干组件的10倍。 + +```python +optimizer=dict( + paramwise_cfg = dict( + custom_keys={ + 'head': dict(lr_mult=10.)})) +``` + +通过这种修改,任何被分组到 `'head'` 的参数的学习率都将乘以10。您也可以参照 [MMCV 文档](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.DefaultOptimizerConstructor) 获取更详细的信息。 + +## 在线难样本挖掘 (Online Hard Example Mining, OHEM) + +对于训练时采样,我们在 [这里](https://github.com/open-mmlab/mmsegmentation/tree/master/mmseg/core/seg/sampler) 做了像素采样器。 +如下例子是使用 PSPNet 训练并采用 OHEM 策略的配置: + +```python +_base_ = './pspnet_r50-d8_512x1024_40k_cityscapes.py' +model=dict( + decode_head=dict( + sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=100000)) ) +``` + +通过这种方式,只有置信分数在0.7以下的像素值点会被拿来训练。在训练时我们至少要保留100000个像素值点。如果 `thresh` 并未被指定,前 `min_kept` +个损失的像素值点才会被选择。 + +## 类别平衡损失 (Class Balanced Loss) + +对于不平衡类别分布的数据集,您也许可以改变每个类别的损失权重。这里以 cityscapes 数据集为例: + +```python +_base_ = './pspnet_r50-d8_512x1024_40k_cityscapes.py' +model=dict( + decode_head=dict( + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0, + # DeepLab 对 cityscapes 使用这种权重 + class_weight=[0.8373, 0.9180, 0.8660, 1.0345, 1.0166, 0.9969, 0.9754, + 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, + 1.0865, 1.0955, 1.0865, 1.1529, 1.0507]))) +``` + +`class_weight` 将被作为 `weight` 参数,传递给 `CrossEntropyLoss`。详细信息请参照 [PyTorch 文档](https://pytorch.org/docs/stable/nn.html?highlight=crossentropy#torch.nn.CrossEntropyLoss) 。 + +## 同时使用多种损失函数 (Multiple Losses) + +对于训练时损失函数的计算,我们目前支持多个损失函数同时使用。 以 `unet` 使用 `DRIVE` 数据集训练为例, +使用 `CrossEntropyLoss` 和 `DiceLoss` 的 `1:3` 的加权和作为损失函数。配置文件写为: + +```python +_base_ = './fcn_unet_s5-d16_64x64_40k_drive.py' +model = dict( + decode_head=dict(loss_decode=[dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0)]), + auxiliary_head=dict(loss_decode=[dict(type='CrossEntropyLoss', loss_name='loss_ce',loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0)]), + ) +``` + +通过这种方式,确定训练过程中损失函数的权重 `loss_weight` 和在训练日志里的名字 `loss_name`。 + +注意: `loss_name` 的名字必须带有 `loss_` 前缀,这样它才能被包括在反传的图里。 + +## 在损失函数中忽略特定的 label 类别 + +默认设置 `avg_non_ignore=False`, 即每个像素都用来计算损失函数。尽管其中的一些像素属于需要被忽略的类别。 + +对于训练时损失函数的计算,我们目前支持使用 `avg_non_ignore` 和 `ignore_index` 来忽略 label 特定的类别。 这样损失函数将只在非忽略类别像素中求平均值,会获得更好的表现。这里是[相关 PR](https://github.com/open-mmlab/mmsegmentation/pull/1409)。以 `unet` 使用 `Cityscapes` 数据集训练为例, +在计算损失函数时,忽略 label 为0的背景,并且仅在不被忽略的像素上计算均值。配置文件写为: + +```python +_base_ = './fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes.py' +model = dict( + decode_head=dict( + ignore_index=0, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0, avg_non_ignore=True), + auxiliary_head=dict( + ignore_index=0, + loss_decode=dict( + 
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0, avg_non_ignore=True)), + )) +``` + +通过这种方式,确定训练过程中损失函数的权重 `loss_weight` 和在训练日志里的名字 `loss_name`。 + +注意: `loss_name` 的名字必须带有 `loss_` 前缀,这样它才能被包括在反传的图里。 diff --git a/docs/zh_cn/advanced_guides/transforms.md b/docs/zh_cn/advanced_guides/transforms.md new file mode 100644 index 0000000000..c96b688bd9 --- /dev/null +++ b/docs/zh_cn/advanced_guides/transforms.md @@ -0,0 +1 @@ +# 数据增广 diff --git a/docs/zh_cn/api.rst b/docs/zh_cn/api.rst new file mode 100644 index 0000000000..be68c7579d --- /dev/null +++ b/docs/zh_cn/api.rst @@ -0,0 +1,104 @@ +mmseg.apis +-------------- +.. automodule:: mmseg.apis + :members: + +mmseg.datasets +-------------- + +datasets +^^^^^^^^^^ +.. automodule:: mmseg.datasets + :members: + +transforms +^^^^^^^^^^ +.. automodule:: mmseg.datasets.transforms + :members: + +mmseg.engine +-------------- + +hooks +^^^^^^^^^^ +.. automodule:: mmseg.engine.hooks + :members: + +optimizers +^^^^^^^^^^ +.. automodule:: mmseg.engine.optimizers + :members: + +mmseg.evaluation +-------------- + +metrics +^^^^^^^^^^ +.. automodule:: mmseg.evaluation.metrics + :members: + +mmseg.models +-------------- + +models +^^^^^^^^^^ +.. automodule:: mmseg.models + :members: + +segmentors +^^^^^^^^^^ +.. automodule:: mmseg.models.segmentors + :members: + +backbones +^^^^^^^^^^ +.. automodule:: mmseg.models.backbones + :members: + +decode_heads +^^^^^^^^^^^^ +.. automodule:: mmseg.models.decode_heads + :members: + +losses +^^^^^^^^^^ +.. automodule:: mmseg.models.losses + :members: + +utils +^^^^^^^^^^ +.. automodule:: mmseg.models.utils + :members: + +necks +^^^^^^^^^^ +.. automodule:: mmseg.models.necks + :members: + +mmseg.registry +-------------- +.. automodule:: mmseg.registry + :members: + +mmseg.structures +-------------- + +structures +^^^^^^^^^^ +.. automodule:: mmseg.structures + :members: + +sampler +^^^^^^^^^^ +.. automodule:: mmseg.structures.sampler + :members: + +mmseg.utils +-------------- +.. automodule:: mmseg.utils + :members: + +mmseg.visualization +-------------- +.. automodule:: mmseg.visualization + :members: diff --git a/docs/zh_cn/conf.py b/docs/zh_cn/conf.py new file mode 100644 index 0000000000..18420558dc --- /dev/null +++ b/docs/zh_cn/conf.py @@ -0,0 +1,133 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+# +import os +import subprocess +import sys + +import pytorch_sphinx_theme + +sys.path.insert(0, os.path.abspath('../../')) + +# -- Project information ----------------------------------------------------- + +project = 'MMSegmentation' +copyright = '2020-2021, OpenMMLab' +author = 'MMSegmentation Authors' +version_file = '../../mmseg/version.py' + + +def get_version(): + with open(version_file) as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +# The full version, including alpha/beta/rc tags +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', + 'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser' +] + +autodoc_mock_imports = [ + 'matplotlib', 'pycocotools', 'mmseg.version', 'mmcv.ops' +] + +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# The master toctree document. +master_doc = 'index' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +# html_theme = 'sphinx_rtd_theme' +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] +html_theme_options = { + 'logo_url': + 'https://mmsegmentation.readthedocs.io/zh-CN/latest/', + 'menu': [ + { + 'name': + '教程', + 'url': + 'https://github.com/open-mmlab/mmsegmentation/blob/master/' + 'demo/MMSegmentation_Tutorial.ipynb' + }, + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmsegmentation' + }, + { + 'name': + '上游库', + 'children': [ + { + 'name': 'MMCV', + 'url': 'https://github.com/open-mmlab/mmcv', + 'description': '基础视觉库' + }, + ] + }, + ], + # Specify the language of shared menu + 'menu_lang': + 'cn', +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] + +# Enable ::: for my_st +myst_enable_extensions = ['colon_fence'] + +language = 'zh-CN' + + +def builder_inited_handler(app): + subprocess.run(['./stat.py']) + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) diff --git a/docs/zh_cn/get_started.md b/docs/zh_cn/get_started.md new file mode 100644 index 0000000000..fff70a2560 --- /dev/null +++ b/docs/zh_cn/get_started.md @@ -0,0 +1,202 @@ +# 开始:安装和运行 MMSeg + +## 预备知识 + +本教程中,我们将会演示如何使用 PyTorch 准备环境。 + +MMSegmentation 可以在 Linux, Windows 和 macOS 系统上运行,并且需要安装 Python 3.6+, CUDA 9.2+ 和 PyTorch 1.5+ + +**注意:** +如果您已经安装了 PyTorch, 可以跳过该部分,直接到[下一小节](##安装)。否则,您可以按照以下步骤操作。 + +**步骤 0.** 从[官方网站](https://docs.conda.io/en/latest/miniconda.html)下载并安装 Miniconda + +**步骤 1.** 创建一个 conda 环境,并激活 + +```shell +conda create --name openmmlab python=3.8 -y +conda activate openmmlab +``` + +**Step 2.** 参考 [official instructions](https://pytorch.org/get-started/locally/) 安装 PyTorch + +在 GPU 平台上: + +```shell +conda install pytorch torchvision -c pytorch +``` + +在 CPU 平台上 + +```shell +conda install pytorch torchvision cpuonly -c pytorch +``` + +## 安装 + +我们建议用户遵循我们的最佳实践来安装 MMSegmentation 。但是整个过程是高度自定义的。更多信息请参见[自定义安装](#自定义安装)部分。 + +### 最佳实践 + +**步骤 0.** 使用 [MIM](https://github.com/open-mmlab/mim) 安装 [MMCV](https://github.com/open-mmlab/mmcv) + +```shell +pip install -U openmim +mim install mmengine +mim install "mmcv>=2.0.0rc1" +``` + +**步骤 1.** 安装 MMSegmentation + +情况 a: 如果您想立刻开发和运行 mmsegmentation,您可通过源码安装: + +```shell +git clone -b dev-1.x https://github.com/open-mmlab/mmsegmentation.git +cd mmsegmentation +pip install -v -e . +# '-v' 表示详细模式,更多的输出 +# '-e' 表示以可编辑模式安装工程, +# 因此对代码所做的任何修改都生效,无需重新安装 +``` + +情况 b: 如果您把 mmsegmentation 作为依赖库或者第三方库,可以通过 pip 安装: + +```shell +pip install "mmsegmentation>=1.0.0rc0" +``` + +### 验证是否安装成功 + +为了验证 MMSegmentation 是否正确安装,我们提供了一些示例代码来运行一个推理 demo 。 + +**步骤 1.** 下载配置文件和模型文件 + +```shell +mim download mmsegmentation --config pspnet_r50-d8_4xb2-40k_cityscapes-512x1024 --dest . +``` + +该下载过程可能需要花费几分钟,这取决于您的网络环境。当下载结束,您将看到以下两个文件在您当前工作目录:`pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py` 和 `pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth` + +**步骤 2.** 验证推理 demo + +选项 (a). 如果您通过源码安装了 mmsegmentation,运行以下命令即可: + +```shell +python demo/image_demo.py demo/demo.png configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth --device cuda:0 --out-file result.jpg +``` + +您将在当前文件夹中看到一个新图像 `result.jpg`,其中所有目标都覆盖了分割 mask + +选项 (b). 如果您通过 pip 安装 mmsegmentation, 打开您的 python +解释器,复制粘贴以下代码: + +```python +from mmseg.apis import inference_model, init_model, show_result_pyplot +from mmseg.utils import register_all_modules +import mmcv + +register_all_modules() +config_file = 'pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py' +checkpoint_file = 'pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth' + +# 根据配置文件和模型文件建立模型 +model = init_model(config_file, checkpoint_file, device='cuda:0') + +# 在单张图像上测试并可视化 +img = 'demo/demo.png' # or img = mmcv.imread(img), 这样仅需下载一次 +result = inference_model(model, img) +# 在新的窗口可视化结果 +show_result_pyplot(model, img, result, show=True) +# 或者将可视化结果保存到图像文件夹中 +# 您可以修改分割 map 的透明度 (0, 1]. 
+show_result_pyplot(model, img, result, show=True, out_file='result.jpg', opacity=0.5) +# 在一段视频上测试并可视化分割结果 +video = mmcv.VideoReader('video.mp4') +for frame in video: + result = inference_segmentor(model, frame) + show_result_pyplot(model, result, wait_time=1) +``` + +您可以修改上面的代码来测试单个图像或视频,这两个选项都可以验证安装是否成功。 + +### 自定义安装 + +#### CUDA 版本 + +当安装 PyTorch 的时候,您需要指定 CUDA 的版本, 如果您不确定选择哪个版本,请遵循我们的建议: + +- 对于基于 Ampere 的 NVIDIA GPUs, 例如 GeForce 30 系列和 NVIDIA A100, 必须要求是 CUDA 11. +- 对于更老的 NVIDIA GPUs, CUDA 11 is backward compatible, but CUDA 10.2 提供了更好的兼容性,以及更加的轻量化 + +请确保 GPU 驱动满足最小的版本需求。详情请参考这个[表格](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions) + +**注意:** +如果您按照我们的最佳实践,安装 CUDA 运行库就足够了,因为不需要 CUDA 代码在本地编译。 但是如果您希望从源码编译 MMCV 或者需要开发其他的 CUDA 算子,您需要从 NVIDIA 的[官网](https://developer.nvidia.com/cuda-downloads)安装完整的 CUDA 工具,同时它的版本需要与 PyTorch 的 CUDA 版本匹配。即 `conda install` 命令中指定的 cudatoolkit 版本。 + +#### 不使用 MIM 安装 MMCV + +MMCV 包含 C++ 和 CUDA 扩展,因此与 PyTorch 的依赖方式比较复杂。MIM 自动解决了这种依赖关系,使安装更容易。然而,MIM 也并不是必须的。 + +为了使用 pip 而不是 MIM 安装 MMCV, 请参考 [MMCV 安装指南](https://mmcv.readthedocs.io/en/latest/get_started/installation.html). 这需要手动指定一个基于 PyTorch 版本及其 CUDA 版本的 find-url. + +例如,以下命令可为 PyTorch 1.10.x and CUDA 11.3 安装 mmcv==2.0.0rc1 + +```shell +pip install mmcv==2.0.0rc1 -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10/index.html +``` + +#### 在仅有 CPU 的平台安装 + +MMSegmentation 可以在仅有 CPU 的版本上运行。在 CPU 模式,您可以训练(需要 MMCV-Lite 版本 >= 2.0.0rc0),测试和推理模型。 + +#### 在 Google Colab 上安装 + +[Google Colab](https://research.google.com/) 通常已经安装了 PyTorch,因此我们仅需要通过以下命令安装 MMCV 和 MMSegmentation。 + +**步骤 1.** 使用 [MIM](https://github.com/open-mmlab/mim) 安装 [MMCV](https://github.com/open-mmlab/mmcv) + +```shell +!pip3 install openmim +!mim install mmengine +!mim install "mmcv>=2.0.0rc1" +``` + +**Step 2.** 通过源码安装 MMSegmentation + +```shell +!git clone https://github.com/open-mmlab/mmsegmentation.git +%cd mmsegmentation +!git checkout dev-1.x +!pip install -e . +``` + +**Step 3.** 验证 + +```python +import mmseg +print(mmseg.__version__) +# 示例输出: 1.0.0rc0 +``` + +**注意:** +在 Jupyter 中, 感叹号 `!` 用于调用外部可执行命令,`%cd` 是一个 [magic command](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd) 可以改变当前 python 的工作目录。 + +### 通过 Docker 使用 MMSegmentation + +我们提供了一个 [Dockerfile](https://github.com/open-mmlab/mmsegmentation/blob/master/docker/Dockerfile) 来建立映像。确保您的 [docker 版本](https://docs.docker.com/engine/install/) >=19.03. 
+ +```shell +# 通过 PyTorch 1.11, CUDA 11.3 建立映像 +# 如果您使用其他版本,修改 Dockerfile 即可 +docker build -t mmsegmentation docker/ +``` + +运行: + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmsegmentation/data mmsegmentation +``` + +## 问题解答 + +如果您在安装过程中遇到了其他问题,请第一时间查阅 [FAQ](faq.md) 文件。如果没有找到答案,您也可以在 GitHub 上提出 [issue](https://github.com/open-mmlab/mmsegmentation/issues/new/choose) diff --git a/docs/zh_cn/imgs/qq_group_qrcode.jpg b/docs/zh_cn/imgs/qq_group_qrcode.jpg new file mode 100644 index 0000000000..417347449f Binary files /dev/null and b/docs/zh_cn/imgs/qq_group_qrcode.jpg differ diff --git a/docs/zh_cn/imgs/seggroup_qrcode.jpg b/docs/zh_cn/imgs/seggroup_qrcode.jpg new file mode 100644 index 0000000000..9684582ee1 Binary files /dev/null and b/docs/zh_cn/imgs/seggroup_qrcode.jpg differ diff --git a/docs/zh_cn/imgs/zhihu_qrcode.jpg b/docs/zh_cn/imgs/zhihu_qrcode.jpg new file mode 100644 index 0000000000..c745fb027f Binary files /dev/null and b/docs/zh_cn/imgs/zhihu_qrcode.jpg differ diff --git a/docs/zh_cn/index.rst b/docs/zh_cn/index.rst new file mode 100644 index 0000000000..e66c178689 --- /dev/null +++ b/docs/zh_cn/index.rst @@ -0,0 +1,57 @@ +欢迎来到 MMSegmentation 的文档! +======================================= + +.. toctree:: + :maxdepth: 2 + :caption: 开始你的第一步 + + get_started.md + +.. toctree:: + :maxdepth: 2 + :caption: 用户指南 + + user_guides/index.rst + +.. toctree:: + :maxdepth: 2 + :caption: 进阶指南 + + advanced_guides/index.rst + +.. toctree:: + :maxdepth: 1 + :caption: 迁移指引 + + migration.md + +.. toctree:: + :caption: 接口文档(英文) + + api.rst + +.. toctree:: + :maxdepth: 1 + :caption: 模型库 + + model_zoo.md + modelzoo_statistics.md + +.. toctree:: + :maxdepth: 2 + :caption: 说明 + + changelog.md + faq.md + +.. toctree:: + :caption: 语言切换 + + switch_language.md + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/docs/zh_cn/make.bat b/docs/zh_cn/make.bat new file mode 100644 index 0000000000..922152e96a --- /dev/null +++ b/docs/zh_cn/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/zh_cn/migration.md b/docs/zh_cn/migration.md new file mode 100644 index 0000000000..5168bb0bd5 --- /dev/null +++ b/docs/zh_cn/migration.md @@ -0,0 +1 @@ +# 迁移文档 diff --git a/docs/zh_cn/model_zoo.md b/docs/zh_cn/model_zoo.md new file mode 100644 index 0000000000..bd5721579f --- /dev/null +++ b/docs/zh_cn/model_zoo.md @@ -0,0 +1,152 @@ +# 标准与模型库 + +## 共同设定 + +- 我们默认使用 4 卡分布式训练 +- 所有 PyTorch 风格的 ImageNet 预训练网络由我们自己训练,和 [论文](https://arxiv.org/pdf/1812.01187.pdf) 保持一致。 + 我们的 ResNet 网络是基于 ResNetV1c 的变种,在这里输入层的 7x7 卷积被 3个 3x3 取代 +- 为了在不同的硬件上保持一致,我们以 `torch.cuda.max_memory_allocated()` 的最大值作为 GPU 占用率,同时设置 `torch.backends.cudnn.benchmark=False`。 + 注意,这通常比 `nvidia-smi` 显示的要少 +- 我们以网络 forward 和后处理的时间加和作为推理时间,除去数据加载时间。我们使用脚本 `tools/benchmark.py` 来获取推理时间,它在 `torch.backends.cudnn.benchmark=False` 的设定下,计算 200 张图片的平均推理时间 +- 在框架中,有两种推理模式 + - `slide` 模式(滑动模式):测试的配置文件字段 `test_cfg` 会是 `dict(mode='slide', crop_size=(769, 769), stride=(513, 513))`. + 在这个模式下,从原图中裁剪多个小图分别输入网络中进行推理。小图的大小和小图之间的距离由 `crop_size` 和 `stride` 决定,重合区域会进行平均 + - `whole` 模式 (全图模式):测试的配置文件字段 `test_cfg` 会是 `dict(mode='whole')`. 在这个模式下,全图会被直接输入到网络中进行推理。 + 对于 769x769 下训练的模型,我们默认使用 `slide` 进行推理,其余模型用 `whole` 进行推理 +- 对于输入大小为 8x+1 (比如769),我们使用 `align_corners=True`。其余情况,对于输入大小为 8x (比如 512,1024),我们使用 `align_corners=False` + +## 基线 + +### FCN + +请参考 [FCN](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn) 获得详细信息。 + +### PSPNet + +请参考 [PSPNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet) 获得详细信息。 + +### DeepLabV3 + +请参考 [DeepLabV3](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3) 获得详细信息。 + +### PSANet + +请参考 [PSANet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet) 获得详细信息。 + +### DeepLabV3+ + +请参考 [DeepLabV3+](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus) 获得详细信息。 + +### UPerNet + +请参考 [UPerNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet) 获得详细信息。 + +### NonLocal Net + +请参考 [NonLocal Net](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nlnet) 获得详细信息。 + +### EncNet + +请参考 [EncNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet) 获得详细信息。 + +### CCNet + +请参考 [CCNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet) 获得详细信息。 + +### DANet + +请参考 [DANet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet) 获得详细信息。 + +### APCNet + +请参考 [APCNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet) 获得详细信息。 + +### HRNet + +请参考 [HRNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet) 获得详细信息。 + +### GCNet + +请参考 [GCNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet) 获得详细信息。 + +### DMNet + +请参考 [DMNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet) 获得详细信息。 + +### ANN + +请参考 [ANN](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann) 获得详细信息。 + +### OCRNet + +请参考 [OCRNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet) 获得详细信息。 + +### Fast-SCNN + +请参考 [Fast-SCNN](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastscnn) 获得详细信息。 + +### ResNeSt + +请参考 
[ResNeSt](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest) 获得详细信息。 + +### Semantic FPN + +请参考 [Semantic FPN](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/semfpn) 获得详细信息。 + +### PointRend + +请参考 [PointRend](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/point_rend) 获得详细信息。 + +### MobileNetV2 + +请参考 [MobileNetV2](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2) 获得详细信息。 + +### MobileNetV3 + +请参考 [MobileNetV3](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v3) 获得详细信息。 + +### EMANet + +请参考 [EMANet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/emanet) 获得详细信息。 + +### DNLNet + +请参考 [DNLNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet) 获得详细信息。 + +### CGNet + +请参考 [CGNet](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/cgnet) 获得详细信息。 + +### Mixed Precision (FP16) Training + +请参考 [Mixed Precision (FP16) Training 在 BiSeNetV2 训练的样例](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes.py) 获得详细信息。 + +## 速度标定(待更新) + +### 硬件 + +- 8 NVIDIA Tesla V100 (32G) GPUs +- Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz + +### 软件环境 + +- Python 3.7 +- PyTorch 1.5 +- CUDA 10.1 +- CUDNN 7.6.03 +- NCCL 2.4.08 + +### 训练速度 + +为了公平比较,我们全部使用 ResNet-101V1c 进行标定。输入大小为 1024x512,批量样本数为 2。 + +训练速度如下表,指标为每次迭代的时间,以秒为单位,越低越快。 + +| Implementation | PSPNet (s/iter) | DeepLabV3+ (s/iter) | +| --------------------------------------------------------------------------- | --------------- | ------------------- | +| [MMSegmentation](https://github.com/open-mmlab/mmsegmentation) | **0.83** | **0.85** | +| [SegmenTron](https://github.com/LikeLy-Journey/SegmenTron) | 0.84 | 0.85 | +| [CASILVision](https://github.com/CSAILVision/semantic-segmentation-pytorch) | 1.15 | N/A | +| [vedaseg](https://github.com/Media-Smart/vedaseg) | 0.95 | 1.25 | + +注意:DeepLabV3+ 的输出步长为 8。 diff --git a/docs/zh_cn/notes/faq.md b/docs/zh_cn/notes/faq.md new file mode 100644 index 0000000000..09fde025fd --- /dev/null +++ b/docs/zh_cn/notes/faq.md @@ -0,0 +1,8 @@ +# 常见问题解答(FAQ)(待更新) + +我们在这里列出了使用时的一些常见问题及其相应的解决方案。 如果您发现有一些问题被遗漏,请随时提 PR 丰富这个列表。 如果您无法在此获得帮助,请使用 [issue模板](https://github.com/open-mmlab/mmsegmentation/blob/master/.github/ISSUE_TEMPLATE/error-report.md/)创建问题,但是请在模板中填写所有必填信息,这有助于我们更快定位问题。 + +## 如何获知模型训练时需要的显卡数量 + +- 看模型的config文件的命名。可以参考[学习配置文件](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/tutorials/config.md)中的`配置文件命名风格`部分。比如,对于名字为`segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py`的config文件,`8x1`代表训练其对应的模型需要的卡数为8,每张卡中的batch size为1。 +- 看模型的log文件。点开该模型的log文件,并在其中搜索`nGPU`,在`nGPU`后的数字个数即训练时所需的卡数。比如,在log文件中搜索`nGPU`得到`nGPU 0,1,2,3,4,5,6,7`的记录,则说明训练该模型需要使用八张卡。 diff --git a/docs/zh_cn/overview.md b/docs/zh_cn/overview.md new file mode 100644 index 0000000000..7dce105a81 --- /dev/null +++ b/docs/zh_cn/overview.md @@ -0,0 +1,75 @@ +# 概述 + +本章节向您介绍 MMSegmentation 框架以及语义分割相关的基本概念。我们还提供了关于 MMSegmentation 的详细教程链接。 + +## 什么是语义分割? + +语义分割是将图像中属于同一目标类别的部分聚类在一起的任务。它也是一种像素级预测任务,因为图像中的每一个像素都将根据类别进行分类。该任务的一些示例基准有 [Cityscapes](https://www.cityscapes-dataset.com/benchmarks/), [PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/) 和 [ADE20K](https://groups.csail.mit.edu/vision/datasets/ADE20K/) 。通常用平均交并比 (Mean IoU) 和像素准确率 (Pixel Accuracy) 这两个指标来评估模型。 + +## 什么是 MMSegmentation? 
+ +MMSegmentation 是一个工具箱,它为语义分割任务的统一实现和模型评估提供了一个框架,并且高质量实现了常用的语义分割方法和数据集。 + +MMSeg 主要包含了 apis, structures, datasets, models, engine, evaluation 和 visualization 这七个主要部分。 + +- **apis** 提供了模型推理的高级api + +- **structures** 提供了分割任务的数据结构 `SegDataSample` + +- **datasets** 支持用于语义分割的多种数据集 + + - **transforms** 包含多种数据增强变换 + +- **models** 是分割器最重要的部分,包含了分割器的不同组件 + + - **segmentors** 定义了所有分割模型类 + - **data_preprocessors** 用于预处理模型的输入数据 + - **backbones** 包含各种骨干网络,可将图像映射为特征图 + - **necks** 包含各种模型颈部组件,用于连接分割头和骨干网络 + - **decode_heads** 包含各种分割头,将特征图作为输入,并预测分割结果 + - **losses** 包含各种损失函数 + +- **engine** 是运行时组件的一部分,扩展了 [MMEngine](https://github.com/open-mmlab/mmengine) 的功能 + + - **optimizers** 提供了优化器和优化器封装 + - **hooks** 提供了 runner 的各种钩子 + +- **evaluation** 提供了评估模型性能的不同指标 + +- **visualization** 分割结果的可视化工具 + +## 如何使用本指南? + +以下是详细步骤,将带您一步步学习如何使用 MMSegmentation : + +1. 有关安装说明,请参阅 [开始你的第一步](getting_started.md)。 + +2. 对于初学者来说,MMSegmentation 是开始语义分割之旅的最好选择,因为这里实现了许多 SOTA 模型以及经典的模型 [model](model_zoo.md) 。另外,将各类组件和高级 API 結合使用,可以更便捷的执行分割任务。关于 MMSegmentation 的基本用法,请参考下面的教程: + + - [配置](user_guides/1_config.md) + - [数据预处理](user_guides/2_dataset_prepare.md) + - [推理](user_guides/3_inference.md) + - [训练和测试](user_guides/4_train_test.md) + +3. 如果你想了解 MMSegmentation 工作的基本类和功能,请参考下面的教程来深入研究: + + - [数据流](advanced_guides/data_flow.md) + - [结构](advanced_guides/structures.md) + - [模型](advanced_guides/models.md) + - [数据集](advanced_guides/datasets.md) + - [评估](advanced_guides/evaluation.md) + +4. MMSegmentation 也为用户自定义和一些前沿的研究提供了教程,请参考下面的教程来建立你自己的分割项目: + + - [添加新的模型](advanced_guides/add_models.md) + - [添加新的数据集](advanced_guides/add_dataset.md) + - [添加新的 transform](advanced_guides/add_transform.md) + - [自定义 runtime](advanced_guides/customize_runtime.md) + +5. 如果您更熟悉 MMSegmentation v0.x , 以下是 MMSegmentation v0.x 迁移到 v1.x 的文档 + + - [迁移](migration/index.rst) + +## 参考来源 + +- https://paperswithcode.com/task/semantic-segmentation/codeless#task-home diff --git a/docs/zh_cn/stat.py b/docs/zh_cn/stat.py new file mode 100755 index 0000000000..7a86302e32 --- /dev/null +++ b/docs/zh_cn/stat.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# Copyright (c) OpenMMLab. All rights reserved. 
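+# 该脚本会在文档构建时被调用 (见 conf.py 中的 builder_inited_handler),
+# 用于统计 configs 下各模型 README 中的论文与模型权重数量, 并生成 modelzoo_statistics.md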
+import functools as func +import glob +import os.path as osp +import re + +import numpy as np + +url_prefix = 'https://github.com/open-mmlab/mmsegmentation/blob/master/' + +files = sorted(glob.glob('../../configs/*/README.md')) + +stats = [] +titles = [] +num_ckpts = 0 + +for f in files: + url = osp.dirname(f.replace('../../', url_prefix)) + + with open(f) as content_file: + content = content_file.read() + + title = content.split('\n')[0].replace('#', '').strip() + ckpts = { + x.lower().strip() + for x in re.findall(r'https?://download.*\.pth', content) + if 'mmsegmentation' in x + } + if len(ckpts) == 0: + continue + + _papertype = [ + x for x in re.findall(r'', content) + ] + assert len(_papertype) > 0 + papertype = _papertype[0] + + paper = {(papertype, title)} + + titles.append(title) + num_ckpts += len(ckpts) + statsmsg = f""" +\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts) +""" + stats.append((paper, ckpts, statsmsg)) + +allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats]) +msglist = '\n'.join(x for _, _, x in stats) + +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +# 模型库统计数据 + +* 论文数量: {len(set(titles))} +{countstr} + +* 模型数量: {num_ckpts} +{msglist} +""" + +with open('modelzoo_statistics.md', 'w') as f: + f.write(modelzoo) diff --git a/docs/zh_cn/switch_language.md b/docs/zh_cn/switch_language.md new file mode 100644 index 0000000000..f58efc42be --- /dev/null +++ b/docs/zh_cn/switch_language.md @@ -0,0 +1,3 @@ +## English + +## 简体中文 diff --git a/docs/zh_cn/user_guides/1_config.md b/docs/zh_cn/user_guides/1_config.md new file mode 100644 index 0000000000..dfcf0f9655 --- /dev/null +++ b/docs/zh_cn/user_guides/1_config.md @@ -0,0 +1,577 @@ +# 教程1:了解配置文件 + +我们将模块化和继承性设计融入到我们的配置文件系统中,方便进行各种实验。如果您想查看配置文件,你可以运行 `python tools/misc/print_config.py /PATH/TO/CONFIG` 来查看完整的配置文件。你也可以通过传递参数 `--cfg-options xxx.yyy=zzz` 来查看更新的配置信息。 + +## 配置文件的结构 + +在 `config/_base_ ` 文件夹下面有4种基本组件类型: 数据集(dataset),模型(model),训练策略(schedule)和运行时的默认设置(default runtime)。许多模型都可以很容易地通过组合这些组件进行实现,比如 DeepLabV3,PSPNet。使用 `_base_` 下的组件构建的配置信息叫做原始配置 (primitive)。 + +对于同一个文件夹下的所有配置文件,建议**只有一个**对应的**原始配置文件**。所有其他的配置文件都应该继承自这个原始配置文件,从而保证每个配置文件的最大继承深度为 3。 + +为了便于理解,我们建议社区贡献者从现有的方法继承。例如,如果您在 DeepLabV3 基础上进行了一些修改,用户可以先通过指定 `_base_ = ../deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-512x1024.py` 继承基本的 DeepLabV3 结构,然后在配置文件中修改必要的字段。 + +如果你正在构建一个全新的方法,它不与现有的任何方法共享基本组件,您可以在`config`下创建一个新的文件夹`xxxnet` ,详细文档请参考[mmengine](https://mmengine.readthedocs.io/en/latest/tutorials/config.html)。 + +## 配置文件命名风格 + +我们遵循以下格式来命名配置文件,建议社区贡献者遵循相同的风格。 + +```text +{algorithm name}_{model component names [component1]_[component2]_[...]}_{training settings}_{training dataset information}_{testing dataset information} +``` + +配置文件的文件名分为五个部分,组成文件名每一个部分和组件之间都用`_`连接,每个部分或组件中的每个单词都要用`-`连接。 + +- `{algorithm name}`: 算法的名称,如 `deeplabv3`, `pspnet` 等。 +- `{model component names}`: 算法中使用的组件名称,如主干(backbone)、解码头(head)等。例如,`r50-d8 `表示使用ResNet50主干网络,并使用主干网络的8倍下采样输出作为下一级的输入。 +- `{training settings}`: 训练时的参数设置,如 `batch size`、数据增强(augmentation)、损失函数(loss)、学习率调度器(learning rate scheduler)和训练轮数(epochs/iterations)。例如: `4xb4-ce-linearlr-40K` 意味着使用4个gpu,每个gpu4个图像,使用交叉熵损失函数(CrossEntropy),线性学习率调度程序,训练40K iterations。 + 一些缩写: + - `{gpu x batch_per_gpu}`: GPU数量和每个GPU的样本数。`bN ` 表示每个GPU的batch size为N,如 `8xb2` 为8个gpu x 每个gpu2张图像的缩写。如果未提及,则默认使用 `4xb4 `。 + - `{schedule}`: 训练计划,选项有`20k`,`40k`等。`20k ` 和 `40k` 
分别表示20000次迭代(iterations)和40000次迭代(iterations)。 +- `{training dataset information}`: 训练数据集名称,如 `cityscapes `, `ade20k ` 等,以及输入分辨率。例如: `cityscapes-768x768 `表示使用 `cityscapes` 数据集进行训练,输入分辨率为`768x768 `。 +- `{testing dataset information}` (可选): 测试数据集名称。当您的模型在一个数据集上训练但在另一个数据集上测试时,请将测试数据集名称添加到此处。如果没有这一部分,则意味着模型是在同一个数据集上进行训练和测试的。 + +## PSPNet 的一个例子 + +为了帮助用户熟悉对这个现代语义分割系统的完整配置文件和模块,我们对使用ResNet50V1c作为主干网络的PSPNet的配置文件作如下的简要注释和说明。要了解更详细的用法和每个模块对应的替换方法,请参阅API文档。 + +```python +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] # 我们可以在基本配置文件的基础上 构建新的配置文件 +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict(data_preprocessor=data_preprocessor) +``` + +`_base_/models/pspnet_r50-d8.py`是使用ResNet50V1c作为主干网络的PSPNet的基本模型配置文件。 + +```python +# 模型设置 +norm_cfg = dict(type='SyncBN', requires_grad=True) # 分割框架通常使用 SyncBN +data_preprocessor = dict( # 数据预处理的配置项,通常包括图像的归一化和增强 + type='SegDataPreProcessor', # 数据预处理的类型 + mean=[123.675, 116.28, 103.53], # 用于归一化输入图像的平均值 + std=[58.395, 57.12, 57.375], # 用于归一化输入图像的标准差 + bgr_to_rgb=True, # 是否将图像从 BGR 转为 RGB + pad_val=0, # 图像的填充值 + seg_pad_val=255) # 'gt_seg_map'的填充值 +model = dict( + type='EncoderDecoder', # 分割器(segmentor)的名字 + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', # 加载使用 ImageNet 预训练的主干网络 + backbone=dict( + type='ResNetV1c', # 主干网络的类别,更多细节请参考 mmseg/models/backbones/resnet.py + depth=50, # 主干网络的深度,通常为 50 和 101 + num_stages=4, # 主干网络状态(stages)的数目 + out_indices=(0, 1, 2, 3), # 每个状态(stage)产生的特征图输出的索引 + dilations=(1, 1, 2, 4), # 每一层(layer)的空心率(dilation rate) + strides=(1, 2, 1, 1), # 每一层(layer)的步长(stride) + norm_cfg=norm_cfg, # 归一化层(norm layer)的配置项 + norm_eval=False, # 是否冻结 BN 里的统计项 + style='pytorch', # 主干网络的风格,'pytorch' 意思是步长为2的层为 3x3 卷积, 'caffe' 意思是步长为2的层为 1x1 卷积 + contract_dilation=True), # 当空洞率 > 1, 是否压缩第一个空洞层 + decode_head=dict( + type='PSPHead', # 解码头(decode head)的类别。可用选项请参 mmseg/models/decode_heads + in_channels=2048, # 解码头的输入通道数 + in_index=3, # 被选择特征图(feature map)的索引 + channels=512, # 解码头中间态(intermediate)的通道数 + pool_scales=(1, 2, 3, 6), # PSPHead 平均池化(avg pooling)的规模(scales)。 细节请参考文章内容 + dropout_ratio=0.1, # 进入最后分类层(classification layer)之前的 dropout 比例 + num_classes=19, # 分割前景的种类数目。 通常情况下,cityscapes 为19,VOC为21,ADE20k 为150 + norm_cfg=norm_cfg, # 归一化层的配置项 + align_corners=False, # 解码过程中调整大小(resize)的 align_corners 参数 + loss_decode=dict( # 解码头(decode_head)里的损失函数的配置项 + type='CrossEntropyLoss', # 分割时使用的损失函数的类别 + use_sigmoid=False, # 分割时是否使用 sigmoid 激活 + loss_weight=1.0)), # 解码头的损失权重 + auxiliary_head=dict( + type='FCNHead', # 辅助头(auxiliary head)的种类。可用选项请参考 mmseg/models/decode_heads + in_channels=1024, # 辅助头的输入通道数 + in_index=2, # 被选择的特征图(feature map)的索引 + channels=256, # 辅助头中间态(intermediate)的通道数 + num_convs=1, # FCNHead 里卷积(convs)的数目,辅助头中通常为1 + concat_input=False, # 在分类层(classification layer)之前是否连接(concat)输入和卷积的输出 + dropout_ratio=0.1, # 进入最后分类层(classification layer)之前的 dropout 比例 + num_classes=19, # 分割前景的种类数目。 通常情况下,cityscapes 为19,VOC为21,ADE20k 为150 + norm_cfg=norm_cfg, # 归一化层的配置项 + align_corners=False, # 解码过程中调整大小(resize)的 align_corners 参数 + loss_decode=dict( # 辅助头(auxiliary head)里的损失函数的配置项 + type='CrossEntropyLoss', # 分割时使用的损失函数的类别 + use_sigmoid=False, # 分割时是否使用 sigmoid 激活 + loss_weight=0.4)), # 辅助头损失的权重,默认设置为0.4 + # 模型训练和测试设置项 + train_cfg=dict(), # train_cfg 当前仅是一个占位符 + test_cfg=dict(mode='whole')) # 测试模式,可选参数为 'whole' 和 'slide'. 
'whole': 在整张图像上全卷积(fully-convolutional)测试。 'slide': 在输入图像上做滑窗预测 +``` + +`_base_/datasets/cityscapes.py`是数据集的基本配置文件。 + +```python +# 数据集设置 +dataset_type = 'CityscapesDataset' # 数据集类型,这将被用来定义数据集 +data_root = 'data/cityscapes/' # 数据的根路径 +crop_size = (512, 1024) # 训练时的裁剪大小 +train_pipeline = [ # 训练流程 + dict(type='LoadImageFromFile'), # 第1个流程,从文件路径里加载图像 + dict(type='LoadAnnotations'), # 第2个流程,对于当前图像,加载它的标注图像 + dict(type='RandomResize', # 调整输入图像大小(resize)和其标注图像的数据增广流程 + scale=(2048, 1024), # 图像裁剪的大小 + ratio_range=(0.5, 2.0), # 数据增广的比例范围 + keep_ratio=True), # 调整图像大小时是否保持纵横比 + dict(type='RandomCrop', # 随机裁剪当前图像和其标注图像的数据增广流程 + crop_size=crop_size, # 随机裁剪的大小 + cat_max_ratio=0.75), # 单个类别可以填充的最大区域的比 + dict(type='RandomFlip', # 翻转图像和其标注图像的数据增广流程 + prob=0.5), # 翻转图像的概率 + dict(type='PhotoMetricDistortion'), # 光学上使用一些方法扭曲当前图像和其标注图像的数据增广流程 + dict(type='PackSegInputs') # 打包用于语义分割的输入数据 +] +test_pipeline = [ + dict(type='LoadImageFromFile'), # 第1个流程,从文件路径里加载图像 + dict(type='Resize', # 使用调整图像大小(resize)增强 + scale=(2048, 1024), # 图像缩放的大小 + keep_ratio=True), # 在调整图像大小时是否保留长宽比 + # 在' Resize '之后添加标注图像 + # 不需要做调整图像大小(resize)的数据变换 + dict(type='LoadAnnotations'), # 加载数据集提供的语义分割标注 + dict(type='PackSegInputs') # 打包用于语义分割的输入数据 +] +train_dataloader = dict( # 训练数据加载器(dataloader)的配置 + batch_size=2, # 每一个GPU的batch size大小 + num_workers=2, # 为每一个GPU预读取数据的进程个数 + persistent_workers=True, # 在一个epoch结束后关闭worker进程,可以加快训练速度 + sampler=dict(type='InfiniteSampler', shuffle=True), # 训练时进行随机洗牌(shuffle) + dataset=dict( # 训练数据集配置 + type=dataset_type, # 数据集类型,详见mmseg/datassets/ + data_root=data_root, # 数据集的根目录 + data_prefix=dict( + img_path='leftImg8bit/train', seg_map_path='gtFine/train'), # 训练数据的前缀 + pipeline=train_pipeline)) # 数据处理流程,它通过之前创建的train_pipeline传递。 +val_dataloader = dict( + batch_size=1, # 每一个GPU的batch size大小 + num_workers=4, # 为每一个GPU预读取数据的进程个数 + persistent_workers=True, # 在一个epoch结束后关闭worker进程,可以加快训练速度 + sampler=dict(type='DefaultSampler', shuffle=False), # 训练时不进行随机洗牌(shuffle) + dataset=dict( # 测试数据集配置 + type=dataset_type, # 数据集类型,详见mmseg/datassets/ + data_root=data_root, # 数据集的根目录 + data_prefix=dict( + img_path='leftImg8bit/val', seg_map_path='gtFine/val'), # 测试数据的前缀 + pipeline=test_pipeline)) # 数据处理流程,它通过之前创建的test_pipeline传递。 +test_dataloader = val_dataloader +# 精度评估方法,我们在这里使用 IoUMetric 进行评估 +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator +``` + +`_base_/schedules/schedule_40k.py` + +```python +# optimizer +optimizer = dict(type='SGD', # 优化器种类,更多细节可参考 https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/optimizer/default_constructor.py + lr=0.01, # 优化器的学习率,参数的使用细节请参照对应的 PyTorch 文档 + momentum=0.9, # 动量大小 (Momentum) + weight_decay=0.0005) # SGD 的权重衰减 (weight decay) +optim_wrapper = dict(type='OptimWrapper', # 优化器包装器(Optimizer wrapper)为更新参数提供了一个公共接口 + optimizer=optimizer, # 用于更新模型参数的优化器(Optimizer) + clip_grad=None) # 如果 'clip_grad' 不是None,它将是 ' torch.nn.utils.clip_grad' 的参数。 +# 学习策略 +param_scheduler = [ + dict( + type='PolyLR', # 调度流程的策略,同样支持 Step, CosineAnnealing, Cyclic 等. 
请从 https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/scheduler/lr_scheduler.py 参考 LrUpdater 的细节 + eta_min=1e-4, # 训练结束时的最小学习率 + power=0.9, # 多项式衰减 (polynomial decay) 的幂 + begin=0, # 开始更新参数的时间步(step) + end=40000, # 停止更新参数的时间步(step) + by_epoch=False) # 是否按照 epoch 计算训练时间 +] +# 40k iteration 的训练计划 +train_cfg = dict(type='IterBasedTrainLoop', max_iters=40000, val_interval=4000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +# 默认钩子(hook)配置 +default_hooks = dict( + timer=dict(type='IterTimerHook'), # 记录迭代过程中花费的时间 + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), # 从'Runner'的不同组件收集和写入日志 + param_scheduler=dict(type='ParamSchedulerHook'), # 更新优化器中的一些超参数,例如学习率 + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=4000), # 定期保存检查点(checkpoint) + sampler_seed=dict(type='DistSamplerSeedHook')) # 用于分布式训练的数据加载采样器 +``` + +in `_base_/default_runtime.py` + +```python +# 将注册表的默认范围设置为mmseg +default_scope = 'mmseg' +# environment +env_cfg = dict( + cudnn_benchmark=True, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) +log_level = 'INFO' +log_processor = dict(by_epoch=False) +load_from = None # 从文件中加载检查点(checkpoint) +resume = False # 是否从已有的模型恢复 +``` + +这些都是用于训练和测试PSPNet的配置文件,要加载和解析它们,我们可以使用[MMEngine](https://github.com/open-mmlab/mmengine)实现的[Config](https://mmengine.readthedocs.io/en/latest/tutorials/config.html)。 + +```python +from mmengine.config import Config + +cfg = Config.fromfile('configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py') +print(cfg.train_dataloader) +``` + +```shell +{'batch_size': 2, + 'num_workers': 2, + 'persistent_workers': True, + 'sampler': {'type': 'InfiniteSampler', 'shuffle': True}, + 'dataset': {'type': 'CityscapesDataset', + 'data_root': 'data/cityscapes/', + 'data_prefix': {'img_path': 'leftImg8bit/train', + 'seg_map_path': 'gtFine/train'}, + 'pipeline': [{'type': 'LoadImageFromFile'}, + {'type': 'LoadAnnotations'}, + {'type': 'RandomResize', + 'scale': (2048, 1024), + 'ratio_range': (0.5, 2.0), + 'keep_ratio': True}, + {'type': 'RandomCrop', 'crop_size': (512, 1024), 'cat_max_ratio': 0.75}, + {'type': 'RandomFlip', 'prob': 0.5}, + {'type': 'PhotoMetricDistortion'}, + {'type': 'PackSegInputs'}]}} +``` + +`cfg `是`mmengine.config.Config `的一个实例。它的接口与dict对象相同,也允许将配置值作为属性访问。更多信息请参见[MMEngine](https://github.com/open-mmlab/mmengine)中的[config tutorial](https://mmengine.readthedocs.io/en/latest/tutorials/config.html)。 + +## FAQ + +### 忽略基础配置文件里的一些字段 + +有时,您可以设置`_delete_=True `来忽略基本配置文件中的某些字段。您可以参考[MMEngine](https://github.com/open-mmlab/mmengine)中的[config tutorial](https://mmengine.readthedocs.io/en/latest/tutorials/config.html)来获得一些简单的指导。 + +例如,在MMSegmentation中,如果您想在下面的配置文件`pspnet.py `中修改PSPNet的主干网络: + +```python +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='torchvision://resnet50', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='PSPHead', + in_channels=2048, + in_index=3, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) +``` + +用以下代码加载并解析配置文件`pspnet.py`: + +```python +from mmengine.config import Config + +cfg = 
Config.fromfile('pspnet.py') +print(cfg.model) +``` + +```shell +{'type': 'EncoderDecoder', + 'pretrained': 'torchvision://resnet50', + 'backbone': {'type': 'ResNetV1c', + 'depth': 50, + 'num_stages': 4, + 'out_indices': (0, 1, 2, 3), + 'dilations': (1, 1, 2, 4), + 'strides': (1, 2, 1, 1), + 'norm_cfg': {'type': 'SyncBN', 'requires_grad': True}, + 'norm_eval': False, + 'style': 'pytorch', + 'contract_dilation': True}, + 'decode_head': {'type': 'PSPHead', + 'in_channels': 2048, + 'in_index': 3, + 'channels': 512, + 'pool_scales': (1, 2, 3, 6), + 'dropout_ratio': 0.1, + 'num_classes': 19, + 'norm_cfg': {'type': 'SyncBN', 'requires_grad': True}, + 'align_corners': False, + 'loss_decode': {'type': 'CrossEntropyLoss', + 'use_sigmoid': False, + 'loss_weight': 1.0}}} +``` + +`ResNet`和`HRNet`使用不同的关键字构建,编写一个新的配置文件`hrnet.py`,如下所示: + +```python +_base_ = 'pspnet.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w32', + backbone=dict( + _delete_=True, + type='HRNet', + norm_cfg=norm_cfg, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))))) +``` + +用以下代码加载并解析配置文件`hrnet.py`: + +```python +from mmengine.config import Config +cfg = Config.fromfile('hrnet.py') +print(cfg.model) +``` + +```shell +{'type': 'EncoderDecoder', + 'pretrained': 'open-mmlab://msra/hrnetv2_w32', + 'backbone': {'type': 'HRNet', + 'norm_cfg': {'type': 'SyncBN', 'requires_grad': True}, + 'extra': {'stage1': {'num_modules': 1, + 'num_branches': 1, + 'block': 'BOTTLENECK', + 'num_blocks': (4,), + 'num_channels': (64,)}, + 'stage2': {'num_modules': 1, + 'num_branches': 2, + 'block': 'BASIC', + 'num_blocks': (4, 4), + 'num_channels': (32, 64)}, + 'stage3': {'num_modules': 4, + 'num_branches': 3, + 'block': 'BASIC', + 'num_blocks': (4, 4, 4), + 'num_channels': (32, 64, 128)}, + 'stage4': {'num_modules': 3, + 'num_branches': 4, + 'block': 'BASIC', + 'num_blocks': (4, 4, 4, 4), + 'num_channels': (32, 64, 128, 256)}}}, + 'decode_head': {'type': 'PSPHead', + 'in_channels': 2048, + 'in_index': 3, + 'channels': 512, + 'pool_scales': (1, 2, 3, 6), + 'dropout_ratio': 0.1, + 'num_classes': 19, + 'norm_cfg': {'type': 'SyncBN', 'requires_grad': True}, + 'align_corners': False, + 'loss_decode': {'type': 'CrossEntropyLoss', + 'use_sigmoid': False, + 'loss_weight': 1.0}}} +``` + +`_delete_=True` 将用新的键去替换 `backbone` 字段内所有旧的键。 + +### 使用配置文件里的中间变量 + +配置文件中会使用一些中间变量,例如数据集(datasets)字段里的 `train_pipeline`/`test_pipeline`。 需要注意的是,在子配置文件里修改中间变量时,您需要再次传递这些变量给对应的字段。例如,我们想改变在训练或测试PSPNet时采用的多尺度策略 (multi scale strategy),`train_pipeline`/`test_pipeline` 是我们需要修改的中间变量。 + +```python +_base_ = '../pspnet/pspnet_r50-d8_4xb4-40k_cityscpaes-512x1024.py' +crop_size = (512, 1024) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='RandomResize', + img_scale=(2048, 1024), + ratio_range=(1., 2.), + keep_ration=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', flip_ratio=0.5), + 
dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs'), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', + scale=(2048, 1024), + keep_ratio=True), + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='leftImg8bit/train', seg_map_path='gtFine/train'), + pipeline=train_pipeline) +test_dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='leftImg8bit/val', seg_map_path='gtFine/val'), + pipeline=test_pipeline) +train_dataloader = dict(dataset=train_dataset) +val_dataloader = dict(dataset=test_dataset) +test_dataloader = val_dataloader +``` + +我们首先需要定义新的 `train_pipeline`/`test_pipeline` 然后传递到 `dataset` 里。 + +类似的,如果我们想从 `SyncBN` 切换到 `BN` 或者 `MMSyncBN`,我们需要替换配置文件里的每一个 `norm_cfg`。 + +```python +_base_ = '../pspnet/pspnet_r50-d8_4xb4-40k_cityscpaes-512x1024.py' +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + backbone=dict(norm_cfg=norm_cfg), + decode_head=dict(norm_cfg=norm_cfg), + auxiliary_head=dict(norm_cfg=norm_cfg)) +``` + +## 通过脚本参数修改配置文件 + +在[training script](https://github.com/open-mmlab/mmsegmentation/blob/1.x/tools/train.py)和[testing script](https://github.com/open-mmlab/mmsegmentation/blob/1.x/tools/test.py)中,我们支持脚本参数 `--cfg-options`,它可以帮助用户覆盖所使用的配置中的一些设置,`xxx=yyy` 格式的键值对将合并到配置文件中。 + +例如,这是一个简化的脚本 `demo_script.py `: + +```python +import argparse + +from mmengine.config import Config, DictAction + +def parse_args(): + parser = argparse.ArgumentParser(description='Script Example') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + print(cfg) + +if __name__ == '__main__': + main() +``` + +一个配置文件示例 `demo_config.py` 如下所示: + +```python +backbone = dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_eval=False, + style='pytorch', + contract_dilation=True) +``` + +运行 `demo_script.py`: + +```shell +python demo_script.py demo_config.py +``` + +```shell +Config (path: demo_config.py): {'backbone': {'type': 'ResNetV1c', 'depth': 50, 'num_stages': 4, 'out_indices': (0, 1, 2, 3), 'dilations': (1, 1, 2, 4), 'strides': (1, 2, 1, 1), 'norm_eval': False, 'style': 'pytorch', 'contract_dilation': True}} +``` + +通过脚本参数修改配置: + +```shell +python demo_script.py demo_config.py --cfg-options backbone.depth=101 +``` + +```shell +Config (path: demo_config.py): {'backbone': {'type': 'ResNetV1c', 'depth': 101, 'num_stages': 4, 'out_indices': (0, 1, 2, 3), 'dilations': (1, 1, 2, 4), 'strides': (1, 2, 1, 1), 'norm_eval': False, 'style': 'pytorch', 'contract_dilation': True}} +``` + +- 更新列表/元组的值。 + + 如果要更新的值是一个 list 或 tuple。例如,需要在配置文件 `demo_config.py ` 的 `backbone ` 中设置 `stride =(1,2,1,1) `。 + 如果您想更改这个键,你可以用两种方式进行指定: + + 1. `--cfg-options backbone.strides="(1, 1, 1, 1)"`. 
注意引号 " 是支持 list/tuple 数据类型所必需的。 + + ```shell + python demo_script.py demo_config.py --cfg-options backbone.strides="(1, 1, 1, 1)" + ``` + + ```shell + Config (path: demo_config.py): {'backbone': {'type': 'ResNetV1c', 'depth': 50, 'num_stages': 4, 'out_indices': (0, 1, 2, 3), 'dilations': (1, 1, 2, 4), 'strides': (1, 1, 1, 1), 'norm_eval': False, 'style': 'pytorch', 'contract_dilation': True}} + ``` + + 2. `--cfg-options backbone.strides=1,1,1,1`. 注意,在指定的值中**不允许**有空格。 + + 另外,如果原来的类型是tuple,通过这种方式修改后会自动转换为list。 + + ```shell + python demo_script.py demo_config.py --cfg-options backbone.strides=1,1,1,1 + ``` + + ```shell + Config (path: demo_config.py): {'backbone': {'type': 'ResNetV1c', 'depth': 50, 'num_stages': 4, 'out_indices': (0, 1, 2, 3), 'dilations': (1, 1, 2, 4), 'strides': [1, 1, 1, 1], 'norm_eval': False, 'style': 'pytorch', 'contract_dilation': True}} + ``` + +```{note} + 这种修改方法仅支持修改string、int、float、boolean、None、list和tuple类型的配置项。 + 具体来说,对于list和tuple类型的配置项,它们内部的元素也必须是上述七种类型之一。 +``` diff --git a/docs/zh_cn/user_guides/2_dataset_prepare.md b/docs/zh_cn/user_guides/2_dataset_prepare.md new file mode 100644 index 0000000000..a546b1a3d0 --- /dev/null +++ b/docs/zh_cn/user_guides/2_dataset_prepare.md @@ -0,0 +1,319 @@ +## 准备数据集(待更新) + +推荐用软链接,将数据集根目录链接到 `$MMSEGMENTATION/data` 里。如果您的文件夹结构是不同的,您也许可以试着修改配置文件里对应的路径。 + +```none +mmsegmentation +├── mmseg +├── tools +├── configs +├── data +│ ├── cityscapes +│ │ ├── leftImg8bit +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── gtFine +│ │ │ ├── train +│ │ │ ├── val +│ ├── VOCdevkit +│ │ ├── VOC2012 +│ │ │ ├── JPEGImages +│ │ │ ├── SegmentationClass +│ │ │ ├── ImageSets +│ │ │ │ ├── Segmentation +│ │ ├── VOC2010 +│ │ │ ├── JPEGImages +│ │ │ ├── SegmentationClassContext +│ │ │ ├── ImageSets +│ │ │ │ ├── SegmentationContext +│ │ │ │ │ ├── train.txt +│ │ │ │ │ ├── val.txt +│ │ │ ├── trainval_merged.json +│ │ ├── VOCaug +│ │ │ ├── dataset +│ │ │ │ ├── cls +│ ├── ade +│ │ ├── ADEChallengeData2016 +│ │ │ ├── annotations +│ │ │ │ ├── training +│ │ │ │ ├── validation +│ │ │ ├── images +│ │ │ │ ├── training +│ │ │ │ ├── validation +│ ├── CHASE_DB1 +│ │ ├── images +│ │ │ ├── training +│ │ │ ├── validation +│ │ ├── annotations +│ │ │ ├── training +│ │ │ ├── validation +│ ├── DRIVE +│ │ ├── images +│ │ │ ├── training +│ │ │ ├── validation +│ │ ├── annotations +│ │ │ ├── training +│ │ │ ├── validation +│ ├── HRF +│ │ ├── images +│ │ │ ├── training +│ │ │ ├── validation +│ │ ├── annotations +│ │ │ ├── training +│ │ │ ├── validation +│ ├── STARE +│ │ ├── images +│ │ │ ├── training +│ │ │ ├── validation +│ │ ├── annotations +│ │ │ ├── training +│ │ │ ├── validation +| ├── dark_zurich +| │   ├── gps +| │   │   ├── val +| │   │   └── val_ref +| │   ├── gt +| │   │   └── val +| │   ├── LICENSE.txt +| │   ├── lists_file_names +| │   │   ├── val_filenames.txt +| │   │   └── val_ref_filenames.txt +| │   ├── README.md +| │   └── rgb_anon +| │   | ├── val +| │   | └── val_ref +| ├── NighttimeDrivingTest +| | ├── gtCoarse_daytime_trainvaltest +| | │   └── test +| | │   └── night +| | └── leftImg8bit +| | | └── test +| | | └── night +│ ├── loveDA +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ ├── val +│ │ │ ├── test +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ ├── val +│ ├── potsdam +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ ├── val +│ ├── vaihingen +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ ├── val +│ ├── iSAID +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ ├── val +│ │ │ ├── test +│ │ ├── ann_dir +│ │ │ 
├── train +│ │ │ ├── val +``` + +### Cityscapes + +注册成功后,数据集可以在 [这里](https://www.cityscapes-dataset.com/downloads/) 下载。 + +通常情况下,`**labelTrainIds.png` 被用来训练 cityscapes。 +基于 [cityscapesscripts](https://github.com/mcordts/cityscapesScripts), +我们提供了一个 [脚本](https://github.com/open-mmlab/mmsegmentation/blob/master/tools/convert_datasets/cityscapes.py), +去生成 `**labelTrainIds.png`。 + +```shell +# --nproc 8 意味着有 8 个进程用来转换,它也可以被忽略。 +python tools/convert_datasets/cityscapes.py data/cityscapes --nproc 8 +``` + +### Pascal VOC + +Pascal VOC 2012 可以在 [这里](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar) 下载。 +此外,许多最近在 Pascal VOC 数据集上的工作都会利用增广的数据,它们可以在 [这里](http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz) 找到。 + +如果您想使用增广后的 VOC 数据集,请运行下面的命令来将数据增广的标注转成正确的格式。 + +```shell +# --nproc 8 意味着有 8 个进程用来转换,它也可以被忽略。 +python tools/convert_datasets/voc_aug.py data/VOCdevkit data/VOCdevkit/VOCaug --nproc 8 +``` + +关于如何拼接数据集 (concatenate) 并一起训练它们,更多细节请参考 [拼接连接数据集](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/tutorials/customize_datasets.md#%E6%8B%BC%E6%8E%A5%E6%95%B0%E6%8D%AE%E9%9B%86) 。 + +### ADE20K + +ADE20K 的训练集和验证集可以在 [这里](http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip) 下载。 +您还可以在 [这里](http://data.csail.mit.edu/places/ADEchallenge/release_test.zip) 下载验证集。 + +### Pascal Context + +Pascal Context 的训练集和验证集可以在 [这里](http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar) 下载。 +注册成功后,您还可以在 [这里](http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2010test.tar) 下载验证集。 + +为了从原始数据集里切分训练集和验证集, 您可以在 [这里](https://codalabuser.blob.core.windows.net/public/trainval_merged.json) +下载 trainval_merged.json。 + +如果您想使用 Pascal Context 数据集, +请安装 [细节](https://github.com/zhanghang1989/detail-api) 然后再运行如下命令来把标注转换成正确的格式。 + +```shell +python tools/convert_datasets/pascal_context.py data/VOCdevkit data/VOCdevkit/VOC2010/trainval_merged.json +``` + +### CHASE DB1 + +CHASE DB1 的训练集和验证集可以在 [这里](https://staffnet.kingston.ac.uk/~ku15565/CHASE_DB1/assets/CHASEDB1.zip) 下载。 + +为了将 CHASE DB1 数据集转换成 MMSegmentation 的格式,您需要运行如下命令: + +```shell +python tools/convert_datasets/chase_db1.py /path/to/CHASEDB1.zip +``` + +这个脚本将自动生成正确的文件夹结构。 + +### DRIVE + +DRIVE 的训练集和验证集可以在 [这里](https://drive.grand-challenge.org/) 下载。 +在此之前,您需要注册一个账号,当前 '1st_manual' 并未被官方提供,因此需要您从其他地方获取。 + +为了将 DRIVE 数据集转换成 MMSegmentation 格式,您需要运行如下命令: + +```shell +python tools/convert_datasets/drive.py /path/to/training.zip /path/to/test.zip +``` + +这个脚本将自动生成正确的文件夹结构。 + +### HRF + +首先,下载 [healthy.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/healthy.zip) [glaucoma.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/glaucoma.zip), [diabetic_retinopathy.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/diabetic_retinopathy.zip), [healthy_manualsegm.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/healthy_manualsegm.zip), [glaucoma_manualsegm.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/glaucoma_manualsegm.zip) 以及 [diabetic_retinopathy_manualsegm.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/diabetic_retinopathy_manualsegm.zip) 。 + +为了将 HRF 数据集转换成 MMSegmentation 格式,您需要运行如下命令: + +```shell +python tools/convert_datasets/hrf.py /path/to/healthy.zip /path/to/healthy_manualsegm.zip /path/to/glaucoma.zip /path/to/glaucoma_manualsegm.zip /path/to/diabetic_retinopathy.zip /path/to/diabetic_retinopathy_manualsegm.zip +``` + 
+这个脚本将自动生成正确的文件夹结构。 + +### STARE + +首先,下载 [stare-images.tar](http://cecas.clemson.edu/~ahoover/stare/probing/stare-images.tar), [labels-ah.tar](http://cecas.clemson.edu/~ahoover/stare/probing/labels-ah.tar) 和 [labels-vk.tar](http://cecas.clemson.edu/~ahoover/stare/probing/labels-vk.tar) 。 + +为了将 STARE 数据集转换成 MMSegmentation 格式,您需要运行如下命令: + +```shell +python tools/convert_datasets/stare.py /path/to/stare-images.tar /path/to/labels-ah.tar /path/to/labels-vk.tar +``` + +这个脚本将自动生成正确的文件夹结构。 + +### Dark Zurich + +因为我们只支持在此数据集上测试模型,所以您只需下载[验证集](https://data.vision.ee.ethz.ch/csakarid/shared/GCMA_UIoU/Dark_Zurich_val_anon.zip) 。 + +### Nighttime Driving + +因为我们只支持在此数据集上测试模型,所以您只需下载[测试集](http://data.vision.ee.ethz.ch/daid/NighttimeDriving/NighttimeDrivingTest.zip) 。 + +### LoveDA + +可以从 Google Drive 里下载 [LoveDA数据集](https://drive.google.com/drive/folders/1ibYV0qwn4yuuh068Rnc-w4tPi0U0c-ti?usp=sharing) 。 + +或者它还可以从 [zenodo](https://zenodo.org/record/5706578#.YZvN7SYRXdF) 下载, 您需要运行如下命令: + +```shell +# Download Train.zip +wget https://zenodo.org/record/5706578/files/Train.zip +# Download Val.zip +wget https://zenodo.org/record/5706578/files/Val.zip +# Download Test.zip +wget https://zenodo.org/record/5706578/files/Test.zip +``` + +对于 LoveDA 数据集,请运行以下命令下载并重新组织数据集 + +```shell +python tools/convert_datasets/loveda.py /path/to/loveDA +``` + +请参照 [这里](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/inference.md) 来使用训练好的模型去预测 LoveDA 测试集并且提交到官网。 + +关于 LoveDA 的更多细节可以在[这里](https://github.com/Junjue-Wang/LoveDA) 找到。 + +### ISPRS Potsdam + +[Potsdam](https://www2.isprs.org/commissions/comm2/wg4/benchmark/2d-sem-label-potsdam/) +数据集是一个有着2D 语义分割内容标注的城市遥感数据集。 +数据集可以从挑战[主页](https://www2.isprs.org/commissions/comm2/wg4/benchmark/data-request-form/) 获得。 +需要其中的 '2_Ortho_RGB.zip' 和 '5_Labels_all_noBoundary.zip'。 + +对于 Potsdam 数据集,请运行以下命令下载并重新组织数据集 + +```shell +python tools/convert_datasets/potsdam.py /path/to/potsdam +``` + +使用我们默认的配置, 将生成 3456 张图片的训练集和 2016 张图片的验证集。 + +### ISPRS Vaihingen + +[Vaihingen](https://www2.isprs.org/commissions/comm2/wg4/benchmark/2d-sem-label-vaihingen/) +数据集是一个有着2D 语义分割内容标注的城市遥感数据集。 + +数据集可以从挑战 [主页](https://www2.isprs.org/commissions/comm2/wg4/benchmark/data-request-form/). +需要其中的 'ISPRS_semantic_labeling_Vaihingen.zip' 和 'ISPRS_semantic_labeling_Vaihingen_ground_truth_eroded_COMPLETE.zip'。 + +对于 Vaihingen 数据集,请运行以下命令下载并重新组织数据集 + +```shell +python tools/convert_datasets/vaihingen.py /path/to/vaihingen +``` + +使用我们默认的配置 (`clip_size`=512, `stride_size`=256), 将生成 344 张图片的训练集和 398 张图片的验证集。 + +### iSAID + +iSAID 数据集(训练集/验证集/测试集)的图像可以从 [DOTA-v1.0](https://captain-whu.github.io/DOTA/dataset.html) 下载. + +iSAID 数据集(训练集/验证集)的注释可以从 [iSAID](https://captain-whu.github.io/iSAID/dataset.html) 下载. + +该数据集是一个大规模的实例分割(也可以用于语义分割)的遥感数据集. + +下载后,在数据集转换前,您需要将数据集文件夹调整成如下格式. 
+ +``` +│ ├── iSAID +│ │ ├── train +│ │ │ ├── images +│ │ │ │ ├── part1.zip +│ │ │ │ ├── part2.zip +│ │ │ │ ├── part3.zip +│ │ │ ├── Semantic_masks +│ │ │ │ ├── images.zip +│ │ ├── val +│ │ │ ├── images +│ │ │ │ ├── part1.zip +│ │ │ ├── Semantic_masks +│ │ │ │ ├── images.zip +│ │ ├── test +│ │ │ ├── images +│ │ │ │ ├── part1.zip +│ │ │ │ ├── part2.zip +``` + +```shell +python tools/convert_datasets/isaid.py /path/to/iSAID +``` + +使用我们默认的配置 (`patch_width`=896, `patch_height`=896, `overlap_area`=384), 将生成 33978 张图片的训练集和 11644 张图片的验证集。 diff --git a/docs/zh_cn/user_guides/3_inference.md b/docs/zh_cn/user_guides/3_inference.md new file mode 100644 index 0000000000..b90f73420c --- /dev/null +++ b/docs/zh_cn/user_guides/3_inference.md @@ -0,0 +1,127 @@ +## 使用预训练模型推理(待更新) + +我们提供测试脚本来评估完整数据集(Cityscapes, PASCAL VOC, ADE20k 等)上的结果,同时为了使其他项目的整合更容易,也提供一些高级 API。 + +### 测试一个数据集 + +- 单卡 GPU +- CPU +- 单节点多卡 GPU +- 多节点 + +您可以使用以下命令来测试一个数据集。 + +```shell +# 单卡 GPU 测试 +python tools/test.py ${配置文件} ${检查点文件} [--out ${结果文件}] [--eval ${评估指标}] [--show] + +# CPU: 如果机器没有 GPU, 则跟上述单卡 GPU 测试一致 +# CPU: 如果机器有 GPU, 那么先禁用 GPU 再运行单 GPU 测试脚本 +export CUDA_VISIBLE_DEVICES=-1 # 禁用 GPU +python tools/test.py ${配置文件} ${检查点文件} [--out ${结果文件}] [--eval ${评估指标}] [--show] + +# 多卡GPU 测试 +./tools/dist_test.sh ${配置文件} ${检查点文件} ${GPU数目} [--out ${结果文件}] [--eval ${评估指标}] +``` + +可选参数: + +- `RESULT_FILE`: pickle 格式的输出结果的文件名,如果不专门指定,结果将不会被专门保存成文件。(MMseg v0.17 之后,args.out 将只会保存评估时的中间结果或者是分割图的保存路径。) +- `EVAL_METRICS`: 在结果里将被评估的指标。这主要取决于数据集, `mIoU` 对于所有数据集都可获得,像 Cityscapes 数据集可以通过 `cityscapes` 命令来专门评估,就像标准的 `mIoU`一样。 +- `--show`: 如果被指定,分割结果将会在一张图像里画出来并且在另一个窗口展示。它仅仅是用来调试与可视化,并且仅针对单卡 GPU 测试。请确认 GUI 在您的环境里可用,否则您也许会遇到报错 `cannot connect to X server` +- `--show-dir`: 如果被指定,分割结果将会在一张图像里画出来并且保存在指定文件夹里。它仅仅是用来调试与可视化,并且仅针对单卡GPU测试。使用该参数时,您的环境不需要 GUI。 +- `--eval-options`: 评估时的可选参数,当设置 `efficient_test=True` 时,它将会保存中间结果至本地文件里以节约 CPU 内存。请确认您本地硬盘有足够的存储空间(大于20GB)。(MMseg v0.17 之后,`efficient_test` 不再生效,我们重构了 test api,通过使用一种渐近式的方式来提升评估和保存结果的效率。) + +例子: + +假设您已经下载检查点文件至文件夹 `checkpoints/` 里。 + +1. 测试 PSPNet 并可视化结果。按下任何键会进行到下一张图 + + ```shell + python tools/test.py configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \ + checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \ + --show + ``` + +2. 测试 PSPNet 并保存画出的图以便于之后的可视化 + + ```shell + python tools/test.py configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \ + checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \ + --show-dir psp_r50_512x1024_40ki_cityscapes_results + ``` + +3. 在数据集 PASCAL VOC (不保存测试结果) 上测试 PSPNet 并评估 mIoU + + ```shell + python tools/test.py configs/pspnet/pspnet_r50-d8_512x1024_20k_voc12aug.py \ + checkpoints/pspnet_r50-d8_512x1024_20k_voc12aug_20200605_003338-c57ef100.pth \ + --eval mAP + ``` + +4. 使用4卡 GPU 测试 PSPNet,并且在标准 mIoU 和 cityscapes 指标里评估模型 + + ```shell + ./tools/dist_test.sh configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \ + checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \ + 4 --out results.pkl --eval mIoU cityscapes + ``` + + 注意:在 cityscapes mIoU 和我们的 mIoU 指标会有一些差异 (~0.1%) 。因为 cityscapes 默认是根据类别样本数的多少进行加权平均,而我们对所有的数据集都是采取直接平均的方法来得到 mIoU。 + +5. 
在 cityscapes 数据集上4卡 GPU 测试 PSPNet, 并生成 png 文件以便提交给官方评估服务器 + + 首先,在配置文件里添加内容: `configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py`, + + ```python + data = dict( + test=dict( + img_dir='leftImg8bit/test', + ann_dir='gtFine/test')) + ``` + + 随后,进行测试。 + + ```shell + ./tools/dist_test.sh configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \ + checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \ + 4 --format-only --eval-options "imgfile_prefix=./pspnet_test_results" + ``` + + 您会在文件夹 `./pspnet_test_results` 里得到生成的 png 文件。 + 您也许可以运行 `zip -r results.zip pspnet_test_results/` 并提交 zip 文件给 [evaluation server](https://www.cityscapes-dataset.com/submit/) 。 + +6. 在 Cityscapes 数据集上使用 CPU 高效内存选项来测试 DeeplabV3+ `mIoU` 指标 (没有保存测试结果) + + ```shell + python tools/test.py \ + configs/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes.py \ + deeplabv3plus_r18-d8_512x1024_80k_cityscapes_20201226_080942-cff257fe.pth \ + --eval-options efficient_test=True \ + --eval mIoU + ``` + + 使用 `pmap` 可查看 CPU 内存情况, `efficient_test=True` 会使用约 2.25GB 的 CPU 内存, `efficient_test=False` 会使用约 11.06GB 的 CPU 内存。 这个可选参数可以节约很多 CPU 内存。(MMseg v0.17 之后, `efficient_test` 参数将不再生效, 我们使用了一种渐近的方式来更加有效快速地评估和保存结果。) + +7. 在 LoveDA 数据集上1卡 GPU 测试 PSPNet, 并生成 png 文件以便提交给官方评估服务器 + + 首先,在配置文件里添加内容: `configs/pspnet/pspnet_r50-d8_512x512_80k_loveda.py`, + + ```python + data = dict( + test=dict( + img_dir='img_dir/test', + ann_dir='ann_dir/test')) + ``` + + 随后,进行测试。 + + ```shell + python ./tools/test.py configs/pspnet/pspnet_r50-d8_512x512_80k_loveda.py \ + checkpoints/pspnet_r50-d8_512x512_80k_loveda_20211104_155728-88610f9f.pth \ + --format-only --eval-options "imgfile_prefix=./pspnet_test_results" + ``` + + 您会在文件夹 `./pspnet_test_results` 里得到生成的 png 文件。 + 您也许可以运行 `zip -r -j Results.zip pspnet_test_results/` 并提交 zip 文件给 [evaluation server](https://codalab.lisn.upsaclay.fr/competitions/421) 。 diff --git a/docs/zh_cn/user_guides/4_train_test.md b/docs/zh_cn/user_guides/4_train_test.md new file mode 100644 index 0000000000..b26132e765 --- /dev/null +++ b/docs/zh_cn/user_guides/4_train_test.md @@ -0,0 +1,225 @@ +# 教程4:使用现有模型进行训练和测试 + +MMSegmentation 支持在多种设备上训练和测试模型。如下文,具体方式分别为单GPU、分布式以及计算集群的训练和测试。通过本教程,您将知晓如何用 MMSegmentation 提供的脚本进行训练和测试。 + +## 在单GPU上训练和测试 + +### 在单GPU上训练 + +`tools/train.py` 文件提供了在单GPU上部署训练任务的方法。 + +基础用法如下: + +```shell +python tools/train.py ${配置文件} [可选参数] +``` + +- `--work-dir ${工作路径}`: 重新指定工作路径 +- `--amp`: 使用自动混合精度计算 +- `--resume`: 从工作路径中保存的最新检查点文件(checkpoint)恢复训练 +- `--cfg-options ${需更覆盖的配置}`: 覆盖已载入的配置中的部分设置,并且 以 xxx=yyy 格式的键值对 将被合并到配置文件中。 + 比如: '--cfg-option model.encoder.in_channels=6', 更多细节请看[指导](./1_config.md#Modify-config-through-script-arguments)。 + +下面是对于多GPU测试的可选参数: + +- `--launcher`: 执行器的启动方式。允许选择的参数值有 `none`, `pytorch`, `slurm`, `mpi`。特别的,如果设置为none,测试将非分布式模式下进行。 +- `--local_rank`: 分布式中进程的序号。如果没有指定,默认设置为0。 + +**注意:** 命令行参数 `--resume` 和在配置文件中的参数 `load_from` 的不同之处: + +`--resume` 只决定是否继续使用工作路径中最新的检查点,它常常用于恢复被意外打断的训练。 + +`load_from` 会明确指定被载入的检查点文件,且训练迭代器将从0开始,通常用于微调模型。 + +如果您希望从指定的检查点上恢复训练您可以使用: + +```python +python tools/train.py ${配置文件} --resume --cfg-options load_from=${检查点} +``` + +**在 CPU 上训练**: 如果机器没有 GPU,则在 CPU 上训练的过程是与单GPU训练一致的。如果机器有 GPU 但是不希望使用它们,我们只需要在训练前通过以下方式关闭 GPU 训练功能。 + +```shell +export CUDA_VISIBLE_DEVICES=-1 +``` + +然后运行[上方](#在单GPU上训练)脚本。 + +### 在单GPU上测试 + +`tools/test.py` 文件提供了在单 GPU 上启动测试任务的方法。 + +基础用法如下: + +```shell +python tools/test.py ${配置文件} ${模型权重文件} [可选参数] +``` + +这个工具有几个可选参数,包括: + +- `--work-dir`: 如果指定了路径,结果会保存在该路径下。如果没有指定则会保存在 `work_dirs/{配置文件名}` 路径下. 
+- `--show`: 当 `--show-dir` 没有指定时,可以使用该参数,在程序运行过程中显示预测结果。
+- `--show-dir`: 绘制了分割掩膜图片的存储文件夹。如果指定了该参数,则可视化的分割掩膜将被保存到 `work_dir/timestamp/{指定路径}`。
+- `--wait-time`: 多次可视化结果的时间间隔。当 `--show` 为激活状态时发挥作用。默认为 2。
+- `--cfg-options`: 如果被具体指定,以 xxx=yyy 形式的键值对将被合并入配置文件中。
+
+**在CPU上测试**: 如果机器没有 GPU,则在 CPU 上测试的过程与单 GPU 测试一致。如果机器有 GPU 但不希望使用它们,只需要在测试前通过以下方式关闭 GPU 即可。
+
+```shell
+export CUDA_VISIBLE_DEVICES=-1
+```
+
+然后运行[上方](#在单GPU上测试)脚本。
+
+## 多GPU、多机器上训练和测试
+
+### 在多GPU上训练
+
+OpenMMLab 2.0 通过 `MMDistributedDataParallel` 实现**分布式**训练。
+
+`tools/dist_train.sh` 文件提供了在多GPU上部署训练任务的方法。
+
+基础用法如下:
+
+```shell
+sh tools/dist_train.sh ${配置文件} ${GPU数量} [可选参数]
+```
+
+可选参数与[上方](#在单GPU上训练)相同,并且增加了用于指定 GPU 数量的参数。
+
+示例:
+
+```shell
+# 模型训练的检查点和日志保存在这个路径下: WORK_DIR=work_dirs/pspnet_r50-d8_4xb4-80k_ade20k-512x512/
+# 如果工作路径没有被设定,它将会被自动生成。
+sh tools/dist_train.sh configs/pspnet/pspnet_r50-d8_4xb4-80k_ade20k-512x512.py 8 --work-dir work_dirs/pspnet_r50-d8_4xb4-80k_ade20k-512x512
+```
+
+**注意**: 在训练过程中,检查点和日志保存在 `work_dirs/` 下与配置文件同名的文件夹中。
+不推荐自定义工作路径,因为评估脚本依赖于源自配置文件名的路径。如果您希望将权重保存在其他地方,请使用符号链接,例如:
+
+```shell
+ln -s ${您的工作路径} ${MMSEG 路径}/work_dirs
+```
+
+### 在多GPU上测试
+
+`tools/dist_test.sh` 文件提供了在多GPU上启动测试任务的方法。
+
+基础用法如下:
+
+```shell
+sh tools/dist_test.sh ${配置文件} ${检查点文件} ${GPU数量} [可选参数]
+```
+
+可选参数与[上方](#在单GPU上测试)相同,并且增加了用于指定 GPU 数量的参数。
+
+示例:
+
+```shell
+./tools/dist_test.sh configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py \
+    checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth 4
+```
+
+### 在单台机器上启动多个任务
+
+如果您在单台机器上运行多个任务,比如:在 8 卡 GPU 的单台机器上执行 2 个各需 4 卡 GPU 的训练任务,您需要为每个任务指定不同的端口(默认 29500),从而避免通讯冲突。否则,会有报错信息 `RuntimeError: Address already in use`(运行错误:地址被使用)。
+
+如果您使用 `dist_train.sh` 来启动训练任务,您可以通过环境变量 `PORT` 设置端口。
+
+```shell
+CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 sh tools/dist_train.sh ${配置文件} 4
+CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 sh tools/dist_train.sh ${配置文件} 4
+```
+
+### 在多台机器上训练
+
+MMSegmentation 的分布式训练依赖 `torch.distributed`。
+因此,可以通过 PyTorch 的 [运行工具 launch utility](https://pytorch.org/docs/stable/distributed.html#launch-utility) 来进行分布式训练。
+
+如果您启动的多台机器只是简单地通过以太网连接,您可以直接运行下方命令:
+
+在第一台机器上:
+
+```shell
+NNODES=2 NODE_RANK=0 PORT=${主节点端口} MASTER_ADDR=${主节点地址} sh tools/dist_train.sh ${配置文件} ${GPUS}
+```
+
+在第二台机器上:
+
+```shell
+NNODES=2 NODE_RANK=1 PORT=${主节点端口} MASTER_ADDR=${主节点地址} sh tools/dist_train.sh ${配置文件} ${GPUS}
+```
+
+通常,如果您没有使用 InfiniBand(无限带宽)一类的高速网络,这个过程会比较慢。
+
+## 通过 Slurm 管理任务
+
+[Slurm](https://slurm.schedmd.com/) 是一个很好的计算集群作业调度系统。
+
+### 通过 Slurm 在集群上训练
+
+在一个由 Slurm 管理的集群上,您可以使用 `slurm_train.sh` 来启动训练任务。它同时支持单节点和多节点的训练。
+
+基础用法如下:
+
+```shell
+[GPUS=${GPUS}] sh tools/slurm_train.sh ${分区} ${任务名} ${配置文件} [可选参数]
+```
+
+下方的示例通过名为 `dev` 的 Slurm 分区,调用 4 个 GPU 来训练 PSPNet,并将工作路径设置为共享文件系统。
+
+```shell
+GPUS=4 sh tools/slurm_train.sh dev pspnet configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py --work-dir work_dir/pspnet
+```
+
+您可以检查 [源码](../../../tools/slurm_train.sh) 来查看全部的参数和环境变量。
+
+### 通过 Slurm 在集群上测试
+
+与训练任务相同,MMSegmentation 提供 `slurm_test.sh` 文件来启动测试任务。
+
+基础用法如下:
+
+```shell
+[GPUS=${GPUS}] sh tools/slurm_test.sh ${分区} ${任务名} ${配置文件} ${检查点文件} [可选参数]
+```
+
+您可以通过 [源码](../../../tools/slurm_test.sh) 来查看全部的参数和环境变量。
+
+**注意:** 使用 Slurm 时,需要设置端口,可从以下方式中选取一种。
+
+1. 我们更推荐通过 `--cfg-options` 设置端口,因为这不会改变原始配置:
+
+   ```shell
+   GPUS=4 GPUS_PER_NODE=4 sh tools/slurm_train.sh ${分区} ${任务名} config1.py ${工作路径} --cfg-options env_cfg.dist_cfg.port=29500
+   GPUS=4 GPUS_PER_NODE=4 sh tools/slurm_train.sh ${分区} ${任务名} config2.py ${工作路径} --cfg-options env_cfg.dist_cfg.port=29501
+   ```
+
+2. 通过修改配置文件设置不同的通讯端口:
+
+   在 `config1.py` 中:
+
+   ```python
+   env_cfg = dict(dist_cfg=dict(backend='nccl', port=29500))
+   ```
+
+   在 `config2.py` 中:
+
+   ```python
+   env_cfg = dict(dist_cfg=dict(backend='nccl', port=29501))
+   ```
+
+   然后您可以通过 config1.py 和 config2.py 同时启动两个任务:
+
+   ```shell
+   CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 sh tools/slurm_train.sh ${分区} ${任务名} config1.py ${工作路径}
+   CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 sh tools/slurm_train.sh ${分区} ${任务名} config2.py ${工作路径}
+   ```
+
+3. 在命令行中通过环境变量 `MASTER_PORT` 设置端口:
+
+```shell
+CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 MASTER_PORT=29500 sh tools/slurm_train.sh ${分区} ${任务名} config1.py ${工作路径}
+CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 MASTER_PORT=29501 sh tools/slurm_train.sh ${分区} ${任务名} config2.py ${工作路径}
+```
diff --git a/docs/zh_cn/user_guides/deployment.md b/docs/zh_cn/user_guides/deployment.md
new file mode 100644
index 0000000000..f98110c8b5
--- /dev/null
+++ b/docs/zh_cn/user_guides/deployment.md
@@ -0,0 +1 @@
+# 模型部署
diff --git a/docs/zh_cn/user_guides/index.rst b/docs/zh_cn/user_guides/index.rst
new file mode 100644
index 0000000000..dacac79698
--- /dev/null
+++ b/docs/zh_cn/user_guides/index.rst
@@ -0,0 +1,20 @@
+训练 & 测试
+**************
+
+.. toctree::
+   :maxdepth: 1
+
+   1_config.md
+   2_dataset_prepare.md
+   3_inference.md
+   4_train_test.md
+
+实用工具
+*************
+
+.. toctree::
+   :maxdepth: 2
+
+   visualization.md
+   useful_tools.md
+   deployment.md
diff --git a/docs/zh_cn/user_guides/useful_tools.md b/docs/zh_cn/user_guides/useful_tools.md
new file mode 100644
index 0000000000..acbacb950f
--- /dev/null
+++ b/docs/zh_cn/user_guides/useful_tools.md
@@ -0,0 +1,368 @@
+## 常用工具(待更新)
+
+除了训练和测试的脚本,我们在 `tools/` 文件夹路径下还提供许多有用的工具。
+
+### 计算参数量(params)和计算量(FLOPs)(试验性)
+
+我们基于 [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch)
+提供了一个用于计算给定模型参数量和计算量的脚本。
+
+```shell
+python tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}]
+```
+
+您将得到如下的结果:
+
+```none
+==============================
+Input shape: (3, 2048, 1024)
+Flops: 1429.68 GMac
+Params: 48.98 M
+==============================
+```
+
+**注意**: 这个工具仍然是试验性的,我们无法保证数字是正确的。您可以拿这些结果做简单实验的对照,在写技术文档报告或者论文前您需要再次确认一下。
+
+(1) 计算量与输入的形状有关,而参数量与输入的形状无关,默认的输入形状是 (1, 3, 1280, 800);
+(2) 一些运算操作,如 GN 和其他定制的运算操作没有加入到计算量的计算中。
+
+### 发布模型
+
+在您上传一个模型到云服务器之前,您需要做以下几步:
+(1) 将模型权重转成 CPU 张量;
+(2) 删除记录优化器状态 (optimizer states) 的相关信息;
+(3) 计算检查点文件 (checkpoint file) 的哈希编码 (hash id) 并且将哈希编码加到文件名中。
+
+```shell
+python tools/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME}
+```
+
+例如,
+
+```shell
+python tools/publish_model.py work_dirs/pspnet/latest.pth psp_r50_hszhao_200ep.pth
+```
+
+最终输出文件将是 `psp_r50_512x1024_40ki_cityscapes-{hash id}.pth`。
+
+### 导出 ONNX (试验性)
+
+我们提供了一个脚本来导出模型到 [ONNX](https://github.com/onnx/onnx) 格式。被转换的模型可以通过工具 [Netron](https://github.com/lutzroeder/netron)
+来可视化。除此以外,我们同样支持对 PyTorch 和 ONNX 模型的输出结果做对比。
+
+```bash
+python tools/pytorch2onnx.py \
+    ${CONFIG_FILE} \
+    --checkpoint ${CHECKPOINT_FILE} \
+    --output-file ${ONNX_FILE} \
+    --input-img ${INPUT_IMG} \
+    --shape ${INPUT_SHAPE} \
+    --rescale-shape ${RESCALE_SHAPE} \
+    --show \
+    --verify \
+    --dynamic-export \
+    --cfg-options \
+      model.test_cfg.mode="whole"
+```
+
+各个参数的描述:
+
+- `config` : 模型配置文件的路径
+- `--checkpoint` : 模型检查点文件的路径
+- `--output-file`: 输出的 ONNX 模型的路径。如果没有专门指定,它默认是 `tmp.onnx`
+- `--input-img` : 用来转换和可视化的一张输入图像的路径
+- `--shape`: 模型的输入张量的高和宽。如果没有专门指定,它将被设置成 `test_pipeline` 的 `img_scale`
+- `--rescale-shape`: 改变输出的形状。设置这个值来避免 OOM,它仅在 `slide` 模式下可以用
+- `--show`: 是否打印输出模型的结构。如果没有被专门指定,它将被设置成 `False`
+- `--verify`: 
是否验证一个输出模型的正确性 (correctness)。如果没有被专门指定,它将被设置成 `False` +- `--dynamic-export`: 是否导出形状变化的输入与输出的 ONNX 模型。如果没有被专门指定,它将被设置成 `False` +- `--cfg-options`: 更新配置选项 + +**注意**: 这个工具仍然是试验性的,目前一些自定义操作还没有被支持 + +### 评估 ONNX 模型 + +我们提供 `tools/deploy_test.py` 去评估不同后端的 ONNX 模型。 + +#### 先决条件 + +- 安装 onnx 和 onnxruntime-gpu + + ```shell + pip install onnx onnxruntime-gpu + ``` + +- 参考 [如何在 MMCV 里构建 tensorrt 插件](https://mmcv.readthedocs.io/en/latest/tensorrt_plugin.html#how-to-build-tensorrt-plugins-in-mmcv) 安装TensorRT (可选) + +#### 使用方法 + +```bash +python tools/deploy_test.py \ + ${CONFIG_FILE} \ + ${MODEL_FILE} \ + ${BACKEND} \ + --out ${OUTPUT_FILE} \ + --eval ${EVALUATION_METRICS} \ + --show \ + --show-dir ${SHOW_DIRECTORY} \ + --cfg-options ${CFG_OPTIONS} \ + --eval-options ${EVALUATION_OPTIONS} \ + --opacity ${OPACITY} \ +``` + +各个参数的描述: + +- `config`: 模型配置文件的路径 +- `model`: 被转换的模型文件的路径 +- `backend`: 推理的后端,可选项:`onnxruntime`, `tensorrt` +- `--out`: 输出结果成 pickle 格式文件的路径 +- `--format-only` : 不评估直接给输出结果的格式。通常用在当您想把结果输出成一些测试服务器需要的特定格式时。如果没有被专门指定,它将被设置成 `False`。 注意这个参数是用 `--eval` 来 **手动添加** +- `--eval`: 评估指标,取决于每个数据集的要求,例如 "mIoU" 是大多数据集的指标而 "cityscapes" 仅针对 Cityscapes 数据集。注意这个参数是用 `--format-only` 来 **手动添加** +- `--show`: 是否展示结果 +- `--show-dir`: 涂上结果的图像被保存的文件夹的路径 +- `--cfg-options`: 重写配置文件里的一些设置,`xxx=yyy` 格式的键值对将被覆盖到配置文件里 +- `--eval-options`: 自定义的评估的选项, `xxx=yyy` 格式的键值对将成为 `dataset.evaluate()` 函数的参数变量 +- `--opacity`: 涂上结果的分割图的透明度,范围在 (0, 1\] 之间 + +#### 结果和模型 + +| 模型 | 配置文件 | 数据集 | 评价指标 | PyTorch | ONNXRuntime | TensorRT-fp32 | TensorRT-fp16 | +| :--------: | :---------------------------------------------: | :--------: | :------: | :-----: | :---------: | :-----------: | :-----------: | +| FCN | fcn_r50-d8_512x1024_40k_cityscapes.py | cityscapes | mIoU | 72.2 | 72.2 | 72.2 | 72.2 | +| PSPNet | pspnet_r50-d8_512x1024_40k_cityscapes.py | cityscapes | mIoU | 77.8 | 77.8 | 77.8 | 77.8 | +| deeplabv3 | deeplabv3_r50-d8_512x1024_40k_cityscapes.py | cityscapes | mIoU | 79.0 | 79.0 | 79.0 | 79.0 | +| deeplabv3+ | deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py | cityscapes | mIoU | 79.6 | 79.5 | 79.5 | 79.5 | +| PSPNet | pspnet_r50-d8_769x769_40k_cityscapes.py | cityscapes | mIoU | 78.2 | 78.1 | | | +| deeplabv3 | deeplabv3_r50-d8_769x769_40k_cityscapes.py | cityscapes | mIoU | 78.5 | 78.3 | | | +| deeplabv3+ | deeplabv3plus_r50-d8_769x769_40k_cityscapes.py | cityscapes | mIoU | 78.9 | 78.7 | | | + +**注意**: TensorRT 仅在使用 `whole mode` 测试模式时的配置文件里可用。 + +### 导出 TorchScript (试验性) + +我们同样提供一个脚本去把模型导出成 [TorchScript](https://pytorch.org/docs/stable/jit.html) 格式。您可以使用 pytorch C++ API [LibTorch](https://pytorch.org/docs/stable/cpp_index.html) 去推理训练好的模型。 +被转换的模型能被像 [Netron](https://github.com/lutzroeder/netron) 的工具来可视化。此外,我们还支持 PyTorch 和 TorchScript 模型的输出结果的比较。 + +```shell +python tools/pytorch2torchscript.py \ + ${CONFIG_FILE} \ + --checkpoint ${CHECKPOINT_FILE} \ + --output-file ${ONNX_FILE} + --shape ${INPUT_SHAPE} + --verify \ + --show +``` + +各个参数的描述: + +- `config` : pytorch 模型的配置文件的路径 +- `--checkpoint` : pytorch 模型的检查点文件的路径 +- `--output-file`: TorchScript 模型输出的路径,如果没有被专门指定,它将被设置成 `tmp.pt` +- `--input-img` : 用来转换和可视化的输入图像的路径 +- `--shape`: 模型的输入张量的宽和高。如果没有被专门指定,它将被设置成 `512 512` +- `--show`: 是否打印输出模型的追踪图 (traced graph),如果没有被专门指定,它将被设置成 `False` +- `--verify`: 是否验证一个输出模型的正确性 (correctness),如果没有被专门指定,它将被设置成 `False` + +**注意**: 目前仅支持 PyTorch>=1.8.0 版本 + +**注意**: 这个工具仍然是试验性的,一些自定义操作符目前还不被支持 + +例子: + +- 导出 PSPNet 在 cityscapes 数据集上的 pytorch 模型 + + ```shell + python tools/pytorch2torchscript.py 
configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \ + --checkpoint checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \ + --output-file checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pt \ + --shape 512 1024 + ``` + +### 导出 TensorRT (试验性) + +一个导出 [ONNX](https://github.com/onnx/onnx) 模型成 [TensorRT](https://developer.nvidia.com/tensorrt) 格式的脚本 + +先决条件 + +- 按照 [ONNXRuntime in mmcv](https://mmcv.readthedocs.io/en/latest/deployment/onnxruntime_op.html) 和 [TensorRT plugin in mmcv](https://github.com/open-mmlab/mmcv/blob/master/docs/en/deployment/tensorrt_plugin.md) ,用 ONNXRuntime 自定义运算 (custom ops) 和 TensorRT 插件安装 `mmcv-full` +- 使用 [pytorch2onnx](#convert-to-onnx-experimental) 将模型从 PyTorch 转成 ONNX + +使用方法 + +```bash +python ${MMSEG_PATH}/tools/onnx2tensorrt.py \ + ${CFG_PATH} \ + ${ONNX_PATH} \ + --trt-file ${OUTPUT_TRT_PATH} \ + --min-shape ${MIN_SHAPE} \ + --max-shape ${MAX_SHAPE} \ + --input-img ${INPUT_IMG} \ + --show \ + --verify +``` + +各个参数的描述: + +- `config` : 模型的配置文件 +- `model` : 输入的 ONNX 模型的路径 +- `--trt-file` : 输出的 TensorRT 引擎的路径 +- `--max-shape` : 模型的输入的最大形状 +- `--min-shape` : 模型的输入的最小形状 +- `--fp16` : 做 fp16 模型转换 +- `--workspace-size` : 在 GiB 里的最大工作空间大小 (Max workspace size) +- `--input-img` : 用来可视化的图像 +- `--show` : 做结果的可视化 +- `--dataset` : Palette provider, 默认为 `CityscapesDataset` +- `--verify` : 验证 ONNXRuntime 和 TensorRT 的输出 +- `--verbose` : 当创建 TensorRT 引擎时,是否详细做信息日志。默认为 False + +**注意**: 仅在全图测试模式 (whole mode) 下测试过 + +## 其他内容 + +### 打印完整的配置文件 + +`tools/print_config.py` 会逐字逐句的打印整个配置文件,展开所有的导入。 + +```shell +python tools/print_config.py \ + ${CONFIG} \ + --graph \ + --cfg-options ${OPTIONS [OPTIONS...]} \ +``` + +各个参数的描述: + +- `config` : pytorch 模型的配置文件的路径 +- `--graph` : 是否打印模型的图 (models graph) +- `--cfg-options`: 自定义替换配置文件的选项 + +### 对训练日志 (training logs) 画图 + +`tools/analyze_logs.py` 会画出给定的训练日志文件的 loss/mIoU 曲线,首先需要 `pip install seaborn` 安装依赖包。 + +```shell +python tools/analyze_logs.py xxx.log.json [--keys ${KEYS}] [--legend ${LEGEND}] [--backend ${BACKEND}] [--style ${STYLE}] [--out ${OUT_FILE}] +``` + +示例: + +- 对 mIoU, mAcc, aAcc 指标画图 + + ```shell + python tools/analyze_logs.py log.json --keys mIoU mAcc aAcc --legend mIoU mAcc aAcc + ``` + +- 对 loss 指标画图 + + ```shell + python tools/analyze_logs.py log.json --keys loss --legend loss + ``` + +### 转换其他仓库的权重 + +`tools/model_converters/` 提供了若干个预训练权重转换脚本,支持将其他仓库的预训练权重的 key 转换为与 MMSegmentation 相匹配的 key。 + +#### ViT Swin MiT Transformer 模型 + +- ViT + +`tools/model_converters/vit2mmseg.py` 将 timm 预训练模型转换到 MMSegmentation。 + +```shell +python tools/model_converters/vit2mmseg.py ${SRC} ${DST} +``` + +- Swin + + `tools/model_converters/swin2mmseg.py` 将官方预训练模型转换到 MMSegmentation。 + + ```shell + python tools/model_converters/swin2mmseg.py ${SRC} ${DST} + ``` + +- SegFormer + + `tools/model_converters/mit2mmseg.py` 将官方预训练模型转换到 MMSegmentation。 + + ```shell + python tools/model_converters/mit2mmseg.py ${SRC} ${DST} + ``` + +## 模型服务 + +为了用 [`TorchServe`](https://pytorch.org/serve/) 服务 `MMSegmentation` 的模型 , 您可以遵循如下流程: + +### 1. 将 model 从 MMSegmentation 转换到 TorchServe + +```shell +python tools/mmseg2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ +--output-folder ${MODEL_STORE} \ +--model-name ${MODEL_NAME} +``` + +**注意**: ${MODEL_STORE} 需要设置为某个文件夹的绝对路径 + +### 2. 构建 `mmseg-serve` 容器镜像 (docker image) + +```shell +docker build -t mmseg-serve:latest docker/serve/ +``` + +### 3. 
运行 `mmseg-serve` + +请查阅官方文档: [使用容器运行 TorchServe](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment) + +为了在 GPU 环境下使用, 您需要安装 [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). 若在 CPU 环境下使用,您可以忽略添加 `--gpus` 参数。 + +示例: + +```shell +docker run --rm \ +--cpus 8 \ +--gpus device=0 \ +-p8080:8080 -p8081:8081 -p8082:8082 \ +--mount type=bind,source=$MODEL_STORE,target=/home/model-server/model-store \ +mmseg-serve:latest +``` + +阅读关于推理 (8080), 管理 (8081) 和指标 (8082) APIs 的 [文档](https://github.com/pytorch/serve/blob/072f5d088cce9bb64b2a18af065886c9b01b317b/docs/rest_api.md) 。 + +### 4. 测试部署 + +```shell +curl -O https://raw.githubusercontent.com/open-mmlab/mmsegmentation/master/resources/3dogs.jpg +curl http://127.0.0.1:8080/predictions/${MODEL_NAME} -T 3dogs.jpg -o 3dogs_mask.png +``` + +得到的响应将是一个 ".png" 的分割掩码. + +您可以按照如下方法可视化输出: + +```python +import matplotlib.pyplot as plt +import mmcv +plt.imshow(mmcv.imread("3dogs_mask.png", "grayscale")) +plt.show() +``` + +看到的东西将会和下图类似: + +![3dogs_mask](../../resources/3dogs_mask.png) + +然后您可以使用 `test_torchserve.py` 比较 torchserve 和 pytorch 的结果,并将它们可视化。 + +```shell +python tools/torchserve/test_torchserve.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} +[--inference-addr ${INFERENCE_ADDR}] [--result-image ${RESULT_IMAGE}] [--device ${DEVICE}] +``` + +示例: + +```shell +python tools/torchserve/test_torchserve.py \ +demo/demo.png \ +configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py \ +checkpoint/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth \ +fcn +``` diff --git a/docs/zh_cn/user_guides/visualization.md b/docs/zh_cn/user_guides/visualization.md new file mode 100644 index 0000000000..ac8b9e289f --- /dev/null +++ b/docs/zh_cn/user_guides/visualization.md @@ -0,0 +1,173 @@ +# 可视化 + +MMSegmentation 1.x 提供了简便的方式监控训练时的状态以及可视化在模型预测时的数据。 + +## 训练状态监控 + +MMSegmentation 1.x 使用 TensorBoard 来监控训练时候的状态。 + +### TensorBoard 的配置 + +安装 TensorBoard 的过程可以按照 [官方安装指南](https://www.tensorflow.org/install) ,具体的步骤如下: + +```shell +pip install tensorboardX +pip install future tensorboard +``` + +在配置文件 `default_runtime.py` 的 `vis_backend` 中添加 `TensorboardVisBackend`。 + +```python +vis_backends = [dict(type='LocalVisBackend'), + dict(type='TensorboardVisBackend')] +visualizer = dict( + type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer') +``` + +### 检查 TensorBoard 中的标量 + +启动训练实验的命令如下 + +```shell +python tools/train.py configs/pspnet/pspnet_r50-d8_4xb4-80k_ade20k-512x512.py --work-dir work_dir/test_visual +``` + +开始训练后找到 `work_dir` 中的 `vis_data` 路径,例如:本次特定测试的 vis_data 路径如下所示: + +```shell +work_dirs/test_visual/20220810_115248/vis_data +``` + +vis_data 路径中的标量文件包括了学习率、损失函数和 data_time 等,还记录了指标结果,您可以参考 MMEngine 中的 [记录日志教程](https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/logging.html) 中的日志教程来帮助记录自己定义的数据。 Tensorboard 的可视化结果使用下面的命令执行: + +```shell +tensorboard --logdir work_dirs/test_visual/20220810_115248/vis_data +``` + +## 数据和结果的可视化 + +### 模型测试或验证期间的可视化数据样本 + +MMSegmentation 提供了 `SegVisualizationHook` ,它是一个可以用于可视化 ground truth 和在模型测试和验证期间的预测分割结果的[钩子](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/hook.html) 。 它的配置在 `default_hooks` 中,更多详细信息请参见 [执行器教程](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/runner.html)。 + +例如,在 `_base_/schedules/schedule_20k.py` 中,修改 `SegVisualizationHook` 配置,将 `draw` 设置为 `True` 以启用网络推理结果的存储,`interval` 表示预测结果的采样间隔, 设置为 1 时,将保存网络的每个推理结果。 `interval` 默认设置为 50: + +```python 
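+# 除 visualization 外,其余字段均为 MMEngine 提供的常用默认钩子:
+# IterTimerHook 统计迭代耗时,LoggerHook 打印日志,ParamSchedulerHook 更新学习率等超参,
+# CheckpointHook 定期保存权重,DistSamplerSeedHook 设置分布式采样的随机种子;
+# 以上说明为补充注释,具体行为以所安装版本的 MMEngine 实现为准。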
+default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=2000), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='SegVisualizationHook', draw=True, interval=1)) + +``` + +启动训练实验后,可视化结果将在 validation loop 存储到本地文件夹中,或者在一个数据集上启动评估模型时,预测结果将存储在本地。本地的可视化的存储结果保存在 `$WORK_DIRS/vis_data` 下的 `vis_image` 中,例如: + +```shell +work_dirs/test_visual/20220810_115248/vis_data/vis_image +``` + +另外,如果在 `vis_backends` 中添加 `TensorboardVisBackend` ,如 [TensorBoard 的配置](#tensorboard-configuration),我们还可以运行下面的命令在 TensorBoard 中查看它们: + +```shell +tensorboard --logdir work_dirs/test_visual/20220810_115248/vis_data +``` + +### 可视化单个数据样本 + +如果你想可视化单个样本数据,我们建议使用 `SegLocalVisualizer` 。 + +`SegLocalVisualizer`是继承自 MMEngine 中`Visualizer` 类的子类,适用于 MMSegmentation 可视化,有关`Visualizer`的详细信息请参考在 MMEngine 中的[可视化教程](https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/visualization.html) 。 + +以下是一个关于 `SegLocalVisualizer` 的示例,首先你可以使用下面的命令下载这个案例中的数据: + +
+ +
+ +```shell +wget https://user-images.githubusercontent.com/24582831/189833109-eddad58f-f777-4fc0-b98a-6bd429143b06.png --output-document aachen_000000_000019_leftImg8bit.png +wget https://user-images.githubusercontent.com/24582831/189833143-15f60f8a-4d1e-4cbb-a6e7-5e2233869fac.png --output-document aachen_000000_000019_gtFine_labelTrainIds.png +``` + +然后你可以找到他们本地的路径和使用下面的脚本文件对其进行可视化: + +```python +import mmcv +import os.path as osp +import torch + +# `PixelData` 是 MMEngine 中用于定义像素级标注或预测的数据结构。 +# 请参考下面的MMEngine数据结构教程文件: +# https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/data_element.html#pixeldata + +from mmengine.structures import PixelData + +# `SegDataSample` 是在 MMSegmentation 中定义的不同组件之间的数据结构接口, +# 它包括 ground truth、语义分割的预测结果和预测逻辑。 +# 详情请参考下面的 `SegDataSample` 教程文件: +# https://github.com/open-mmlab/mmsegmentation/blob/1.x/docs/en/advanced_guides/structures.md + +from mmseg.structures import SegDataSample +from mmseg.visualization import SegLocalVisualizer + +out_file = 'out_file_cityscapes' +save_dir = './work_dirs' + +image = mmcv.imread( + osp.join( + osp.dirname(__file__), + './aachen_000000_000019_leftImg8bit.png' + ), + 'color') +sem_seg = mmcv.imread( + osp.join( + osp.dirname(__file__), + './aachen_000000_000019_gtFine_labelTrainIds.png' # noqa + ), + 'unchanged') +sem_seg = torch.from_numpy(sem_seg) +gt_sem_seg_data = dict(data=sem_seg) +gt_sem_seg = PixelData(**gt_sem_seg_data) +data_sample = SegDataSample() +data_sample.gt_sem_seg = gt_sem_seg + +seg_local_visualizer = SegLocalVisualizer( + vis_backends=[dict(type='LocalVisBackend')], + save_dir=save_dir) + +# 数据集的元信息通常包括类名的 `classes` 和 +# 用于可视化每个前景颜色的 `palette` 。 +# 所有类名和调色板都在此文件中定义: +# https://github.com/open-mmlab/mmsegmentation/blob/1.x/mmseg/utils/class_names.py + +seg_local_visualizer.dataset_meta = dict( + classes=('road', 'sidewalk', 'building', 'wall', 'fence', + 'pole', 'traffic light', 'traffic sign', + 'vegetation', 'terrain', 'sky', 'person', 'rider', + 'car', 'truck', 'bus', 'train', 'motorcycle', + 'bicycle'), + palette=[[128, 64, 128], [244, 35, 232], [70, 70, 70], + [102, 102, 156], [190, 153, 153], [153, 153, 153], + [250, 170, 30], [220, 220, 0], [107, 142, 35], + [152, 251, 152], [70, 130, 180], [220, 20, 60], + [255, 0, 0], [0, 0, 142], [0, 0, 70], + [0, 60, 100], [0, 80, 100], [0, 0, 230], + [119, 11, 32]]) + +# 当`show=True`时,直接显示结果, +# 当 `show=False`时,结果将保存在本地文件夹中。 + +seg_local_visualizer.add_datasample(out_file, image, + data_sample, show=False) +``` + +可视化后的图像结果和它的对应的 ground truth 图像可以在 `./work_dirs/vis_data/vis_image/` 路径找到,文件名字是:`out_file_cityscapes_0.png` : + +
+ +
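+上面的示例可视化的是数据集的 ground truth。如果想进一步把某个训练好的模型在单张图片上的预测结果可视化,也可以直接使用 `mmseg.apis` 提供的推理接口。下面是一个简要示例,其中的配置文件、权重文件和输出路径仅为示意,假设您已将对应文件下载到本地:
+
+```python
+from mmseg.apis import inference_model, init_model, show_result_pyplot
+
+# 配置文件与权重文件路径仅为示意,请替换为本地实际存在的文件
+config_file = 'configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py'
+checkpoint_file = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'
+
+# 构建模型并加载权重,如果没有 GPU 可以把 device 改为 'cpu'
+model = init_model(config_file, checkpoint_file, device='cuda:0')
+
+# 对单张图片进行推理,返回一个 SegDataSample
+result = inference_model(model, 'aachen_000000_000019_leftImg8bit.png')
+
+# 将预测结果叠加到原图上;show=False 时不弹窗,直接保存到 out_file
+vis_image = show_result_pyplot(
+    model,
+    'aachen_000000_000019_leftImg8bit.png',
+    result,
+    opacity=0.5,
+    show=False,
+    out_file='./work_dirs/aachen_pred.png')
+```
+
+`show_result_pyplot` 会返回绘制完成的 RGB 图像,也便于在 notebook 中直接展示。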
+ +如果你想知道更多的关于可视化的使用指引,你可以参考 MMEngine 中的[可视化教程](<[https://mmengine.readthedocs.io/en/latest/advanced_tutorials/visualization.html](https://github.com/open-mmlab/mmengine/blob/main/docs/zh_cn/advanced_tutorials/visualization.md)>) diff --git a/mmseg/VERSION b/mmseg/VERSION deleted file mode 100644 index 8f0916f768..0000000000 --- a/mmseg/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.5.0 diff --git a/mmseg/__init__.py b/mmseg/__init__.py index 1c4f7e8fcc..b395013526 100644 --- a/mmseg/__init__.py +++ b/mmseg/__init__.py @@ -1,3 +1,74 @@ -from .version import __version__, short_version +# Copyright (c) OpenMMLab. All rights reserved. +import warnings -__all__ = ['__version__', 'short_version'] +import mmcv +import mmengine +from packaging.version import parse + +from .version import __version__, version_info + +MMCV_MIN = '2.0.0rc3' +MMCV_MAX = '2.1.0' +MMENGINE_MIN = '0.1.0' +MMENGINE_MAX = '1.0.0' + + +def digit_version(version_str: str, length: int = 4): + """Convert a version string into a tuple of integers. + + This method is usually used for comparing two versions. For pre-release + versions: alpha < beta < rc. + + Args: + version_str (str): The version string. + length (int): The maximum number of version levels. Default: 4. + + Returns: + tuple[int]: The version info in digits (integers). + """ + version = parse(version_str) + assert version.release, f'failed to parse version {version_str}' + release = list(version.release) + release = release[:length] + if len(release) < length: + release = release + [0] * (length - len(release)) + if version.is_prerelease: + mapping = {'a': -3, 'b': -2, 'rc': -1} + val = -4 + # version.pre can be None + if version.pre: + if version.pre[0] not in mapping: + warnings.warn(f'unknown prerelease version {version.pre[0]}, ' + 'version checking may go wrong') + else: + val = mapping[version.pre[0]] + release.extend([val, version.pre[-1]]) + else: + release.extend([val, 0]) + + elif version.is_postrelease: + release.extend([1, version.post]) + else: + release.extend([0, 0]) + return tuple(release) + + +mmcv_min_version = digit_version(MMCV_MIN) +mmcv_max_version = digit_version(MMCV_MAX) +mmcv_version = digit_version(mmcv.__version__) + + +assert (mmcv_min_version <= mmcv_version < mmcv_max_version), \ + f'MMCV=={mmcv.__version__} is used but incompatible. ' \ + f'Please install mmcv>={mmcv_min_version}, <{mmcv_max_version}.' + +mmengine_min_version = digit_version(MMENGINE_MIN) +mmengine_max_version = digit_version(MMENGINE_MAX) +mmengine_version = digit_version(mmengine.__version__) + +assert (mmengine_min_version <= mmengine_version < mmengine_max_version), \ + f'MMEngine=={mmengine.__version__} is used but incompatible. ' \ + f'Please install mmengine>={mmengine_min_version}, '\ + f'<{mmengine_max_version}.' + +__all__ = ['__version__', 'version_info', 'digit_version'] diff --git a/mmseg/apis/__init__.py b/mmseg/apis/__init__.py index 170724be38..9933b99b3c 100644 --- a/mmseg/apis/__init__.py +++ b/mmseg/apis/__init__.py @@ -1,9 +1,4 @@ -from .inference import inference_segmentor, init_segmentor, show_result_pyplot -from .test import multi_gpu_test, single_gpu_test -from .train import get_root_logger, set_random_seed, train_segmentor +# Copyright (c) OpenMMLab. All rights reserved. 
+from .inference import inference_model, init_model, show_result_pyplot -__all__ = [ - 'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor', - 'inference_segmentor', 'multi_gpu_test', 'single_gpu_test', - 'show_result_pyplot' -] +__all__ = ['init_model', 'inference_model', 'show_result_pyplot'] diff --git a/mmseg/apis/inference.py b/mmseg/apis/inference.py index 3ba6b62ce1..9abc85d627 100644 --- a/mmseg/apis/inference.py +++ b/mmseg/apis/inference.py @@ -1,70 +1,128 @@ -import matplotlib.pyplot as plt +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from collections import defaultdict +from pathlib import Path +from typing import Optional, Sequence, Union + import mmcv +import numpy as np import torch -from mmcv.parallel import collate, scatter -from mmcv.runner import load_checkpoint - -from mmseg.datasets.pipelines import Compose -from mmseg.models import build_segmentor - - -def init_segmentor(config, checkpoint=None, device='cuda:0'): +from mmengine import Config +from mmengine.dataset import Compose +from mmengine.runner import load_checkpoint +from mmengine.utils import mkdir_or_exist + +from mmseg.models import BaseSegmentor +from mmseg.registry import MODELS +from mmseg.structures import SegDataSample +from mmseg.utils import SampleList, dataset_aliases, get_classes, get_palette +from mmseg.visualization import SegLocalVisualizer + + +def init_model(config: Union[str, Path, Config], + checkpoint: Optional[str] = None, + device: str = 'cuda:0', + cfg_options: Optional[dict] = None): """Initialize a segmentor from config file. Args: - config (str or :obj:`mmcv.Config`): Config file path or the config - object. + config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path, + :obj:`Path`, or the config object. checkpoint (str, optional): Checkpoint path. If left as None, the model will not load any weights. - + device (str, optional) CPU/CUDA device option. Default 'cuda:0'. + Use 'cpu' for loading model on CPU. + cfg_options (dict, optional): Options to override some settings in + the used config. Returns: nn.Module: The constructed segmentor. 
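+
+    Examples:
+        >>> # Illustrative usage: the config path below is a placeholder and
+        >>> # assumes the file exists locally; pass a checkpoint path to load weights.
+        >>> from mmseg.apis import init_model
+        >>> cfg = 'configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py'
+        >>> model = init_model(cfg, checkpoint=None, device='cpu')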
""" - if isinstance(config, str): - config = mmcv.Config.fromfile(config) - elif not isinstance(config, mmcv.Config): + if isinstance(config, (str, Path)): + config = Config.fromfile(config) + elif not isinstance(config, Config): raise TypeError('config must be a filename or Config object, ' 'but got {}'.format(type(config))) + if cfg_options is not None: + config.merge_from_dict(cfg_options) + elif 'init_cfg' in config.model.backbone: + config.model.backbone.init_cfg = None config.model.pretrained = None - model = build_segmentor(config.model, test_cfg=config.test_cfg) + config.model.train_cfg = None + model = MODELS.build(config.model) if checkpoint is not None: - checkpoint = load_checkpoint(model, checkpoint) - model.CLASSES = checkpoint['meta']['CLASSES'] - model.PALETTE = checkpoint['meta']['PALETTE'] + checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') + dataset_meta = checkpoint['meta'].get('dataset_meta', None) + # save the dataset_meta in the model for convenience + if 'dataset_meta' in checkpoint.get('meta', {}): + # mmseg 1.x + model.dataset_meta = dataset_meta + elif 'CLASSES' in checkpoint.get('meta', {}): + # < mmseg 1.x + classes = checkpoint['meta']['CLASSES'] + palette = checkpoint['meta']['PALETTE'] + model.dataset_meta = {'classes': classes, 'palette': palette} + else: + warnings.simplefilter('once') + warnings.warn( + 'dataset_meta or class names are not saved in the ' + 'checkpoint\'s meta data, classes and palette will be' + 'set according to num_classes ') + num_classes = model.decode_head.num_classes + dataset_name = None + for name in dataset_aliases.keys(): + if len(get_classes(name)) == num_classes: + dataset_name = name + break + if dataset_name is None: + warnings.warn( + 'No suitable dataset found, use Cityscapes by default') + dataset_name = 'cityscapes' + model.dataset_meta = { + 'classes': get_classes(dataset_name), + 'palette': get_palette(dataset_name) + } model.cfg = config # save the config in the model for convenience model.to(device) model.eval() return model -class LoadImage: - """A simple pipeline to load image.""" +ImageType = Union[str, np.ndarray, Sequence[str], Sequence[np.ndarray]] + + +def _preprare_data(imgs: ImageType, model: BaseSegmentor): + + cfg = model.cfg + if dict(type='LoadAnnotations') in cfg.test_pipeline: + cfg.test_pipeline.remove(dict(type='LoadAnnotations')) - def __call__(self, results): - """Call function to load images into results. + is_batch = True + if not isinstance(imgs, (list, tuple)): + imgs = [imgs] + is_batch = False - Args: - results (dict): A result dict contains the file name - of the image to be read. + if isinstance(imgs[0], np.ndarray): + cfg.test_pipeline[0]['type'] = 'LoadImageFromNDArray' - Returns: - dict: ``results`` will be returned containing loaded image. 
- """ + # TODO: Consider using the singleton pattern to avoid building + # a pipeline for each inference + pipeline = Compose(cfg.test_pipeline) - if isinstance(results['img'], str): - results['filename'] = results['img'] - results['ori_filename'] = results['img'] + data = defaultdict(list) + for img in imgs: + if isinstance(img, np.ndarray): + data_ = dict(img=img) else: - results['filename'] = None - results['ori_filename'] = None - img = mmcv.imread(results['img']) - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - return results + data_ = dict(img_path=img) + data_ = pipeline(data_) + data['inputs'].append(data_['inputs']) + data['data_samples'].append(data_['data_samples']) + + return data, is_batch -def inference_segmentor(model, img): +def inference_model(model: BaseSegmentor, + img: ImageType) -> Union[SegDataSample, SampleList]: """Inference image(s) with the segmentor. Args: @@ -73,44 +131,80 @@ def inference_segmentor(model, img): images. Returns: - (list[Tensor]): The segmentation result. + :obj:`SegDataSample` or list[:obj:`SegDataSample`]: + If imgs is a list or tuple, the same length list type results + will be returned, otherwise return the segmentation results directly. """ - cfg = model.cfg - device = next(model.parameters()).device # model device - # build the data pipeline - test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] - test_pipeline = Compose(test_pipeline) # prepare data - data = dict(img=img) - data = test_pipeline(data) - data = collate([data], samples_per_gpu=1) - if next(model.parameters()).is_cuda: - # scatter to specified GPU - data = scatter(data, [device])[0] - else: - data['img_metas'] = data['img_metas'][0].data + data, is_batch = _preprare_data(img, model) # forward the model with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) - return result - - -def show_result_pyplot(model, img, result, palette=None, fig_size=(15, 10)): + results = model.test_step(data) + + return results if is_batch else results[0] + + +def show_result_pyplot(model: BaseSegmentor, + img: Union[str, np.ndarray], + result: SegDataSample, + opacity: float = 0.5, + title: str = '', + draw_gt: bool = True, + draw_pred: bool = True, + wait_time: float = 0, + show: bool = True, + save_dir=None, + out_file=None): """Visualize the segmentation results on the image. Args: model (nn.Module): The loaded segmentor. img (str or np.ndarray): Image filename or loaded image. - result (list): The segmentation result. - palette (list[list[int]]] | None): The palette of segmentation - map. If None is given, random palette will be generated. - Default: None - fig_size (tuple): Figure size of the pyplot figure. + result (SegDataSample): The prediction SegDataSample result. + opacity(float): Opacity of painted segmentation map. + Default 0.5. Must be in (0, 1] range. + title (str): The title of pyplot figure. + Default is ''. + draw_gt (bool): Whether to draw GT SegDataSample. Default to True. + draw_pred (bool): Whether to draw Prediction SegDataSample. + Defaults to True. + wait_time (float): The interval of show (s). 0 is the special value + that means "forever". Defaults to 0. + show (bool): Whether to display the drawn image. + Default to True. + save_dir (str, optional): Save file dir for all storage backends. + If it is None, the backend storage will not save any data. + out_file (str, optional): Path to output file. Default to None. + + Returns: + np.ndarray: the drawn image which channel is RGB. 
""" if hasattr(model, 'module'): model = model.module - img = model.show_result(img, result, palette=palette, show=False) - plt.figure(figsize=fig_size) - plt.imshow(mmcv.bgr2rgb(img)) - plt.show() + if isinstance(img, str): + image = mmcv.imread(img) + else: + image = img + if save_dir is not None: + mkdir_or_exist(save_dir) + # init visualizer + visualizer = SegLocalVisualizer( + vis_backends=[dict(type='LocalVisBackend')], + save_dir=save_dir, + alpha=opacity) + visualizer.dataset_meta = dict( + classes=model.dataset_meta['classes'], + palette=model.dataset_meta['palette']) + visualizer.add_datasample( + name=title, + image=image, + data_sample=result, + draw_gt=draw_gt, + draw_pred=draw_pred, + wait_time=wait_time, + out_file=out_file, + show=show) + vis_img = visualizer.get_image() + + return vis_img diff --git a/mmseg/apis/test.py b/mmseg/apis/test.py deleted file mode 100644 index 8cbf236f05..0000000000 --- a/mmseg/apis/test.py +++ /dev/null @@ -1,191 +0,0 @@ -import os.path as osp -import pickle -import shutil -import tempfile - -import mmcv -import torch -import torch.distributed as dist -from mmcv.image import tensor2imgs -from mmcv.runner import get_dist_info - - -def single_gpu_test(model, data_loader, show=False, out_dir=None): - """Test with single GPU. - - Args: - model (nn.Module): Model to be tested. - data_loader (nn.Dataloader): Pytorch data loader. - show (bool): Whether show results during infernece. Default: False. - out_dir (str, optional): If specified, the results will be dumped - into the directory to save output results. - - Returns: - list: The prediction results. - """ - - model.eval() - results = [] - dataset = data_loader.dataset - prog_bar = mmcv.ProgressBar(len(dataset)) - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, rescale=not show, **data) - if isinstance(results, list): - results.extend(result) - else: - results.append(result) - - if show or out_dir: - img_tensor = data['img'][0] - img_metas = data['img_metas'][0].data[0] - imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) - assert len(imgs) == len(img_metas) - - for img, img_meta in zip(imgs, img_metas): - h, w, _ = img_meta['img_shape'] - img_show = img[:h, :w, :] - - ori_h, ori_w = img_meta['ori_shape'][:-1] - img_show = mmcv.imresize(img_show, (ori_w, ori_h)) - - if out_dir: - out_file = osp.join(out_dir, img_meta['ori_filename']) - else: - out_file = None - - model.module.show_result( - img_show, - result, - palette=dataset.PALETTE, - show=show, - out_file=out_file) - - batch_size = data['img'][0].size(0) - for _ in range(batch_size): - prog_bar.update() - return results - - -def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): - """Test model with multiple gpus. - - This method tests model with multiple gpus and collects the results - under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' - it encodes results to gpu tensors and use gpu communication for results - collection. On cpu mode it saves the results on different gpus to 'tmpdir' - and collects them by the rank 0 worker. - - Args: - model (nn.Module): Model to be tested. - data_loader (nn.Dataloader): Pytorch data loader. - tmpdir (str): Path of directory to save the temporary results from - different gpus under cpu mode. - gpu_collect (bool): Option to use either gpu or cpu to collect results. - - Returns: - list: The prediction results. 
- """ - - model.eval() - results = [] - dataset = data_loader.dataset - rank, world_size = get_dist_info() - if rank == 0: - prog_bar = mmcv.ProgressBar(len(dataset)) - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) - if isinstance(results, list): - results.extend(result) - else: - results.append(result) - - if rank == 0: - batch_size = data['img'][0].size(0) - for _ in range(batch_size * world_size): - prog_bar.update() - - # collect results from all ranks - if gpu_collect: - results = collect_results_gpu(results, len(dataset)) - else: - results = collect_results_cpu(results, len(dataset), tmpdir) - return results - - -def collect_results_cpu(result_part, size, tmpdir=None): - """Collect results with CPU.""" - rank, world_size = get_dist_info() - # create a tmp dir if it is not specified - if tmpdir is None: - MAX_LEN = 512 - # 32 is whitespace - dir_tensor = torch.full((MAX_LEN, ), - 32, - dtype=torch.uint8, - device='cuda') - if rank == 0: - tmpdir = tempfile.mkdtemp() - tmpdir = torch.tensor( - bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') - dir_tensor[:len(tmpdir)] = tmpdir - dist.broadcast(dir_tensor, 0) - tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() - else: - mmcv.mkdir_or_exist(tmpdir) - # dump the part result to the dir - mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank))) - dist.barrier() - # collect all parts - if rank != 0: - return None - else: - # load results of all parts from tmp dir - part_list = [] - for i in range(world_size): - part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i)) - part_list.append(mmcv.load(part_file)) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - # remove tmp dir - shutil.rmtree(tmpdir) - return ordered_results - - -def collect_results_gpu(result_part, size): - """Collect results with GPU.""" - rank, world_size = get_dist_info() - # dump result part to tensor with pickle - part_tensor = torch.tensor( - bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') - # gather all result part tensor shape - shape_tensor = torch.tensor(part_tensor.shape, device='cuda') - shape_list = [shape_tensor.clone() for _ in range(world_size)] - dist.all_gather(shape_list, shape_tensor) - # padding result part tensor to max length - shape_max = torch.tensor(shape_list).max() - part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') - part_send[:shape_tensor[0]] = part_tensor - part_recv_list = [ - part_tensor.new_zeros(shape_max) for _ in range(world_size) - ] - # gather all result part - dist.all_gather(part_recv_list, part_send) - - if rank == 0: - part_list = [] - for recv, shape in zip(part_recv_list, shape_list): - part_list.append( - pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - return ordered_results diff --git a/mmseg/apis/train.py b/mmseg/apis/train.py deleted file mode 100644 index b703143587..0000000000 --- a/mmseg/apis/train.py +++ /dev/null @@ -1,106 +0,0 @@ -import random - -import numpy as np -import torch -from mmcv.parallel import MMDataParallel, MMDistributedDataParallel -from mmcv.runner import IterBasedRunner, build_optimizer - -from mmseg.core import 
DistEvalHook, EvalHook -from mmseg.datasets import build_dataloader, build_dataset -from mmseg.utils import get_root_logger - - -def set_random_seed(seed, deterministic=False): - """Set random seed. - - Args: - seed (int): Seed to be used. - deterministic (bool): Whether to set the deterministic option for - CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` - to True and `torch.backends.cudnn.benchmark` to False. - Default: False. - """ - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - if deterministic: - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - -def train_segmentor(model, - dataset, - cfg, - distributed=False, - validate=False, - timestamp=None, - meta=None): - """Launch segmentor training.""" - logger = get_root_logger(cfg.log_level) - - # prepare data loaders - dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] - data_loaders = [ - build_dataloader( - ds, - cfg.data.samples_per_gpu, - cfg.data.workers_per_gpu, - # cfg.gpus will be ignored if distributed - len(cfg.gpu_ids), - dist=distributed, - seed=cfg.seed, - drop_last=True) for ds in dataset - ] - - # put model on gpus - if distributed: - find_unused_parameters = cfg.get('find_unused_parameters', False) - # Sets the `find_unused_parameters` parameter in - # torch.nn.parallel.DistributedDataParallel - model = MMDistributedDataParallel( - model.cuda(), - device_ids=[torch.cuda.current_device()], - broadcast_buffers=False, - find_unused_parameters=find_unused_parameters) - else: - model = MMDataParallel( - model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids) - - # build runner - optimizer = build_optimizer(model, cfg.optimizer) - - runner = IterBasedRunner( - model=model, - batch_processor=None, - optimizer=optimizer, - work_dir=cfg.work_dir, - logger=logger, - meta=meta) - - # register hooks - runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config, - cfg.checkpoint_config, cfg.log_config, - cfg.get('momentum_config', None)) - - # an ugly walkaround to make the .log and .log.json filenames the same - runner.timestamp = timestamp - - # register eval hooks - if validate: - val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) - val_dataloader = build_dataloader( - val_dataset, - samples_per_gpu=1, - workers_per_gpu=cfg.data.workers_per_gpu, - dist=distributed, - shuffle=False) - eval_cfg = cfg.get('evaluation', {}) - eval_hook = DistEvalHook if distributed else EvalHook - runner.register_hook(eval_hook(val_dataloader, **eval_cfg)) - - if cfg.resume_from: - runner.resume(cfg.resume_from) - elif cfg.load_from: - runner.load_checkpoint(cfg.load_from) - runner.run(data_loaders, cfg.workflow, cfg.total_iters) diff --git a/mmseg/core/__init__.py b/mmseg/core/__init__.py deleted file mode 100644 index 9656055872..0000000000 --- a/mmseg/core/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .evaluation import * # noqa: F401, F403 -from .seg import * # noqa: F401, F403 -from .utils import * # noqa: F401, F403 diff --git a/mmseg/core/evaluation/__init__.py b/mmseg/core/evaluation/__init__.py deleted file mode 100644 index f169d1bf1b..0000000000 --- a/mmseg/core/evaluation/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .class_names import get_classes, get_palette -from .eval_hooks import DistEvalHook, EvalHook -from .mean_iou import mean_iou - -__all__ = [ - 'EvalHook', 'DistEvalHook', 'mean_iou', 'get_classes', 'get_palette' -] diff --git a/mmseg/core/evaluation/class_names.py 
b/mmseg/core/evaluation/class_names.py deleted file mode 100644 index 0d8e66d54b..0000000000 --- a/mmseg/core/evaluation/class_names.py +++ /dev/null @@ -1,152 +0,0 @@ -import mmcv - - -def cityscapes_classes(): - """Cityscapes class names for external use.""" - return [ - 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', - 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', - 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', - 'bicycle' - ] - - -def ade_classes(): - """ADE20K class names for external use.""" - return [ - 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', - 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', - 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', - 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', - 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', - 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', - 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', - 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', - 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', - 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', - 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', - 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', - 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', - 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', - 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', - 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', - 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', - 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', - 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', - 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', - 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', - 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', - 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', - 'clock', 'flag' - ] - - -def voc_classes(): - """Pascal VOC class names for external use.""" - return [ - 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', - 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', - 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', - 'tvmonitor' - ] - - -def cityscapes_palette(): - """Cityscapes palette for external use.""" - return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], - [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], - [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], - [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], - [0, 0, 230], [119, 11, 32]] - - -def ade_palette(): - """ADE20K palette for external use.""" - return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], - [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], - [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], - [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], - [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], - [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], - [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], - [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], - [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], - [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 
194, 7], - [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], - [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], - [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], - [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], - [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], - [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], - [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], - [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], - [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], - [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], - [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], - [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], - [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], - [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], - [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], - [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], - [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], - [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], - [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], - [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], - [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], - [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], - [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], - [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], - [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], - [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], - [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], - [102, 255, 0], [92, 0, 255]] - - -def voc_palette(): - """Pascal VOC palette for external use.""" - return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], - [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], - [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], - [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], - [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] - - -dataset_aliases = { - 'cityscapes': ['cityscapes'], - 'ade': ['ade', 'ade20k'], - 'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug'] -} - - -def get_classes(dataset): - """Get class names of a dataset.""" - alias2name = {} - for name, aliases in dataset_aliases.items(): - for alias in aliases: - alias2name[alias] = name - - if mmcv.is_str(dataset): - if dataset in alias2name: - labels = eval(alias2name[dataset] + '_classes()') - else: - raise ValueError(f'Unrecognized dataset: {dataset}') - else: - raise TypeError(f'dataset must a str, but got {type(dataset)}') - return labels - - -def get_palette(dataset): - """Get class palette (RGB) of a dataset.""" - alias2name = {} - for name, aliases in dataset_aliases.items(): - for alias in aliases: - alias2name[alias] = name - - if mmcv.is_str(dataset): - if dataset in alias2name: - labels = eval(alias2name[dataset] + '_palette()') - else: - raise ValueError(f'Unrecognized dataset: {dataset}') - else: - raise TypeError(f'dataset must a str, but got {type(dataset)}') - return labels diff --git a/mmseg/core/evaluation/eval_hooks.py b/mmseg/core/evaluation/eval_hooks.py deleted file mode 100644 index cbd0b23fe9..0000000000 --- a/mmseg/core/evaluation/eval_hooks.py +++ /dev/null @@ -1,80 +0,0 @@ -import os.path as osp - -from mmcv.runner import Hook -from torch.utils.data import DataLoader - - -class EvalHook(Hook): - """Evaluation hook. - - Attributes: - dataloader (DataLoader): A PyTorch dataloader. 
- interval (int): Evaluation interval (by epochs). Default: 1. - """ - - def __init__(self, dataloader, interval=1, **eval_kwargs): - if not isinstance(dataloader, DataLoader): - raise TypeError('dataloader must be a pytorch DataLoader, but got ' - f'{type(dataloader)}') - self.dataloader = dataloader - self.interval = interval - self.eval_kwargs = eval_kwargs - - def after_train_iter(self, runner): - """After train epoch hook.""" - if not self.every_n_iters(runner, self.interval): - return - from mmseg.apis import single_gpu_test - runner.log_buffer.clear() - results = single_gpu_test(runner.model, self.dataloader, show=False) - self.evaluate(runner, results) - - def evaluate(self, runner, results): - """Call evaluate function of dataset.""" - eval_res = self.dataloader.dataset.evaluate( - results, logger=runner.logger, **self.eval_kwargs) - for name, val in eval_res.items(): - runner.log_buffer.output[name] = val - runner.log_buffer.ready = True - - -class DistEvalHook(EvalHook): - """Distributed evaluation hook. - - Attributes: - dataloader (DataLoader): A PyTorch dataloader. - interval (int): Evaluation interval (by epochs). Default: 1. - tmpdir (str | None): Temporary directory to save the results of all - processes. Default: None. - gpu_collect (bool): Whether to use gpu or cpu to collect results. - Default: False. - """ - - def __init__(self, - dataloader, - interval=1, - gpu_collect=False, - **eval_kwargs): - if not isinstance(dataloader, DataLoader): - raise TypeError( - 'dataloader must be a pytorch DataLoader, but got {}'.format( - type(dataloader))) - self.dataloader = dataloader - self.interval = interval - self.gpu_collect = gpu_collect - self.eval_kwargs = eval_kwargs - - def after_train_iter(self, runner): - """After train epoch hook.""" - if not self.every_n_iters(runner, self.interval): - return - from mmseg.apis import multi_gpu_test - runner.log_buffer.clear() - results = multi_gpu_test( - runner.model, - self.dataloader, - tmpdir=osp.join(runner.work_dir, '.eval_hook'), - gpu_collect=self.gpu_collect) - if runner.rank == 0: - print('\n') - self.evaluate(runner, results) diff --git a/mmseg/core/evaluation/mean_iou.py b/mmseg/core/evaluation/mean_iou.py deleted file mode 100644 index f0b4234fb4..0000000000 --- a/mmseg/core/evaluation/mean_iou.py +++ /dev/null @@ -1,70 +0,0 @@ -import numpy as np - - -def intersect_and_union(pred_label, label, num_classes, ignore_index): - """Calculate intersection and Union. - - Args: - pred_label (ndarray): Prediction segmentation map - label (ndarray): Ground truth segmentation map - num_classes (int): Number of categories - ignore_index (int): Index that will be ignored in evaluation. - - Returns: - ndarray: The intersection of prediction and ground truth histogram - on all classes - ndarray: The union of prediction and ground truth histogram on all - classes - ndarray: The prediction histogram on all classes. - ndarray: The ground truth histogram on all classes. 
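(For reference, the per-class IoU that the removed ``mean_iou``/``intersect_and_union`` helpers report can be reproduced with the same histogram trick in a few lines of NumPy; the snippet below is a minimal illustrative sketch, not code from this diff.)

import numpy as np

def quick_iou(pred, gt, num_classes, ignore_index=255):
    # Keep only pixels whose ground truth is not the ignore label.
    mask = gt != ignore_index
    pred, gt = pred[mask], gt[mask]
    bins = np.arange(num_classes + 1)
    # Per-class histograms of correct pixels, predictions and labels.
    inter, _ = np.histogram(pred[pred == gt], bins=bins)
    area_pred, _ = np.histogram(pred, bins=bins)
    area_gt, _ = np.histogram(gt, bins=bins)
    union = area_pred + area_gt - inter
    return inter / union  # per-class IoU; np.nanmean(...) gives mIoU

pred = np.array([0, 1, 1, 2, 2])
gt = np.array([0, 1, 2, 2, 255])
print(quick_iou(pred, gt, num_classes=3))  # per-class IoU: [1.0, 0.5, 0.5]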
- """ - - mask = (label != ignore_index) - pred_label = pred_label[mask] - label = label[mask] - - intersect = pred_label[pred_label == label] - area_intersect, _ = np.histogram( - intersect, bins=np.arange(num_classes + 1)) - area_pred_label, _ = np.histogram( - pred_label, bins=np.arange(num_classes + 1)) - area_label, _ = np.histogram(label, bins=np.arange(num_classes + 1)) - area_union = area_pred_label + area_label - area_intersect - - return area_intersect, area_union, area_pred_label, area_label - - -def mean_iou(results, gt_seg_maps, num_classes, ignore_index): - """Calculate Intersection and Union (IoU) - - Args: - results (list[ndarray]): List of prediction segmentation maps - gt_seg_maps (list[ndarray]): list of ground truth segmentation maps - num_classes (int): Number of categories - ignore_index (int): Index that will be ignored in evaluation. - - Returns: - float: Overall accuracy on all images. - ndarray: Per category accuracy, shape (num_classes, ) - ndarray: Per category IoU, shape (num_classes, ) - """ - - num_imgs = len(results) - assert len(gt_seg_maps) == num_imgs - total_area_intersect = np.zeros((num_classes, ), dtype=np.float) - total_area_union = np.zeros((num_classes, ), dtype=np.float) - total_area_pred_label = np.zeros((num_classes, ), dtype=np.float) - total_area_label = np.zeros((num_classes, ), dtype=np.float) - for i in range(num_imgs): - area_intersect, area_union, area_pred_label, area_label = \ - intersect_and_union(results[i], gt_seg_maps[i], num_classes, - ignore_index=ignore_index) - total_area_intersect += area_intersect - total_area_union += area_union - total_area_pred_label += area_pred_label - total_area_label += area_label - all_acc = total_area_intersect.sum() / total_area_label.sum() - acc = total_area_intersect / total_area_label - iou = total_area_intersect / total_area_union - - return all_acc, acc, iou diff --git a/mmseg/core/seg/__init__.py b/mmseg/core/seg/__init__.py deleted file mode 100644 index 93bc129b68..0000000000 --- a/mmseg/core/seg/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .builder import build_pixel_sampler -from .sampler import BasePixelSampler, OHEMPixelSampler - -__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler'] diff --git a/mmseg/core/seg/builder.py b/mmseg/core/seg/builder.py deleted file mode 100644 index f5a117ce7b..0000000000 --- a/mmseg/core/seg/builder.py +++ /dev/null @@ -1,8 +0,0 @@ -from mmcv.utils import Registry, build_from_cfg - -PIXEL_SAMPLERS = Registry('pixel sampler') - - -def build_pixel_sampler(cfg, **default_args): - """Build pixel sampler for segmentation map.""" - return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args) diff --git a/mmseg/core/seg/sampler/__init__.py b/mmseg/core/seg/sampler/__init__.py deleted file mode 100644 index 332b242c03..0000000000 --- a/mmseg/core/seg/sampler/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .base_pixel_sampler import BasePixelSampler -from .ohem_pixel_sampler import OHEMPixelSampler - -__all__ = ['BasePixelSampler', 'OHEMPixelSampler'] diff --git a/mmseg/core/seg/sampler/ohem_pixel_sampler.py b/mmseg/core/seg/sampler/ohem_pixel_sampler.py deleted file mode 100644 index 28c14ab5d1..0000000000 --- a/mmseg/core/seg/sampler/ohem_pixel_sampler.py +++ /dev/null @@ -1,64 +0,0 @@ -import torch -import torch.nn.functional as F - -from ..builder import PIXEL_SAMPLERS -from .base_pixel_sampler import BasePixelSampler - - -@PIXEL_SAMPLERS.register_module() -class OHEMPixelSampler(BasePixelSampler): - """Online Hard Example Mining Sampler for 
segmentation. - - Args: - thresh (float): The threshold for hard example selection. Below - which, are prediction with low confidence. Default: 0.7. - min_kept (int): The minimum number of predictions to keep. - Default: 100000. - ignore_index (int): The ignore index for training. Default: 255. - """ - - def __init__(self, thresh=0.7, min_kept=100000, ignore_index=255): - super(OHEMPixelSampler, self).__init__() - assert min_kept > 1 - self.thresh = thresh - self.min_kept = min_kept - self.ignore_index = ignore_index - - def sample(self, seg_logit, seg_label): - """ - - Args: - seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W) - seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W) - - Returns: - torch.Tensor: segmentation weight, shape (N, H, W) - - """ - with torch.no_grad(): - assert seg_logit.shape[2:] == seg_label.shape[2:] - assert seg_label.shape[1] == 1 - seg_label = seg_label.squeeze(1).long() - batch_kept = self.min_kept * seg_label.size(0) - seg_prob = F.softmax(seg_logit, dim=1) - mask = seg_label.contiguous().view(-1, ) != self.ignore_index - - tmp_seg_label = seg_label.clone() - tmp_seg_label[tmp_seg_label == self.ignore_index] = 0 - seg_prob = seg_prob.gather(1, tmp_seg_label.unsqueeze(1)) - sort_prob, sort_indices = seg_prob.contiguous().view( - -1, )[mask].contiguous().sort() - - if sort_prob.numel() > 0: - min_threshold = sort_prob[min(batch_kept, - sort_prob.numel() - 1)] - else: - min_threshold = 0.0 - threshold = max(min_threshold, self.thresh) - - seg_weight = seg_logit.new_ones(size=seg_label.size()) - seg_weight = seg_weight.view(-1) - seg_weight[mask][sort_prob < threshold] = 0. - seg_weight = seg_weight.view_as(seg_label) - - return seg_weight diff --git a/mmseg/core/utils/__init__.py b/mmseg/core/utils/__init__.py deleted file mode 100644 index f2678b321c..0000000000 --- a/mmseg/core/utils/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .misc import add_prefix - -__all__ = ['add_prefix'] diff --git a/mmseg/core/utils/misc.py b/mmseg/core/utils/misc.py deleted file mode 100644 index eb862a82bd..0000000000 --- a/mmseg/core/utils/misc.py +++ /dev/null @@ -1,17 +0,0 @@ -def add_prefix(inputs, prefix): - """Add prefix for dict. - - Args: - inputs (dict): The input dict with str keys. - prefix (str): The prefix to add. - - Returns: - - dict: The dict with keys updated with ``prefix``. - """ - - outputs = dict() - for name, value in inputs.items(): - outputs[f'{prefix}.{name}'] = value - - return outputs diff --git a/mmseg/datasets/__init__.py b/mmseg/datasets/__init__.py index cb81b9a2eb..b577f4bb0f 100644 --- a/mmseg/datasets/__init__.py +++ b/mmseg/datasets/__init__.py @@ -1,12 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
from .ade import ADE20KDataset -from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset +from .basesegdataset import BaseSegDataset +from .chase_db1 import ChaseDB1Dataset from .cityscapes import CityscapesDataset -from .custom import CustomDataset -from .dataset_wrappers import ConcatDataset, RepeatDataset +from .coco_stuff import COCOStuffDataset +from .dark_zurich import DarkZurichDataset +from .dataset_wrappers import MultiImageMixDataset +from .decathlon import DecathlonDataset +from .drive import DRIVEDataset +from .hrf import HRFDataset +from .isaid import iSAIDDataset +from .isprs import ISPRSDataset +from .lip import LIPDataset +from .loveda import LoveDADataset +from .night_driving import NightDrivingDataset +from .pascal_context import PascalContextDataset, PascalContextDataset59 +from .potsdam import PotsdamDataset +from .stare import STAREDataset +from .synapse import SynapseDataset +from .transforms import (CLAHE, AdjustGamma, GenerateEdge, LoadAnnotations, + LoadBiomedicalAnnotation, LoadBiomedicalData, + LoadBiomedicalImageFromFile, LoadImageFromNDArray, + PackSegInputs, PhotoMetricDistortion, RandomCrop, + RandomCutOut, RandomMosaic, RandomRotate, + RandomRotFlip, Rerange, ResizeShortestEdge, + ResizeToMultiple, RGB2Gray, SegRescale) from .voc import PascalVOCDataset __all__ = [ - 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', - 'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset', - 'PascalVOCDataset', 'ADE20KDataset' + 'BaseSegDataset', 'CityscapesDataset', 'PascalVOCDataset', 'ADE20KDataset', + 'PascalContextDataset', 'PascalContextDataset59', 'ChaseDB1Dataset', + 'DRIVEDataset', 'HRFDataset', 'STAREDataset', 'DarkZurichDataset', + 'NightDrivingDataset', 'COCOStuffDataset', 'LoveDADataset', + 'MultiImageMixDataset', 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset', + 'LoadAnnotations', 'RandomCrop', 'SegRescale', 'PhotoMetricDistortion', + 'RandomRotate', 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray', + 'RandomCutOut', 'RandomMosaic', 'PackSegInputs', 'ResizeToMultiple', + 'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile', + 'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge', + 'DecathlonDataset', 'LIPDataset', 'ResizeShortestEdge', 'RandomRotFlip', + 'SynapseDataset' ] diff --git a/mmseg/datasets/ade.py b/mmseg/datasets/ade.py index 5913e43775..e9bdae7421 100644 --- a/mmseg/datasets/ade.py +++ b/mmseg/datasets/ade.py @@ -1,9 +1,10 @@ -from .builder import DATASETS -from .custom import CustomDataset +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset @DATASETS.register_module() -class ADE20KDataset(CustomDataset): +class ADE20KDataset(BaseSegDataset): """ADE20K dataset. In segmentation map annotation for ADE20K, 0 stands for background, which @@ -11,74 +12,81 @@ class ADE20KDataset(CustomDataset): The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to '.png'. 
""" - CLASSES = ( - 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', - 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', - 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', - 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', - 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', - 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', - 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', - 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', - 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', - 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', - 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', - 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', - 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', - 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', - 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', - 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', - 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', - 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', - 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', - 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', - 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', - 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', - 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', - 'clock', 'flag') + METAINFO = dict( + classes=('wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', + 'bed ', 'windowpane', 'grass', 'cabinet', 'sidewalk', + 'person', 'earth', 'door', 'table', 'mountain', 'plant', + 'curtain', 'chair', 'car', 'water', 'painting', 'sofa', + 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', + 'seat', 'fence', 'desk', 'rock', 'wardrobe', 'lamp', + 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', + 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', + 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', + 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', + 'screen door', 'stairway', 'river', 'bridge', 'bookcase', + 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', + 'bench', 'countertop', 'stove', 'palm', 'kitchen island', + 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', + 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', + 'chandelier', 'awning', 'streetlight', 'booth', + 'television receiver', 'airplane', 'dirt track', 'apparel', + 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle', + 'buffet', 'poster', 'stage', 'van', 'ship', 'fountain', + 'conveyer belt', 'canopy', 'washer', 'plaything', + 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', + 'tent', 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', + 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', + 'bicycle', 'lake', 'dishwasher', 'screen', 'blanket', + 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', + 'tray', 'ashcan', 'fan', 'pier', 'crt screen', 'plate', + 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', + 'clock', 'flag'), + palette=[[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], + [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], + 
[143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], + [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], + [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], + [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], + [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], + [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], + [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], + [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], + [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], + [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], + [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], + [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], + [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], + [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], + [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], + [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], + [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], + [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], + [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], + [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], + [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], + [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], + [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], + [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], + [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], + [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], + [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], + [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], + [102, 255, 0], [92, 0, 255]]) - PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], - [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], - [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], - [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], - [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], - [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], - [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], - [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], - [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], - [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], - [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], - [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], - [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], - [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], - [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], - [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], - [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], - [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], - [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], - [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], - [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], - [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], - [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], - [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], - [0, 255, 163], [255, 
153, 0], [0, 255, 10], [255, 112, 0], - [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], - [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], - [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], - [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], - [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], - [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], - [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], - [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], - [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], - [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], - [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], - [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], - [102, 255, 0], [92, 0, 255]] - - def __init__(self, **kwargs): - super(ADE20KDataset, self).__init__( - img_suffix='.jpg', - seg_map_suffix='.png', - reduce_zero_label=True, + def __init__(self, + img_suffix='.jpg', + seg_map_suffix='.png', + reduce_zero_label=True, + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, + seg_map_suffix=seg_map_suffix, + reduce_zero_label=reduce_zero_label, **kwargs) diff --git a/mmseg/datasets/basesegdataset.py b/mmseg/datasets/basesegdataset.py new file mode 100644 index 0000000000..e97f8ca9d1 --- /dev/null +++ b/mmseg/datasets/basesegdataset.py @@ -0,0 +1,269 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +from typing import Callable, Dict, List, Optional, Sequence, Union + +import mmengine +import numpy as np +from mmengine.dataset import BaseDataset, Compose + +from mmseg.registry import DATASETS + + +@DATASETS.register_module() +class BaseSegDataset(BaseDataset): + """Custom dataset for semantic segmentation. An example of file structure + is as followed. + + .. code-block:: none + + ├── data + │ ├── my_dataset + │ │ ├── img_dir + │ │ │ ├── train + │ │ │ │ ├── xxx{img_suffix} + │ │ │ │ ├── yyy{img_suffix} + │ │ │ │ ├── zzz{img_suffix} + │ │ │ ├── val + │ │ ├── ann_dir + │ │ │ ├── train + │ │ │ │ ├── xxx{seg_map_suffix} + │ │ │ │ ├── yyy{seg_map_suffix} + │ │ │ │ ├── zzz{seg_map_suffix} + │ │ │ ├── val + + The img/gt_semantic_seg pair of BaseSegDataset should be of the same + except suffix. A valid img/gt_semantic_seg filename pair should be like + ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included + in the suffix). If split is given, then ``xxx`` is specified in txt file. + Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded. + Please refer to ``docs/en/tutorials/new_dataset.md`` for more details. + + + Args: + ann_file (str): Annotation file path. Defaults to ''. + metainfo (dict, optional): Meta information for dataset, such as + specify classes to load. Defaults to None. + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Defaults to None. + data_prefix (dict, optional): Prefix for training data. Defaults to + dict(img_path=None, seg_path=None). + img_suffix (str): Suffix of images. Default: '.jpg' + seg_map_suffix (str): Suffix of segmentation maps. Default: '.png' + filter_cfg (dict, optional): Config for filter data. Defaults to None. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Defaults to None which means using all ``data_infos``. 
+ serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. Defaults + to True. + pipeline (list, optional): Processing pipeline. Defaults to []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Defaults to False. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=True``. Defaults to False. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Defaults to 1000. + ignore_index (int): The label index to be ignored. Default: 255 + reduce_zero_label (bool): Whether to mark label zero as ignored. + Default to False. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmengine.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + METAINFO: dict = dict() + + def __init__( + self, + ann_file: str = '', + img_suffix='.jpg', + seg_map_suffix='.png', + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: dict = dict(img_path='', seg_map_path=''), + filter_cfg: Optional[dict] = None, + indices: Optional[Union[int, Sequence[int]]] = None, + serialize_data: bool = True, + pipeline: List[Union[dict, Callable]] = [], + test_mode: bool = False, + lazy_init: bool = False, + max_refetch: int = 1000, + ignore_index: int = 255, + reduce_zero_label: bool = False, + file_client_args: dict = dict(backend='disk') + ) -> None: + + self.img_suffix = img_suffix + self.seg_map_suffix = seg_map_suffix + self.ignore_index = ignore_index + self.reduce_zero_label = reduce_zero_label + self.file_client_args = file_client_args + self.file_client = mmengine.FileClient.infer_client( + self.file_client_args) + + self.data_root = data_root + self.data_prefix = copy.copy(data_prefix) + self.ann_file = ann_file + self.filter_cfg = copy.deepcopy(filter_cfg) + self._indices = indices + self.serialize_data = serialize_data + self.test_mode = test_mode + self.max_refetch = max_refetch + self.data_list: List[dict] = [] + self.data_bytes: np.ndarray + + # Set meta information. + self._metainfo = self._load_metainfo(copy.deepcopy(metainfo)) + + # Get label map for custom classes + new_classes = self._metainfo.get('classes', None) + self.label_map = self.get_label_map(new_classes) + self._metainfo.update( + dict( + label_map=self.label_map, + reduce_zero_label=self.reduce_zero_label)) + + # Update palette based on label map or generate palette + # if it is not defined + updated_palette = self._update_palette() + self._metainfo.update(dict(palette=updated_palette)) + + # Join paths. + if self.data_root is not None: + self._join_prefix() + + # Build pipeline. + self.pipeline = Compose(pipeline) + # Full initialize the dataset. + if not lazy_init: + self.full_init() + + if test_mode: + assert self._metainfo.get('classes') is not None, \ + 'dataset metainfo `classes` should be specified when testing' + + @classmethod + def get_label_map(cls, + new_classes: Optional[Sequence] = None + ) -> Union[Dict, None]: + """Require label mapping. 
+ + The ``label_map`` is a dictionary, its keys are the old label ids and + its values are the new label ids, and is used for changing pixel + labels in load_annotations. If and only if old classes in cls.METAINFO + is not equal to new classes in self._metainfo and neither of them is + None, `label_map` is not None. + + Args: + new_classes (list, tuple, optional): The new class names from + metainfo. Defaults to None. + + + Returns: + dict, optional: The mapping from old classes in cls.METAINFO to + new classes in self._metainfo + """ + old_classes = cls.METAINFO.get('classes', None) + if (new_classes is not None and old_classes is not None + and list(new_classes) != list(old_classes)): + + label_map = {} + if not set(new_classes).issubset(cls.METAINFO['classes']): + raise ValueError( + f'new classes {new_classes} is not a ' + f'subset of classes {old_classes} in METAINFO.') + for i, c in enumerate(old_classes): + if c not in new_classes: + label_map[i] = 255 + else: + label_map[i] = new_classes.index(c) + return label_map + else: + return None + + def _update_palette(self) -> list: + """Update palette after loading metainfo. + + If the length of palette is equal to classes, just return the palette. + If palette is not defined, it will randomly generate a palette. + If classes is updated by the user, it will return the subset of + palette. + + Returns: + Sequence: Palette for current dataset. + """ + palette = self._metainfo.get('palette', []) + classes = self._metainfo.get('classes', []) + # palette matches classes + if len(palette) == len(classes): + return palette + + if len(palette) == 0: + # Get random state before setting seed, and restore + # random state later. + # It will prevent loss of randomness, as the palette + # may be different in each iteration if not specified. + # See: https://github.com/open-mmlab/mmdetection/issues/5844 + state = np.random.get_state() + np.random.seed(42) + # random palette + new_palette = np.random.randint( + 0, 255, size=(len(classes), 3)).tolist() + np.random.set_state(state) + elif len(palette) >= len(classes) and self.label_map is not None: + new_palette = [] + # return subset of palette + for old_id, new_id in sorted( + self.label_map.items(), key=lambda x: x[1]): + if new_id != -1: + new_palette.append(palette[old_id]) + new_palette = type(palette)(new_palette) + else: + raise ValueError('palette does not match classes ' + f'as metainfo is {self._metainfo}.') + return new_palette + + def load_data_list(self) -> List[dict]: + """Load annotation from directory or annotation file. + + Returns: + list[dict]: All data info of dataset.
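(A quick illustration of what ``get_label_map`` produces when a caller restricts a dataset to a subset of its classes; the class tuples below are truncated and purely hypothetical, not taken from this diff.)

# Suppose METAINFO declares these classes (truncated for brevity) ...
old_classes = ('road', 'sidewalk', 'building', 'car')
# ... and the dataset is instantiated with metainfo=dict(classes=('road', 'car')).
new_classes = ('road', 'car')
label_map = {
    i: (new_classes.index(c) if c in new_classes else 255)
    for i, c in enumerate(old_classes)
}
print(label_map)  # {0: 0, 1: 255, 2: 255, 3: 1}

The loading transforms read this mapping from each ``data_info`` entry produced by ``load_data_list``, so pixels of the dropped classes are remapped to 255 and ignored via ``ignore_index`` during training and evaluation.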
+ """ + data_list = [] + img_dir = self.data_prefix.get('img_path', None) + ann_dir = self.data_prefix.get('seg_map_path', None) + if osp.isfile(self.ann_file): + lines = mmengine.list_from_file( + self.ann_file, file_client_args=self.file_client_args) + for line in lines: + img_name = line.strip() + data_info = dict( + img_path=osp.join(img_dir, img_name + self.img_suffix)) + if ann_dir is not None: + seg_map = img_name + self.seg_map_suffix + data_info['seg_map_path'] = osp.join(ann_dir, seg_map) + data_info['label_map'] = self.label_map + data_info['reduce_zero_label'] = self.reduce_zero_label + data_info['seg_fields'] = [] + data_list.append(data_info) + else: + for img in self.file_client.list_dir_or_file( + dir_path=img_dir, + list_dir=False, + suffix=self.img_suffix, + recursive=True): + data_info = dict(img_path=osp.join(img_dir, img)) + if ann_dir is not None: + seg_map = img.replace(self.img_suffix, self.seg_map_suffix) + data_info['seg_map_path'] = osp.join(ann_dir, seg_map) + data_info['label_map'] = self.label_map + data_info['reduce_zero_label'] = self.reduce_zero_label + data_info['seg_fields'] = [] + data_list.append(data_info) + data_list = sorted(data_list, key=lambda x: x['img_path']) + return data_list diff --git a/mmseg/datasets/builder.py b/mmseg/datasets/builder.py deleted file mode 100644 index f7a9926111..0000000000 --- a/mmseg/datasets/builder.py +++ /dev/null @@ -1,169 +0,0 @@ -import copy -import platform -import random -from functools import partial - -import numpy as np -from mmcv.parallel import collate -from mmcv.runner import get_dist_info -from mmcv.utils import Registry, build_from_cfg -from mmcv.utils.parrots_wrapper import DataLoader, PoolDataLoader -from torch.utils.data import DistributedSampler - -if platform.system() != 'Windows': - # https://github.com/pytorch/pytorch/issues/973 - import resource - rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) - hard_limit = rlimit[1] - soft_limit = min(4096, hard_limit) - resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) - -DATASETS = Registry('dataset') -PIPELINES = Registry('pipeline') - - -def _concat_dataset(cfg, default_args=None): - """Build :obj:`ConcatDataset by.""" - from .dataset_wrappers import ConcatDataset - img_dir = cfg['img_dir'] - ann_dir = cfg.get('ann_dir', None) - split = cfg.get('split', None) - num_img_dir = len(img_dir) if isinstance(img_dir, (list, tuple)) else 1 - if ann_dir is not None: - num_ann_dir = len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1 - else: - num_ann_dir = 0 - if split is not None: - num_split = len(split) if isinstance(split, (list, tuple)) else 1 - else: - num_split = 0 - if num_img_dir > 1: - assert num_img_dir == num_ann_dir or num_ann_dir == 0 - assert num_img_dir == num_split or num_split == 0 - else: - assert num_split == num_ann_dir or num_ann_dir <= 1 - num_dset = max(num_split, num_img_dir) - - datasets = [] - for i in range(num_dset): - data_cfg = copy.deepcopy(cfg) - if isinstance(img_dir, (list, tuple)): - data_cfg['img_dir'] = img_dir[i] - if isinstance(ann_dir, (list, tuple)): - data_cfg['ann_dir'] = ann_dir[i] - if isinstance(split, (list, tuple)): - data_cfg['split'] = split[i] - datasets.append(build_dataset(data_cfg, default_args)) - - return ConcatDataset(datasets) - - -def build_dataset(cfg, default_args=None): - """Build datasets.""" - from .dataset_wrappers import ConcatDataset, RepeatDataset - if isinstance(cfg, (list, tuple)): - dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) - elif 
cfg['type'] == 'RepeatDataset': - dataset = RepeatDataset( - build_dataset(cfg['dataset'], default_args), cfg['times']) - elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance( - cfg.get('split', None), (list, tuple)): - dataset = _concat_dataset(cfg, default_args) - else: - dataset = build_from_cfg(cfg, DATASETS, default_args) - - return dataset - - -def build_dataloader(dataset, - samples_per_gpu, - workers_per_gpu, - num_gpus=1, - dist=True, - shuffle=True, - seed=None, - drop_last=False, - pin_memory=True, - dataloader_type='PoolDataLoader', - **kwargs): - """Build PyTorch DataLoader. - - In distributed training, each GPU/process has a dataloader. - In non-distributed training, there is only one dataloader for all GPUs. - - Args: - dataset (Dataset): A PyTorch dataset. - samples_per_gpu (int): Number of training samples on each GPU, i.e., - batch size of each GPU. - workers_per_gpu (int): How many subprocesses to use for data loading - for each GPU. - num_gpus (int): Number of GPUs. Only used in non-distributed training. - dist (bool): Distributed training/test or not. Default: True. - shuffle (bool): Whether to shuffle the data at every epoch. - Default: True. - seed (int | None): Seed to be used. Default: None. - drop_last (bool): Whether to drop the last incomplete batch in epoch. - Default: False - pin_memory (bool): Whether to use pin_memory in DataLoader. - Default: True - dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader' - kwargs: any keyword argument to be used to initialize DataLoader - - Returns: - DataLoader: A PyTorch dataloader. - """ - rank, world_size = get_dist_info() - if dist: - sampler = DistributedSampler( - dataset, world_size, rank, shuffle=shuffle) - shuffle = False - batch_size = samples_per_gpu - num_workers = workers_per_gpu - else: - sampler = None - batch_size = num_gpus * samples_per_gpu - num_workers = num_gpus * workers_per_gpu - - init_fn = partial( - worker_init_fn, num_workers=num_workers, rank=rank, - seed=seed) if seed is not None else None - - assert dataloader_type in ( - 'DataLoader', - 'PoolDataLoader'), f'unsupported dataloader {dataloader_type}' - - if dataloader_type == 'PoolDataLoader': - dataloader = PoolDataLoader - elif dataloader_type == 'DataLoader': - dataloader = DataLoader - - data_loader = dataloader( - dataset, - batch_size=batch_size, - sampler=sampler, - num_workers=num_workers, - collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), - pin_memory=pin_memory, - shuffle=shuffle, - worker_init_fn=init_fn, - drop_last=drop_last, - **kwargs) - - return data_loader - - -def worker_init_fn(worker_id, num_workers, rank, seed): - """Worker init func for dataloader. - - The seed of each worker equals to num_worker * rank + worker_id + user_seed - - Args: - worker_id (int): Worker id. - num_workers (int): Number of workers. - rank (int): The rank of current process. - seed (int): The random seed to use. - """ - - worker_seed = num_workers * rank + worker_id + seed - np.random.seed(worker_seed) - random.seed(worker_seed) diff --git a/mmseg/datasets/chase_db1.py b/mmseg/datasets/chase_db1.py new file mode 100644 index 0000000000..5cc1fc5677 --- /dev/null +++ b/mmseg/datasets/chase_db1.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class ChaseDB1Dataset(BaseSegDataset): + """Chase_db1 dataset. 
+ + In segmentation map annotation for Chase_db1, 0 stands for background, + which is included in 2 categories. ``reduce_zero_label`` is fixed to False. + The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '_1stHO.png'. + """ + METAINFO = dict( + classes=('background', 'vessel'), + palette=[[120, 120, 120], [6, 230, 230]]) + + def __init__(self, + img_suffix='.png', + seg_map_suffix='_1stHO.png', + reduce_zero_label=False, + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, + seg_map_suffix=seg_map_suffix, + reduce_zero_label=reduce_zero_label, + **kwargs) + assert self.file_client.exists(self.data_prefix['img_path']) diff --git a/mmseg/datasets/cityscapes.py b/mmseg/datasets/cityscapes.py index 9a12ab1724..f494d62424 100644 --- a/mmseg/datasets/cityscapes.py +++ b/mmseg/datasets/cityscapes.py @@ -1,213 +1,30 @@ -import os.path as osp -import tempfile - -import mmcv -import numpy as np -from mmcv.utils import print_log -from PIL import Image - -from .builder import DATASETS -from .custom import CustomDataset +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset @DATASETS.register_module() -class CityscapesDataset(CustomDataset): +class CityscapesDataset(BaseSegDataset): """Cityscapes dataset. The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset. """ - - CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', - 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', - 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', - 'bicycle') - - PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], - [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], - [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], - [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], - [0, 80, 100], [0, 0, 230], [119, 11, 32]] - - def __init__(self, **kwargs): - super(CityscapesDataset, self).__init__( - img_suffix='_leftImg8bit.png', - seg_map_suffix='_gtFine_labelTrainIds.png', - **kwargs) - - @staticmethod - def _convert_to_label_id(result): - """Convert trainId to id for cityscapes.""" - import cityscapesscripts.helpers.labels as CSLabels - result_copy = result.copy() - for trainId, label in CSLabels.trainId2label.items(): - result_copy[result == trainId] = label.id - - return result_copy - - def results2img(self, results, imgfile_prefix, to_label_id): - """Write the segmentation results to images. - - Args: - results (list[list | tuple | ndarray]): Testing results of the - dataset. - imgfile_prefix (str): The filename prefix of the png files. - If the prefix is "somepath/xxx", - the png files will be named "somepath/xxx.png". - to_label_id (bool): whether convert output to label_id for - submission - - Returns: - list[str: str]: result txt files which contains corresponding - semantic segmentation images. 
- """ - result_files = [] - prog_bar = mmcv.ProgressBar(len(self)) - for idx in range(len(self)): - result = results[idx] - if to_label_id: - result = self._convert_to_label_id(result) - filename = self.img_infos[idx]['filename'] - basename = osp.splitext(osp.basename(filename))[0] - - png_filename = osp.join(imgfile_prefix, f'{basename}.png') - - output = Image.fromarray(result.astype(np.uint8)).convert('P') - import cityscapesscripts.helpers.labels as CSLabels - palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8) - for label_id, label in CSLabels.id2label.items(): - palette[label_id] = label.color - - output.putpalette(palette) - output.save(png_filename) - result_files.append(png_filename) - prog_bar.update() - - return result_files - - def format_results(self, results, imgfile_prefix=None, to_label_id=True): - """Format the results into dir (standard format for Cityscapes - evaluation). - - Args: - results (list): Testing results of the dataset. - imgfile_prefix (str | None): The prefix of images files. It - includes the file path and the prefix of filename, e.g., - "a/b/prefix". If not specified, a temp file will be created. - Default: None. - to_label_id (bool): whether convert output to label_id for - submission. Default: False - - Returns: - tuple: (result_files, tmp_dir), result_files is a list containing - the image paths, tmp_dir is the temporal directory created - for saving json/png files when img_prefix is not specified. - """ - - assert isinstance(results, list), 'results must be a list' - assert len(results) == len(self), ( - 'The length of results is not equal to the dataset len: ' - f'{len(results)} != {len(self)}') - - if imgfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - imgfile_prefix = tmp_dir.name - else: - tmp_dir = None - result_files = self.results2img(results, imgfile_prefix, to_label_id) - - return result_files, tmp_dir - - def evaluate(self, - results, - metric='mIoU', - logger=None, - imgfile_prefix=None): - """Evaluation in Cityscapes/default protocol. - - Args: - results (list): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. - logger (logging.Logger | None | str): Logger used for printing - related information during evaluation. Default: None. - imgfile_prefix (str | None): The prefix of output image file, - for cityscapes evaluation only. It includes the file path and - the prefix of filename, e.g., "a/b/prefix". - If results are evaluated with cityscapes protocol, it would be - the prefix of output png files. The output files would be - png images under folder "a/b/prefix/xxx/", where "xxx" is the - video name of cityscapes. If not specified, a temp file will - be created. - Default: None. - - Returns: - dict[str, float]: Cityscapes/default metrics. - """ - - eval_results = dict() - metrics = metric.copy() if isinstance(metric, list) else [metric] - if 'cityscapes' in metrics: - eval_results.update( - self._evaluate_cityscapes(results, logger, imgfile_prefix)) - metrics.remove('cityscapes') - if len(metrics) > 0: - eval_results.update( - super(CityscapesDataset, - self).evaluate(results, metrics, logger)) - - return eval_results - - def _evaluate_cityscapes(self, results, logger, imgfile_prefix): - """Evaluation in Cityscapes protocol. - - Args: - results (list): Testing results of the dataset. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. 
- imgfile_prefix (str | None): The prefix of output image file - - Returns: - dict[str: float]: Cityscapes evaluation results. - """ - try: - import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval # noqa - except ImportError: - raise ImportError('Please run "pip install citscapesscripts" to ' - 'install cityscapesscripts first.') - msg = 'Evaluating in Cityscapes style' - if logger is None: - msg = '\n' + msg - print_log(msg, logger=logger) - - result_files, tmp_dir = self.format_results(results, imgfile_prefix) - - if tmp_dir is None: - result_dir = imgfile_prefix - else: - result_dir = tmp_dir.name - - eval_results = dict() - print_log(f'Evaluating results under {result_dir} ...', logger=logger) - - CSEval.args.evalInstLevelScore = True - CSEval.args.predictionPath = osp.abspath(result_dir) - CSEval.args.evalPixelAccuracy = True - CSEval.args.JSONOutput = False - - seg_map_list = [] - pred_list = [] - - # when evaluating with official cityscapesscripts, - # **_gtFine_labelIds.png is used - for seg_map in mmcv.scandir( - self.ann_dir, 'gtFine_labelIds.png', recursive=True): - seg_map_list.append(osp.join(self.ann_dir, seg_map)) - pred_list.append(CSEval.getPrediction(CSEval.args, seg_map)) - - eval_results.update( - CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args)) - - if tmp_dir is not None: - tmp_dir.cleanup() - - return eval_results + METAINFO = dict( + classes=('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', + 'traffic light', 'traffic sign', 'vegetation', 'terrain', + 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', + 'motorcycle', 'bicycle'), + palette=[[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], + [190, 153, 153], [153, 153, 153], [250, 170, + 30], [220, 220, 0], + [107, 142, 35], [152, 251, 152], [70, 130, 180], + [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], + [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]]) + + def __init__(self, + img_suffix='_leftImg8bit.png', + seg_map_suffix='_gtFine_labelTrainIds.png', + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs) diff --git a/mmseg/datasets/coco_stuff.py b/mmseg/datasets/coco_stuff.py new file mode 100644 index 0000000000..1e1574d970 --- /dev/null +++ b/mmseg/datasets/coco_stuff.py @@ -0,0 +1,99 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class COCOStuffDataset(BaseSegDataset): + """COCO-Stuff dataset. + + In segmentation map annotation for COCO-Stuff, Train-IDs of the 10k version + are from 1 to 171, where 0 is the ignore index, and Train-ID of COCO Stuff + 164k is from 0 to 170, where 255 is the ignore index. So, they are all 171 + semantic categories. ``reduce_zero_label`` is set to True and False for the + 10k and 164k versions, respectively. The ``img_suffix`` is fixed to '.jpg', + and ``seg_map_suffix`` is fixed to '.png'. 
+ """ + METAINFO = dict( + classes=( + 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', + 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', + 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', + 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', + 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', + 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', + 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', + 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', + 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', + 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', + 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', + 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner', + 'blanket', 'branch', 'bridge', 'building-other', 'bush', 'cabinet', + 'cage', 'cardboard', 'carpet', 'ceiling-other', 'ceiling-tile', + 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain', + 'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble', + 'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', 'flower', + 'fog', 'food-other', 'fruit', 'furniture-other', 'grass', 'gravel', + 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal', + 'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', + 'paper', 'pavement', 'pillow', 'plant-other', 'plastic', + 'platform', 'playingfield', 'railing', 'railroad', 'river', 'road', + 'rock', 'roof', 'rug', 'salad', 'sand', 'sea', 'shelf', + 'sky-other', 'skyscraper', 'snow', 'solid-other', 'stairs', + 'stone', 'straw', 'structural-other', 'table', 'tent', + 'textile-other', 'towel', 'tree', 'vegetable', 'wall-brick', + 'wall-concrete', 'wall-other', 'wall-panel', 'wall-stone', + 'wall-tile', 'wall-wood', 'water-other', 'waterdrops', + 'window-blind', 'window-other', 'wood'), + palette=[[0, 192, 64], [0, 192, 64], [0, 64, 96], [128, 192, 192], + [0, 64, 64], [0, 192, 224], [0, 192, 192], [128, 192, 64], + [0, 192, 96], [128, 192, 64], [128, 32, 192], [0, 0, 224], + [0, 0, 64], [0, 160, 192], [128, 0, 96], [128, 0, 192], + [0, 32, 192], [128, 128, 224], [0, 0, 192], [128, 160, 192], + [128, 128, 0], [128, 0, 32], [128, 32, 0], [128, 0, 128], + [64, 128, 32], [0, 160, 0], [0, 0, 0], [192, 128, 160], + [0, 32, 0], [0, 128, 128], [64, 128, 160], [128, 160, 0], + [0, 128, 0], [192, 128, 32], [128, 96, 128], [0, 0, 128], + [64, 0, 32], [0, 224, 128], [128, 0, 0], [192, 0, 160], + [0, 96, 128], [128, 128, 128], [64, 0, 160], [128, 224, 128], + [128, 128, 64], [192, 0, 32], [128, 96, 0], [128, 0, 192], + [0, 128, 32], [64, 224, 0], [0, 0, 64], [128, 128, 160], + [64, 96, 0], [0, 128, 192], [0, 128, 160], [192, 224, 0], + [0, 128, 64], [128, 128, 32], [192, 32, 128], [0, 64, 192], + [0, 0, 32], [64, 160, 128], [128, 64, 64], [128, 0, 160], + [64, 32, 128], [128, 192, 192], [0, 0, 160], [192, 160, 128], + [128, 192, 0], [128, 0, 96], [192, 32, 0], [128, 64, 128], + [64, 128, 96], [64, 160, 0], [0, 64, 0], [192, 128, 224], + [64, 32, 0], [0, 192, 128], [64, 128, 224], [192, 160, 0], + [0, 192, 0], [192, 128, 96], [192, 96, 128], [0, 64, 128], + [64, 0, 96], [64, 224, 128], [128, 64, 0], [192, 0, 224], + [64, 96, 128], [128, 192, 128], [64, 0, 224], [192, 224, 128], + [128, 192, 64], [192, 0, 96], [192, 96, 0], [128, 64, 192], + [0, 128, 96], [0, 224, 0], [64, 64, 64], [128, 128, 224], + [0, 96, 0], [64, 192, 
192], [0, 128, 224], [128, 224, 0], + [64, 192, 64], [128, 128, 96], [128, 32, 128], [64, 0, 192], + [0, 64, 96], [0, 160, 128], [192, 0, 64], [128, 64, 224], + [0, 32, 128], [192, 128, 192], [0, 64, 224], [128, 160, 128], + [192, 128, 0], [128, 64, 32], [128, 32, 64], [192, 0, 128], + [64, 192, 32], [0, 160, 64], [64, 0, 0], [192, 192, 160], + [0, 32, 64], [64, 128, 128], [64, 192, 160], [128, 160, 64], + [64, 128, 0], [192, 192, 32], [128, 96, 192], [64, 0, 128], + [64, 64, 32], [0, 224, 192], [192, 0, 0], [192, 64, 160], + [0, 96, 192], [192, 128, 128], [64, 64, 160], [128, 224, 192], + [192, 128, 64], [192, 64, 32], [128, 96, 64], [192, 0, 192], + [0, 192, 32], [64, 224, 64], [64, 0, 64], [128, 192, 160], + [64, 96, 64], [64, 128, 192], [0, 192, 160], [192, 224, 64], + [64, 128, 64], [128, 192, 32], [192, 32, 192], [64, 64, 192], + [0, 64, 32], [64, 160, 192], [192, 64, 64], [128, 64, 160], + [64, 32, 192], [192, 192, 192], [0, 64, 160], [192, 160, 192], + [192, 192, 0], [128, 64, 96], [192, 32, 64], [192, 64, 128], + [64, 192, 96], [64, 160, 64], [64, 64, 0]]) + + def __init__(self, + img_suffix='.jpg', + seg_map_suffix='_labelTrainIds.png', + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs) diff --git a/mmseg/datasets/custom.py b/mmseg/datasets/custom.py deleted file mode 100644 index 92d17c5252..0000000000 --- a/mmseg/datasets/custom.py +++ /dev/null @@ -1,291 +0,0 @@ -import os.path as osp -from functools import reduce - -import mmcv -import numpy as np -from mmcv.utils import print_log -from torch.utils.data import Dataset - -from mmseg.core import mean_iou -from mmseg.utils import get_root_logger -from .builder import DATASETS -from .pipelines import Compose - - -@DATASETS.register_module() -class CustomDataset(Dataset): - """Custom dataset for semantic segmentation. - - An example of file structure is as followed. - - .. code-block:: none - - ├── data - │ ├── my_dataset - │ │ ├── img_dir - │ │ │ ├── train - │ │ │ │ ├── xxx{img_suffix} - │ │ │ │ ├── yyy{img_suffix} - │ │ │ │ ├── zzz{img_suffix} - │ │ │ ├── val - │ │ ├── ann_dir - │ │ │ ├── train - │ │ │ │ ├── xxx{seg_map_suffix} - │ │ │ │ ├── yyy{seg_map_suffix} - │ │ │ │ ├── zzz{seg_map_suffix} - │ │ │ ├── val - - The img/gt_semantic_seg pair of CustomDataset should be of the same - except suffix. A valid img/gt_semantic_seg filename pair should be like - ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included - in the suffix). If split is given, then ``xxx`` is specified in txt file. - Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded. - Please refer to ``docs/tutorials/new_dataset.md`` for more details. - - - Args: - pipeline (list[dict]): Processing pipeline - img_dir (str): Path to image directory - img_suffix (str): Suffix of images. Default: '.jpg' - ann_dir (str, optional): Path to annotation directory. Default: None - seg_map_suffix (str): Suffix of segmentation maps. Default: '.png' - split (str, optional): Split txt file. If split is specified, only - file with suffix in the splits will be loaded. Otherwise, all - images in img_dir/ann_dir will be loaded. Default: None - data_root (str, optional): Data root for img_dir/ann_dir. Default: - None. - test_mode (bool): If test_mode=True, gt wouldn't be loaded. - ignore_index (int): The label index to be ignored. Default: 255 - reduce_zero_label (bool): Whether to mark label zero as ignored. 
- Default: False - """ - - CLASSES = None - - PALETTE = None - - def __init__(self, - pipeline, - img_dir, - img_suffix='.jpg', - ann_dir=None, - seg_map_suffix='.png', - split=None, - data_root=None, - test_mode=False, - ignore_index=255, - reduce_zero_label=False): - self.pipeline = Compose(pipeline) - self.img_dir = img_dir - self.img_suffix = img_suffix - self.ann_dir = ann_dir - self.seg_map_suffix = seg_map_suffix - self.split = split - self.data_root = data_root - self.test_mode = test_mode - self.ignore_index = ignore_index - self.reduce_zero_label = reduce_zero_label - - # join paths if data_root is specified - if self.data_root is not None: - if not osp.isabs(self.img_dir): - self.img_dir = osp.join(self.data_root, self.img_dir) - if not (self.ann_dir is None or osp.isabs(self.ann_dir)): - self.ann_dir = osp.join(self.data_root, self.ann_dir) - if not (self.split is None or osp.isabs(self.split)): - self.split = osp.join(self.data_root, self.split) - - # load annotations - self.img_infos = self.load_annotations(self.img_dir, self.img_suffix, - self.ann_dir, - self.seg_map_suffix, self.split) - - def __len__(self): - """Total number of samples of data.""" - return len(self.img_infos) - - def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix, - split): - """Load annotation from directory. - - Args: - img_dir (str): Path to image directory - img_suffix (str): Suffix of images. - ann_dir (str|None): Path to annotation directory. - seg_map_suffix (str|None): Suffix of segmentation maps. - split (str|None): Split txt file. If split is specified, only file - with suffix in the splits will be loaded. Otherwise, all images - in img_dir/ann_dir will be loaded. Default: None - - Returns: - list[dict]: All image info of dataset. - """ - - img_infos = [] - if split is not None: - with open(split) as f: - for line in f: - img_name = line.strip() - img_file = osp.join(img_dir, img_name + img_suffix) - img_info = dict(filename=img_file) - if ann_dir is not None: - seg_map = osp.join(ann_dir, img_name + seg_map_suffix) - img_info['ann'] = dict(seg_map=seg_map) - img_infos.append(img_info) - else: - for img in mmcv.scandir(img_dir, img_suffix, recursive=True): - img_file = osp.join(img_dir, img) - img_info = dict(filename=img_file) - if ann_dir is not None: - seg_map = osp.join(ann_dir, - img.replace(img_suffix, seg_map_suffix)) - img_info['ann'] = dict(seg_map=seg_map) - img_infos.append(img_info) - - print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger()) - return img_infos - - def get_ann_info(self, idx): - """Get annotation by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - - return self.img_infos[idx]['ann'] - - def pre_pipeline(self, results): - """Prepare results dict for pipeline.""" - results['seg_fields'] = [] - - def __getitem__(self, idx): - """Get training/test data after pipeline. - - Args: - idx (int): Index of data. - - Returns: - dict: Training/test data (with annotation if `test_mode` is set - False). - """ - - if self.test_mode: - return self.prepare_test_img(idx) - else: - return self.prepare_train_img(idx) - - def prepare_train_img(self, idx): - """Get training data and annotations after pipeline. - - Args: - idx (int): Index of data. - - Returns: - dict: Training data and annotation after pipeline with new keys - introduced by pipeline. 
- """ - - img_info = self.img_infos[idx] - ann_info = self.get_ann_info(idx) - results = dict(img_info=img_info, ann_info=ann_info) - self.pre_pipeline(results) - return self.pipeline(results) - - def prepare_test_img(self, idx): - """Get testing data after pipeline. - - Args: - idx (int): Index of data. - - Returns: - dict: Testing data after pipeline with new keys intorduced by - piepline. - """ - - img_info = self.img_infos[idx] - results = dict(img_info=img_info) - self.pre_pipeline(results) - return self.pipeline(results) - - def format_results(self, results, **kwargs): - """Place holder to format result to dataset specific output.""" - pass - - def get_gt_seg_maps(self): - """Get ground truth segmentation maps for evaluation.""" - gt_seg_maps = [] - for img_info in self.img_infos: - gt_seg_map = mmcv.imread( - img_info['ann']['seg_map'], flag='unchanged', backend='pillow') - if self.reduce_zero_label: - # avoid using underflow conversion - gt_seg_map[gt_seg_map == 0] = 255 - gt_seg_map = gt_seg_map - 1 - gt_seg_map[gt_seg_map == 254] = 255 - - gt_seg_maps.append(gt_seg_map) - - return gt_seg_maps - - def evaluate(self, results, metric='mIoU', logger=None, **kwargs): - """Evaluate the dataset. - - Args: - results (list): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. - logger (logging.Logger | None | str): Logger used for printing - related information during evaluation. Default: None. - - Returns: - dict[str, float]: Default metrics. - """ - - if not isinstance(metric, str): - assert len(metric) == 1 - metric = metric[0] - allowed_metrics = ['mIoU'] - if metric not in allowed_metrics: - raise KeyError('metric {} is not supported'.format(metric)) - - eval_results = {} - gt_seg_maps = self.get_gt_seg_maps() - if self.CLASSES is None: - num_classes = len( - reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps])) - else: - num_classes = len(self.CLASSES) - - all_acc, acc, iou = mean_iou( - results, gt_seg_maps, num_classes, ignore_index=self.ignore_index) - summary_str = '' - summary_str += 'per class results:\n' - - line_format = '{:<15} {:>10} {:>10}\n' - summary_str += line_format.format('Class', 'IoU', 'Acc') - if self.CLASSES is None: - class_names = tuple(range(num_classes)) - else: - class_names = self.CLASSES - for i in range(num_classes): - iou_str = '{:.2f}'.format(iou[i] * 100) - acc_str = '{:.2f}'.format(acc[i] * 100) - summary_str += line_format.format(class_names[i], iou_str, acc_str) - summary_str += 'Summary:\n' - line_format = '{:<15} {:>10} {:>10} {:>10}\n' - summary_str += line_format.format('Scope', 'mIoU', 'mAcc', 'aAcc') - - iou_str = '{:.2f}'.format(np.nanmean(iou) * 100) - acc_str = '{:.2f}'.format(np.nanmean(acc) * 100) - all_acc_str = '{:.2f}'.format(all_acc * 100) - summary_str += line_format.format('global', iou_str, acc_str, - all_acc_str) - print_log(summary_str, logger) - - eval_results['mIoU'] = np.nanmean(iou) - eval_results['mAcc'] = np.nanmean(acc) - eval_results['aAcc'] = all_acc - - return eval_results diff --git a/mmseg/datasets/dark_zurich.py b/mmseg/datasets/dark_zurich.py new file mode 100644 index 0000000000..9b5393fa9e --- /dev/null +++ b/mmseg/datasets/dark_zurich.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmseg.registry import DATASETS +from .cityscapes import CityscapesDataset + + +@DATASETS.register_module() +class DarkZurichDataset(CityscapesDataset): + """DarkZurichDataset dataset.""" + + def __init__(self, + img_suffix='_rgb_anon.png', + seg_map_suffix='_gt_labelTrainIds.png', + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs) diff --git a/mmseg/datasets/dataset_wrappers.py b/mmseg/datasets/dataset_wrappers.py index d6a5e957ec..933eb50d99 100644 --- a/mmseg/datasets/dataset_wrappers.py +++ b/mmseg/datasets/dataset_wrappers.py @@ -1,50 +1,136 @@ -from torch.utils.data.dataset import ConcatDataset as _ConcatDataset +# Copyright (c) OpenMMLab. All rights reserved. +import collections +import copy +from typing import List, Optional, Sequence, Union -from .builder import DATASETS +from mmengine.dataset import ConcatDataset, force_full_init + +from mmseg.registry import DATASETS, TRANSFORMS @DATASETS.register_module() -class ConcatDataset(_ConcatDataset): - """A wrapper of concatenated dataset. +class MultiImageMixDataset: + """A wrapper of multiple images mixed dataset. - Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but - concat the group flag for image aspect ratio. + Suitable for training on multiple images mixed data augmentation like + mosaic and mixup. Args: - datasets (list[:obj:`Dataset`]): A list of datasets. + dataset (ConcatDataset or dict): The dataset to be mixed. + pipeline (Sequence[dict]): Sequence of transform object or + config dict to be composed. + skip_type_keys (list[str], optional): Sequence of type string to + be skip pipeline. Default to None. """ - def __init__(self, datasets): - super(ConcatDataset, self).__init__(datasets) - self.CLASSES = datasets[0].CLASSES - self.PALETTE = datasets[0].PALETTE + def __init__(self, + dataset: Union[ConcatDataset, dict], + pipeline: Sequence[dict], + skip_type_keys: Optional[List[str]] = None, + lazy_init: bool = False) -> None: + assert isinstance(pipeline, collections.abc.Sequence) + if isinstance(dataset, dict): + self.dataset = DATASETS.build(dataset) + elif isinstance(dataset, ConcatDataset): + self.dataset = dataset + else: + raise TypeError( + 'elements in datasets sequence should be config or ' + f'`ConcatDataset` instance, but got {type(dataset)}') -@DATASETS.register_module() -class RepeatDataset(object): - """A wrapper of repeated dataset. + if skip_type_keys is not None: + assert all([ + isinstance(skip_type_key, str) + for skip_type_key in skip_type_keys + ]) + self._skip_type_keys = skip_type_keys - The length of repeated dataset will be `times` larger than the original - dataset. This is useful when the data loading time is long but the dataset - is small. Using RepeatDataset can reduce the data loading time between - epochs. + self.pipeline = [] + self.pipeline_types = [] + for transform in pipeline: + if isinstance(transform, dict): + self.pipeline_types.append(transform['type']) + transform = TRANSFORMS.build(transform) + self.pipeline.append(transform) + else: + raise TypeError('pipeline must be a dict') - Args: - dataset (:obj:`Dataset`): The dataset to be repeated. - times (int): Repeat times. 
- """ + self._metainfo = self.dataset.metainfo + self.num_samples = len(self.dataset) + + self._fully_initialized = False + if not lazy_init: + self.full_init() - def __init__(self, dataset, times): - self.dataset = dataset - self.times = times - self.CLASSES = dataset.CLASSES - self.PALETTE = dataset.PALETTE + @property + def metainfo(self) -> dict: + """Get the meta information of the multi-image-mixed dataset. + + Returns: + dict: The meta information of multi-image-mixed dataset. + """ + return copy.deepcopy(self._metainfo) + + def full_init(self): + """Loop to ``full_init`` each dataset.""" + if self._fully_initialized: + return + + self.dataset.full_init() self._ori_len = len(self.dataset) + self._fully_initialized = True - def __getitem__(self, idx): - """Get item from original dataset.""" - return self.dataset[idx % self._ori_len] + @force_full_init + def get_data_info(self, idx: int) -> dict: + """Get annotation by index. + + Args: + idx (int): Global index of ``ConcatDataset``. + Returns: + dict: The idx-th annotation of the datasets. + """ + return self.dataset.get_data_info(idx) + + @force_full_init def __len__(self): - """The length is multiplied by ``times``""" - return self.times * self._ori_len + return self.num_samples + + def __getitem__(self, idx): + results = copy.deepcopy(self.dataset[idx]) + for (transform, transform_type) in zip(self.pipeline, + self.pipeline_types): + if self._skip_type_keys is not None and \ + transform_type in self._skip_type_keys: + continue + + if hasattr(transform, 'get_indices'): + indexes = transform.get_indices(self.dataset) + if not isinstance(indexes, collections.abc.Sequence): + indexes = [indexes] + mix_results = [ + copy.deepcopy(self.dataset[index]) for index in indexes + ] + results['mix_results'] = mix_results + + results = transform(results) + + if 'mix_results' in results: + results.pop('mix_results') + + return results + + def update_skip_type_keys(self, skip_type_keys): + """Update skip_type_keys. + + It is called by an external hook. + + Args: + skip_type_keys (list[str], optional): Sequence of type + string to be skip pipeline. + """ + assert all([ + isinstance(skip_type_key, str) for skip_type_key in skip_type_keys + ]) + self._skip_type_keys = skip_type_keys diff --git a/mmseg/datasets/decathlon.py b/mmseg/datasets/decathlon.py new file mode 100644 index 0000000000..26aa4ef0d7 --- /dev/null +++ b/mmseg/datasets/decathlon.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +from typing import List + +from mmengine.fileio import load + +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class DecathlonDataset(BaseSegDataset): + """Dataset for Dacathlon dataset. + + The dataset.json format is shown as follows + + .. code-block:: none + + { + "name": "BRATS", + "tensorImageSize": "4D", + "modality": + { + "0": "FLAIR", + "1": "T1w", + "2": "t1gd", + "3": "T2w" + }, + "labels": { + "0": "background", + "1": "edema", + "2": "non-enhancing tumor", + "3": "enhancing tumour" + }, + "numTraining": 484, + "numTest": 266, + "training": + [ + { + "image": "./imagesTr/BRATS_306.nii.gz" + "label": "./labelsTr/BRATS_306.nii.gz" + ... + } + ] + "test": + [ + "./imagesTs/BRATS_557.nii.gz" + ... + ] + } + """ + + def load_data_list(self) -> List[dict]: + """Load annotation from directory or annotation file. + + Returns: + list[dict]: All data info of dataset. 
+ """ + # `self.ann_file` denotes the absolute annotation file path if + # `self.root=None` or relative path if `self.root=/path/to/data/`. + annotations = load(self.ann_file) + if not isinstance(annotations, dict): + raise TypeError(f'The annotations loaded from annotation file ' + f'should be a dict, but got {type(annotations)}!') + raw_data_list = annotations[ + 'training'] if not self.test_mode else annotations['test'] + data_list = [] + for raw_data_info in raw_data_list: + # `2:` works for removing './' in file path, which will break + # loading from cloud storage. + if isinstance(raw_data_info, dict): + data_info = dict( + img_path=osp.join(self.data_root, raw_data_info['image'] + [2:])) + data_info['seg_map_path'] = osp.join( + self.data_root, raw_data_info['label'][2:]) + else: + data_info = dict( + img_path=osp.join(self.data_root, raw_data_info)[2:]) + data_info['label_map'] = self.label_map + data_info['reduce_zero_label'] = self.reduce_zero_label + data_info['seg_fields'] = [] + data_list.append(data_info) + annotations.pop('training') + annotations.pop('test') + + metainfo = copy.deepcopy(annotations) + metainfo['classes'] = [*metainfo['labels'].values()] + # Meta information load from annotation file will not influence the + # existed meta information load from `BaseDataset.METAINFO` and + # `metainfo` arguments defined in constructor. + for k, v in metainfo.items(): + self._metainfo.setdefault(k, v) + + return data_list diff --git a/mmseg/datasets/drive.py b/mmseg/datasets/drive.py new file mode 100644 index 0000000000..c42e18e711 --- /dev/null +++ b/mmseg/datasets/drive.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class DRIVEDataset(BaseSegDataset): + """DRIVE dataset. + + In segmentation map annotation for DRIVE, 0 stands for background, which is + included in 2 categories. ``reduce_zero_label`` is fixed to False. The + ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '_manual1.png'. + """ + METAINFO = dict( + classes=('background', 'vessel'), + palette=[[120, 120, 120], [6, 230, 230]]) + + def __init__(self, + img_suffix='.png', + seg_map_suffix='_manual1.png', + reduce_zero_label=False, + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, + seg_map_suffix=seg_map_suffix, + reduce_zero_label=reduce_zero_label, + **kwargs) + assert self.file_client.exists(self.data_prefix['img_path']) diff --git a/mmseg/datasets/hrf.py b/mmseg/datasets/hrf.py new file mode 100644 index 0000000000..0df6ccc49c --- /dev/null +++ b/mmseg/datasets/hrf.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class HRFDataset(BaseSegDataset): + """HRF dataset. + + In segmentation map annotation for HRF, 0 stands for background, which is + included in 2 categories. ``reduce_zero_label`` is fixed to False. The + ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '.png'. 
+ """ + METAINFO = dict( + classes=('background', 'vessel'), + palette=[[120, 120, 120], [6, 230, 230]]) + + def __init__(self, + img_suffix='.png', + seg_map_suffix='.png', + reduce_zero_label=False, + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, + seg_map_suffix=seg_map_suffix, + reduce_zero_label=reduce_zero_label, + **kwargs) + assert self.file_client.exists(self.data_prefix['img_path']) diff --git a/mmseg/datasets/isaid.py b/mmseg/datasets/isaid.py new file mode 100644 index 0000000000..d75cfcb7ea --- /dev/null +++ b/mmseg/datasets/isaid.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class iSAIDDataset(BaseSegDataset): + """ iSAID: A Large-scale Dataset for Instance Segmentation in Aerial Images + In segmentation map annotation for iSAID dataset, which is included + in 16 categories. ``reduce_zero_label`` is fixed to False. The + ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '_manual1.png'. + """ + + METAINFO = dict( + classes=('background', 'ship', 'store_tank', 'baseball_diamond', + 'tennis_court', 'basketball_court', 'Ground_Track_Field', + 'Bridge', 'Large_Vehicle', 'Small_Vehicle', 'Helicopter', + 'Swimming_pool', 'Roundabout', 'Soccer_ball_field', 'plane', + 'Harbor'), + palette=[[0, 0, 0], [0, 0, 63], [0, 63, 63], [0, 63, 0], [0, 63, 127], + [0, 63, 191], [0, 63, 255], [0, 127, 63], [0, 127, 127], + [0, 0, 127], [0, 0, 191], [0, 0, 255], [0, 191, 127], + [0, 127, 191], [0, 127, 255], [0, 100, 155]]) + + def __init__(self, + img_suffix='.png', + seg_map_suffix='_instance_color_RGB.png', + ignore_index=255, + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, + seg_map_suffix=seg_map_suffix, + ignore_index=ignore_index, + **kwargs) + assert self.file_client.exists(self.data_prefix['img_path']) diff --git a/mmseg/datasets/isprs.py b/mmseg/datasets/isprs.py new file mode 100644 index 0000000000..30af53c569 --- /dev/null +++ b/mmseg/datasets/isprs.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class ISPRSDataset(BaseSegDataset): + """ISPRS dataset. + + In segmentation map annotation for ISPRS, 0 is the ignore index. + ``reduce_zero_label`` should be set to True. The ``img_suffix`` and + ``seg_map_suffix`` are both fixed to '.png'. + """ + METAINFO = dict( + classes=('impervious_surface', 'building', 'low_vegetation', 'tree', + 'car', 'clutter'), + palette=[[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], + [255, 255, 0], [255, 0, 0]]) + + def __init__(self, + img_suffix='.png', + seg_map_suffix='.png', + reduce_zero_label=True, + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, + seg_map_suffix=seg_map_suffix, + reduce_zero_label=reduce_zero_label, + **kwargs) diff --git a/mmseg/datasets/lip.py b/mmseg/datasets/lip.py new file mode 100644 index 0000000000..3a32a193af --- /dev/null +++ b/mmseg/datasets/lip.py @@ -0,0 +1,47 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class LIPDataset(BaseSegDataset): + """LIP dataset. + + The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to + '.png'. 
+ """ + METAINFO = dict( + classes=('Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', + 'UpperClothes', 'Dress', 'Coat', 'Socks', 'Pants', + 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm', + 'Right-arm', 'Left-leg', 'Right-leg', 'Left-shoe', + 'Right-shoe'), + palette=( + [0, 0, 0], + [128, 0, 0], + [255, 0, 0], + [0, 85, 0], + [170, 0, 51], + [255, 85, 0], + [0, 0, 85], + [0, 119, 221], + [85, 85, 0], + [0, 85, 85], + [85, 51, 0], + [52, 86, 128], + [0, 128, 0], + [0, 0, 255], + [51, 170, 221], + [0, 255, 255], + [85, 255, 170], + [170, 255, 85], + [255, 255, 0], + [255, 170, 0], + )) + + def __init__(self, + img_suffix='.jpg', + seg_map_suffix='.png', + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs) diff --git a/mmseg/datasets/loveda.py b/mmseg/datasets/loveda.py new file mode 100644 index 0000000000..5c16db503a --- /dev/null +++ b/mmseg/datasets/loveda.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class LoveDADataset(BaseSegDataset): + """LoveDA dataset. + + In segmentation map annotation for LoveDA, 0 is the ignore index. + ``reduce_zero_label`` should be set to True. The ``img_suffix`` and + ``seg_map_suffix`` are both fixed to '.png'. + """ + METAINFO = dict( + classes=('background', 'building', 'road', 'water', 'barren', 'forest', + 'agricultural'), + palette=[[255, 255, 255], [255, 0, 0], [255, 255, 0], [0, 0, 255], + [159, 129, 183], [0, 255, 0], [255, 195, 128]]) + + def __init__(self, + img_suffix='.png', + seg_map_suffix='.png', + reduce_zero_label=True, + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, + seg_map_suffix=seg_map_suffix, + reduce_zero_label=reduce_zero_label, + **kwargs) diff --git a/mmseg/datasets/night_driving.py b/mmseg/datasets/night_driving.py new file mode 100644 index 0000000000..3ead91ec77 --- /dev/null +++ b/mmseg/datasets/night_driving.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.registry import DATASETS +from .cityscapes import CityscapesDataset + + +@DATASETS.register_module() +class NightDrivingDataset(CityscapesDataset): + """NightDrivingDataset dataset.""" + + def __init__(self, + img_suffix='_leftImg8bit.png', + seg_map_suffix='_gtCoarse_labelTrainIds.png', + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs) diff --git a/mmseg/datasets/pascal_context.py b/mmseg/datasets/pascal_context.py new file mode 100644 index 0000000000..a6b2fba7b4 --- /dev/null +++ b/mmseg/datasets/pascal_context.py @@ -0,0 +1,115 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class PascalContextDataset(BaseSegDataset): + """PascalContext dataset. + + In segmentation map annotation for PascalContext, 0 stands for background, + which is included in 60 categories. ``reduce_zero_label`` is fixed to + False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is + fixed to '.png'. + + Args: + ann_file (str): Annotation file path. 
+ """ + + METAINFO = dict( + classes=('background', 'aeroplane', 'bag', 'bed', 'bedclothes', + 'bench', 'bicycle', 'bird', 'boat', 'book', 'bottle', + 'building', 'bus', 'cabinet', 'car', 'cat', 'ceiling', + 'chair', 'cloth', 'computer', 'cow', 'cup', 'curtain', 'dog', + 'door', 'fence', 'floor', 'flower', 'food', 'grass', 'ground', + 'horse', 'keyboard', 'light', 'motorbike', 'mountain', + 'mouse', 'person', 'plate', 'platform', 'pottedplant', 'road', + 'rock', 'sheep', 'shelves', 'sidewalk', 'sign', 'sky', 'snow', + 'sofa', 'table', 'track', 'train', 'tree', 'truck', + 'tvmonitor', 'wall', 'water', 'window', 'wood'), + palette=[[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], + [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], + [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], + [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], + [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], + [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], + [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], + [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]) + + def __init__(self, + ann_file: str, + img_suffix='.jpg', + seg_map_suffix='.png', + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, + seg_map_suffix=seg_map_suffix, + ann_file=ann_file, + reduce_zero_label=False, + **kwargs) + assert self.file_client.exists( + self.data_prefix['img_path']) and osp.isfile(self.ann_file) + + +@DATASETS.register_module() +class PascalContextDataset59(BaseSegDataset): + """PascalContext dataset. + + In segmentation map annotation for PascalContext, 0 stands for background, + which is included in 60 categories. ``reduce_zero_label`` is fixed to + False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is + fixed to '.png'. + + Args: + ann_file (str): Annotation file path. 
+ """ + METAINFO = dict( + classes=('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle', + 'bird', 'boat', 'book', 'bottle', 'building', 'bus', + 'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth', + 'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence', + 'floor', 'flower', 'food', 'grass', 'ground', 'horse', + 'keyboard', 'light', 'motorbike', 'mountain', 'mouse', + 'person', 'plate', 'platform', 'pottedplant', 'road', 'rock', + 'sheep', 'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', + 'table', 'track', 'train', 'tree', 'truck', 'tvmonitor', + 'wall', 'water', 'window', 'wood'), + palette=[[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], + [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], + [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], + [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], + [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], + [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], + [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], + [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]) + + def __init__(self, + ann_file: str, + img_suffix='.jpg', + seg_map_suffix='.png', + reduce_zero_label=True, + **kwargs): + super().__init__( + img_suffix=img_suffix, + seg_map_suffix=seg_map_suffix, + ann_file=ann_file, + reduce_zero_label=reduce_zero_label, + **kwargs) + assert self.file_client.exists( + self.data_prefix['img_path']) and osp.isfile(self.ann_file) diff --git a/mmseg/datasets/pipelines/__init__.py b/mmseg/datasets/pipelines/__init__.py deleted file mode 100644 index e45f495070..0000000000 --- a/mmseg/datasets/pipelines/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -from .compose import Compose -from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor, - Transpose, to_tensor) -from .loading import LoadAnnotations, LoadImageFromFile -from .test_time_aug import MultiScaleFlipAug -from .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop, - RandomFlip, Resize, SegRescale) - -__all__ = [ - 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', - 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile', - 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', - 'Normalize', 'SegRescale', 'PhotoMetricDistortion' -] diff --git a/mmseg/datasets/pipelines/compose.py b/mmseg/datasets/pipelines/compose.py deleted file mode 100644 index ca48f1c935..0000000000 --- a/mmseg/datasets/pipelines/compose.py +++ /dev/null @@ -1,51 +0,0 @@ -import collections - -from mmcv.utils import build_from_cfg - -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class Compose(object): - """Compose multiple transforms sequentially. - - Args: - transforms (Sequence[dict | callable]): Sequence of transform object or - config dict to be composed. 
- """ - - def __init__(self, transforms): - assert isinstance(transforms, collections.abc.Sequence) - self.transforms = [] - for transform in transforms: - if isinstance(transform, dict): - transform = build_from_cfg(transform, PIPELINES) - self.transforms.append(transform) - elif callable(transform): - self.transforms.append(transform) - else: - raise TypeError('transform must be callable or a dict') - - def __call__(self, data): - """Call function to apply transforms sequentially. - - Args: - data (dict): A result dict contains the data to transform. - - Returns: - dict: Transformed data. - """ - - for t in self.transforms: - data = t(data) - if data is None: - return None - return data - - def __repr__(self): - format_string = self.__class__.__name__ + '(' - for t in self.transforms: - format_string += '\n' - format_string += f' {t}' - format_string += '\n)' - return format_string diff --git a/mmseg/datasets/pipelines/formating.py b/mmseg/datasets/pipelines/formating.py deleted file mode 100644 index e7029a8bac..0000000000 --- a/mmseg/datasets/pipelines/formating.py +++ /dev/null @@ -1,288 +0,0 @@ -from collections.abc import Sequence - -import mmcv -import numpy as np -import torch -from mmcv.parallel import DataContainer as DC - -from ..builder import PIPELINES - - -def to_tensor(data): - """Convert objects of various python types to :obj:`torch.Tensor`. - - Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, - :class:`Sequence`, :class:`int` and :class:`float`. - - Args: - data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to - be converted. - """ - - if isinstance(data, torch.Tensor): - return data - elif isinstance(data, np.ndarray): - return torch.from_numpy(data) - elif isinstance(data, Sequence) and not mmcv.is_str(data): - return torch.tensor(data) - elif isinstance(data, int): - return torch.LongTensor([data]) - elif isinstance(data, float): - return torch.FloatTensor([data]) - else: - raise TypeError(f'type {type(data)} cannot be converted to tensor.') - - -@PIPELINES.register_module() -class ToTensor(object): - """Convert some results to :obj:`torch.Tensor` by given keys. - - Args: - keys (Sequence[str]): Keys that need to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert data in results to :obj:`torch.Tensor`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted - to :obj:`torch.Tensor`. - """ - - for key in self.keys: - results[key] = to_tensor(results[key]) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@PIPELINES.register_module() -class ImageToTensor(object): - """Convert image to :obj:`torch.Tensor` by given keys. - - The dimension order of input image is (H, W, C). The pipeline will convert - it to (C, H, W). If only 2 dimension (H, W) is given, the output would be - (1, H, W). - - Args: - keys (Sequence[str]): Key of images to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert image in results to :obj:`torch.Tensor` and - transpose the channel order. - - Args: - results (dict): Result dict contains the image data to convert. - - Returns: - dict: The result dict contains the image converted - to :obj:`torch.Tensor` and transposed to (C, H, W) order. 
- """ - - for key in self.keys: - img = results[key] - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - results[key] = to_tensor(img.transpose(2, 0, 1)) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@PIPELINES.register_module() -class Transpose(object): - """Transpose some results by given keys. - - Args: - keys (Sequence[str]): Keys of results to be transposed. - order (Sequence[int]): Order of transpose. - """ - - def __init__(self, keys, order): - self.keys = keys - self.order = order - - def __call__(self, results): - """Call function to convert image in results to :obj:`torch.Tensor` and - transpose the channel order. - - Args: - results (dict): Result dict contains the image data to convert. - - Returns: - dict: The result dict contains the image converted - to :obj:`torch.Tensor` and transposed to (C, H, W) order. - """ - - for key in self.keys: - results[key] = results[key].transpose(self.order) - return results - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, order={self.order})' - - -@PIPELINES.register_module() -class ToDataContainer(object): - """Convert results to :obj:`mmcv.DataContainer` by given fields. - - Args: - fields (Sequence[dict]): Each field is a dict like - ``dict(key='xxx', **kwargs)``. The ``key`` in result will - be converted to :obj:`mmcv.DataContainer` with ``**kwargs``. - Default: ``(dict(key='img', stack=True), - dict(key='gt_semantic_seg'))``. - """ - - def __init__(self, - fields=(dict(key='img', - stack=True), dict(key='gt_semantic_seg'))): - self.fields = fields - - def __call__(self, results): - """Call function to convert data in results to - :obj:`mmcv.DataContainer`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted to - :obj:`mmcv.DataContainer`. - """ - - for field in self.fields: - field = field.copy() - key = field.pop('key') - results[key] = DC(results[key], **field) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(fields={self.fields})' - - -@PIPELINES.register_module() -class DefaultFormatBundle(object): - """Default formatting bundle. - - It simplifies the pipeline of formatting common fields, including "img" - and "gt_semantic_seg". These fields are formatted as follows. - - - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) - - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, - (3)to DataContainer (stack=True) - """ - - def __call__(self, results): - """Call function to transform and format common fields in results. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data that is formatted with - default bundle. - """ - - if 'img' in results: - img = results['img'] - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - img = np.ascontiguousarray(img.transpose(2, 0, 1)) - results['img'] = DC(to_tensor(img), stack=True) - if 'gt_semantic_seg' in results: - # convert to long - results['gt_semantic_seg'] = DC( - to_tensor(results['gt_semantic_seg'][None, - ...].astype(np.int64)), - stack=True) - return results - - def __repr__(self): - return self.__class__.__name__ - - -@PIPELINES.register_module() -class Collect(object): - """Collect data from the loader relevant to the specific task. - - This is usually the last stage of the data loader pipeline. Typically keys - is set to some subset of "img", "gt_semantic_seg". 
- - The "img_meta" item is always populated. The contents of the "img_meta" - dictionary depends on "meta_keys". By default this includes: - - - "img_shape": shape of the image input to the network as a tuple - (h, w, c). Note that images may be zero padded on the bottom/right - if the batch tensor is larger than this shape. - - - "scale_factor": a float indicating the preprocessing scale - - - "flip": a boolean indicating if image flip transform was used - - - "filename": path to the image file - - - "ori_shape": original shape of the image as a tuple (h, w, c) - - - "pad_shape": image shape after padding - - - "img_norm_cfg": a dict of normalization information: - - mean - per channel mean subtraction - - std - per channel std divisor - - to_rgb - bool indicating if bgr was converted to rgb - - Args: - keys (Sequence[str]): Keys of results to be collected in ``data``. - meta_keys (Sequence[str], optional): Meta keys to be converted to - ``mmcv.DataContainer`` and collected in ``data[img_metas]``. - Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape', - 'pad_shape', 'scale_factor', 'flip', 'flip_direction', - 'img_norm_cfg')`` - """ - - def __init__(self, - keys, - meta_keys=('filename', 'ori_filename', 'ori_shape', - 'img_shape', 'pad_shape', 'scale_factor', 'flip', - 'flip_direction', 'img_norm_cfg')): - self.keys = keys - self.meta_keys = meta_keys - - def __call__(self, results): - """Call function to collect keys in results. The keys in ``meta_keys`` - will be converted to :obj:mmcv.DataContainer. - - Args: - results (dict): Result dict contains the data to collect. - - Returns: - dict: The result dict contains the following keys - - keys in``self.keys`` - - ``img_metas`` - """ - - data = {} - img_meta = {} - for key in self.meta_keys: - img_meta[key] = results[key] - data['img_metas'] = DC(img_meta, cpu_only=True) - for key in self.keys: - data[key] = results[key] - return data - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, meta_keys={self.meta_keys})' diff --git a/mmseg/datasets/pipelines/loading.py b/mmseg/datasets/pipelines/loading.py deleted file mode 100644 index 9786269106..0000000000 --- a/mmseg/datasets/pipelines/loading.py +++ /dev/null @@ -1,149 +0,0 @@ -import os.path as osp - -import mmcv -import numpy as np - -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class LoadImageFromFile(object): - """Load an image from file. - - Required keys are "img_prefix" and "img_info" (a dict that must contain the - key "filename"). Added or updated keys are "filename", "img", "img_shape", - "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), - "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). - - Args: - to_float32 (bool): Whether to convert the loaded image to a float32 - numpy array. If set to False, the loaded image is an uint8 array. - Defaults to False. - color_type (str): The flag argument for :func:`mmcv.imfrombytes`. - Defaults to 'color'. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - imdecode_backend (str): Backend for :func:`mmcv.imdecode`. 
Default: - 'cv2' - """ - - def __init__(self, - to_float32=False, - color_type='color', - file_client_args=dict(backend='disk'), - imdecode_backend='cv2'): - self.to_float32 = to_float32 - self.color_type = color_type - self.file_client_args = file_client_args.copy() - self.file_client = None - self.imdecode_backend = imdecode_backend - - def __call__(self, results): - """Call functions to load image and get image meta information. - - Args: - results (dict): Result dict from :obj:`mmseg.CustomDataset`. - - Returns: - dict: The dict contains loaded image and meta information. - """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - if results.get('img_prefix') is not None: - filename = osp.join(results['img_prefix'], - results['img_info']['filename']) - else: - filename = results['img_info']['filename'] - img_bytes = self.file_client.get(filename) - img = mmcv.imfrombytes( - img_bytes, flag=self.color_type, backend=self.imdecode_backend) - if self.to_float32: - img = img.astype(np.float32) - - results['filename'] = filename - results['ori_filename'] = results['img_info']['filename'] - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - # Set initial values for default meta_keys - results['pad_shape'] = img.shape - results['scale_factor'] = 1.0 - num_channels = 1 if len(img.shape) < 3 else img.shape[2] - results['img_norm_cfg'] = dict( - mean=np.zeros(num_channels, dtype=np.float32), - std=np.ones(num_channels, dtype=np.float32), - to_rgb=False) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(to_float32={self.to_float32},' - repr_str += f"color_type='{self.color_type}'," - repr_str += f"imdecode_backend='{self.imdecode_backend}')" - return repr_str - - -@PIPELINES.register_module() -class LoadAnnotations(object): - """Load annotations for semantic segmentation. - - Args: - reduct_zero_label (bool): Whether reduce all label value by 1. - Usually used for datasets where 0 is background label. - Default: False. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default: - 'pillow' - """ - - def __init__(self, - reduce_zero_label=False, - file_client_args=dict(backend='disk'), - imdecode_backend='pillow'): - self.reduce_zero_label = reduce_zero_label - self.file_client_args = file_client_args.copy() - self.file_client = None - self.imdecode_backend = imdecode_backend - - def __call__(self, results): - """Call function to load multiple types annotations. - - Args: - results (dict): Result dict from :obj:`mmseg.CustomDataset`. - - Returns: - dict: The dict contains loaded semantic segmentation annotations. 
- """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - if results.get('seg_prefix', None) is not None: - filename = osp.join(results['seg_prefix'], - results['ann_info']['seg_map']) - else: - filename = results['ann_info']['seg_map'] - img_bytes = self.file_client.get(filename) - gt_semantic_seg = mmcv.imfrombytes( - img_bytes, flag='unchanged', - backend=self.imdecode_backend).squeeze().astype(np.uint8) - # reduce zero_label - if self.reduce_zero_label: - # avoid using underflow conversion - gt_semantic_seg[gt_semantic_seg == 0] = 255 - gt_semantic_seg = gt_semantic_seg - 1 - gt_semantic_seg[gt_semantic_seg == 254] = 255 - results['gt_semantic_seg'] = gt_semantic_seg - results['seg_fields'].append('gt_semantic_seg') - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(reduce_zero_label={self.reduce_zero_label},' - repr_str += f"imdecode_backend='{self.imdecode_backend}')" - return repr_str diff --git a/mmseg/datasets/pipelines/test_time_aug.py b/mmseg/datasets/pipelines/test_time_aug.py deleted file mode 100644 index 5712c79d58..0000000000 --- a/mmseg/datasets/pipelines/test_time_aug.py +++ /dev/null @@ -1,120 +0,0 @@ -import warnings - -import mmcv - -from ..builder import PIPELINES -from .compose import Compose - - -@PIPELINES.register_module() -class MultiScaleFlipAug(object): - """Test-time augmentation with multiple scales and flipping. - - An example configuration is as followed: - - .. code-block:: - - img_scale=(2048, 1024), - img_ratios=[0.5, 1.0], - flip=True, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ] - - After MultiScaleFLipAug with above configuration, the results are wrapped - into lists of the same length as followed: - - .. code-block:: - - dict( - img=[...], - img_shape=[...], - scale=[(1024, 512), (1024, 512), (2048, 1024), (2048, 1024)] - flip=[False, True, False, True] - ... - ) - - Args: - transforms (list[dict]): Transforms to apply in each augmentation. - img_scale (tuple | list[tuple]): Images scales for resizing. - img_ratios (float | list[float]): Image ratios for resizing - flip (bool): Whether apply flip augmentation. Default: False. - flip_direction (str | list[str]): Flip augmentation directions, - options are "horizontal" and "vertical". If flip_direction is list, - multiple flip augmentations will be applied. - It has no effect when flip == False. Default: "horizontal". 
- """ - - def __init__(self, - transforms, - img_scale, - img_ratios=None, - flip=False, - flip_direction='horizontal'): - self.transforms = Compose(transforms) - if img_ratios is not None: - # mode 1: given a scale and a range of image ratio - img_ratios = img_ratios if isinstance(img_ratios, - list) else [img_ratios] - assert mmcv.is_list_of(img_ratios, float) - assert isinstance(img_scale, tuple) and len(img_scale) == 2 - self.img_scale = [(int(img_scale[0] * ratio), - int(img_scale[1] * ratio)) - for ratio in img_ratios] - else: - # mode 2: given multiple scales - self.img_scale = img_scale if isinstance(img_scale, - list) else [img_scale] - assert mmcv.is_list_of(self.img_scale, tuple) - self.flip = flip - self.flip_direction = flip_direction if isinstance( - flip_direction, list) else [flip_direction] - assert mmcv.is_list_of(self.flip_direction, str) - if not self.flip and self.flip_direction != ['horizontal']: - warnings.warn( - 'flip_direction has no effect when flip is set to False') - if (self.flip - and not any([t['type'] == 'RandomFlip' for t in transforms])): - warnings.warn( - 'flip has no effect when RandomFlip is not in transforms') - - def __call__(self, results): - """Call function to apply test time augment transforms on results. - - Args: - results (dict): Result dict contains the data to transform. - - Returns: - dict[str: list]: The augmented data, where each value is wrapped - into a list. - """ - - aug_data = [] - flip_aug = [False, True] if self.flip else [False] - for scale in self.img_scale: - for flip in flip_aug: - for direction in self.flip_direction: - _results = results.copy() - _results['scale'] = scale - _results['flip'] = flip - _results['flip_direction'] = direction - data = self.transforms(_results) - aug_data.append(data) - # list of dict to dict of list - aug_data_dict = {key: [] for key in aug_data[0]} - for data in aug_data: - for key, val in data.items(): - aug_data_dict[key].append(val) - return aug_data_dict - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(transforms={self.transforms}, ' - repr_str += f'img_scale={self.img_scale}, flip={self.flip})' - repr_str += f'flip_direction={self.flip_direction}' - return repr_str diff --git a/mmseg/datasets/pipelines/transforms.py b/mmseg/datasets/pipelines/transforms.py deleted file mode 100644 index b683973ca2..0000000000 --- a/mmseg/datasets/pipelines/transforms.py +++ /dev/null @@ -1,610 +0,0 @@ -import mmcv -import numpy as np -from numpy import random - -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class Resize(object): - """Resize images & seg. - - This transform resizes the input image to some scale. If the input dict - contains the key "scale", then the scale in the input dict is used, - otherwise the specified scale in the init method is used. - - ``img_scale`` can either be a tuple (single-scale) or a list of tuple - (multi-scale). There are 3 multiscale modes: - - ``ratio_range is not None``: randomly sample a ratio from the ratio range - and multiply it with the image scale. - - ``ratio_range is None and multiscale_mode == "range"``: randomly sample a - scale from the a range. - - ``ratio_range is None and multiscale_mode == "value"``: randomly sample a - scale from multiple scales. - - Args: - img_scale (tuple or list[tuple]): Images scales for resizing. - multiscale_mode (str): Either "range" or "value". - ratio_range (tuple[float]): (min_ratio, max_ratio) - keep_ratio (bool): Whether to keep the aspect ratio when resizing the - image. 
- """ - - def __init__(self, - img_scale=None, - multiscale_mode='range', - ratio_range=None, - keep_ratio=True): - if img_scale is None: - self.img_scale = None - else: - if isinstance(img_scale, list): - self.img_scale = img_scale - else: - self.img_scale = [img_scale] - assert mmcv.is_list_of(self.img_scale, tuple) - - if ratio_range is not None: - # mode 1: given a scale and a range of image ratio - assert len(self.img_scale) == 1 - else: - # mode 2: given multiple scales or a range of scales - assert multiscale_mode in ['value', 'range'] - - self.multiscale_mode = multiscale_mode - self.ratio_range = ratio_range - self.keep_ratio = keep_ratio - - @staticmethod - def random_select(img_scales): - """Randomly select an img_scale from given candidates. - - Args: - img_scales (list[tuple]): Images scales for selection. - - Returns: - (tuple, int): Returns a tuple ``(img_scale, scale_dix)``, - where ``img_scale`` is the selected image scale and - ``scale_idx`` is the selected index in the given candidates. - """ - - assert mmcv.is_list_of(img_scales, tuple) - scale_idx = np.random.randint(len(img_scales)) - img_scale = img_scales[scale_idx] - return img_scale, scale_idx - - @staticmethod - def random_sample(img_scales): - """Randomly sample an img_scale when ``multiscale_mode=='range'``. - - Args: - img_scales (list[tuple]): Images scale range for sampling. - There must be two tuples in img_scales, which specify the lower - and uper bound of image scales. - - Returns: - (tuple, None): Returns a tuple ``(img_scale, None)``, where - ``img_scale`` is sampled scale and None is just a placeholder - to be consistent with :func:`random_select`. - """ - - assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2 - img_scale_long = [max(s) for s in img_scales] - img_scale_short = [min(s) for s in img_scales] - long_edge = np.random.randint( - min(img_scale_long), - max(img_scale_long) + 1) - short_edge = np.random.randint( - min(img_scale_short), - max(img_scale_short) + 1) - img_scale = (long_edge, short_edge) - return img_scale, None - - @staticmethod - def random_sample_ratio(img_scale, ratio_range): - """Randomly sample an img_scale when ``ratio_range`` is specified. - - A ratio will be randomly sampled from the range specified by - ``ratio_range``. Then it would be multiplied with ``img_scale`` to - generate sampled scale. - - Args: - img_scale (tuple): Images scale base to multiply with ratio. - ratio_range (tuple[float]): The minimum and maximum ratio to scale - the ``img_scale``. - - Returns: - (tuple, None): Returns a tuple ``(scale, None)``, where - ``scale`` is sampled ratio multiplied with ``img_scale`` and - None is just a placeholder to be consistent with - :func:`random_select`. - """ - - assert isinstance(img_scale, tuple) and len(img_scale) == 2 - min_ratio, max_ratio = ratio_range - assert min_ratio <= max_ratio - ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio - scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio) - return scale, None - - def _random_scale(self, results): - """Randomly sample an img_scale according to ``ratio_range`` and - ``multiscale_mode``. - - If ``ratio_range`` is specified, a ratio will be sampled and be - multiplied with ``img_scale``. - If multiple scales are specified by ``img_scale``, a scale will be - sampled according to ``multiscale_mode``. - Otherwise, single scale will be used. - - Args: - results (dict): Result dict from :obj:`dataset`. 
- - Returns: - dict: Two new keys 'scale` and 'scale_idx` are added into - ``results``, which would be used by subsequent pipelines. - """ - - if self.ratio_range is not None: - scale, scale_idx = self.random_sample_ratio( - self.img_scale[0], self.ratio_range) - elif len(self.img_scale) == 1: - scale, scale_idx = self.img_scale[0], 0 - elif self.multiscale_mode == 'range': - scale, scale_idx = self.random_sample(self.img_scale) - elif self.multiscale_mode == 'value': - scale, scale_idx = self.random_select(self.img_scale) - else: - raise NotImplementedError - - results['scale'] = scale - results['scale_idx'] = scale_idx - - def _resize_img(self, results): - """Resize images with ``results['scale']``.""" - if self.keep_ratio: - img, scale_factor = mmcv.imrescale( - results['img'], results['scale'], return_scale=True) - # the w_scale and h_scale has minor difference - # a real fix should be done in the mmcv.imrescale in the future - new_h, new_w = img.shape[:2] - h, w = results['img'].shape[:2] - w_scale = new_w / w - h_scale = new_h / h - else: - img, w_scale, h_scale = mmcv.imresize( - results['img'], results['scale'], return_scale=True) - scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], - dtype=np.float32) - results['img'] = img - results['img_shape'] = img.shape - results['pad_shape'] = img.shape # in case that there is no padding - results['scale_factor'] = scale_factor - results['keep_ratio'] = self.keep_ratio - - def _resize_seg(self, results): - """Resize semantic segmentation map with ``results['scale']``.""" - for key in results.get('seg_fields', []): - if self.keep_ratio: - gt_seg = mmcv.imrescale( - results[key], results['scale'], interpolation='nearest') - else: - gt_seg = mmcv.imresize( - results[key], results['scale'], interpolation='nearest') - results['gt_semantic_seg'] = gt_seg - - def __call__(self, results): - """Call function to resize images, bounding boxes, masks, semantic - segmentation map. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', - 'keep_ratio' keys are added into result dict. - """ - - if 'scale' not in results: - self._random_scale(results) - self._resize_img(results) - self._resize_seg(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += (f'(img_scale={self.img_scale}, ' - f'multiscale_mode={self.multiscale_mode}, ' - f'ratio_range={self.ratio_range}, ' - f'keep_ratio={self.keep_ratio})') - return repr_str - - -@PIPELINES.register_module() -class RandomFlip(object): - """Flip the image & seg. - - If the input dict contains the key "flip", then the flag will be used, - otherwise it will be randomly decided by a ratio specified in the init - method. - - Args: - flip_ratio (float, optional): The flipping probability. Default: None. - direction(str, optional): The flipping direction. Options are - 'horizontal' and 'vertical'. Default: 'horizontal'. - """ - - def __init__(self, flip_ratio=None, direction='horizontal'): - self.flip_ratio = flip_ratio - self.direction = direction - if flip_ratio is not None: - assert flip_ratio >= 0 and flip_ratio <= 1 - assert direction in ['horizontal', 'vertical'] - - def __call__(self, results): - """Call function to flip bounding boxes, masks, semantic segmentation - maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Flipped results, 'flip', 'flip_direction' keys are added into - result dict. 
- """ - - if 'flip' not in results: - flip = True if np.random.rand() < self.flip_ratio else False - results['flip'] = flip - if 'flip_direction' not in results: - results['flip_direction'] = self.direction - if results['flip']: - # flip image - results['img'] = mmcv.imflip( - results['img'], direction=results['flip_direction']) - - # flip segs - for key in results.get('seg_fields', []): - # use copy() to make numpy stride positive - results[key] = mmcv.imflip( - results[key], direction=results['flip_direction']).copy() - return results - - def __repr__(self): - return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})' - - -@PIPELINES.register_module() -class Pad(object): - """Pad the image & mask. - - There are two padding modes: (1) pad to a fixed size and (2) pad to the - minimum size that is divisible by some number. - Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor", - - Args: - size (tuple, optional): Fixed padding size. - size_divisor (int, optional): The divisor of padded size. - pad_val (float, optional): Padding value. Default: 0. - seg_pad_val (float, optional): Padding value of segmentation map. - Default: 255. - """ - - def __init__(self, - size=None, - size_divisor=None, - pad_val=0, - seg_pad_val=255): - self.size = size - self.size_divisor = size_divisor - self.pad_val = pad_val - self.seg_pad_val = seg_pad_val - # only one of size and size_divisor should be valid - assert size is not None or size_divisor is not None - assert size is None or size_divisor is None - - def _pad_img(self, results): - """Pad images according to ``self.size``.""" - if self.size is not None: - padded_img = mmcv.impad( - results['img'], shape=self.size, pad_val=self.pad_val) - elif self.size_divisor is not None: - padded_img = mmcv.impad_to_multiple( - results['img'], self.size_divisor, pad_val=self.pad_val) - results['img'] = padded_img - results['pad_shape'] = padded_img.shape - results['pad_fixed_size'] = self.size - results['pad_size_divisor'] = self.size_divisor - - def _pad_seg(self, results): - """Pad masks according to ``results['pad_shape']``.""" - for key in results.get('seg_fields', []): - results[key] = mmcv.impad( - results[key], - shape=results['pad_shape'][:2], - pad_val=self.seg_pad_val) - - def __call__(self, results): - """Call function to pad images, masks, semantic segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Updated result dict. - """ - - self._pad_img(results) - self._pad_seg(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(size={self.size}, size_divisor={self.size_divisor}, ' \ - f'pad_val={self.pad_val})' - return repr_str - - -@PIPELINES.register_module() -class Normalize(object): - """Normalize the image. - - Added key is "img_norm_cfg". - - Args: - mean (sequence): Mean values of 3 channels. - std (sequence): Std values of 3 channels. - to_rgb (bool): Whether to convert the image from BGR to RGB, - default is true. - """ - - def __init__(self, mean, std, to_rgb=True): - self.mean = np.array(mean, dtype=np.float32) - self.std = np.array(std, dtype=np.float32) - self.to_rgb = to_rgb - - def __call__(self, results): - """Call function to normalize images. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Normalized results, 'img_norm_cfg' key is added into - result dict. 
- """ - - results['img'] = mmcv.imnormalize(results['img'], self.mean, self.std, - self.to_rgb) - results['img_norm_cfg'] = dict( - mean=self.mean, std=self.std, to_rgb=self.to_rgb) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(mean={self.mean}, std={self.std}, to_rgb=' \ - f'{self.to_rgb})' - return repr_str - - -@PIPELINES.register_module() -class RandomCrop(object): - """Random crop the image & seg. - - Args: - crop_size (tuple): Expected size after cropping, (h, w). - cat_max_ratio (float): The maximum ratio that single category could - occupy. - """ - - def __init__(self, crop_size, cat_max_ratio=1., ignore_index=255): - assert crop_size[0] > 0 and crop_size[1] > 0 - self.crop_size = crop_size - self.cat_max_ratio = cat_max_ratio - self.ignore_index = ignore_index - - def get_crop_bbox(self, img): - """Randomly get a crop bounding box.""" - margin_h = max(img.shape[0] - self.crop_size[0], 0) - margin_w = max(img.shape[1] - self.crop_size[1], 0) - offset_h = np.random.randint(0, margin_h + 1) - offset_w = np.random.randint(0, margin_w + 1) - crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0] - crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1] - - return crop_y1, crop_y2, crop_x1, crop_x2 - - def crop(self, img, crop_bbox): - """Crop from ``img``""" - crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox - img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] - return img - - def __call__(self, results): - """Call function to randomly crop images, semantic segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Randomly cropped results, 'img_shape' key in result dict is - updated according to crop size. - """ - - img = results['img'] - crop_bbox = self.get_crop_bbox(img) - if self.cat_max_ratio < 1.: - # Repeat 10 times - for _ in range(10): - seg_temp = self.crop(results['gt_semantic_seg'], crop_bbox) - labels, cnt = np.unique(seg_temp, return_counts=True) - cnt = cnt[labels != self.ignore_index] - if len(cnt) > 1 and np.max(cnt) / np.sum( - cnt) < self.cat_max_ratio: - break - crop_bbox = self.get_crop_bbox(img) - - # crop the image - img = self.crop(img, crop_bbox) - img_shape = img.shape - results['img'] = img - results['img_shape'] = img_shape - - # crop semantic seg - for key in results.get('seg_fields', []): - results[key] = self.crop(results[key], crop_bbox) - - return results - - def __repr__(self): - return self.__class__.__name__ + f'(crop_size={self.crop_size})' - - -@PIPELINES.register_module() -class SegRescale(object): - """Rescale semantic segmentation maps. - - Args: - scale_factor (float): The scale factor of the final output. - """ - - def __init__(self, scale_factor=1): - self.scale_factor = scale_factor - - def __call__(self, results): - """Call function to scale the semantic segmentation map. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Result dict with semantic segmentation map scaled. - """ - for key in results.get('seg_fields', []): - if self.scale_factor != 1: - results[key] = mmcv.imrescale( - results[key], self.scale_factor, interpolation='nearest') - return results - - def __repr__(self): - return self.__class__.__name__ + f'(scale_factor={self.scale_factor})' - - -@PIPELINES.register_module() -class PhotoMetricDistortion(object): - """Apply photometric distortion to image sequentially, every transformation - is applied with a probability of 0.5. The position of random contrast is in - second or second to last. - - 1. 
random brightness - 2. random contrast (mode 0) - 3. convert color from BGR to HSV - 4. random saturation - 5. random hue - 6. convert color from HSV to BGR - 7. random contrast (mode 1) - 8. randomly swap channels - - Args: - brightness_delta (int): delta of brightness. - contrast_range (tuple): range of contrast. - saturation_range (tuple): range of saturation. - hue_delta (int): delta of hue. - """ - - def __init__(self, - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18): - self.brightness_delta = brightness_delta - self.contrast_lower, self.contrast_upper = contrast_range - self.saturation_lower, self.saturation_upper = saturation_range - self.hue_delta = hue_delta - - def convert(self, img, alpha=1, beta=0): - """Multiple with alpha and add beat with clip.""" - img = img.astype(np.float32) * alpha + beta - img = np.clip(img, 0, 255) - return img.astype(np.uint8) - - def brightness(self, img): - """Brightness distortion.""" - if random.randint(2): - return self.convert( - img, - beta=random.uniform(-self.brightness_delta, - self.brightness_delta)) - return img - - def contrast(self, img): - """Contrast distortion.""" - if random.randint(2): - return self.convert( - img, - alpha=random.uniform(self.contrast_lower, self.contrast_upper)) - return img - - def saturation(self, img): - """Saturation distortion.""" - if random.randint(2): - img = mmcv.bgr2hsv(img) - img[:, :, 1] = self.convert( - img[:, :, 1], - alpha=random.uniform(self.saturation_lower, - self.saturation_upper)) - img = mmcv.hsv2bgr(img) - return img - - def hue(self, img): - """Hue distortion.""" - if random.randint(2): - img = mmcv.bgr2hsv(img) - img[:, :, - 0] = (img[:, :, 0].astype(int) + - random.randint(-self.hue_delta, self.hue_delta)) % 180 - img = mmcv.hsv2bgr(img) - return img - - def __call__(self, results): - """Call function to perform photometric distortion on images. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Result dict with images distorted. - """ - - img = results['img'] - # random brightness - img = self.brightness(img) - - # mode == 0 --> do random contrast first - # mode == 1 --> do random contrast last - mode = random.randint(2) - if mode == 1: - img = self.contrast(img) - - # random saturation - img = self.saturation(img) - - # random hue - img = self.hue(img) - - # random contrast - if mode == 0: - img = self.contrast(img) - - results['img'] = img - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += (f'(brightness_delta={self.brightness_delta}, ' - f'contrast_range=({self.contrast_lower}, ' - f'{self.contrast_upper}), ' - f'saturation_range=({self.saturation_lower}, ' - f'{self.saturation_upper}), ' - f'hue_delta={self.hue_delta})') - return repr_str diff --git a/mmseg/datasets/potsdam.py b/mmseg/datasets/potsdam.py new file mode 100644 index 0000000000..6892de3dd2 --- /dev/null +++ b/mmseg/datasets/potsdam.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class PotsdamDataset(BaseSegDataset): + """ISPRS Potsdam dataset. + + In segmentation map annotation for Potsdam dataset, 0 is the ignore index. + ``reduce_zero_label`` should be set to True. The ``img_suffix`` and + ``seg_map_suffix`` are both fixed to '.png'. 
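+
+    Example (an illustrative config sketch; the ``data_root`` path and the
+    dataloader settings below are assumptions, not part of this change):
+
+    .. code-block:: python
+
+        train_dataloader = dict(
+            batch_size=2,
+            num_workers=2,
+            dataset=dict(
+                type='PotsdamDataset',
+                data_root='data/potsdam',
+                data_prefix=dict(
+                    img_path='img_dir/train', seg_map_path='ann_dir/train'),
+                pipeline=[
+                    dict(type='LoadImageFromFile'),
+                    dict(type='LoadAnnotations'),
+                    dict(type='PackSegInputs')
+                ]))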
+ """ + METAINFO = dict( + classes=('impervious_surface', 'building', 'low_vegetation', 'tree', + 'car', 'clutter'), + palette=[[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], + [255, 255, 0], [255, 0, 0]]) + + def __init__(self, + img_suffix='.png', + seg_map_suffix='.png', + reduce_zero_label=True, + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, + seg_map_suffix=seg_map_suffix, + reduce_zero_label=reduce_zero_label, + **kwargs) diff --git a/mmseg/datasets/stare.py b/mmseg/datasets/stare.py new file mode 100644 index 0000000000..2bfce23449 --- /dev/null +++ b/mmseg/datasets/stare.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class STAREDataset(BaseSegDataset): + """STARE dataset. + + In segmentation map annotation for STARE, 0 stands for background, which is + included in 2 categories. ``reduce_zero_label`` is fixed to False. The + ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '.ah.png'. + """ + METAINFO = dict( + classes=('background', 'vessel'), + palette=[[120, 120, 120], [6, 230, 230]]) + + def __init__(self, + img_suffix='.png', + seg_map_suffix='.ah.png', + reduce_zero_label=False, + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, + seg_map_suffix=seg_map_suffix, + reduce_zero_label=reduce_zero_label, + **kwargs) + assert self.file_client.exists(self.data_prefix['img_path']) diff --git a/mmseg/datasets/synapse.py b/mmseg/datasets/synapse.py new file mode 100644 index 0000000000..2d016b9475 --- /dev/null +++ b/mmseg/datasets/synapse.py @@ -0,0 +1,26 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class SynapseDataset(BaseSegDataset): + """Synapse dataset. + + In segmentation map annotation for Synapse, 0 stands for background, which + is not include in 13 categories. The ``img_suffix`` is fixed to '.jpg' and + ``seg_map_suffix`` is fixed to '.png'. + """ + METAINFO = dict( + classes=('background', 'aorta', 'gallbladder', 'left_kidney', + 'right_kidney', 'liver', 'pancreas', 'spleen', 'stomach'), + palette=[[0, 0, 0], [0, 0, 255], [0, 255, 0], [255, 0, 0], + [0, 255, 255], [255, 0, 255], [255, 255, 0], [60, 255, 255], + [240, 240, 240]]) + + def __init__(self, + img_suffix='.jpg', + seg_map_suffix='.png', + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs) diff --git a/mmseg/datasets/transforms/__init__.py b/mmseg/datasets/transforms/__init__.py new file mode 100644 index 0000000000..a37725bee6 --- /dev/null +++ b/mmseg/datasets/transforms/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .formatting import PackSegInputs +from .loading import (LoadAnnotations, LoadBiomedicalAnnotation, + LoadBiomedicalData, LoadBiomedicalImageFromFile, + LoadImageFromNDArray) +from .transforms import (CLAHE, AdjustGamma, GenerateEdge, + PhotoMetricDistortion, RandomCrop, RandomCutOut, + RandomMosaic, RandomRotate, RandomRotFlip, Rerange, + ResizeShortestEdge, ResizeToMultiple, RGB2Gray, + SegRescale) + +__all__ = [ + 'LoadAnnotations', 'RandomCrop', 'SegRescale', 'PhotoMetricDistortion', + 'RandomRotate', 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray', + 'RandomCutOut', 'RandomMosaic', 'PackSegInputs', 'ResizeToMultiple', + 'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile', + 'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge', + 'ResizeShortestEdge', 'RandomRotFlip' +] diff --git a/mmseg/datasets/transforms/formatting.py b/mmseg/datasets/transforms/formatting.py new file mode 100644 index 0000000000..bb4db4484e --- /dev/null +++ b/mmseg/datasets/transforms/formatting.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +from mmcv.transforms import to_tensor +from mmcv.transforms.base import BaseTransform +from mmengine.structures import PixelData + +from mmseg.registry import TRANSFORMS +from mmseg.structures import SegDataSample + + +@TRANSFORMS.register_module() +class PackSegInputs(BaseTransform): + """Pack the inputs data for the semantic segmentation. + + The ``img_meta`` item is always populated. The contents of the + ``img_meta`` dictionary depends on ``meta_keys``. By default this includes: + + - ``img_path``: filename of the image + + - ``ori_shape``: original shape of the image as a tuple (h, w, c) + + - ``img_shape``: shape of the image input to the network as a tuple \ + (h, w, c). Note that images may be zero padded on the \ + bottom/right if the batch tensor is larger than this shape. + + - ``pad_shape``: shape of padded images + + - ``scale_factor``: a float indicating the preprocessing scale + + - ``flip``: a boolean indicating if image flip transform was used + + - ``flip_direction``: the flipping direction + + Args: + meta_keys (Sequence[str], optional): Meta keys to be packed from + ``SegDataSample`` and collected in ``data[img_metas]``. + Default: ``('img_path', 'ori_shape', + 'img_shape', 'pad_shape', 'scale_factor', 'flip', + 'flip_direction')`` + """ + + def __init__(self, + meta_keys=('img_path', 'seg_map_path', 'ori_shape', + 'img_shape', 'pad_shape', 'scale_factor', 'flip', + 'flip_direction')): + self.meta_keys = meta_keys + + def transform(self, results: dict) -> dict: + """Method to pack the input data. + + Args: + results (dict): Result dict from the data pipeline. + + Returns: + dict: + + - 'inputs' (obj:`torch.Tensor`): The forward data of models. + - 'data_sample' (obj:`SegDataSample`): The annotation info of the + sample. 
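+
+        A minimal illustrative call (the tiny arrays below are assumptions
+        used only to show the packed keys):
+
+        .. code-block:: python
+
+            import numpy as np
+            from mmseg.datasets.transforms import PackSegInputs
+
+            results = dict(
+                img=np.zeros((4, 4, 3), dtype=np.uint8),
+                gt_seg_map=np.zeros((4, 4), dtype=np.uint8),
+                img_path='demo.png',
+                ori_shape=(4, 4),
+                img_shape=(4, 4))
+            packed = PackSegInputs()(results)
+            # packed['inputs'] is a (3, 4, 4) tensor and
+            # packed['data_samples'] is a SegDataSample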
+ """ + packed_results = dict() + if 'img' in results: + img = results['img'] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + img = np.ascontiguousarray(img.transpose(2, 0, 1)) + packed_results['inputs'] = to_tensor(img) + + data_sample = SegDataSample() + if 'gt_seg_map' in results: + gt_sem_seg_data = dict( + data=to_tensor(results['gt_seg_map'][None, + ...].astype(np.int64))) + data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data) + + img_meta = {} + for key in self.meta_keys: + if key in results: + img_meta[key] = results[key] + data_sample.set_metainfo(img_meta) + packed_results['data_samples'] = data_sample + + return packed_results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(meta_keys={self.meta_keys})' + return repr_str diff --git a/mmseg/datasets/transforms/loading.py b/mmseg/datasets/transforms/loading.py new file mode 100644 index 0000000000..ea51e0df59 --- /dev/null +++ b/mmseg/datasets/transforms/loading.py @@ -0,0 +1,444 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Dict + +import mmcv +import mmengine +import numpy as np +from mmcv.transforms import BaseTransform +from mmcv.transforms import LoadAnnotations as MMCV_LoadAnnotations +from mmcv.transforms import LoadImageFromFile + +from mmseg.registry import TRANSFORMS +from mmseg.utils import datafrombytes + + +@TRANSFORMS.register_module() +class LoadAnnotations(MMCV_LoadAnnotations): + """Load annotations for semantic segmentation provided by dataset. + + The annotation format is as the following: + + .. code-block:: python + + { + # Filename of semantic segmentation ground truth file. + 'seg_map_path': 'a/b/c' + } + + After this module, the annotation has been changed to the format below: + + .. code-block:: python + + { + # in str + 'seg_fields': List + # In uint8 type. + 'gt_seg_map': np.ndarray (H, W) + } + + Required Keys: + + - seg_map_path (str): Path of semantic segmentation ground truth file. + + Added Keys: + + - seg_fields (List) + - gt_seg_map (np.uint8) + + Args: + reduce_zero_label (bool, optional): Whether reduce all label value + by 1. Usually used for datasets where 0 is background label. + Defaults to None. + imdecode_backend (str): The image decoding backend type. The backend + argument for :func:``mmcv.imfrombytes``. + See :fun:``mmcv.imfrombytes`` for details. + Defaults to 'pillow'. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:``mmcv.fileio.FileClient`` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__( + self, + reduce_zero_label=None, + file_client_args=dict(backend='disk'), + imdecode_backend='pillow', + ) -> None: + super().__init__( + with_bbox=False, + with_label=False, + with_seg=True, + with_keypoints=False, + imdecode_backend=imdecode_backend, + file_client_args=file_client_args) + self.reduce_zero_label = reduce_zero_label + if self.reduce_zero_label is not None: + warnings.warn('`reduce_zero_label` will be deprecated, ' + 'if you would like to ignore the zero label, please ' + 'set `reduce_zero_label=True` when dataset ' + 'initialized') + self.file_client_args = file_client_args.copy() + self.imdecode_backend = imdecode_backend + + def _load_seg_map(self, results: dict) -> None: + """Private function to load semantic segmentation annotations. + + Args: + results (dict): Result dict from :obj:``mmcv.BaseDataset``. + + Returns: + dict: The dict contains loaded semantic segmentation annotations. 
+ """ + + img_bytes = self.file_client.get(results['seg_map_path']) + gt_semantic_seg = mmcv.imfrombytes( + img_bytes, flag='unchanged', + backend=self.imdecode_backend).squeeze().astype(np.uint8) + + # modify if custom classes + if results.get('label_map', None) is not None: + # Add deep copy to solve bug of repeatedly + # replace `gt_semantic_seg`, which is reported in + # https://github.com/open-mmlab/mmsegmentation/pull/1445/ + gt_semantic_seg_copy = gt_semantic_seg.copy() + for old_id, new_id in results['label_map'].items(): + gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id + # reduce zero_label + if self.reduce_zero_label is None: + self.reduce_zero_label = results['reduce_zero_label'] + assert self.reduce_zero_label == results['reduce_zero_label'], \ + 'Initialize dataset with `reduce_zero_label` as ' \ + f'{results["reduce_zero_label"]} but when load annotation ' \ + f'the `reduce_zero_label` is {self.reduce_zero_label}' + if self.reduce_zero_label: + # avoid using underflow conversion + gt_semantic_seg[gt_semantic_seg == 0] = 255 + gt_semantic_seg = gt_semantic_seg - 1 + gt_semantic_seg[gt_semantic_seg == 254] = 255 + results['gt_seg_map'] = gt_semantic_seg + results['seg_fields'].append('gt_seg_map') + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(reduce_zero_label={self.reduce_zero_label},' + repr_str += f"imdecode_backend='{self.imdecode_backend}')" + repr_str += f'file_client_args={self.file_client_args})' + return repr_str + + +@TRANSFORMS.register_module() +class LoadImageFromNDArray(LoadImageFromFile): + """Load an image from ``results['img']``. + + Similar with :obj:`LoadImageFromFile`, but the image has been loaded as + :obj:`np.ndarray` in ``results['img']``. Can be used when loading image + from webcam. + + Required Keys: + + - img + + Modified Keys: + + - img + - img_path + - img_shape + - ori_shape + + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. + """ + + def transform(self, results: dict) -> dict: + """Transform function to add image meta information. + + Args: + results (dict): Result dict with Webcam read image in + ``results['img']``. + + Returns: + dict: The dict contains loaded image and meta information. + """ + + img = results['img'] + if self.to_float32: + img = img.astype(np.float32) + + results['img_path'] = None + results['img'] = img + results['img_shape'] = img.shape[:2] + results['ori_shape'] = img.shape[:2] + return results + + +@TRANSFORMS.register_module() +class LoadBiomedicalImageFromFile(BaseTransform): + """Load an biomedical mage from file. + + Required Keys: + + - img_path + + Added Keys: + + - img (np.ndarray): Biomedical image with shape (N, Z, Y, X) by default, + N is the number of modalities, and data type is float32 + if set to_float32 = True, or float64 if decode_backend is 'nifti' and + to_float32 is False. + - img_shape + - ori_shape + + Args: + decode_backend (str): The data decoding backend type. Options are + 'numpy'and 'nifti', and there is a convention that when backend is + 'nifti' the axis of data loaded is XYZ, and when backend is + 'numpy', the the axis is ZYX. The data will be transposed if the + backend is 'nifti'. Defaults to 'nifti'. + to_xyz (bool): Whether transpose data from Z, Y, X to X, Y, Z. + Defaults to False. + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an float64 array. 
+ Defaults to True. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmengine.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__( + self, + decode_backend: str = 'nifti', + to_xyz: bool = False, + to_float32: bool = True, + file_client_args: dict = dict(backend='disk') + ) -> None: + self.decode_backend = decode_backend + self.to_xyz = to_xyz + self.to_float32 = to_float32 + self.file_client_args = file_client_args.copy() + self.file_client = mmengine.FileClient(**self.file_client_args) + + def transform(self, results: Dict) -> Dict: + """Functions to load image. + + Args: + results (dict): Result dict from :obj:``mmcv.BaseDataset``. + + Returns: + dict: The dict contains loaded image and meta information. + """ + + filename = results['img_path'] + + data_bytes = self.file_client.get(filename) + img = datafrombytes(data_bytes, backend=self.decode_backend) + + if self.to_float32: + img = img.astype(np.float32) + + if len(img.shape) == 3: + img = img[None, ...] + + if self.decode_backend == 'nifti': + img = img.transpose(0, 3, 2, 1) + + if self.to_xyz: + img = img.transpose(0, 3, 2, 1) + + results['img'] = img + results['img_shape'] = img.shape[1:] + results['ori_shape'] = img.shape[1:] + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f"decode_backend='{self.decode_backend}', " + f'to_xyz={self.to_xyz}, ' + f'to_float32={self.to_float32}, ' + f'file_client_args={self.file_client_args})') + return repr_str + + +@TRANSFORMS.register_module() +class LoadBiomedicalAnnotation(BaseTransform): + """Load ``seg_map`` annotation provided by biomedical dataset. + + The annotation format is as the following: + + .. code-block:: python + + { + 'gt_seg_map': np.ndarray (X, Y, Z) or (Z, Y, X) + } + + Required Keys: + + - seg_map_path + + Added Keys: + + - gt_seg_map (np.ndarray): Biomedical seg map with shape (Z, Y, X) by + default, and data type is float32 if set to_float32 = True, or + float64 if decode_backend is 'nifti' and to_float32 is False. + + Args: + decode_backend (str): The data decoding backend type. Options are + 'numpy'and 'nifti', and there is a convention that when backend is + 'nifti' the axis of data loaded is XYZ, and when backend is + 'numpy', the the axis is ZYX. The data will be transposed if the + backend is 'nifti'. Defaults to 'nifti'. + to_xyz (bool): Whether transpose data from Z, Y, X to X, Y, Z. + Defaults to False. + to_float32 (bool): Whether to convert the loaded seg map to a float32 + numpy array. If set to False, the loaded image is an float64 array. + Defaults to True. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmengine.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__( + self, + decode_backend: str = 'nifti', + to_xyz: bool = False, + to_float32: bool = True, + file_client_args: dict = dict(backend='disk') + ) -> None: + super().__init__() + self.decode_backend = decode_backend + self.to_xyz = to_xyz + self.to_float32 = to_float32 + self.file_client_args = file_client_args.copy() + self.file_client = mmengine.FileClient(**self.file_client_args) + + def transform(self, results: Dict) -> Dict: + """Functions to load image. + + Args: + results (dict): Result dict from :obj:``mmcv.BaseDataset``. + + Returns: + dict: The dict contains loaded image and meta information. 
+ """ + data_bytes = self.file_client.get(results['seg_map_path']) + gt_seg_map = datafrombytes(data_bytes, backend=self.decode_backend) + + if self.to_float32: + gt_seg_map = gt_seg_map.astype(np.float32) + + if self.decode_backend == 'nifti': + gt_seg_map = gt_seg_map.transpose(2, 1, 0) + + if self.to_xyz: + gt_seg_map = gt_seg_map.transpose(2, 1, 0) + + results['gt_seg_map'] = gt_seg_map + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f"decode_backend='{self.decode_backend}', " + f'to_xyz={self.to_xyz}, ' + f'to_float32={self.to_float32}, ' + f'file_client_args={self.file_client_args})') + return repr_str + + +@TRANSFORMS.register_module() +class LoadBiomedicalData(BaseTransform): + """Load an biomedical image and annotation from file. + + The loading data format is as the following: + + .. code-block:: python + + { + 'img': np.ndarray data[:-1, X, Y, Z] + 'seg_map': np.ndarray data[-1, X, Y, Z] + } + + + Required Keys: + + - img_path + + Added Keys: + + - img (np.ndarray): Biomedical image with shape (N, Z, Y, X) by default, + N is the number of modalities. + - gt_seg_map (np.ndarray, optional): Biomedical seg map with shape + (Z, Y, X) by default. + - img_shape + - ori_shape + + Args: + with_seg (bool): Whether to parse and load the semantic segmentation + annotation. Defaults to False. + decode_backend (str): The data decoding backend type. Options are + 'numpy'and 'nifti', and there is a convention that when backend is + 'nifti' the axis of data loaded is XYZ, and when backend is + 'numpy', the the axis is ZYX. The data will be transposed if the + backend is 'nifti'. Defaults to 'nifti'. + to_xyz (bool): Whether transpose data from Z, Y, X to X, Y, Z. + Defaults to False. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmengine.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__( + self, + with_seg=False, + decode_backend: str = 'numpy', + to_xyz: bool = False, + file_client_args: dict = dict(backend='disk') + ) -> None: + self.with_seg = with_seg + self.decode_backend = decode_backend + self.to_xyz = to_xyz + self.file_client_args = file_client_args.copy() + self.file_client = mmengine.FileClient(**self.file_client_args) + + def transform(self, results: Dict) -> Dict: + """Functions to load image. + + Args: + results (dict): Result dict from :obj:``mmcv.BaseDataset``. + + Returns: + dict: The dict contains loaded image and meta information. 
+ """ + data_bytes = self.file_client.get(results['img_path']) + data = datafrombytes(data_bytes, backend=self.decode_backend) + # img is 4D data (N, X, Y, Z), N is the number of protocol + img = data[:-1, :] + + if self.decode_backend == 'nifti': + img = img.transpose(0, 3, 2, 1) + + if self.to_xyz: + img = img.transpose(0, 3, 2, 1) + + results['img'] = img + results['img_shape'] = img.shape[1:] + results['ori_shape'] = img.shape[1:] + + if self.with_seg: + gt_seg_map = data[-1, :] + if self.decode_backend == 'nifti': + gt_seg_map = gt_seg_map.transpose(2, 1, 0) + + if self.to_xyz: + gt_seg_map = gt_seg_map.transpose(2, 1, 0) + results['gt_seg_map'] = gt_seg_map + return results + + def __repr__(self) -> str: + repr_str = (f'{self.__class__.__name__}(' + f'with_seg={self.with_seg}, ' + f"decode_backend='{self.decode_backend}', " + f'to_xyz={self.to_xyz}, ' + f'file_client_args={self.file_client_args})') + return repr_str diff --git a/mmseg/datasets/transforms/transforms.py b/mmseg/datasets/transforms/transforms.py new file mode 100644 index 0000000000..bafc2b48ca --- /dev/null +++ b/mmseg/datasets/transforms/transforms.py @@ -0,0 +1,1390 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import Dict, Sequence, Tuple, Union + +import cv2 +import mmcv +import numpy as np +from mmcv.transforms.base import BaseTransform +from mmcv.transforms.utils import cache_randomness +from mmengine.utils import is_tuple_of +from numpy import random + +from mmseg.datasets.dataset_wrappers import MultiImageMixDataset +from mmseg.registry import TRANSFORMS + + +@TRANSFORMS.register_module() +class ResizeToMultiple(BaseTransform): + """Resize images & seg to multiple of divisor. + + Required Keys: + + - img + - gt_seg_map + + Modified Keys: + + - img + - img_shape + - pad_shape + + Args: + size_divisor (int): images and gt seg maps need to resize to multiple + of size_divisor. Default: 32. + interpolation (str, optional): The interpolation mode of image resize. + Default: None + """ + + def __init__(self, size_divisor=32, interpolation=None): + self.size_divisor = size_divisor + self.interpolation = interpolation + + def transform(self, results: dict) -> dict: + """Call function to resize images, semantic segmentation map to + multiple of size divisor. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Resized results, 'img_shape', 'pad_shape' keys are updated. + """ + # Align image to multiple of size divisor. + img = results['img'] + img = mmcv.imresize_to_multiple( + img, + self.size_divisor, + scale_factor=1, + interpolation=self.interpolation + if self.interpolation else 'bilinear') + + results['img'] = img + results['img_shape'] = img.shape[:2] + results['pad_shape'] = img.shape[:2] + + # Align segmentation map to multiple of size divisor. + for key in results.get('seg_fields', []): + gt_seg = results[key] + gt_seg = mmcv.imresize_to_multiple( + gt_seg, + self.size_divisor, + scale_factor=1, + interpolation='nearest') + results[key] = gt_seg + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += (f'(size_divisor={self.size_divisor}, ' + f'interpolation={self.interpolation})') + return repr_str + + +@TRANSFORMS.register_module() +class Rerange(BaseTransform): + """Rerange the image pixel value. + + Required Keys: + + - img + + Modified Keys: + + - img + + Args: + min_value (float or int): Minimum value of the reranged image. + Default: 0. + max_value (float or int): Maximum value of the reranged image. 
+ Default: 255. + """ + + def __init__(self, min_value=0, max_value=255): + assert isinstance(min_value, float) or isinstance(min_value, int) + assert isinstance(max_value, float) or isinstance(max_value, int) + assert min_value < max_value + self.min_value = min_value + self.max_value = max_value + + def transform(self, results: dict) -> dict: + """Call function to rerange images. + + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Reranged results. + """ + + img = results['img'] + img_min_value = np.min(img) + img_max_value = np.max(img) + + assert img_min_value < img_max_value + # rerange to [0, 1] + img = (img - img_min_value) / (img_max_value - img_min_value) + # rerange to [min_value, max_value] + img = img * (self.max_value - self.min_value) + self.min_value + results['img'] = img + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(min_value={self.min_value}, max_value={self.max_value})' + return repr_str + + +@TRANSFORMS.register_module() +class CLAHE(BaseTransform): + """Use CLAHE method to process the image. + + See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J]. + Graphics Gems, 1994:474-485.` for more information. + + Required Keys: + + - img + + Modified Keys: + + - img + + Args: + clip_limit (float): Threshold for contrast limiting. Default: 40.0. + tile_grid_size (tuple[int]): Size of grid for histogram equalization. + Input image will be divided into equally sized rectangular tiles. + It defines the number of tiles in row and column. Default: (8, 8). + """ + + def __init__(self, clip_limit=40.0, tile_grid_size=(8, 8)): + assert isinstance(clip_limit, (float, int)) + self.clip_limit = clip_limit + assert is_tuple_of(tile_grid_size, int) + assert len(tile_grid_size) == 2 + self.tile_grid_size = tile_grid_size + + def transform(self, results: dict) -> dict: + """Call function to Use CLAHE method process images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Processed results. + """ + + for i in range(results['img'].shape[2]): + results['img'][:, :, i] = mmcv.clahe( + np.array(results['img'][:, :, i], dtype=np.uint8), + self.clip_limit, self.tile_grid_size) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(clip_limit={self.clip_limit}, '\ + f'tile_grid_size={self.tile_grid_size})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomCrop(BaseTransform): + """Random crop the image & seg. + + Required Keys: + + - img + - gt_seg_map + + Modified Keys: + + - img + - img_shape + - gt_seg_map + + + Args: + crop_size (Union[int, Tuple[int, int]]): Expected size after cropping + with the format of (h, w). If set to an integer, then cropping + width and height are equal to this integer. + cat_max_ratio (float): The maximum ratio that single category could + occupy. + ignore_index (int): The label index to be ignored. 
Default: 255 + """ + + def __init__(self, + crop_size: Union[int, Tuple[int, int]], + cat_max_ratio: float = 1., + ignore_index: int = 255): + super().__init__() + assert isinstance(crop_size, int) or ( + isinstance(crop_size, tuple) and len(crop_size) == 2 + ), 'The expected crop_size is an integer, or a tuple containing two ' + 'intergers' + + if isinstance(crop_size, int): + crop_size = (crop_size, crop_size) + assert crop_size[0] > 0 and crop_size[1] > 0 + self.crop_size = crop_size + self.cat_max_ratio = cat_max_ratio + self.ignore_index = ignore_index + + @cache_randomness + def crop_bbox(self, results: dict) -> tuple: + """get a crop bounding box. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + tuple: Coordinates of the cropped image. + """ + + def generate_crop_bbox(img: np.ndarray) -> tuple: + """Randomly get a crop bounding box. + + Args: + img (np.ndarray): Original input image. + + Returns: + tuple: Coordinates of the cropped image. + """ + + margin_h = max(img.shape[0] - self.crop_size[0], 0) + margin_w = max(img.shape[1] - self.crop_size[1], 0) + offset_h = np.random.randint(0, margin_h + 1) + offset_w = np.random.randint(0, margin_w + 1) + crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0] + crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1] + + return crop_y1, crop_y2, crop_x1, crop_x2 + + img = results['img'] + crop_bbox = generate_crop_bbox(img) + if self.cat_max_ratio < 1.: + # Repeat 10 times + for _ in range(10): + seg_temp = self.crop(results['gt_seg_map'], crop_bbox) + labels, cnt = np.unique(seg_temp, return_counts=True) + cnt = cnt[labels != self.ignore_index] + if len(cnt) > 1 and np.max(cnt) / np.sum( + cnt) < self.cat_max_ratio: + break + crop_bbox = generate_crop_bbox(img) + + return crop_bbox + + def crop(self, img: np.ndarray, crop_bbox: tuple) -> np.ndarray: + """Crop from ``img`` + + Args: + img (np.ndarray): Original input image. + crop_bbox (tuple): Coordinates of the cropped image. + + Returns: + np.ndarray: The cropped image. + """ + + crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox + img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] + return img + + def transform(self, results: dict) -> dict: + """Transform function to randomly crop images, semantic segmentation + maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Randomly cropped results, 'img_shape' key in result dict is + updated according to crop size. + """ + + img = results['img'] + crop_bbox = self.crop_bbox(results) + + # crop the image + img = self.crop(img, crop_bbox) + + # crop semantic seg + for key in results.get('seg_fields', []): + results[key] = self.crop(results[key], crop_bbox) + img_shape = img.shape + results['img'] = img + results['img_shape'] = img_shape + return results + + def __repr__(self): + return self.__class__.__name__ + f'(crop_size={self.crop_size})' + + +@TRANSFORMS.register_module() +class RandomRotate(BaseTransform): + """Rotate the image & seg. + + Required Keys: + + - img + - gt_seg_map + + Modified Keys: + + - img + - gt_seg_map + + Args: + prob (float): The rotation probability. + degree (float, tuple[float]): Range of degrees to select from. If + degree is a number instead of tuple like (min, max), + the range of degree will be (``-degree``, ``+degree``) + pad_val (float, optional): Padding value of image. Default: 0. + seg_pad_val (float, optional): Padding value of segmentation map. + Default: 255. 
+ center (tuple[float], optional): Center point (w, h) of the rotation in + the source image. If not specified, the center of the image will be + used. Default: None. + auto_bound (bool): Whether to adjust the image size to cover the whole + rotated image. Default: False + """ + + def __init__(self, + prob, + degree, + pad_val=0, + seg_pad_val=255, + center=None, + auto_bound=False): + self.prob = prob + assert prob >= 0 and prob <= 1 + if isinstance(degree, (float, int)): + assert degree > 0, f'degree {degree} should be positive' + self.degree = (-degree, degree) + else: + self.degree = degree + assert len(self.degree) == 2, f'degree {self.degree} should be a ' \ + f'tuple of (min, max)' + self.pal_val = pad_val + self.seg_pad_val = seg_pad_val + self.center = center + self.auto_bound = auto_bound + + @cache_randomness + def generate_degree(self): + return np.random.rand() < self.prob, np.random.uniform( + min(*self.degree), max(*self.degree)) + + def transform(self, results: dict) -> dict: + """Call function to rotate image, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Rotated results. + """ + + rotate, degree = self.generate_degree() + if rotate: + # rotate image + results['img'] = mmcv.imrotate( + results['img'], + angle=degree, + border_value=self.pal_val, + center=self.center, + auto_bound=self.auto_bound) + + # rotate segs + for key in results.get('seg_fields', []): + results[key] = mmcv.imrotate( + results[key], + angle=degree, + border_value=self.seg_pad_val, + center=self.center, + auto_bound=self.auto_bound, + interpolation='nearest') + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, ' \ + f'degree={self.degree}, ' \ + f'pad_val={self.pal_val}, ' \ + f'seg_pad_val={self.seg_pad_val}, ' \ + f'center={self.center}, ' \ + f'auto_bound={self.auto_bound})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomRotFlip(BaseTransform): + """Rotate and flip the image & seg or just rotate the image & seg. + + Required Keys: + + - img + - gt_seg_map + + Modified Keys: + + - img + - gt_seg_map + + Args: + rotate_prob (float): The probability of rotate image. + flip_prob (float): The probability of rotate&flip image. + degree (float, tuple[float]): Range of degrees to select from. 
If + degree is a number instead of tuple like (min, max), + the range of degree will be (``-degree``, ``+degree``) + """ + + def __init__(self, rotate_prob=0.5, flip_prob=0.5, degree=(-20, 20)): + self.rotate_prob = rotate_prob + self.flip_prob = flip_prob + assert 0 <= rotate_prob <= 1 and 0 <= flip_prob <= 1 + if isinstance(degree, (float, int)): + assert degree > 0, f'degree {degree} should be positive' + self.degree = (-degree, degree) + else: + self.degree = degree + assert len(self.degree) == 2, f'degree {self.degree} should be a ' \ + f'tuple of (min, max)' + + def random_rot_flip(self, results: dict) -> dict: + k = np.random.randint(0, 4) + results['img'] = np.rot90(results['img'], k) + for key in results.get('seg_fields', []): + results[key] = np.rot90(results[key], k) + axis = np.random.randint(0, 2) + results['img'] = np.flip(results['img'], axis=axis).copy() + for key in results.get('seg_fields', []): + results[key] = np.flip(results[key], axis=axis).copy() + return results + + def random_rotate(self, results: dict) -> dict: + angle = np.random.uniform(min(*self.degree), max(*self.degree)) + results['img'] = mmcv.imrotate(results['img'], angle=angle) + for key in results.get('seg_fields', []): + results[key] = mmcv.imrotate(results[key], angle=angle) + return results + + def transform(self, results: dict) -> dict: + """Call function to rotate or rotate & flip image, semantic + segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Rotated or rotated & flipped results. + """ + rotate_flag = 0 + if random.random() < self.rotate_prob: + results = self.random_rotate(results) + rotate_flag = 1 + if random.random() < self.flip_prob and rotate_flag == 0: + results = self.random_rot_flip(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(rotate_prob={self.rotate_prob}, ' \ + f'flip_prob={self.flip_prob}, ' \ + f'degree={self.degree})' + return repr_str + + +@TRANSFORMS.register_module() +class RGB2Gray(BaseTransform): + """Convert RGB image to grayscale image. + + Required Keys: + + - img + + Modified Keys: + + - img + - img_shape + + This transform calculate the weighted mean of input image channels with + ``weights`` and then expand the channels to ``out_channels``. When + ``out_channels`` is None, the number of output channels is the same as + input channels. + + Args: + out_channels (int): Expected number of output channels after + transforming. Default: None. + weights (tuple[float]): The weights to calculate the weighted mean. + Default: (0.299, 0.587, 0.114). + """ + + def __init__(self, out_channels=None, weights=(0.299, 0.587, 0.114)): + assert out_channels is None or out_channels > 0 + self.out_channels = out_channels + assert isinstance(weights, tuple) + for item in weights: + assert isinstance(item, (float, int)) + self.weights = weights + + def transform(self, results: dict) -> dict: + """Call function to convert RGB image to grayscale image. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with grayscale image. 
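+
+        A small numeric sketch (the pixel values are assumptions):
+
+        .. code-block:: python
+
+            # with the default weights (0.299, 0.587, 0.114) a pixel
+            # [100, 150, 200] becomes 0.299 * 100 + 0.587 * 150 + 0.114 * 200
+            # = 140.75 in every output channel (3 channels when
+            # out_channels is None)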
+ """ + img = results['img'] + assert len(img.shape) == 3 + assert img.shape[2] == len(self.weights) + weights = np.array(self.weights).reshape((1, 1, -1)) + img = (img * weights).sum(2, keepdims=True) + if self.out_channels is None: + img = img.repeat(weights.shape[2], axis=2) + else: + img = img.repeat(self.out_channels, axis=2) + + results['img'] = img + results['img_shape'] = img.shape + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(out_channels={self.out_channels}, ' \ + f'weights={self.weights})' + return repr_str + + +@TRANSFORMS.register_module() +class AdjustGamma(BaseTransform): + """Using gamma correction to process the image. + + Required Keys: + + - img + + Modified Keys: + + - img + + Args: + gamma (float or int): Gamma value used in gamma correction. + Default: 1.0. + """ + + def __init__(self, gamma=1.0): + assert isinstance(gamma, float) or isinstance(gamma, int) + assert gamma > 0 + self.gamma = gamma + inv_gamma = 1.0 / gamma + self.table = np.array([(i / 255.0)**inv_gamma * 255 + for i in np.arange(256)]).astype('uint8') + + def transform(self, results: dict) -> dict: + """Call function to process the image with gamma correction. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Processed results. + """ + + results['img'] = mmcv.lut_transform( + np.array(results['img'], dtype=np.uint8), self.table) + + return results + + def __repr__(self): + return self.__class__.__name__ + f'(gamma={self.gamma})' + + +@TRANSFORMS.register_module() +class SegRescale(BaseTransform): + """Rescale semantic segmentation maps. + + Required Keys: + + - gt_seg_map + + Modified Keys: + + - gt_seg_map + + Args: + scale_factor (float): The scale factor of the final output. + """ + + def __init__(self, scale_factor=1): + self.scale_factor = scale_factor + + def transform(self, results: dict) -> dict: + """Call function to scale the semantic segmentation map. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with semantic segmentation map scaled. + """ + for key in results.get('seg_fields', []): + if self.scale_factor != 1: + results[key] = mmcv.imrescale( + results[key], self.scale_factor, interpolation='nearest') + return results + + def __repr__(self): + return self.__class__.__name__ + f'(scale_factor={self.scale_factor})' + + +@TRANSFORMS.register_module() +class PhotoMetricDistortion(BaseTransform): + """Apply photometric distortion to image sequentially, every transformation + is applied with a probability of 0.5. The position of random contrast is in + second or second to last. + + 1. random brightness + 2. random contrast (mode 0) + 3. convert color from BGR to HSV + 4. random saturation + 5. random hue + 6. convert color from HSV to BGR + 7. random contrast (mode 1) + + Required Keys: + + - img + + Modified Keys: + + - img + + Args: + brightness_delta (int): delta of brightness. + contrast_range (tuple): range of contrast. + saturation_range (tuple): range of saturation. + hue_delta (int): delta of hue. 
+ """ + + def __init__(self, + brightness_delta: int = 32, + contrast_range: Sequence[float] = (0.5, 1.5), + saturation_range: Sequence[float] = (0.5, 1.5), + hue_delta: int = 18): + self.brightness_delta = brightness_delta + self.contrast_lower, self.contrast_upper = contrast_range + self.saturation_lower, self.saturation_upper = saturation_range + self.hue_delta = hue_delta + + def convert(self, + img: np.ndarray, + alpha: int = 1, + beta: int = 0) -> np.ndarray: + """Multiple with alpha and add beat with clip. + + Args: + img (np.ndarray): The input image. + alpha (int): Image weights, change the contrast/saturation + of the image. Default: 1 + beta (int): Image bias, change the brightness of the + image. Default: 0 + + Returns: + np.ndarray: The transformed image. + """ + + img = img.astype(np.float32) * alpha + beta + img = np.clip(img, 0, 255) + return img.astype(np.uint8) + + def brightness(self, img: np.ndarray) -> np.ndarray: + """Brightness distortion. + + Args: + img (np.ndarray): The input image. + Returns: + np.ndarray: Image after brightness change. + """ + + if random.randint(2): + return self.convert( + img, + beta=random.uniform(-self.brightness_delta, + self.brightness_delta)) + return img + + def contrast(self, img: np.ndarray) -> np.ndarray: + """Contrast distortion. + + Args: + img (np.ndarray): The input image. + Returns: + np.ndarray: Image after contrast change. + """ + + if random.randint(2): + return self.convert( + img, + alpha=random.uniform(self.contrast_lower, self.contrast_upper)) + return img + + def saturation(self, img: np.ndarray) -> np.ndarray: + """Saturation distortion. + + Args: + img (np.ndarray): The input image. + Returns: + np.ndarray: Image after saturation change. + """ + + if random.randint(2): + img = mmcv.bgr2hsv(img) + img[:, :, 1] = self.convert( + img[:, :, 1], + alpha=random.uniform(self.saturation_lower, + self.saturation_upper)) + img = mmcv.hsv2bgr(img) + return img + + def hue(self, img: np.ndarray) -> np.ndarray: + """Hue distortion. + + Args: + img (np.ndarray): The input image. + Returns: + np.ndarray: Image after hue change. + """ + + if random.randint(2): + img = mmcv.bgr2hsv(img) + img[:, :, + 0] = (img[:, :, 0].astype(int) + + random.randint(-self.hue_delta, self.hue_delta)) % 180 + img = mmcv.hsv2bgr(img) + return img + + def transform(self, results: dict) -> dict: + """Transform function to perform photometric distortion on images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with images distorted. + """ + + img = results['img'] + # random brightness + img = self.brightness(img) + + # mode == 0 --> do random contrast first + # mode == 1 --> do random contrast last + mode = random.randint(2) + if mode == 1: + img = self.contrast(img) + + # random saturation + img = self.saturation(img) + + # random hue + img = self.hue(img) + + # random contrast + if mode == 0: + img = self.contrast(img) + + results['img'] = img + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += (f'(brightness_delta={self.brightness_delta}, ' + f'contrast_range=({self.contrast_lower}, ' + f'{self.contrast_upper}), ' + f'saturation_range=({self.saturation_lower}, ' + f'{self.saturation_upper}), ' + f'hue_delta={self.hue_delta})') + return repr_str + + +@TRANSFORMS.register_module() +class RandomCutOut(BaseTransform): + """CutOut operation. + + Randomly drop some regions of image used in + `Cutout `_. 
+ + Required Keys: + + - img + - gt_seg_map + + Modified Keys: + + - img + - gt_seg_map + + Args: + prob (float): cutout probability. + n_holes (int | tuple[int, int]): Number of regions to be dropped. + If it is given as a list, number of holes will be randomly + selected from the closed interval [`n_holes[0]`, `n_holes[1]`]. + cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate + shape of dropped regions. It can be `tuple[int, int]` to use a + fixed cutout shape, or `list[tuple[int, int]]` to randomly choose + shape from the list. + cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The + candidate ratio of dropped regions. It can be `tuple[float, float]` + to use a fixed ratio or `list[tuple[float, float]]` to randomly + choose ratio from the list. Please note that `cutout_shape` + and `cutout_ratio` cannot be both given at the same time. + fill_in (tuple[float, float, float] | tuple[int, int, int]): The value + of pixel to fill in the dropped regions. Default: (0, 0, 0). + seg_fill_in (int): The labels of pixel to fill in the dropped regions. + If seg_fill_in is None, skip. Default: None. + """ + + def __init__(self, + prob, + n_holes, + cutout_shape=None, + cutout_ratio=None, + fill_in=(0, 0, 0), + seg_fill_in=None): + + assert 0 <= prob and prob <= 1 + assert (cutout_shape is None) ^ (cutout_ratio is None), \ + 'Either cutout_shape or cutout_ratio should be specified.' + assert (isinstance(cutout_shape, (list, tuple)) + or isinstance(cutout_ratio, (list, tuple))) + if isinstance(n_holes, tuple): + assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1] + else: + n_holes = (n_holes, n_holes) + if seg_fill_in is not None: + assert (isinstance(seg_fill_in, int) and 0 <= seg_fill_in + and seg_fill_in <= 255) + self.prob = prob + self.n_holes = n_holes + self.fill_in = fill_in + self.seg_fill_in = seg_fill_in + self.with_ratio = cutout_ratio is not None + self.candidates = cutout_ratio if self.with_ratio else cutout_shape + if not isinstance(self.candidates, list): + self.candidates = [self.candidates] + + @cache_randomness + def do_cutout(self): + return np.random.rand() < self.prob + + @cache_randomness + def generate_patches(self, results): + cutout = self.do_cutout() + + h, w, _ = results['img'].shape + if cutout: + n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1) + else: + n_holes = 0 + x1_lst = [] + y1_lst = [] + index_lst = [] + for _ in range(n_holes): + x1_lst.append(np.random.randint(0, w)) + y1_lst.append(np.random.randint(0, h)) + index_lst.append(np.random.randint(0, len(self.candidates))) + return cutout, n_holes, x1_lst, y1_lst, index_lst + + def transform(self, results: dict) -> dict: + """Call function to drop some regions of image.""" + cutout, n_holes, x1_lst, y1_lst, index_lst = self.generate_patches( + results) + if cutout: + h, w, c = results['img'].shape + for i in range(n_holes): + x1 = x1_lst[i] + y1 = y1_lst[i] + index = index_lst[i] + if not self.with_ratio: + cutout_w, cutout_h = self.candidates[index] + else: + cutout_w = int(self.candidates[index][0] * w) + cutout_h = int(self.candidates[index][1] * h) + + x2 = np.clip(x1 + cutout_w, 0, w) + y2 = np.clip(y1 + cutout_h, 0, h) + results['img'][y1:y2, x1:x2, :] = self.fill_in + + if self.seg_fill_in is not None: + for key in results.get('seg_fields', []): + results[key][y1:y2, x1:x2] = self.seg_fill_in + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, ' + repr_str += f'n_holes={self.n_holes}, ' + 
repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio + else f'cutout_shape={self.candidates}, ') + repr_str += f'fill_in={self.fill_in}, ' + repr_str += f'seg_fill_in={self.seg_fill_in})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomMosaic(BaseTransform): + """Mosaic augmentation. Given 4 images, mosaic transform combines them into + one output image. The output image is composed of the parts from each sub- + image. + + .. code:: text + + mosaic transform + center_x + +------------------------------+ + | pad | pad | + | +-----------+ | + | | | | + | | image1 |--------+ | + | | | | | + | | | image2 | | + center_y |----+-------------+-----------| + | | cropped | | + |pad | image3 | image4 | + | | | | + +----|-------------+-----------+ + | | + +-------------+ + + The mosaic transform steps are as follows: + 1. Choose the mosaic center as the intersections of 4 images + 2. Get the left top image according to the index, and randomly + sample another 3 images from the custom dataset. + 3. Sub image will be cropped if image is larger than mosaic patch + + Required Keys: + + - img + - gt_seg_map + - mix_results + + Modified Keys: + + - img + - img_shape + - ori_shape + - gt_seg_map + + Args: + prob (float): mosaic probability. + img_scale (Sequence[int]): Image size after mosaic pipeline of + a single image. The size of the output image is four times + that of a single image. The output image comprises 4 single images. + Default: (640, 640). + center_ratio_range (Sequence[float]): Center ratio range of mosaic + output. Default: (0.5, 1.5). + pad_val (int): Pad value. Default: 0. + seg_pad_val (int): Pad value of segmentation map. Default: 255. + """ + + def __init__(self, + prob, + img_scale=(640, 640), + center_ratio_range=(0.5, 1.5), + pad_val=0, + seg_pad_val=255): + assert 0 <= prob and prob <= 1 + assert isinstance(img_scale, tuple) + self.prob = prob + self.img_scale = img_scale + self.center_ratio_range = center_ratio_range + self.pad_val = pad_val + self.seg_pad_val = seg_pad_val + + @cache_randomness + def do_mosaic(self): + return np.random.rand() < self.prob + + def transform(self, results: dict) -> dict: + """Call function to make a mosaic of image. + + Args: + results (dict): Result dict. + + Returns: + dict: Result dict with mosaic transformed. + """ + mosaic = self.do_mosaic() + if mosaic: + results = self._mosaic_transform_img(results) + results = self._mosaic_transform_seg(results) + return results + + def get_indices(self, dataset: MultiImageMixDataset) -> list: + """Call function to collect indexes. + + Args: + dataset (:obj:`MultiImageMixDataset`): The dataset. + + Returns: + list: indexes. + """ + + indexes = [random.randint(0, len(dataset)) for _ in range(3)] + return indexes + + @cache_randomness + def generate_mosaic_center(self): + # mosaic center x, y + center_x = int( + random.uniform(*self.center_ratio_range) * self.img_scale[1]) + center_y = int( + random.uniform(*self.center_ratio_range) * self.img_scale[0]) + return center_x, center_y + + def _mosaic_transform_img(self, results: dict) -> dict: + """Mosaic transform function. + + Args: + results (dict): Result dict. + + Returns: + dict: Updated result dict. 
+ """ + + assert 'mix_results' in results + if len(results['img'].shape) == 3: + mosaic_img = np.full( + (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2), 3), + self.pad_val, + dtype=results['img'].dtype) + else: + mosaic_img = np.full( + (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)), + self.pad_val, + dtype=results['img'].dtype) + + # mosaic center x, y + self.center_x, self.center_y = self.generate_mosaic_center() + center_position = (self.center_x, self.center_y) + + loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right') + for i, loc in enumerate(loc_strs): + if loc == 'top_left': + result_patch = copy.deepcopy(results) + else: + result_patch = copy.deepcopy(results['mix_results'][i - 1]) + + img_i = result_patch['img'] + h_i, w_i = img_i.shape[:2] + # keep_ratio resize + scale_ratio_i = min(self.img_scale[0] / h_i, + self.img_scale[1] / w_i) + img_i = mmcv.imresize( + img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i))) + + # compute the combine parameters + paste_coord, crop_coord = self._mosaic_combine( + loc, center_position, img_i.shape[:2][::-1]) + x1_p, y1_p, x2_p, y2_p = paste_coord + x1_c, y1_c, x2_c, y2_c = crop_coord + + # crop and paste image + mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c] + + results['img'] = mosaic_img + results['img_shape'] = mosaic_img.shape + results['ori_shape'] = mosaic_img.shape + + return results + + def _mosaic_transform_seg(self, results: dict) -> dict: + """Mosaic transform function for label annotations. + + Args: + results (dict): Result dict. + + Returns: + dict: Updated result dict. + """ + + assert 'mix_results' in results + for key in results.get('seg_fields', []): + mosaic_seg = np.full( + (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)), + self.seg_pad_val, + dtype=results[key].dtype) + + # mosaic center x, y + center_position = (self.center_x, self.center_y) + + loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right') + for i, loc in enumerate(loc_strs): + if loc == 'top_left': + result_patch = copy.deepcopy(results) + else: + result_patch = copy.deepcopy(results['mix_results'][i - 1]) + + gt_seg_i = result_patch[key] + h_i, w_i = gt_seg_i.shape[:2] + # keep_ratio resize + scale_ratio_i = min(self.img_scale[0] / h_i, + self.img_scale[1] / w_i) + gt_seg_i = mmcv.imresize( + gt_seg_i, + (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)), + interpolation='nearest') + + # compute the combine parameters + paste_coord, crop_coord = self._mosaic_combine( + loc, center_position, gt_seg_i.shape[:2][::-1]) + x1_p, y1_p, x2_p, y2_p = paste_coord + x1_c, y1_c, x2_c, y2_c = crop_coord + + # crop and paste image + mosaic_seg[y1_p:y2_p, x1_p:x2_p] = gt_seg_i[y1_c:y2_c, + x1_c:x2_c] + + results[key] = mosaic_seg + + return results + + def _mosaic_combine(self, loc: str, center_position_xy: Sequence[float], + img_shape_wh: Sequence[int]) -> tuple: + """Calculate global coordinate of mosaic image and local coordinate of + cropped sub-image. + + Args: + loc (str): Index for the sub-image, loc in ('top_left', + 'top_right', 'bottom_left', 'bottom_right'). + center_position_xy (Sequence[float]): Mixing center for 4 images, + (x, y). + img_shape_wh (Sequence[int]): Width and height of sub-image + + Returns: + tuple[tuple[float]]: Corresponding coordinate of pasting and + cropping + - paste_coord (tuple): paste corner coordinate in mosaic image. + - crop_coord (tuple): crop corner coordinate in mosaic image. 
+ """ + + assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') + if loc == 'top_left': + # index0 to top left part of image + x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ + max(center_position_xy[1] - img_shape_wh[1], 0), \ + center_position_xy[0], \ + center_position_xy[1] + crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( + y2 - y1), img_shape_wh[0], img_shape_wh[1] + + elif loc == 'top_right': + # index1 to top right part of image + x1, y1, x2, y2 = center_position_xy[0], \ + max(center_position_xy[1] - img_shape_wh[1], 0), \ + min(center_position_xy[0] + img_shape_wh[0], + self.img_scale[1] * 2), \ + center_position_xy[1] + crop_coord = 0, img_shape_wh[1] - (y2 - y1), min( + img_shape_wh[0], x2 - x1), img_shape_wh[1] + + elif loc == 'bottom_left': + # index2 to bottom left part of image + x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ + center_position_xy[1], \ + center_position_xy[0], \ + min(self.img_scale[0] * 2, center_position_xy[1] + + img_shape_wh[1]) + crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min( + y2 - y1, img_shape_wh[1]) + + else: + # index3 to bottom right part of image + x1, y1, x2, y2 = center_position_xy[0], \ + center_position_xy[1], \ + min(center_position_xy[0] + img_shape_wh[0], + self.img_scale[1] * 2), \ + min(self.img_scale[0] * 2, center_position_xy[1] + + img_shape_wh[1]) + crop_coord = 0, 0, min(img_shape_wh[0], + x2 - x1), min(y2 - y1, img_shape_wh[1]) + + paste_coord = x1, y1, x2, y2 + return paste_coord, crop_coord + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, ' + repr_str += f'img_scale={self.img_scale}, ' + repr_str += f'center_ratio_range={self.center_ratio_range}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'seg_pad_val={self.pad_val})' + return repr_str + + +@TRANSFORMS.register_module() +class GenerateEdge(BaseTransform): + """Generate Edge for CE2P approach. + + Edge will be used to calculate loss of + `CE2P `_. + + Modified from https://github.com/liutinglt/CE2P/blob/master/dataset/target_generation.py # noqa:E501 + + Required Keys: + + - img_shape + - gt_seg_map + + Added Keys: + - gt_edge (np.ndarray, uint8): The edge annotation generated from the + seg map by extracting border between different semantics. + + Args: + edge_width (int): The width of edge. Default to 3. + ignore_index (int): Index that will be ignored. Default to 255. + """ + + def __init__(self, edge_width: int = 3, ignore_index: int = 255) -> None: + super().__init__() + self.edge_width = edge_width + self.ignore_index = ignore_index + + def transform(self, results: Dict) -> Dict: + """Call function to generate edge from segmentation map. + + Args: + results (dict): Result dict. + + Returns: + dict: Result dict with edge mask. 
+ """ + h, w = results['img_shape'] + edge = np.zeros((h, w), dtype=np.uint8) + seg_map = results['gt_seg_map'] + + # down + edge_down = edge[1:h, :] + edge_down[(seg_map[1:h, :] != seg_map[:h - 1, :]) + & (seg_map[1:h, :] != self.ignore_index) & + (seg_map[:h - 1, :] != self.ignore_index)] = 1 + # left + edge_left = edge[:, :w - 1] + edge_left[(seg_map[:, :w - 1] != seg_map[:, 1:w]) + & (seg_map[:, :w - 1] != self.ignore_index) & + (seg_map[:, 1:w] != self.ignore_index)] = 1 + # up_left + edge_upleft = edge[:h - 1, :w - 1] + edge_upleft[(seg_map[:h - 1, :w - 1] != seg_map[1:h, 1:w]) + & (seg_map[:h - 1, :w - 1] != self.ignore_index) & + (seg_map[1:h, 1:w] != self.ignore_index)] = 1 + # up_right + edge_upright = edge[:h - 1, 1:w] + edge_upright[(seg_map[:h - 1, 1:w] != seg_map[1:h, :w - 1]) + & (seg_map[:h - 1, 1:w] != self.ignore_index) & + (seg_map[1:h, :w - 1] != self.ignore_index)] = 1 + + kernel = cv2.getStructuringElement(cv2.MORPH_RECT, + (self.edge_width, self.edge_width)) + edge = cv2.dilate(edge, kernel) + + results['gt_edge'] = edge + results['edge_width'] = self.edge_width + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'edge_width={self.edge_width}, ' + repr_str += f'ignore_index={self.ignore_index})' + return repr_str + + +@TRANSFORMS.register_module() +class ResizeShortestEdge(BaseTransform): + """Resize the image and mask while keeping the aspect ratio unchanged. + + Modified from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/transforms/augmentation_impl.py#L130 # noqa:E501 + Copyright (c) Facebook, Inc. and its affiliates. + Licensed under the Apache-2.0 License + + This transform attempts to scale the shorter edge to the given + `scale`, as long as the longer edge does not exceed `max_size`. + If `max_size` is reached, then downscale so that the longer + edge does not exceed `max_size`. + + Required Keys: + + - img + - gt_seg_map (optional) + + Modified Keys: + + - img + - img_shape + - gt_seg_map (optional)) + + Added Keys: + + - scale + - scale_factor + - keep_ratio + + + Args: + scale (Union[int, Tuple[int, int]]): The target short edge length. + If it's tuple, will select the min value as the short edge length. + max_size (int): The maximum allowed longest edge length. + """ + + def __init__(self, scale: Union[int, Tuple[int, int]], + max_size: int) -> None: + super().__init__() + self.scale = scale + self.max_size = max_size + + # Create a empty Resize object + self.resize = TRANSFORMS.build({ + 'type': 'Resize', + 'scale': 0, + 'keep_ratio': True + }) + + def _get_output_shape(self, img, short_edge_length) -> Tuple[int, int]: + """Compute the target image shape with the given `short_edge_length`. + + Args: + img (np.ndarray): The input image. + short_edge_length (Union[int, Tuple[int, int]]): The target short + edge length. If it's tuple, will select the min value as the + short edge length. 
+ """ + h, w = img.shape[:2] + if isinstance(short_edge_length, int): + size = short_edge_length * 1.0 + elif isinstance(short_edge_length, tuple): + size = min(short_edge_length) * 1.0 + scale = size / min(h, w) + if h < w: + new_h, new_w = size, scale * w + else: + new_h, new_w = scale * h, size + + if max(new_h, new_w) > self.max_size: + scale = self.max_size * 1.0 / max(new_h, new_w) + new_h *= scale + new_w *= scale + + new_h = int(new_h + 0.5) + new_w = int(new_w + 0.5) + return (new_w, new_h) + + def transform(self, results: Dict) -> Dict: + self.resize.scale = self._get_output_shape(results['img'], self.scale) + return self.resize(results) diff --git a/mmseg/datasets/voc.py b/mmseg/datasets/voc.py index a8855203b1..66f2230788 100644 --- a/mmseg/datasets/voc.py +++ b/mmseg/datasets/voc.py @@ -1,29 +1,38 @@ +# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp -from .builder import DATASETS -from .custom import CustomDataset +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset @DATASETS.register_module() -class PascalVOCDataset(CustomDataset): +class PascalVOCDataset(BaseSegDataset): """Pascal VOC dataset. Args: split (str): Split txt file for Pascal VOC. """ + METAINFO = dict( + classes=('background', 'aeroplane', 'bicycle', 'bird', 'boat', + 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', + 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', + 'sofa', 'train', 'tvmonitor'), + palette=[[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], + [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128], + [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0], + [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128], + [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], + [0, 64, 128]]) - CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', - 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', - 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', - 'train', 'tvmonitor') - - PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], - [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], - [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], - [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], - [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] - - def __init__(self, split, **kwargs): - super(PascalVOCDataset, self).__init__( - img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) - assert osp.exists(self.img_dir) and self.split is not None + def __init__(self, + ann_file, + img_suffix='.jpg', + seg_map_suffix='.png', + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, + seg_map_suffix=seg_map_suffix, + ann_file=ann_file, + **kwargs) + assert self.file_client.exists( + self.data_prefix['img_path']) and osp.isfile(self.ann_file) diff --git a/mmseg/engine/__init__.py b/mmseg/engine/__init__.py new file mode 100644 index 0000000000..ada4057012 --- /dev/null +++ b/mmseg/engine/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .hooks import SegVisualizationHook +from .optimizers import (LayerDecayOptimizerConstructor, + LearningRateDecayOptimizerConstructor) + +__all__ = [ + 'LearningRateDecayOptimizerConstructor', 'LayerDecayOptimizerConstructor', + 'SegVisualizationHook' +] diff --git a/mmseg/engine/hooks/__init__.py b/mmseg/engine/hooks/__init__.py new file mode 100644 index 0000000000..c6048088a7 --- /dev/null +++ b/mmseg/engine/hooks/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .visualization_hook import SegVisualizationHook + +__all__ = ['SegVisualizationHook'] diff --git a/mmseg/engine/hooks/visualization_hook.py b/mmseg/engine/hooks/visualization_hook.py new file mode 100644 index 0000000000..5388a659a8 --- /dev/null +++ b/mmseg/engine/hooks/visualization_hook.py @@ -0,0 +1,100 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import warnings +from typing import Sequence + +import mmcv +from mmengine.fileio import FileClient +from mmengine.hooks import Hook +from mmengine.runner import Runner + +from mmseg.registry import HOOKS +from mmseg.structures import SegDataSample +from mmseg.visualization import SegLocalVisualizer + + +@HOOKS.register_module() +class SegVisualizationHook(Hook): + """Segmentation Visualization Hook. Used to visualize validation and + testing process prediction results. + + In the testing phase: + + 1. If ``show`` is True, it means that only the prediction results are + visualized without storing data, so ``vis_backends`` needs to + be excluded. + + Args: + draw (bool): whether to draw prediction results. If it is False, + it means that no drawing will be done. Defaults to False. + interval (int): The interval of visualization. Defaults to 50. + show (bool): Whether to display the drawn image. Default to False. + wait_time (float): The interval of show (s). Defaults to 0. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmengine.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__(self, + draw: bool = False, + interval: int = 50, + show: bool = False, + wait_time: float = 0., + file_client_args: dict = dict(backend='disk')): + self._visualizer: SegLocalVisualizer = \ + SegLocalVisualizer.get_current_instance() + self.interval = interval + self.show = show + if self.show: + # No need to think about vis backends. + self._visualizer._vis_backends = {} + warnings.warn('The show is True, it means that only ' + 'the prediction results are visualized ' + 'without storing data, so vis_backends ' + 'needs to be excluded.') + + self.wait_time = wait_time + self.file_client_args = file_client_args.copy() + self.file_client = None + self.draw = draw + if not self.draw: + warnings.warn('The draw is False, it means that the ' + 'hook for visualization will not take ' + 'effect. The results will NOT be ' + 'visualized or stored.') + + def _after_iter(self, + runner: Runner, + batch_idx: int, + data_batch: dict, + outputs: Sequence[SegDataSample], + mode: str = 'val') -> None: + """Run after every ``self.interval`` validation iterations. + + Args: + runner (:obj:`Runner`): The runner of the validation process. + batch_idx (int): The index of the current batch in the val loop. + data_batch (dict): Data from dataloader. + outputs (Sequence[:obj:`SegDataSample`]): Outputs from model. + mode (str): mode (str): Current mode of runner. Defaults to 'val'. 
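# A hedged usage sketch (not part of the patch): in an MMEngine-style config,
# the hook above is typically enabled through `default_hooks`; the values
# shown are illustrative.
default_hooks = dict(
    visualization=dict(
        type='SegVisualizationHook',
        draw=True,       # actually draw and store/show prediction results
        interval=50))    # visualize every 50 val/test iterations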
+ """ + if self.draw is False or mode == 'train': + return + + if self.file_client is None: + self.file_client = FileClient(**self.file_client_args) + + if self.every_n_inner_iters(batch_idx, self.interval): + for output in outputs: + img_path = output.img_path + img_bytes = self.file_client.get(img_path) + img = mmcv.imfrombytes(img_bytes, channel_order='rgb') + window_name = f'{mode}_{osp.basename(img_path)}' + + self._visualizer.add_datasample( + window_name, + img, + data_sample=output, + show=self.show, + wait_time=self.wait_time, + step=runner.iter) diff --git a/mmseg/engine/optimizers/__init__.py b/mmseg/engine/optimizers/__init__.py new file mode 100644 index 0000000000..4fbf4ecfcd --- /dev/null +++ b/mmseg/engine/optimizers/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .layer_decay_optimizer_constructor import ( + LayerDecayOptimizerConstructor, LearningRateDecayOptimizerConstructor) + +__all__ = [ + 'LearningRateDecayOptimizerConstructor', 'LayerDecayOptimizerConstructor' +] diff --git a/mmseg/engine/optimizers/layer_decay_optimizer_constructor.py b/mmseg/engine/optimizers/layer_decay_optimizer_constructor.py new file mode 100644 index 0000000000..fdae3ca698 --- /dev/null +++ b/mmseg/engine/optimizers/layer_decay_optimizer_constructor.py @@ -0,0 +1,207 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json +import warnings + +from mmengine.dist import get_dist_info +from mmengine.logging import print_log +from mmengine.optim import DefaultOptimWrapperConstructor + +from mmseg.registry import OPTIM_WRAPPER_CONSTRUCTORS + + +def get_layer_id_for_convnext(var_name, max_layer_id): + """Get the layer id to set the different learning rates in ``layer_wise`` + decay_type. + + Args: + var_name (str): The key of the model. + max_layer_id (int): Maximum number of backbone layers. + + Returns: + int: The id number corresponding to different learning rate in + ``LearningRateDecayOptimizerConstructor``. + """ + + if var_name in ('backbone.cls_token', 'backbone.mask_token', + 'backbone.pos_embed'): + return 0 + elif var_name.startswith('backbone.downsample_layers'): + stage_id = int(var_name.split('.')[2]) + if stage_id == 0: + layer_id = 0 + elif stage_id == 1: + layer_id = 2 + elif stage_id == 2: + layer_id = 3 + elif stage_id == 3: + layer_id = max_layer_id + return layer_id + elif var_name.startswith('backbone.stages'): + stage_id = int(var_name.split('.')[2]) + block_id = int(var_name.split('.')[3]) + if stage_id == 0: + layer_id = 1 + elif stage_id == 1: + layer_id = 2 + elif stage_id == 2: + layer_id = 3 + block_id // 3 + elif stage_id == 3: + layer_id = max_layer_id + return layer_id + else: + return max_layer_id + 1 + + +def get_stage_id_for_convnext(var_name, max_stage_id): + """Get the stage id to set the different learning rates in ``stage_wise`` + decay_type. + + Args: + var_name (str): The key of the model. + max_stage_id (int): Maximum number of backbone layers. + + Returns: + int: The id number corresponding to different learning rate in + ``LearningRateDecayOptimizerConstructor``. + """ + + if var_name in ('backbone.cls_token', 'backbone.mask_token', + 'backbone.pos_embed'): + return 0 + elif var_name.startswith('backbone.downsample_layers'): + return 0 + elif var_name.startswith('backbone.stages'): + stage_id = int(var_name.split('.')[2]) + return stage_id + 1 + else: + return max_stage_id - 1 + + +def get_layer_id_for_vit(var_name, max_layer_id): + """Get the layer id to set the different learning rates. 
+ + Args: + var_name (str): The key of the model. + num_max_layer (int): Maximum number of backbone layers. + + Returns: + int: Returns the layer id of the key. + """ + + if var_name in ('backbone.cls_token', 'backbone.mask_token', + 'backbone.pos_embed'): + return 0 + elif var_name.startswith('backbone.patch_embed'): + return 0 + elif var_name.startswith('backbone.layers'): + layer_id = int(var_name.split('.')[2]) + return layer_id + 1 + else: + return max_layer_id - 1 + + +@OPTIM_WRAPPER_CONSTRUCTORS.register_module() +class LearningRateDecayOptimizerConstructor(DefaultOptimWrapperConstructor): + """Different learning rates are set for different layers of backbone. + + Note: Currently, this optimizer constructor is built for ConvNeXt, + BEiT and MAE. + """ + + def add_params(self, params, module, **kwargs): + """Add all parameters of module to the params list. + + The parameters of the given module will be added to the list of param + groups, with specific rules defined by paramwise_cfg. + + Args: + params (list[dict]): A list of param groups, it will be modified + in place. + module (nn.Module): The module to be added. + """ + + parameter_groups = {} + print_log(f'self.paramwise_cfg is {self.paramwise_cfg}') + num_layers = self.paramwise_cfg.get('num_layers') + 2 + decay_rate = self.paramwise_cfg.get('decay_rate') + decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise') + print_log('Build LearningRateDecayOptimizerConstructor ' + f'{decay_type} {decay_rate} - {num_layers}') + weight_decay = self.base_wd + for name, param in module.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith('.bias') or name in ( + 'pos_embed', 'cls_token'): + group_name = 'no_decay' + this_weight_decay = 0. 
+ else: + group_name = 'decay' + this_weight_decay = weight_decay + if 'layer_wise' in decay_type: + if 'ConvNeXt' in module.backbone.__class__.__name__: + layer_id = get_layer_id_for_convnext( + name, self.paramwise_cfg.get('num_layers')) + print_log(f'set param {name} as id {layer_id}') + elif 'BEiT' in module.backbone.__class__.__name__ or \ + 'MAE' in module.backbone.__class__.__name__: + layer_id = get_layer_id_for_vit(name, num_layers) + print_log(f'set param {name} as id {layer_id}') + else: + raise NotImplementedError() + elif decay_type == 'stage_wise': + if 'ConvNeXt' in module.backbone.__class__.__name__: + layer_id = get_stage_id_for_convnext(name, num_layers) + print_log(f'set param {name} as id {layer_id}') + else: + raise NotImplementedError() + group_name = f'layer_{layer_id}_{group_name}' + + if group_name not in parameter_groups: + scale = decay_rate**(num_layers - layer_id - 1) + + parameter_groups[group_name] = { + 'weight_decay': this_weight_decay, + 'params': [], + 'param_names': [], + 'lr_scale': scale, + 'group_name': group_name, + 'lr': scale * self.base_lr, + } + + parameter_groups[group_name]['params'].append(param) + parameter_groups[group_name]['param_names'].append(name) + rank, _ = get_dist_info() + if rank == 0: + to_display = {} + for key in parameter_groups: + to_display[key] = { + 'param_names': parameter_groups[key]['param_names'], + 'lr_scale': parameter_groups[key]['lr_scale'], + 'lr': parameter_groups[key]['lr'], + 'weight_decay': parameter_groups[key]['weight_decay'], + } + print_log(f'Param groups = {json.dumps(to_display, indent=2)}') + params.extend(parameter_groups.values()) + + +@OPTIM_WRAPPER_CONSTRUCTORS.register_module() +class LayerDecayOptimizerConstructor(LearningRateDecayOptimizerConstructor): + """Different learning rates are set for different layers of backbone. + + Note: Currently, this optimizer constructor is built for BEiT, + and it will be deprecated. + Please use ``LearningRateDecayOptimizerConstructor`` instead. + """ + + def __init__(self, optim_wrapper_cfg, paramwise_cfg): + warnings.warn('DeprecationWarning: Original ' + 'LayerDecayOptimizerConstructor of BEiT ' + 'will be deprecated. Please use ' + 'LearningRateDecayOptimizerConstructor instead, ' + 'and set decay_type = layer_wise_vit in paramwise_cfg.') + paramwise_cfg.update({'decay_type': 'layer_wise_vit'}) + warnings.warn('DeprecationWarning: Layer_decay_rate will ' + 'be deleted, please use decay_rate instead.') + paramwise_cfg['decay_rate'] = paramwise_cfg.pop('layer_decay_rate') + super().__init__(optim_wrapper_cfg, paramwise_cfg) diff --git a/mmseg/evaluation/__init__.py b/mmseg/evaluation/__init__.py new file mode 100644 index 0000000000..c28bb75cb4 --- /dev/null +++ b/mmseg/evaluation/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .metrics import CitysMetric, IoUMetric + +__all__ = ['IoUMetric', 'CitysMetric'] diff --git a/mmseg/evaluation/metrics/__init__.py b/mmseg/evaluation/metrics/__init__.py new file mode 100644 index 0000000000..aec08bb071 --- /dev/null +++ b/mmseg/evaluation/metrics/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .citys_metric import CitysMetric +from .iou_metric import IoUMetric + +__all__ = ['IoUMetric', 'CitysMetric'] diff --git a/mmseg/evaluation/metrics/citys_metric.py b/mmseg/evaluation/metrics/citys_metric.py new file mode 100644 index 0000000000..50e9ea68a0 --- /dev/null +++ b/mmseg/evaluation/metrics/citys_metric.py @@ -0,0 +1,141 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +import os.path as osp +from typing import Dict, List, Optional, Sequence + +import numpy as np +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger, print_log +from mmengine.utils import mkdir_or_exist, scandir +from PIL import Image + +from mmseg.registry import METRICS + + +@METRICS.register_module() +class CitysMetric(BaseMetric): + """Cityscapes evaluation metric. + + Args: + ignore_index (int): Index that will be ignored in evaluation. + Default: 255. + citys_metrics (list[str] | str): Metrics to be evaluated, + Default: ['cityscapes']. + to_label_id (bool): whether convert output to label_id for + submission. Default: True. + suffix (str): The filename prefix of the png files. + If the prefix is "somepath/xxx", the png files will be + named "somepath/xxx.png". Default: '.format_cityscapes'. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Defaults to None. + """ + + def __init__(self, + ignore_index: int = 255, + citys_metrics: List[str] = ['cityscapes'], + to_label_id: bool = True, + suffix: str = '.format_cityscapes', + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + + self.ignore_index = ignore_index + self.metrics = citys_metrics + assert self.metrics[0] == 'cityscapes' + self.to_label_id = to_label_id + self.suffix = suffix + + def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: + """Process one batch of data and data_samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch (dict): A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + mkdir_or_exist(self.suffix) + + for data_sample in data_samples: + pred_label = data_sample['pred_sem_seg']['data'][0].cpu().numpy() + # results2img + if self.to_label_id: + pred_label = self._convert_to_label_id(pred_label) + basename = osp.splitext(osp.basename(data_sample['img_path']))[0] + png_filename = osp.join(self.suffix, f'{basename}.png') + output = Image.fromarray(pred_label.astype(np.uint8)).convert('P') + import cityscapesscripts.helpers.labels as CSLabels + palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8) + for label_id, label in CSLabels.id2label.items(): + palette[label_id] = label.color + output.putpalette(palette) + output.save(png_filename) + + ann_dir = osp.join(data_samples[0]['seg_map_path'].split('val')[0], + 'val') + self.results.append(ann_dir) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): Testing results of the dataset. + + Returns: + dict[str: float]: Cityscapes evaluation results. 
+ """ + logger: MMLogger = MMLogger.get_current_instance() + try: + import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval # noqa + except ImportError: + raise ImportError('Please run "pip install cityscapesscripts" to ' + 'install cityscapesscripts first.') + msg = 'Evaluating in Cityscapes style' + + if logger is None: + msg = '\n' + msg + print_log(msg, logger=logger) + + result_dir = self.suffix + + eval_results = dict() + print_log(f'Evaluating results under {result_dir} ...', logger=logger) + + CSEval.args.evalInstLevelScore = True + CSEval.args.predictionPath = osp.abspath(result_dir) + CSEval.args.evalPixelAccuracy = True + CSEval.args.JSONOutput = False + + seg_map_list = [] + pred_list = [] + ann_dir = results[0] + # when evaluating with official cityscapesscripts, + # **_gtFine_labelIds.png is used + for seg_map in scandir(ann_dir, 'gtFine_labelIds.png', recursive=True): + seg_map_list.append(osp.join(ann_dir, seg_map)) + pred_list.append(CSEval.getPrediction(CSEval.args, seg_map)) + metric = dict() + eval_results.update( + CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args)) + metric['averageScoreCategories'] = eval_results[ + 'averageScoreCategories'] + metric['averageScoreInstCategories'] = eval_results[ + 'averageScoreInstCategories'] + return metric + + @staticmethod + def _convert_to_label_id(result): + """Convert trainId to id for cityscapes.""" + if isinstance(result, str): + result = np.load(result) + import cityscapesscripts.helpers.labels as CSLabels + result_copy = result.copy() + for trainId, label in CSLabels.trainId2label.items(): + result_copy[result == trainId] = label.id + + return result_copy diff --git a/mmseg/evaluation/metrics/iou_metric.py b/mmseg/evaluation/metrics/iou_metric.py new file mode 100644 index 0000000000..a152ef9dd6 --- /dev/null +++ b/mmseg/evaluation/metrics/iou_metric.py @@ -0,0 +1,250 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict +from typing import Dict, List, Optional, Sequence + +import numpy as np +import torch +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger, print_log +from prettytable import PrettyTable + +from mmseg.registry import METRICS + + +@METRICS.register_module() +class IoUMetric(BaseMetric): + """IoU evaluation metric. + + Args: + ignore_index (int): Index that will be ignored in evaluation. + Default: 255. + iou_metrics (list[str] | str): Metrics to be calculated, the options + includes 'mIoU', 'mDice' and 'mFscore'. + nan_to_num (int, optional): If specified, NaN values will be replaced + by the numbers defined by the user. Default: None. + beta (int): Determines the weight of recall in the combined score. + Default: 1. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Defaults to None. 
+ """ + + def __init__(self, + ignore_index: int = 255, + iou_metrics: List[str] = ['mIoU'], + nan_to_num: Optional[int] = None, + beta: int = 1, + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + + self.ignore_index = ignore_index + self.metrics = iou_metrics + self.nan_to_num = nan_to_num + self.beta = beta + + def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: + """Process one batch of data and data_samples. + + The processed results should be stored in ``self.results``, which will + be used to compute the metrics when all batches have been processed. + + Args: + data_batch (dict): A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + num_classes = len(self.dataset_meta['classes']) + for data_sample in data_samples: + pred_label = data_sample['pred_sem_seg']['data'].squeeze() + label = data_sample['gt_sem_seg']['data'].squeeze().to(pred_label) + self.results.append( + self.intersect_and_union(pred_label, label, num_classes, + self.ignore_index)) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. The key + mainly includes aAcc, mIoU, mAcc, mDice, mFscore, mPrecision, + mRecall. + """ + logger: MMLogger = MMLogger.get_current_instance() + + # convert list of tuples to tuple of lists, e.g. + # [(A_1, B_1, C_1, D_1), ..., (A_n, B_n, C_n, D_n)] to + # ([A_1, ..., A_n], ..., [D_1, ..., D_n]) + results = tuple(zip(*results)) + assert len(results) == 4 + + total_area_intersect = sum(results[0]) + total_area_union = sum(results[1]) + total_area_pred_label = sum(results[2]) + total_area_label = sum(results[3]) + ret_metrics = self.total_area_to_metrics( + total_area_intersect, total_area_union, total_area_pred_label, + total_area_label, self.metrics, self.nan_to_num, self.beta) + + class_names = self.dataset_meta['classes'] + + # summary table + ret_metrics_summary = OrderedDict({ + ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2) + for ret_metric, ret_metric_value in ret_metrics.items() + }) + metrics = dict() + for key, val in ret_metrics_summary.items(): + if key == 'aAcc': + metrics[key] = val + else: + metrics['m' + key] = val + + # each class table + ret_metrics.pop('aAcc', None) + ret_metrics_class = OrderedDict({ + ret_metric: np.round(ret_metric_value * 100, 2) + for ret_metric, ret_metric_value in ret_metrics.items() + }) + ret_metrics_class.update({'Class': class_names}) + ret_metrics_class.move_to_end('Class', last=False) + class_table_data = PrettyTable() + for key, val in ret_metrics_class.items(): + class_table_data.add_column(key, val) + + print_log('per class results:', logger) + print_log('\n' + class_table_data.get_string(), logger=logger) + + return metrics + + @staticmethod + def intersect_and_union(pred_label: torch.tensor, label: torch.tensor, + num_classes: int, ignore_index: int): + """Calculate Intersection and Union. + + Args: + pred_label (torch.tensor): Prediction segmentation map + or predict result filename. The shape is (H, W). + label (torch.tensor): Ground truth segmentation map + or label filename. The shape is (H, W). + num_classes (int): Number of categories. 
+ ignore_index (int): Index that will be ignored in evaluation. + + Returns: + torch.Tensor: The intersection of prediction and ground truth + histogram on all classes. + torch.Tensor: The union of prediction and ground truth histogram on + all classes. + torch.Tensor: The prediction histogram on all classes. + torch.Tensor: The ground truth histogram on all classes. + """ + + mask = (label != ignore_index) + pred_label = pred_label[mask] + label = label[mask] + + intersect = pred_label[pred_label == label] + area_intersect = torch.histc( + intersect.float(), bins=(num_classes), min=0, + max=num_classes - 1).cpu() + area_pred_label = torch.histc( + pred_label.float(), bins=(num_classes), min=0, + max=num_classes - 1).cpu() + area_label = torch.histc( + label.float(), bins=(num_classes), min=0, + max=num_classes - 1).cpu() + area_union = area_pred_label + area_label - area_intersect + return area_intersect, area_union, area_pred_label, area_label + + @staticmethod + def total_area_to_metrics(total_area_intersect: np.ndarray, + total_area_union: np.ndarray, + total_area_pred_label: np.ndarray, + total_area_label: np.ndarray, + metrics: List[str] = ['mIoU'], + nan_to_num: Optional[int] = None, + beta: int = 1): + """Calculate evaluation metrics + Args: + total_area_intersect (np.ndarray): The intersection of prediction + and ground truth histogram on all classes. + total_area_union (np.ndarray): The union of prediction and ground + truth histogram on all classes. + total_area_pred_label (np.ndarray): The prediction histogram on + all classes. + total_area_label (np.ndarray): The ground truth histogram on + all classes. + metrics (List[str] | str): Metrics to be evaluated, 'mIoU' and + 'mDice'. + nan_to_num (int, optional): If specified, NaN values will be + replaced by the numbers defined by the user. Default: None. + beta (int): Determines the weight of recall in the combined score. + Default: 1. + Returns: + Dict[str, np.ndarray]: per category evaluation metrics, + shape (num_classes, ). + """ + + def f_score(precision, recall, beta=1): + """calculate the f-score value. + + Args: + precision (float | torch.Tensor): The precision value. + recall (float | torch.Tensor): The recall value. + beta (int): Determines the weight of recall in the combined + score. Default: 1. + + Returns: + [torch.tensor]: The f-score value. 
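# A tiny numeric check (not part of the patch) of `intersect_and_union` above,
# on a 2x2 prediction/label pair with two classes and nothing ignored.
import torch
from mmseg.evaluation.metrics.iou_metric import IoUMetric  # path assumed

pred = torch.tensor([[0, 1], [1, 1]])
label = torch.tensor([[0, 1], [0, 1]])
inter, union, pred_hist, label_hist = IoUMetric.intersect_and_union(
    pred, label, num_classes=2, ignore_index=255)
assert inter.tolist() == [1., 2.]        # one correct class-0 pixel, two class-1
assert union.tolist() == [2., 3.]
assert pred_hist.tolist() == [1., 3.]
assert label_hist.tolist() == [2., 2.]
# per-class IoU would therefore be [0.5, 2/3]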
+ """ + score = (1 + beta**2) * (precision * recall) / ( + (beta**2 * precision) + recall) + return score + + if isinstance(metrics, str): + metrics = [metrics] + allowed_metrics = ['mIoU', 'mDice', 'mFscore'] + if not set(metrics).issubset(set(allowed_metrics)): + raise KeyError(f'metrics {metrics} is not supported') + + all_acc = total_area_intersect.sum() / total_area_label.sum() + ret_metrics = OrderedDict({'aAcc': all_acc}) + for metric in metrics: + if metric == 'mIoU': + iou = total_area_intersect / total_area_union + acc = total_area_intersect / total_area_label + ret_metrics['IoU'] = iou + ret_metrics['Acc'] = acc + elif metric == 'mDice': + dice = 2 * total_area_intersect / ( + total_area_pred_label + total_area_label) + acc = total_area_intersect / total_area_label + ret_metrics['Dice'] = dice + ret_metrics['Acc'] = acc + elif metric == 'mFscore': + precision = total_area_intersect / total_area_pred_label + recall = total_area_intersect / total_area_label + f_value = torch.tensor([ + f_score(x[0], x[1], beta) for x in zip(precision, recall) + ]) + ret_metrics['Fscore'] = f_value + ret_metrics['Precision'] = precision + ret_metrics['Recall'] = recall + + ret_metrics = { + metric: value.numpy() + for metric, value in ret_metrics.items() + } + if nan_to_num is not None: + ret_metrics = OrderedDict({ + metric: np.nan_to_num(metric_value, nan=nan_to_num) + for metric, metric_value in ret_metrics.items() + }) + return ret_metrics diff --git a/mmseg/models/__init__.py b/mmseg/models/__init__.py index d492a2324f..7a520fb2fa 100644 --- a/mmseg/models/__init__.py +++ b/mmseg/models/__init__.py @@ -1,11 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. from .backbones import * # noqa: F401,F403 from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone, build_head, build_loss, build_segmentor) +from .data_preprocessor import SegDataPreProcessor from .decode_heads import * # noqa: F401,F403 from .losses import * # noqa: F401,F403 +from .necks import * # noqa: F401,F403 from .segmentors import * # noqa: F401,F403 __all__ = [ 'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone', - 'build_head', 'build_loss', 'build_segmentor' + 'build_head', 'build_loss', 'build_segmentor', 'SegDataPreProcessor' ] diff --git a/mmseg/models/backbones/__init__.py b/mmseg/models/backbones/__init__.py index 367b398ce8..bda42bb692 100644 --- a/mmseg/models/backbones/__init__.py +++ b/mmseg/models/backbones/__init__.py @@ -1,5 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .beit import BEiT +from .bisenetv1 import BiSeNetV1 +from .bisenetv2 import BiSeNetV2 +from .cgnet import CGNet +from .erfnet import ERFNet +from .fast_scnn import FastSCNN from .hrnet import HRNet +from .icnet import ICNet +from .mae import MAE +from .mit import MixVisionTransformer +from .mobilenet_v2 import MobileNetV2 +from .mobilenet_v3 import MobileNetV3 +from .resnest import ResNeSt from .resnet import ResNet, ResNetV1c, ResNetV1d from .resnext import ResNeXt +from .stdc import STDCContextPathNet, STDCNet +from .swin import SwinTransformer +from .timm_backbone import TIMMBackbone +from .twins import PCPVT, SVT +from .unet import UNet +from .vit import VisionTransformer -__all__ = ['ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet'] +__all__ = [ + 'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet', 'FastSCNN', + 'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3', + 'VisionTransformer', 'SwinTransformer', 'MixVisionTransformer', + 'BiSeNetV1', 'BiSeNetV2', 'ICNet', 'TIMMBackbone', 'ERFNet', 'PCPVT', + 'SVT', 'STDCNet', 'STDCContextPathNet', 'BEiT', 'MAE' +] diff --git a/mmseg/models/backbones/beit.py b/mmseg/models/backbones/beit.py new file mode 100644 index 0000000000..e5da71e729 --- /dev/null +++ b/mmseg/models/backbones/beit.py @@ -0,0 +1,554 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.drop import build_dropout +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import (constant_init, kaiming_init, + trunc_normal_) +from mmengine.runner.checkpoint import _load_checkpoint +from scipy import interpolate +from torch.nn.modules.batchnorm import _BatchNorm +from torch.nn.modules.utils import _pair as to_2tuple + +from mmseg.registry import MODELS +from ..utils import PatchEmbed +from .vit import TransformerEncoderLayer as VisionTransformerEncoderLayer + + +class BEiTAttention(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int]): The height and width of the window. + bias (bool): The option to add leanable bias for q, k, v. If bias is + True, it will add leanable bias. If bias is 'qv_bias', it will only + add leanable bias for q, v. If bias is False, it will not add bias + for q, k, v. Default to 'qv_bias'. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float): Dropout ratio of output. Default: 0. + init_cfg (dict | None, optional): The Config for initialization. + Default: None. 
+ """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + bias='qv_bias', + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + init_cfg=None, + **kwargs): + super().__init__(init_cfg=init_cfg) + self.embed_dims = embed_dims + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.bias = bias + self.scale = qk_scale or head_embed_dims**-0.5 + + qkv_bias = bias + if bias == 'qv_bias': + self._init_qv_bias() + qkv_bias = False + + self.window_size = window_size + self._init_rel_pos_embedding() + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + + def _init_qv_bias(self): + self.q_bias = nn.Parameter(torch.zeros(self.embed_dims)) + self.v_bias = nn.Parameter(torch.zeros(self.embed_dims)) + + def _init_rel_pos_embedding(self): + Wh, Ww = self.window_size + # cls to token & token 2 cls & cls to cls + self.num_relative_distance = (2 * Wh - 1) * (2 * Ww - 1) + 3 + # relative_position_bias_table shape is (2*Wh-1 * 2*Ww-1 + 3, nH) + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, self.num_heads)) + + # get pair-wise relative position index for + # each token inside the window + coords_h = torch.arange(Wh) + coords_w = torch.arange(Ww) + # coords shape is (2, Wh, Ww) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) + # coords_flatten shape is (2, Wh*Ww) + coords_flatten = torch.flatten(coords, 1) + relative_coords = ( + coords_flatten[:, :, None] - coords_flatten[:, None, :]) + # relative_coords shape is (Wh*Ww, Wh*Ww, 2) + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + # shift to start from 0 + relative_coords[:, :, 0] += Wh - 1 + relative_coords[:, :, 1] += Ww - 1 + relative_coords[:, :, 0] *= 2 * Ww - 1 + relative_position_index = torch.zeros( + size=(Wh * Ww + 1, ) * 2, dtype=relative_coords.dtype) + # relative_position_index shape is (Wh*Ww, Wh*Ww) + relative_position_index[1:, 1:] = relative_coords.sum(-1) + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer('relative_position_index', + relative_position_index) + + def init_weights(self): + trunc_normal_(self.relative_position_bias_table, std=0.02) + + def forward(self, x): + """ + Args: + x (tensor): input features with shape of (num_windows*B, N, C). 
+ """ + B, N, C = x.shape + + if self.bias == 'qv_bias': + k_bias = torch.zeros_like(self.v_bias, requires_grad=False) + qkv_bias = torch.cat((self.q_bias, k_bias, self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + else: + qkv = self.qkv(x) + + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + if self.relative_position_bias_table is not None: + Wh = self.window_size[0] + Ww = self.window_size[1] + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + Wh * Ww + 1, Wh * Ww + 1, -1) + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class BEiTTransformerEncoderLayer(VisionTransformerEncoderLayer): + """Implements one encoder layer in Vision Transformer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + attn_drop_rate (float): The drop out rate for attention layer. + Default: 0.0. + drop_path_rate (float): Stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + bias (bool): The option to add leanable bias for q, k, v. If bias is + True, it will add leanable bias. If bias is 'qv_bias', it will only + add leanable bias for q, v. If bias is False, it will not add bias + for q, k, v. Default to 'qv_bias'. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + window_size (tuple[int], optional): The height and width of the window. + Default: None. + init_values (float, optional): Initialize the values of BEiTAttention + and FFN with learnable scaling. Default: None. 
+ """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + bias='qv_bias', + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + window_size=None, + attn_cfg=dict(), + ffn_cfg=dict(add_identity=False), + init_values=None): + attn_cfg.update(dict(window_size=window_size, qk_scale=None)) + + super().__init__( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=feedforward_channels, + attn_drop_rate=attn_drop_rate, + drop_path_rate=0., + drop_rate=0., + num_fcs=num_fcs, + qkv_bias=bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + attn_cfg=attn_cfg, + ffn_cfg=ffn_cfg) + + # NOTE: drop path for stochastic depth, we shall see if + # this is better than dropout here + dropout_layer = dict(type='DropPath', drop_prob=drop_path_rate) + self.drop_path = build_dropout( + dropout_layer) if dropout_layer else nn.Identity() + self.gamma_1 = nn.Parameter( + init_values * torch.ones(embed_dims), requires_grad=True) + self.gamma_2 = nn.Parameter( + init_values * torch.ones(embed_dims), requires_grad=True) + + def build_attn(self, attn_cfg): + self.attn = BEiTAttention(**attn_cfg) + + def forward(self, x): + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.ffn(self.norm2(x))) + return x + + +@MODELS.register_module() +class BEiT(BaseModule): + """BERT Pre-Training of Image Transformers. + + Args: + img_size (int | tuple): Input image size. Default: 224. + patch_size (int): The patch size. Default: 16. + in_channels (int): Number of input channels. Default: 3. + embed_dims (int): Embedding dimension. Default: 768. + num_layers (int): Depth of transformer. Default: 12. + num_heads (int): Number of attention heads. Default: 12. + mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. + Default: 4. + out_indices (list | tuple | int): Output from which stages. + Default: -1. + qv_bias (bool): Enable bias for qv if True. Default: True. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0 + drop_path_rate (float): Stochastic depth rate. Default 0.0. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + patch_norm (bool): Whether to add a norm in PatchEmbed Block. + Default: False. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Default: False. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + pretrained (str, optional): Model pretrained path. Default: None. + init_values (float): Initialize the values of BEiTAttention and FFN + with learnable scaling. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + img_size=224, + patch_size=16, + in_channels=3, + embed_dims=768, + num_layers=12, + num_heads=12, + mlp_ratio=4, + out_indices=-1, + qv_bias=True, + attn_drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN'), + act_cfg=dict(type='GELU'), + patch_norm=False, + final_norm=False, + num_fcs=2, + norm_eval=False, + pretrained=None, + init_values=0.1, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + if isinstance(img_size, int): + img_size = to_2tuple(img_size) + elif isinstance(img_size, tuple): + if len(img_size) == 1: + img_size = to_2tuple(img_size[0]) + assert len(img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(img_size)}' + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be set at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is not None: + raise TypeError('pretrained must be a str or None') + + self.in_channels = in_channels + self.img_size = img_size + self.patch_size = patch_size + self.norm_eval = norm_eval + self.pretrained = pretrained + self.num_layers = num_layers + self.embed_dims = embed_dims + self.num_heads = num_heads + self.mlp_ratio = mlp_ratio + self.attn_drop_rate = attn_drop_rate + self.drop_path_rate = drop_path_rate + self.num_fcs = num_fcs + self.qv_bias = qv_bias + self.act_cfg = act_cfg + self.norm_cfg = norm_cfg + self.patch_norm = patch_norm + self.init_values = init_values + self.window_size = (img_size[0] // patch_size, + img_size[1] // patch_size) + self.patch_shape = self.window_size + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims)) + + self._build_patch_embedding() + self._build_layers() + + if isinstance(out_indices, int): + if out_indices == -1: + out_indices = num_layers - 1 + self.out_indices = [out_indices] + elif isinstance(out_indices, list) or isinstance(out_indices, tuple): + self.out_indices = out_indices + else: + raise TypeError('out_indices must be type of int, list or tuple') + + self.final_norm = final_norm + if final_norm: + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + def _build_patch_embedding(self): + """Build patch embedding layer.""" + self.patch_embed = PatchEmbed( + in_channels=self.in_channels, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=self.patch_size, + stride=self.patch_size, + padding=0, + norm_cfg=self.norm_cfg if self.patch_norm else None, + init_cfg=None) + + def _build_layers(self): + """Build transformer encoding layers.""" + + dpr = [ + x.item() + for x in torch.linspace(0, self.drop_path_rate, self.num_layers) + ] + self.layers = ModuleList() + for i in range(self.num_layers): + self.layers.append( + BEiTTransformerEncoderLayer( + embed_dims=self.embed_dims, + num_heads=self.num_heads, + feedforward_channels=self.mlp_ratio * self.embed_dims, + attn_drop_rate=self.attn_drop_rate, + drop_path_rate=dpr[i], + num_fcs=self.num_fcs, + bias='qv_bias' if self.qv_bias else False, + act_cfg=self.act_cfg, + norm_cfg=self.norm_cfg, + window_size=self.window_size, + init_values=self.init_values)) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def _geometric_sequence_interpolation(self, src_size, dst_size, sequence, + num): + """Get new sequence via geometric sequence interpolation. 
+ + Args: + src_size (int): Pos_embedding size in pre-trained model. + dst_size (int): Pos_embedding size in the current model. + sequence (tensor): The relative position bias of the pretrain + model after removing the extra tokens. + num (int): Number of attention heads. + Returns: + new_sequence (tensor): Geometric sequence interpolate the + pre-trained relative position bias to the size of + the current model. + """ + + def geometric_progression(a, r, n): + return a * (1.0 - r**n) / (1.0 - r) + + # Here is a binary function. + left, right = 1.01, 1.5 + while right - left > 1e-6: + q = (left + right) / 2.0 + gp = geometric_progression(1, q, src_size // 2) + if gp > dst_size // 2: + right = q + else: + left = q + # The position of each interpolated point is determined + # by the ratio obtained by dichotomy. + dis = [] + cur = 1 + for i in range(src_size // 2): + dis.append(cur) + cur += q**(i + 1) + r_ids = [-_ for _ in reversed(dis)] + x = r_ids + [0] + dis + y = r_ids + [0] + dis + t = dst_size // 2.0 + dx = np.arange(-t, t + 0.1, 1.0) + dy = np.arange(-t, t + 0.1, 1.0) + # Interpolation functions are being executed and called. + new_sequence = [] + for i in range(num): + z = sequence[:, i].view(src_size, src_size).float().numpy() + f = interpolate.interp2d(x, y, z, kind='cubic') + new_sequence.append( + torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(sequence)) + new_sequence = torch.cat(new_sequence, dim=-1) + return new_sequence + + def resize_rel_pos_embed(self, checkpoint): + """Resize relative pos_embed weights. + + This function is modified from + https://github.com/microsoft/unilm/blob/master/beit/semantic_segmentation/mmcv_custom/checkpoint.py. # noqa: E501 + Copyright (c) Microsoft Corporation + Licensed under the MIT License + Args: + checkpoint (dict): Key and value of the pretrain model. + Returns: + state_dict (dict): Interpolate the relative pos_embed weights + in the pre-train model to the current model size. + """ + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + all_keys = list(state_dict.keys()) + for key in all_keys: + if 'relative_position_index' in key: + state_dict.pop(key) + # In order to keep the center of pos_bias as consistent as + # possible after interpolation, and vice versa in the edge + # area, the geometric sequence interpolation method is adopted. + if 'relative_position_bias_table' in key: + rel_pos_bias = state_dict[key] + src_num_pos, num_attn_heads = rel_pos_bias.size() + dst_num_pos, _ = self.state_dict()[key].size() + dst_patch_shape = self.patch_shape + if dst_patch_shape[0] != dst_patch_shape[1]: + raise NotImplementedError() + # Count the number of extra tokens. 
+ num_extra_tokens = dst_num_pos - ( + dst_patch_shape[0] * 2 - 1) * ( + dst_patch_shape[1] * 2 - 1) + src_size = int((src_num_pos - num_extra_tokens)**0.5) + dst_size = int((dst_num_pos - num_extra_tokens)**0.5) + if src_size != dst_size: + extra_tokens = rel_pos_bias[-num_extra_tokens:, :] + rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] + new_rel_pos_bias = self._geometric_sequence_interpolation( + src_size, dst_size, rel_pos_bias, num_attn_heads) + new_rel_pos_bias = torch.cat( + (new_rel_pos_bias, extra_tokens), dim=0) + state_dict[key] = new_rel_pos_bias + + return state_dict + + def init_weights(self): + + def _init_weights(m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + self.apply(_init_weights) + + if (isinstance(self.init_cfg, dict) + and self.init_cfg.get('type') == 'Pretrained'): + checkpoint = _load_checkpoint( + self.init_cfg['checkpoint'], logger=None, map_location='cpu') + state_dict = self.resize_rel_pos_embed(checkpoint) + self.load_state_dict(state_dict, False) + elif self.init_cfg is not None: + super().init_weights() + else: + # We only implement the 'jax_impl' initialization implemented at + # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353 # noqa: E501 + # Copyright 2019 Ross Wightman + # Licensed under the Apache License, Version 2.0 (the "License") + trunc_normal_(self.cls_token, std=.02) + for n, m in self.named_modules(): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + if 'ffn' in n: + nn.init.normal_(m.bias, mean=0., std=1e-6) + else: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv2d): + kaiming_init(m, mode='fan_in', bias=0.) + elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)): + constant_init(m, val=1.0, bias=0.) + + def forward(self, inputs): + B = inputs.shape[0] + + x, hw_shape = self.patch_embed(inputs) + + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i == len(self.layers) - 1: + if self.final_norm: + x = self.norm1(x) + if i in self.out_indices: + # Remove class token and reshape token for decoder head + out = x[:, 1:] + B, _, C = out.shape + out = out.reshape(B, hw_shape[0], hw_shape[1], + C).permute(0, 3, 1, 2).contiguous() + outs.append(out) + + return tuple(outs) + + def train(self, mode=True): + super().train(mode) + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.LayerNorm): + m.eval() diff --git a/mmseg/models/backbones/bisenetv1.py b/mmseg/models/backbones/bisenetv1.py new file mode 100644 index 0000000000..ca58bf9c59 --- /dev/null +++ b/mmseg/models/backbones/bisenetv1.py @@ -0,0 +1,332 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmseg.registry import MODELS +from ..utils import resize + + +class SpatialPath(BaseModule): + """Spatial Path to preserve the spatial size of the original input image + and encode affluent spatial information. + + Args: + in_channels(int): The number of channels of input + image. Default: 3. 
+ num_channels (Tuple[int]): The number of channels of + each layers in Spatial Path. + Default: (64, 64, 64, 128). + Returns: + x (torch.Tensor): Feature map for Feature Fusion Module. + """ + + def __init__(self, + in_channels=3, + num_channels=(64, 64, 64, 128), + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + assert len(num_channels) == 4, 'Length of input channels \ + of Spatial Path must be 4!' + + self.layers = [] + for i in range(len(num_channels)): + layer_name = f'layer{i + 1}' + self.layers.append(layer_name) + if i == 0: + self.add_module( + layer_name, + ConvModule( + in_channels=in_channels, + out_channels=num_channels[i], + kernel_size=7, + stride=2, + padding=3, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + elif i == len(num_channels) - 1: + self.add_module( + layer_name, + ConvModule( + in_channels=num_channels[i - 1], + out_channels=num_channels[i], + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + else: + self.add_module( + layer_name, + ConvModule( + in_channels=num_channels[i - 1], + out_channels=num_channels[i], + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, x): + for i, layer_name in enumerate(self.layers): + layer_stage = getattr(self, layer_name) + x = layer_stage(x) + return x + + +class AttentionRefinementModule(BaseModule): + """Attention Refinement Module (ARM) to refine the features of each stage. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + Returns: + x_out (torch.Tensor): Feature map of Attention Refinement Module. + """ + + def __init__(self, + in_channels, + out_channel, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.conv_layer = ConvModule( + in_channels=in_channels, + out_channels=out_channel, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.atten_conv_layer = nn.Sequential( + nn.AdaptiveAvgPool2d((1, 1)), + ConvModule( + in_channels=out_channel, + out_channels=out_channel, + kernel_size=1, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), nn.Sigmoid()) + + def forward(self, x): + x = self.conv_layer(x) + x_atten = self.atten_conv_layer(x) + x_out = x * x_atten + return x_out + + +class ContextPath(BaseModule): + """Context Path to provide sufficient receptive field. + + Args: + backbone_cfg:(dict): Config of backbone of + Context Path. + context_channels (Tuple[int]): The number of channel numbers + of various modules in Context Path. + Default: (128, 256, 512). + align_corners (bool, optional): The align_corners argument of + resize operation. Default: False. + Returns: + x_16_up, x_32_up (torch.Tensor, torch.Tensor): Two feature maps + undergoing upsampling from 1/16 and 1/32 downsampling + feature maps. These two feature maps are used for Feature + Fusion Module and Auxiliary Head. + """ + + def __init__(self, + backbone_cfg, + context_channels=(128, 256, 512), + align_corners=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + assert len(context_channels) == 3, 'Length of input channels \ + of Context Path must be 3!' 
+ + self.backbone = MODELS.build(backbone_cfg) + + self.align_corners = align_corners + self.arm16 = AttentionRefinementModule(context_channels[1], + context_channels[0]) + self.arm32 = AttentionRefinementModule(context_channels[2], + context_channels[0]) + self.conv_head32 = ConvModule( + in_channels=context_channels[0], + out_channels=context_channels[0], + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv_head16 = ConvModule( + in_channels=context_channels[0], + out_channels=context_channels[0], + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.gap_conv = nn.Sequential( + nn.AdaptiveAvgPool2d((1, 1)), + ConvModule( + in_channels=context_channels[2], + out_channels=context_channels[0], + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, x): + x_4, x_8, x_16, x_32 = self.backbone(x) + x_gap = self.gap_conv(x_32) + + x_32_arm = self.arm32(x_32) + x_32_sum = x_32_arm + x_gap + x_32_up = resize(input=x_32_sum, size=x_16.shape[2:], mode='nearest') + x_32_up = self.conv_head32(x_32_up) + + x_16_arm = self.arm16(x_16) + x_16_sum = x_16_arm + x_32_up + x_16_up = resize(input=x_16_sum, size=x_8.shape[2:], mode='nearest') + x_16_up = self.conv_head16(x_16_up) + + return x_16_up, x_32_up + + +class FeatureFusionModule(BaseModule): + """Feature Fusion Module to fuse low level output feature of Spatial Path + and high level output feature of Context Path. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + Returns: + x_out (torch.Tensor): Feature map of Feature Fusion Module. + """ + + def __init__(self, + in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.conv1 = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + self.conv_atten = nn.Sequential( + ConvModule( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), nn.Sigmoid()) + + def forward(self, x_sp, x_cp): + x_concat = torch.cat([x_sp, x_cp], dim=1) + x_fuse = self.conv1(x_concat) + x_atten = self.gap(x_fuse) + # Note: No BN and more 1x1 conv in paper. + x_atten = self.conv_atten(x_atten) + x_atten = x_fuse * x_atten + x_out = x_atten + x_fuse + return x_out + + +@MODELS.register_module() +class BiSeNetV1(BaseModule): + """BiSeNetV1 backbone. + + This backbone is the implementation of `BiSeNet: Bilateral + Segmentation Network for Real-time Semantic + Segmentation `_. + + Args: + backbone_cfg:(dict): Config of backbone of + Context Path. + in_channels (int): The number of channels of input + image. Default: 3. + spatial_channels (Tuple[int]): Size of channel numbers of + various layers in Spatial Path. + Default: (64, 64, 64, 128). + context_channels (Tuple[int]): Size of channel numbers of + various modules in Context Path. + Default: (128, 256, 512). + out_indices (Tuple[int] | int, optional): Output from which stages. + Default: (0, 1, 2). + align_corners (bool, optional): The align_corners argument of + resize operation in Bilateral Guided Aggregation Layer. + Default: False. 
+ out_channels(int): The number of channels of output. + It must be the same with `in_channels` of decode_head. + Default: 256. + """ + + def __init__(self, + backbone_cfg, + in_channels=3, + spatial_channels=(64, 64, 64, 128), + context_channels=(128, 256, 512), + out_indices=(0, 1, 2), + align_corners=False, + out_channels=256, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU'), + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + assert len(spatial_channels) == 4, 'Length of input channels \ + of Spatial Path must be 4!' + + assert len(context_channels) == 3, 'Length of input channels \ + of Context Path must be 3!' + + self.out_indices = out_indices + self.align_corners = align_corners + self.context_path = ContextPath(backbone_cfg, context_channels, + self.align_corners) + self.spatial_path = SpatialPath(in_channels, spatial_channels) + self.ffm = FeatureFusionModule(context_channels[1], out_channels) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + def forward(self, x): + # stole refactoring code from Coin Cheung, thanks + x_context8, x_context16 = self.context_path(x) + x_spatial = self.spatial_path(x) + x_fuse = self.ffm(x_spatial, x_context8) + + outs = [x_fuse, x_context8, x_context16] + outs = [outs[i] for i in self.out_indices] + return tuple(outs) diff --git a/mmseg/models/backbones/bisenetv2.py b/mmseg/models/backbones/bisenetv2.py new file mode 100644 index 0000000000..32aa49822f --- /dev/null +++ b/mmseg/models/backbones/bisenetv2.py @@ -0,0 +1,622 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, + build_activation_layer, build_norm_layer) +from mmengine.model import BaseModule + +from mmseg.registry import MODELS +from ..utils import resize + + +class DetailBranch(BaseModule): + """Detail Branch with wide channels and shallow layers to capture low-level + details and generate high-resolution feature representation. + + Args: + detail_channels (Tuple[int]): Size of channel numbers of each stage + in Detail Branch, in paper it has 3 stages. + Default: (64, 64, 128). + in_channels (int): Number of channels of input image. Default: 3. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Returns: + x (torch.Tensor): Feature map of Detail Branch. 
+ """ + + def __init__(self, + detail_channels=(64, 64, 128), + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + detail_branch = [] + for i in range(len(detail_channels)): + if i == 0: + detail_branch.append( + nn.Sequential( + ConvModule( + in_channels=in_channels, + out_channels=detail_channels[i], + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=detail_channels[i], + out_channels=detail_channels[i], + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg))) + else: + detail_branch.append( + nn.Sequential( + ConvModule( + in_channels=detail_channels[i - 1], + out_channels=detail_channels[i], + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=detail_channels[i], + out_channels=detail_channels[i], + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=detail_channels[i], + out_channels=detail_channels[i], + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg))) + self.detail_branch = nn.ModuleList(detail_branch) + + def forward(self, x): + for stage in self.detail_branch: + x = stage(x) + return x + + +class StemBlock(BaseModule): + """Stem Block at the beginning of Semantic Branch. + + Args: + in_channels (int): Number of input channels. + Default: 3. + out_channels (int): Number of output channels. + Default: 16. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Returns: + x (torch.Tensor): First feature map in Semantic Branch. + """ + + def __init__(self, + in_channels=3, + out_channels=16, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.conv_first = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.convs = nn.Sequential( + ConvModule( + in_channels=out_channels, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=out_channels // 2, + out_channels=out_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.pool = nn.MaxPool2d( + kernel_size=3, stride=2, padding=1, ceil_mode=False) + self.fuse_last = ConvModule( + in_channels=out_channels * 2, + out_channels=out_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + x = self.conv_first(x) + x_left = self.convs(x) + x_right = self.pool(x) + x = self.fuse_last(torch.cat([x_left, x_right], dim=1)) + return x + + +class GELayer(BaseModule): + """Gather-and-Expansion Layer. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + exp_ratio (int): Expansion ratio for middle channels. + Default: 6. + stride (int): Stride of GELayer. 
Default: 1 + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Returns: + x (torch.Tensor): Intermediate feature map in + Semantic Branch. + """ + + def __init__(self, + in_channels, + out_channels, + exp_ratio=6, + stride=1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + mid_channel = in_channels * exp_ratio + self.conv1 = ConvModule( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + if stride == 1: + self.dwconv = nn.Sequential( + # ReLU in ConvModule not shown in paper + ConvModule( + in_channels=in_channels, + out_channels=mid_channel, + kernel_size=3, + stride=stride, + padding=1, + groups=in_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.shortcut = None + else: + self.dwconv = nn.Sequential( + ConvModule( + in_channels=in_channels, + out_channels=mid_channel, + kernel_size=3, + stride=stride, + padding=1, + groups=in_channels, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + # ReLU in ConvModule not shown in paper + ConvModule( + in_channels=mid_channel, + out_channels=mid_channel, + kernel_size=3, + stride=1, + padding=1, + groups=mid_channel, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ) + self.shortcut = nn.Sequential( + DepthwiseSeparableConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + padding=1, + dw_norm_cfg=norm_cfg, + dw_act_cfg=None, + pw_norm_cfg=norm_cfg, + pw_act_cfg=None, + )) + + self.conv2 = nn.Sequential( + ConvModule( + in_channels=mid_channel, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None, + )) + + self.act = build_activation_layer(act_cfg) + + def forward(self, x): + identity = x + x = self.conv1(x) + x = self.dwconv(x) + x = self.conv2(x) + if self.shortcut is not None: + shortcut = self.shortcut(identity) + x = x + shortcut + else: + x = x + identity + x = self.act(x) + return x + + +class CEBlock(BaseModule): + """Context Embedding Block for large receptive filed in Semantic Branch. + + Args: + in_channels (int): Number of input channels. + Default: 3. + out_channels (int): Number of output channels. + Default: 16. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Returns: + x (torch.Tensor): Last feature map in Semantic Branch. 
+ """ + + def __init__(self, + in_channels=3, + out_channels=16, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.gap = nn.Sequential( + nn.AdaptiveAvgPool2d((1, 1)), + build_norm_layer(norm_cfg, self.in_channels)[1]) + self.conv_gap = ConvModule( + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + # Note: in paper here is naive conv2d, no bn-relu + self.conv_last = ConvModule( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + identity = x + x = self.gap(x) + x = self.conv_gap(x) + x = identity + x + x = self.conv_last(x) + return x + + +class SemanticBranch(BaseModule): + """Semantic Branch which is lightweight with narrow channels and deep + layers to obtain high-level semantic context. + + Args: + semantic_channels(Tuple[int]): Size of channel numbers of + various stages in Semantic Branch. + Default: (16, 32, 64, 128). + in_channels (int): Number of channels of input image. Default: 3. + exp_ratio (int): Expansion ratio for middle channels. + Default: 6. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Returns: + semantic_outs (List[torch.Tensor]): List of several feature maps + for auxiliary heads (Booster) and Bilateral + Guided Aggregation Layer. + """ + + def __init__(self, + semantic_channels=(16, 32, 64, 128), + in_channels=3, + exp_ratio=6, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.semantic_channels = semantic_channels + self.semantic_stages = [] + for i in range(len(semantic_channels)): + stage_name = f'stage{i + 1}' + self.semantic_stages.append(stage_name) + if i == 0: + self.add_module( + stage_name, + StemBlock(self.in_channels, semantic_channels[i])) + elif i == (len(semantic_channels) - 1): + self.add_module( + stage_name, + nn.Sequential( + GELayer(semantic_channels[i - 1], semantic_channels[i], + exp_ratio, 2), + GELayer(semantic_channels[i], semantic_channels[i], + exp_ratio, 1), + GELayer(semantic_channels[i], semantic_channels[i], + exp_ratio, 1), + GELayer(semantic_channels[i], semantic_channels[i], + exp_ratio, 1))) + else: + self.add_module( + stage_name, + nn.Sequential( + GELayer(semantic_channels[i - 1], semantic_channels[i], + exp_ratio, 2), + GELayer(semantic_channels[i], semantic_channels[i], + exp_ratio, 1))) + + self.add_module(f'stage{len(semantic_channels)}_CEBlock', + CEBlock(semantic_channels[-1], semantic_channels[-1])) + self.semantic_stages.append(f'stage{len(semantic_channels)}_CEBlock') + + def forward(self, x): + semantic_outs = [] + for stage_name in self.semantic_stages: + semantic_stage = getattr(self, stage_name) + x = semantic_stage(x) + semantic_outs.append(x) + return semantic_outs + + +class BGALayer(BaseModule): + """Bilateral Guided Aggregation Layer to fuse the complementary information + from both Detail Branch and Semantic Branch. + + Args: + out_channels (int): Number of output channels. + Default: 128. + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). 
+ act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Returns: + output (torch.Tensor): Output feature map for Segment heads. + """ + + def __init__(self, + out_channels=128, + align_corners=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.out_channels = out_channels + self.align_corners = align_corners + self.detail_dwconv = nn.Sequential( + DepthwiseSeparableConvModule( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + dw_norm_cfg=norm_cfg, + dw_act_cfg=None, + pw_norm_cfg=None, + pw_act_cfg=None, + )) + self.detail_down = nn.Sequential( + ConvModule( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + nn.AvgPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False)) + self.semantic_conv = nn.Sequential( + ConvModule( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None)) + self.semantic_dwconv = nn.Sequential( + DepthwiseSeparableConvModule( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + dw_norm_cfg=norm_cfg, + dw_act_cfg=None, + pw_norm_cfg=None, + pw_act_cfg=None, + )) + self.conv = ConvModule( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + inplace=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + ) + + def forward(self, x_d, x_s): + detail_dwconv = self.detail_dwconv(x_d) + detail_down = self.detail_down(x_d) + semantic_conv = self.semantic_conv(x_s) + semantic_dwconv = self.semantic_dwconv(x_s) + semantic_conv = resize( + input=semantic_conv, + size=detail_dwconv.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + fuse_1 = detail_dwconv * torch.sigmoid(semantic_conv) + fuse_2 = detail_down * torch.sigmoid(semantic_dwconv) + fuse_2 = resize( + input=fuse_2, + size=fuse_1.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + output = self.conv(fuse_1 + fuse_2) + return output + + +@MODELS.register_module() +class BiSeNetV2(BaseModule): + """BiSeNetV2: Bilateral Network with Guided Aggregation for + Real-time Semantic Segmentation. + + This backbone is the implementation of + `BiSeNetV2 `_. + + Args: + in_channels (int): Number of channel of input image. Default: 3. + detail_channels (Tuple[int], optional): Channels of each stage + in Detail Branch. Default: (64, 64, 128). + semantic_channels (Tuple[int], optional): Channels of each stage + in Semantic Branch. Default: (16, 32, 64, 128). + See Table 1 and Figure 3 of paper for more details. + semantic_expansion_ratio (int, optional): The expansion factor + expanding channel number of middle channels in Semantic Branch. + Default: 6. + bga_channels (int, optional): Number of middle channels in + Bilateral Guided Aggregation Layer. Default: 128. + out_indices (Tuple[int] | int, optional): Output from which stages. + Default: (0, 1, 2, 3, 4). + align_corners (bool, optional): The align_corners argument of + resize operation in Bilateral Guided Aggregation Layer. + Default: False. + conv_cfg (dict | None): Config of conv layers. + Default: None. 
+ norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + in_channels=3, + detail_channels=(64, 64, 128), + semantic_channels=(16, 32, 64, 128), + semantic_expansion_ratio=6, + bga_channels=128, + out_indices=(0, 1, 2, 3, 4), + align_corners=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + if init_cfg is None: + init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ] + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_indices = out_indices + self.detail_channels = detail_channels + self.semantic_channels = semantic_channels + self.semantic_expansion_ratio = semantic_expansion_ratio + self.bga_channels = bga_channels + self.align_corners = align_corners + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.detail = DetailBranch(self.detail_channels, self.in_channels) + self.semantic = SemanticBranch(self.semantic_channels, + self.in_channels, + self.semantic_expansion_ratio) + self.bga = BGALayer(self.bga_channels, self.align_corners) + + def forward(self, x): + # stole refactoring code from Coin Cheung, thanks + x_detail = self.detail(x) + x_semantic_lst = self.semantic(x) + x_head = self.bga(x_detail, x_semantic_lst[-1]) + outs = [x_head] + x_semantic_lst[:-1] + outs = [outs[i] for i in self.out_indices] + return tuple(outs) diff --git a/mmseg/models/backbones/cgnet.py b/mmseg/models/backbones/cgnet.py new file mode 100644 index 0000000000..b74b494f53 --- /dev/null +++ b/mmseg/models/backbones/cgnet.py @@ -0,0 +1,372 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, build_conv_layer, build_norm_layer +from mmengine.model import BaseModule +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmseg.registry import MODELS + + +class GlobalContextExtractor(nn.Module): + """Global Context Extractor for CGNet. + + This class is employed to refine the joint feature of both local feature + and surrounding context. + + Args: + channel (int): Number of input feature channels. + reduction (int): Reductions for global context extractor. Default: 16. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + def __init__(self, channel, reduction=16, with_cp=False): + super().__init__() + self.channel = channel + self.reduction = reduction + assert reduction >= 1 and channel >= reduction + self.with_cp = with_cp + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(channel, channel // reduction), nn.ReLU(inplace=True), + nn.Linear(channel // reduction, channel), nn.Sigmoid()) + + def forward(self, x): + + def _inner_forward(x): + num_batch, num_channel = x.size()[:2] + y = self.avg_pool(x).view(num_batch, num_channel) + y = self.fc(y).view(num_batch, num_channel, 1, 1) + return x * y + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class ContextGuidedBlock(nn.Module): + """Context Guided Block for CGNet. 
+ + This class consists of four components: local feature extractor, + surrounding feature extractor, joint feature extractor and global + context extractor. + + Args: + in_channels (int): Number of input feature channels. + out_channels (int): Number of output feature channels. + dilation (int): Dilation rate for surrounding context extractor. + Default: 2. + reduction (int): Reduction for global context extractor. Default: 16. + skip_connect (bool): Add input to output or not. Default: True. + downsample (bool): Downsample the input to 1/2 or not. Default: False. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='PReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + in_channels, + out_channels, + dilation=2, + reduction=16, + skip_connect=True, + downsample=False, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='PReLU'), + with_cp=False): + super().__init__() + self.with_cp = with_cp + self.downsample = downsample + + channels = out_channels if downsample else out_channels // 2 + if 'type' in act_cfg and act_cfg['type'] == 'PReLU': + act_cfg['num_parameters'] = channels + kernel_size = 3 if downsample else 1 + stride = 2 if downsample else 1 + padding = (kernel_size - 1) // 2 + + self.conv1x1 = ConvModule( + in_channels, + channels, + kernel_size, + stride, + padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.f_loc = build_conv_layer( + conv_cfg, + channels, + channels, + kernel_size=3, + padding=1, + groups=channels, + bias=False) + self.f_sur = build_conv_layer( + conv_cfg, + channels, + channels, + kernel_size=3, + padding=dilation, + groups=channels, + dilation=dilation, + bias=False) + + self.bn = build_norm_layer(norm_cfg, 2 * channels)[1] + self.activate = nn.PReLU(2 * channels) + + if downsample: + self.bottleneck = build_conv_layer( + conv_cfg, + 2 * channels, + out_channels, + kernel_size=1, + bias=False) + + self.skip_connect = skip_connect and not downsample + self.f_glo = GlobalContextExtractor(out_channels, reduction, with_cp) + + def forward(self, x): + + def _inner_forward(x): + out = self.conv1x1(x) + loc = self.f_loc(out) + sur = self.f_sur(out) + + joi_feat = torch.cat([loc, sur], 1) # the joint feature + joi_feat = self.bn(joi_feat) + joi_feat = self.activate(joi_feat) + if self.downsample: + joi_feat = self.bottleneck(joi_feat) # channel = out_channels + # f_glo is employed to refine the joint feature + out = self.f_glo(joi_feat) + + if self.skip_connect: + return x + out + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class InputInjection(nn.Module): + """Downsampling module for CGNet.""" + + def __init__(self, num_downsampling): + super().__init__() + self.pool = nn.ModuleList() + for i in range(num_downsampling): + self.pool.append(nn.AvgPool2d(3, stride=2, padding=1)) + + def forward(self, x): + for pool in self.pool: + x = pool(x) + return x + + +@MODELS.register_module() +class CGNet(BaseModule): + """CGNet backbone. + + This backbone is the implementation of `A Light-weight Context Guided + Network for Semantic Segmentation `_. 
+ + Args: + in_channels (int): Number of input image channels. Normally 3. + num_channels (tuple[int]): Numbers of feature channels at each stages. + Default: (32, 64, 128). + num_blocks (tuple[int]): Numbers of CG blocks at stage 1 and stage 2. + Default: (3, 21). + dilations (tuple[int]): Dilation rate for surrounding context + extractors at stage 1 and stage 2. Default: (2, 4). + reductions (tuple[int]): Reductions for global context extractors at + stage 1 and stage 2. Default: (8, 16). + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='PReLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels=3, + num_channels=(32, 64, 128), + num_blocks=(3, 21), + dilations=(2, 4), + reductions=(8, 16), + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='PReLU'), + norm_eval=False, + with_cp=False, + pretrained=None, + init_cfg=None): + + super().__init__(init_cfg) + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer=['Conv2d', 'Linear']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict(type='Constant', val=0, layer='PReLU') + ] + else: + raise TypeError('pretrained must be a str or None') + + self.in_channels = in_channels + self.num_channels = num_channels + assert isinstance(self.num_channels, tuple) and len( + self.num_channels) == 3 + self.num_blocks = num_blocks + assert isinstance(self.num_blocks, tuple) and len(self.num_blocks) == 2 + self.dilations = dilations + assert isinstance(self.dilations, tuple) and len(self.dilations) == 2 + self.reductions = reductions + assert isinstance(self.reductions, tuple) and len(self.reductions) == 2 + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + if 'type' in self.act_cfg and self.act_cfg['type'] == 'PReLU': + self.act_cfg['num_parameters'] = num_channels[0] + self.norm_eval = norm_eval + self.with_cp = with_cp + + cur_channels = in_channels + self.stem = nn.ModuleList() + for i in range(3): + self.stem.append( + ConvModule( + cur_channels, + num_channels[0], + 3, + 2 if i == 0 else 1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + cur_channels = num_channels[0] + + self.inject_2x = InputInjection(1) # down-sample for Input, factor=2 + self.inject_4x = InputInjection(2) # down-sample for Input, factor=4 + + cur_channels += in_channels + self.norm_prelu_0 = nn.Sequential( + build_norm_layer(norm_cfg, cur_channels)[1], + nn.PReLU(cur_channels)) + + # stage 1 + self.level1 = nn.ModuleList() + for i in 
range(num_blocks[0]): + self.level1.append( + ContextGuidedBlock( + cur_channels if i == 0 else num_channels[1], + num_channels[1], + dilations[0], + reductions[0], + downsample=(i == 0), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + with_cp=with_cp)) # CG block + + cur_channels = 2 * num_channels[1] + in_channels + self.norm_prelu_1 = nn.Sequential( + build_norm_layer(norm_cfg, cur_channels)[1], + nn.PReLU(cur_channels)) + + # stage 2 + self.level2 = nn.ModuleList() + for i in range(num_blocks[1]): + self.level2.append( + ContextGuidedBlock( + cur_channels if i == 0 else num_channels[2], + num_channels[2], + dilations[1], + reductions[1], + downsample=(i == 0), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + with_cp=with_cp)) # CG block + + cur_channels = 2 * num_channels[2] + self.norm_prelu_2 = nn.Sequential( + build_norm_layer(norm_cfg, cur_channels)[1], + nn.PReLU(cur_channels)) + + def forward(self, x): + output = [] + + # stage 0 + inp_2x = self.inject_2x(x) + inp_4x = self.inject_4x(x) + for layer in self.stem: + x = layer(x) + x = self.norm_prelu_0(torch.cat([x, inp_2x], 1)) + output.append(x) + + # stage 1 + for i, layer in enumerate(self.level1): + x = layer(x) + if i == 0: + down1 = x + x = self.norm_prelu_1(torch.cat([x, down1, inp_4x], 1)) + output.append(x) + + # stage 2 + for i, layer in enumerate(self.level2): + x = layer(x) + if i == 0: + down2 = x + x = self.norm_prelu_2(torch.cat([down2, x], 1)) + output.append(x) + + return output + + def train(self, mode=True): + """Convert the model into training mode will keeping the normalization + layer freezed.""" + super().train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmseg/models/backbones/erfnet.py b/mmseg/models/backbones/erfnet.py new file mode 100644 index 0000000000..2c5ec672a0 --- /dev/null +++ b/mmseg/models/backbones/erfnet.py @@ -0,0 +1,329 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer +from mmengine.model import BaseModule + +from mmseg.registry import MODELS +from ..utils import resize + + +class DownsamplerBlock(BaseModule): + """Downsampler block of ERFNet. + + This module is a little different from basical ConvModule. + The features from Conv and MaxPool layers are + concatenated before BatchNorm. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-3), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.conv = build_conv_layer( + self.conv_cfg, + in_channels, + out_channels - in_channels, + kernel_size=3, + stride=2, + padding=1) + self.pool = nn.MaxPool2d(kernel_size=2, stride=2) + self.bn = build_norm_layer(self.norm_cfg, out_channels)[1] + self.act = build_activation_layer(self.act_cfg) + + def forward(self, input): + conv_out = self.conv(input) + pool_out = self.pool(input) + pool_out = resize( + input=pool_out, + size=conv_out.size()[2:], + mode='bilinear', + align_corners=False) + output = torch.cat([conv_out, pool_out], 1) + output = self.bn(output) + output = self.act(output) + return output + + +class NonBottleneck1d(BaseModule): + """Non-bottleneck block of ERFNet. + + Args: + channels (int): Number of channels in Non-bottleneck block. + drop_rate (float): Probability of an element to be zeroed. + Default 0. + dilation (int): Dilation rate for last two conv layers. + Default 1. + num_conv_layer (int): Number of 3x1 and 1x3 convolution layers. + Default 2. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + channels, + drop_rate=0, + dilation=1, + num_conv_layer=2, + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-3), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.act = build_activation_layer(self.act_cfg) + + self.convs_layers = nn.ModuleList() + for conv_layer in range(num_conv_layer): + first_conv_padding = (1, 0) if conv_layer == 0 else (dilation, 0) + first_conv_dilation = 1 if conv_layer == 0 else (dilation, 1) + second_conv_padding = (0, 1) if conv_layer == 0 else (0, dilation) + second_conv_dilation = 1 if conv_layer == 0 else (1, dilation) + + self.convs_layers.append( + build_conv_layer( + self.conv_cfg, + channels, + channels, + kernel_size=(3, 1), + stride=1, + padding=first_conv_padding, + bias=True, + dilation=first_conv_dilation)) + self.convs_layers.append(self.act) + self.convs_layers.append( + build_conv_layer( + self.conv_cfg, + channels, + channels, + kernel_size=(1, 3), + stride=1, + padding=second_conv_padding, + bias=True, + dilation=second_conv_dilation)) + self.convs_layers.append( + build_norm_layer(self.norm_cfg, channels)[1]) + if conv_layer == 0: + self.convs_layers.append(self.act) + else: + self.convs_layers.append(nn.Dropout(p=drop_rate)) + + def forward(self, input): + output = input + for conv in self.convs_layers: + output = conv(output) + output = self.act(output + input) + return output + + +class UpsamplerBlock(BaseModule): + """Upsampler block of ERFNet. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-3), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.conv = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=2, + padding=1, + output_padding=1, + bias=True) + self.bn = build_norm_layer(self.norm_cfg, out_channels)[1] + self.act = build_activation_layer(self.act_cfg) + + def forward(self, input): + output = self.conv(input) + output = self.bn(output) + output = self.act(output) + return output + + +@MODELS.register_module() +class ERFNet(BaseModule): + """ERFNet backbone. + + This backbone is the implementation of `ERFNet: Efficient Residual + Factorized ConvNet for Real-time SemanticSegmentation + `_. + + Args: + in_channels (int): The number of channels of input + image. Default: 3. + enc_downsample_channels (Tuple[int]): Size of channel + numbers of various Downsampler block in encoder. + Default: (16, 64, 128). + enc_stage_non_bottlenecks (Tuple[int]): Number of stages of + Non-bottleneck block in encoder. + Default: (5, 8). + enc_non_bottleneck_dilations (Tuple[int]): Dilation rate of each + stage of Non-bottleneck block of encoder. + Default: (2, 4, 8, 16). + enc_non_bottleneck_channels (Tuple[int]): Size of channel + numbers of various Non-bottleneck block in encoder. + Default: (64, 128). + dec_upsample_channels (Tuple[int]): Size of channel numbers of + various Deconvolution block in decoder. + Default: (64, 16). + dec_stages_non_bottleneck (Tuple[int]): Number of stages of + Non-bottleneck block in decoder. + Default: (2, 2). + dec_non_bottleneck_channels (Tuple[int]): Size of channel + numbers of various Non-bottleneck block in decoder. + Default: (64, 16). + drop_rate (float): Probability of an element to be zeroed. + Default 0.1. + """ + + def __init__(self, + in_channels=3, + enc_downsample_channels=(16, 64, 128), + enc_stage_non_bottlenecks=(5, 8), + enc_non_bottleneck_dilations=(2, 4, 8, 16), + enc_non_bottleneck_channels=(64, 128), + dec_upsample_channels=(64, 16), + dec_stages_non_bottleneck=(2, 2), + dec_non_bottleneck_channels=(64, 16), + dropout_ratio=0.1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU'), + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + assert len(enc_downsample_channels) \ + == len(dec_upsample_channels)+1, 'Number of downsample\ + block of encoder does not \ + match number of upsample block of decoder!' + assert len(enc_downsample_channels) \ + == len(enc_stage_non_bottlenecks)+1, 'Number of \ + downsample block of encoder does not match \ + number of Non-bottleneck block of encoder!' + assert len(enc_downsample_channels) \ + == len(enc_non_bottleneck_channels)+1, 'Number of \ + downsample block of encoder does not match \ + number of channels of Non-bottleneck block of encoder!' + assert enc_stage_non_bottlenecks[-1] \ + % len(enc_non_bottleneck_dilations) == 0, 'Number of \ + Non-bottleneck block of encoder does not match \ + number of Non-bottleneck block of encoder!' + assert len(dec_upsample_channels) \ + == len(dec_stages_non_bottleneck), 'Number of \ + upsample block of decoder does not match \ + number of Non-bottleneck block of decoder!' 
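As a quick sanity check of the factorized convolutions that NonBottleneck1d relies on, the snippet below (plain torch, with illustrative channel counts) compares the parameter count of a full 3x3 conv with the 3x1 plus 1x3 pair that keeps the same receptive field:

import torch.nn as nn

channels = 64
full = nn.Conv2d(channels, channels, 3, padding=1)
factored = nn.Sequential(
    nn.Conv2d(channels, channels, (3, 1), padding=(1, 0)),
    nn.Conv2d(channels, channels, (1, 3), padding=(0, 1)))


def count_params(module):
    return sum(p.numel() for p in module.parameters())


print(count_params(full))      # 36928 = 64 * 64 * 9 + 64
print(count_params(factored))  # 24704 = 2 * (64 * 64 * 3 + 64)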
+ assert len(dec_stages_non_bottleneck) \ + == len(dec_non_bottleneck_channels), 'Number of \ + Non-bottleneck block of decoder does not match \ + number of channels of Non-bottleneck block of decoder!' + + self.in_channels = in_channels + self.enc_downsample_channels = enc_downsample_channels + self.enc_stage_non_bottlenecks = enc_stage_non_bottlenecks + self.enc_non_bottleneck_dilations = enc_non_bottleneck_dilations + self.enc_non_bottleneck_channels = enc_non_bottleneck_channels + self.dec_upsample_channels = dec_upsample_channels + self.dec_stages_non_bottleneck = dec_stages_non_bottleneck + self.dec_non_bottleneck_channels = dec_non_bottleneck_channels + self.dropout_ratio = dropout_ratio + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.encoder.append( + DownsamplerBlock(self.in_channels, enc_downsample_channels[0])) + + for i in range(len(enc_downsample_channels) - 1): + self.encoder.append( + DownsamplerBlock(enc_downsample_channels[i], + enc_downsample_channels[i + 1])) + # Last part of encoder is some dilated NonBottleneck1d blocks. + if i == len(enc_downsample_channels) - 2: + iteration_times = int(enc_stage_non_bottlenecks[-1] / + len(enc_non_bottleneck_dilations)) + for j in range(iteration_times): + for k in range(len(enc_non_bottleneck_dilations)): + self.encoder.append( + NonBottleneck1d(enc_downsample_channels[-1], + self.dropout_ratio, + enc_non_bottleneck_dilations[k])) + else: + for j in range(enc_stage_non_bottlenecks[i]): + self.encoder.append( + NonBottleneck1d(enc_downsample_channels[i + 1], + self.dropout_ratio)) + + for i in range(len(dec_upsample_channels)): + if i == 0: + self.decoder.append( + UpsamplerBlock(enc_downsample_channels[-1], + dec_non_bottleneck_channels[i])) + else: + self.decoder.append( + UpsamplerBlock(dec_non_bottleneck_channels[i - 1], + dec_non_bottleneck_channels[i])) + for j in range(dec_stages_non_bottleneck[i]): + self.decoder.append( + NonBottleneck1d(dec_non_bottleneck_channels[i])) + + def forward(self, x): + for enc in self.encoder: + x = enc(x) + for dec in self.decoder: + x = dec(x) + return [x] diff --git a/mmseg/models/backbones/fast_scnn.py b/mmseg/models/backbones/fast_scnn.py new file mode 100644 index 0000000000..6ff7a3191d --- /dev/null +++ b/mmseg/models/backbones/fast_scnn.py @@ -0,0 +1,408 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmengine.model import BaseModule + +from mmseg.models.decode_heads.psp_head import PPM +from mmseg.registry import MODELS +from ..utils import InvertedResidual, resize + + +class LearningToDownsample(nn.Module): + """Learning to downsample module. + + Args: + in_channels (int): Number of input channels. + dw_channels (tuple[int]): Number of output channels of the first and + the second depthwise conv (dwconv) layers. + out_channels (int): Number of output channels of the whole + 'learning to downsample' module. + conv_cfg (dict | None): Config of conv layers. Default: None + norm_cfg (dict | None): Config of norm layers. Default: + dict(type='BN') + act_cfg (dict): Config of activation layers. Default: + dict(type='ReLU') + dw_act_cfg (dict): In DepthwiseSeparableConvModule, activation config + of depthwise ConvModule. If it is 'default', it will be the same + as `act_cfg`. Default: None. 
+ """ + + def __init__(self, + in_channels, + dw_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + dw_act_cfg=None): + super().__init__() + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.dw_act_cfg = dw_act_cfg + dw_channels1 = dw_channels[0] + dw_channels2 = dw_channels[1] + + self.conv = ConvModule( + in_channels, + dw_channels1, + 3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.dsconv1 = DepthwiseSeparableConvModule( + dw_channels1, + dw_channels2, + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + dw_act_cfg=self.dw_act_cfg) + + self.dsconv2 = DepthwiseSeparableConvModule( + dw_channels2, + out_channels, + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + dw_act_cfg=self.dw_act_cfg) + + def forward(self, x): + x = self.conv(x) + x = self.dsconv1(x) + x = self.dsconv2(x) + return x + + +class GlobalFeatureExtractor(nn.Module): + """Global feature extractor module. + + Args: + in_channels (int): Number of input channels of the GFE module. + Default: 64 + block_channels (tuple[int]): Tuple of ints. Each int specifies the + number of output channels of each Inverted Residual module. + Default: (64, 96, 128) + out_channels(int): Number of output channels of the GFE module. + Default: 128 + expand_ratio (int): Adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + Default: 6 + num_blocks (tuple[int]): Tuple of ints. Each int specifies the + number of times each Inverted Residual module is repeated. + The repeated Inverted Residual modules are called a 'group'. + Default: (3, 3, 3) + strides (tuple[int]): Tuple of ints. Each int specifies + the downsampling factor of each 'group'. + Default: (2, 2, 1) + pool_scales (tuple[int]): Tuple of ints. Each int specifies + the parameter required in 'global average pooling' within PPM. + Default: (1, 2, 3, 6) + conv_cfg (dict | None): Config of conv layers. Default: None + norm_cfg (dict | None): Config of norm layers. Default: + dict(type='BN') + act_cfg (dict): Config of activation layers. Default: + dict(type='ReLU') + align_corners (bool): align_corners argument of F.interpolate. 
+ Default: False + """ + + def __init__(self, + in_channels=64, + block_channels=(64, 96, 128), + out_channels=128, + expand_ratio=6, + num_blocks=(3, 3, 3), + strides=(2, 2, 1), + pool_scales=(1, 2, 3, 6), + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False): + super().__init__() + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + assert len(block_channels) == len(num_blocks) == 3 + self.bottleneck1 = self._make_layer(in_channels, block_channels[0], + num_blocks[0], strides[0], + expand_ratio) + self.bottleneck2 = self._make_layer(block_channels[0], + block_channels[1], num_blocks[1], + strides[1], expand_ratio) + self.bottleneck3 = self._make_layer(block_channels[1], + block_channels[2], num_blocks[2], + strides[2], expand_ratio) + self.ppm = PPM( + pool_scales, + block_channels[2], + block_channels[2] // 4, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=align_corners) + + self.out = ConvModule( + block_channels[2] * 2, + out_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def _make_layer(self, + in_channels, + out_channels, + blocks, + stride=1, + expand_ratio=6): + layers = [ + InvertedResidual( + in_channels, + out_channels, + stride, + expand_ratio, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + ] + for i in range(1, blocks): + layers.append( + InvertedResidual( + out_channels, + out_channels, + 1, + expand_ratio, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + return nn.Sequential(*layers) + + def forward(self, x): + x = self.bottleneck1(x) + x = self.bottleneck2(x) + x = self.bottleneck3(x) + x = torch.cat([x, *self.ppm(x)], dim=1) + x = self.out(x) + return x + + +class FeatureFusionModule(nn.Module): + """Feature fusion module. + + Args: + higher_in_channels (int): Number of input channels of the + higher-resolution branch. + lower_in_channels (int): Number of input channels of the + lower-resolution branch. + out_channels (int): Number of output channels. + conv_cfg (dict | None): Config of conv layers. Default: None + norm_cfg (dict | None): Config of norm layers. Default: + dict(type='BN') + dwconv_act_cfg (dict): Config of activation layers in 3x3 conv. + Default: dict(type='ReLU'). + conv_act_cfg (dict): Config of activation layers in the two 1x1 conv. + Default: None. + align_corners (bool): align_corners argument of F.interpolate. + Default: False. 
+ """ + + def __init__(self, + higher_in_channels, + lower_in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dwconv_act_cfg=dict(type='ReLU'), + conv_act_cfg=None, + align_corners=False): + super().__init__() + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.dwconv_act_cfg = dwconv_act_cfg + self.conv_act_cfg = conv_act_cfg + self.align_corners = align_corners + self.dwconv = ConvModule( + lower_in_channels, + out_channels, + 3, + padding=1, + groups=out_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.dwconv_act_cfg) + self.conv_lower_res = ConvModule( + out_channels, + out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.conv_act_cfg) + + self.conv_higher_res = ConvModule( + higher_in_channels, + out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.conv_act_cfg) + + self.relu = nn.ReLU(True) + + def forward(self, higher_res_feature, lower_res_feature): + lower_res_feature = resize( + lower_res_feature, + size=higher_res_feature.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + lower_res_feature = self.dwconv(lower_res_feature) + lower_res_feature = self.conv_lower_res(lower_res_feature) + + higher_res_feature = self.conv_higher_res(higher_res_feature) + out = higher_res_feature + lower_res_feature + return self.relu(out) + + +@MODELS.register_module() +class FastSCNN(BaseModule): + """Fast-SCNN Backbone. + + This backbone is the implementation of `Fast-SCNN: Fast Semantic + Segmentation Network `_. + + Args: + in_channels (int): Number of input image channels. Default: 3. + downsample_dw_channels (tuple[int]): Number of output channels after + the first conv layer & the second conv layer in + Learning-To-Downsample (LTD) module. + Default: (32, 48). + global_in_channels (int): Number of input channels of + Global Feature Extractor(GFE). + Equal to number of output channels of LTD. + Default: 64. + global_block_channels (tuple[int]): Tuple of integers that describe + the output channels for each of the MobileNet-v2 bottleneck + residual blocks in GFE. + Default: (64, 96, 128). + global_block_strides (tuple[int]): Tuple of integers + that describe the strides (downsampling factors) for each of the + MobileNet-v2 bottleneck residual blocks in GFE. + Default: (2, 2, 1). + global_out_channels (int): Number of output channels of GFE. + Default: 128. + higher_in_channels (int): Number of input channels of the higher + resolution branch in FFM. + Equal to global_in_channels. + Default: 64. + lower_in_channels (int): Number of input channels of the lower + resolution branch in FFM. + Equal to global_out_channels. + Default: 128. + fusion_out_channels (int): Number of output channels of FFM. + Default: 128. + out_indices (tuple): Tuple of indices of list + [higher_res_features, lower_res_features, fusion_output]. + Often set to (0,1,2) to enable aux. heads. + Default: (0, 1, 2). + conv_cfg (dict | None): Config of conv layers. Default: None + norm_cfg (dict | None): Config of norm layers. Default: + dict(type='BN') + act_cfg (dict): Config of activation layers. Default: + dict(type='ReLU') + align_corners (bool): align_corners argument of F.interpolate. + Default: False + dw_act_cfg (dict): In DepthwiseSeparableConvModule, activation config + of depthwise ConvModule. If it is 'default', it will be the same + as `act_cfg`. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels=3, + downsample_dw_channels=(32, 48), + global_in_channels=64, + global_block_channels=(64, 96, 128), + global_block_strides=(2, 2, 1), + global_out_channels=128, + higher_in_channels=64, + lower_in_channels=128, + fusion_out_channels=128, + out_indices=(0, 1, 2), + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False, + dw_act_cfg=None, + init_cfg=None): + + super().__init__(init_cfg) + + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ] + + if global_in_channels != higher_in_channels: + raise AssertionError('Global Input Channels must be the same \ + with Higher Input Channels!') + elif global_out_channels != lower_in_channels: + raise AssertionError('Global Output Channels must be the same \ + with Lower Input Channels!') + + self.in_channels = in_channels + self.downsample_dw_channels1 = downsample_dw_channels[0] + self.downsample_dw_channels2 = downsample_dw_channels[1] + self.global_in_channels = global_in_channels + self.global_block_channels = global_block_channels + self.global_block_strides = global_block_strides + self.global_out_channels = global_out_channels + self.higher_in_channels = higher_in_channels + self.lower_in_channels = lower_in_channels + self.fusion_out_channels = fusion_out_channels + self.out_indices = out_indices + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.align_corners = align_corners + self.learning_to_downsample = LearningToDownsample( + in_channels, + downsample_dw_channels, + global_in_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + dw_act_cfg=dw_act_cfg) + self.global_feature_extractor = GlobalFeatureExtractor( + global_in_channels, + global_block_channels, + global_out_channels, + strides=self.global_block_strides, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + self.feature_fusion = FeatureFusionModule( + higher_in_channels, + lower_in_channels, + fusion_out_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dwconv_act_cfg=self.act_cfg, + align_corners=self.align_corners) + + def forward(self, x): + higher_res_features = self.learning_to_downsample(x) + lower_res_features = self.global_feature_extractor(higher_res_features) + fusion_output = self.feature_fusion(higher_res_features, + lower_res_features) + + outs = [higher_res_features, lower_res_features, fusion_output] + outs = [outs[i] for i in self.out_indices] + return tuple(outs) diff --git a/mmseg/models/backbones/hrnet.py b/mmseg/models/backbones/hrnet.py index e4247ba67e..2da755e731 100644 --- a/mmseg/models/backbones/hrnet.py +++ b/mmseg/models/backbones/hrnet.py @@ -1,16 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
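A hedged usage sketch for the FastSCNN backbone defined above: assuming the class is exported from mmseg.models like the other registered backbones, the default channel widths and strides should yield features at 1/8, 1/32 and 1/8 of the input resolution, which is what the three out_indices select for the decode and auxiliary heads:

import torch
from mmseg.models import FastSCNN  # assumes the usual mmseg registry export

model = FastSCNN(in_channels=3, out_indices=(0, 1, 2))
model.eval()
with torch.no_grad():
    outs = model(torch.rand(1, 3, 512, 1024))
for out in outs:
    print(out.shape)
# Expected with the defaults documented above:
# torch.Size([1, 64, 64, 128])   higher-res features, 1/8 resolution
# torch.Size([1, 128, 16, 32])   lower-res features, 1/32 resolution
# torch.Size([1, 128, 64, 128])  fused features, 1/8 resolution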
+import warnings + import torch.nn as nn -from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init, - kaiming_init) -from mmcv.runner import load_checkpoint -from mmcv.utils.parrots_wrapper import _BatchNorm - -from mmseg.ops import resize -from mmseg.utils import get_root_logger -from ..builder import BACKBONES +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule, ModuleList, Sequential +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmseg.registry import MODELS +from ..utils import Upsample, resize from .resnet import BasicBlock, Bottleneck -class HRModule(nn.Module): +class HRModule(BaseModule): """High-Resolution Module for HRNet. In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange @@ -26,8 +27,11 @@ def __init__(self, multiscale_output=True, with_cp=False, conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True)): - super(HRModule, self).__init__() + norm_cfg=dict(type='BN', requires_grad=True), + block_init_cfg=None, + init_cfg=None): + super().__init__(init_cfg) + self.block_init_cfg = block_init_cfg self._check_branches(num_branches, num_blocks, in_channels, num_channels) @@ -92,7 +96,8 @@ def _make_one_branch(self, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) + conv_cfg=self.conv_cfg, + init_cfg=self.block_init_cfg)) self.in_channels[branch_index] = \ num_channels[branch_index] * block.expansion for i in range(1, num_blocks[branch_index]): @@ -102,9 +107,10 @@ def _make_one_branch(self, num_channels[branch_index], with_cp=self.with_cp, norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) + conv_cfg=self.conv_cfg, + init_cfg=self.block_init_cfg)) - return nn.Sequential(*layers) + return Sequential(*layers) def _make_branches(self, num_branches, block, num_blocks, num_channels): """Build multiple branch.""" @@ -114,7 +120,7 @@ def _make_branches(self, num_branches, block, num_blocks, num_channels): branches.append( self._make_one_branch(i, block, num_blocks, num_channels)) - return nn.ModuleList(branches) + return ModuleList(branches) def _make_fuse_layers(self): """Build fuse layer.""" @@ -141,7 +147,7 @@ def _make_fuse_layers(self): bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], # we set align_corners=False for HRNet - nn.Upsample( + Upsample( scale_factor=2**(j - i), mode='bilinear', align_corners=False))) @@ -208,25 +214,45 @@ def forward(self, x): return x_fuse -@BACKBONES.register_module() -class HRNet(nn.Module): +@MODELS.register_module() +class HRNet(BaseModule): """HRNet backbone. - High-Resolution Representations for Labeling Pixels and Regions - arXiv: https://arxiv.org/abs/1904.04514 + This backbone is the implementation of `High-Resolution Representations + for Labeling Pixels and Regions `_. Args: - extra (dict): detailed configuration for each stage of HRNet. + extra (dict): Detailed configuration for each stage of HRNet. + There must be 4 stages, the configuration for each stage must have + 5 keys: + + - num_modules (int): The number of HRModule in this stage. + - num_branches (int): The number of branches in the HRModule. + - block (str): The type of convolution block. + - num_blocks (tuple): The number of blocks in each branch. + The length must be equal to num_branches. + - num_channels (tuple): The number of channels in each branch. + The length must be equal to num_branches. in_channels (int): Number of input image channels. Normally 3. 
- conv_cfg (dict): dictionary to construct and config conv layer. - norm_cfg (dict): dictionary to construct and config norm layer. + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Use `BN` by default. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. + and its variants only. Default: False. with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - zero_init_residual (bool): whether to use zero init for last norm layer - in resblocks to let them behave as identity. + memory while slowing down the training speed. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: False. + multiscale_output (bool): Whether to output multi-level features + produced by multiple branches. If False, only the first level + feature will be output. Default: True. + pretrained (str, optional): Model pretrained path. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. Example: >>> from mmseg.models import HRNet @@ -277,14 +303,49 @@ def __init__(self, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=False, with_cp=False, - zero_init_residual=False): - super(HRNet, self).__init__() + frozen_stages=-1, + zero_init_residual=False, + multiscale_output=True, + pretrained=None, + init_cfg=None): + super().__init__(init_cfg) + + self.pretrained = pretrained + self.zero_init_residual = zero_init_residual + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + # Assert configurations of 4 stages are in extra + assert 'stage1' in extra and 'stage2' in extra \ + and 'stage3' in extra and 'stage4' in extra + # Assert whether the length of `num_blocks` and `num_channels` are + # equal to `num_branches` + for i in range(4): + cfg = extra[f'stage{i + 1}'] + assert len(cfg['num_blocks']) == cfg['num_branches'] and \ + len(cfg['num_channels']) == cfg['num_branches'] + self.extra = extra self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.norm_eval = norm_eval self.with_cp = with_cp - self.zero_init_residual = zero_init_residual + self.frozen_stages = frozen_stages # stem net self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) @@ -356,7 +417,9 @@ def __init__(self, self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage4, pre_stage_channels = self._make_stage( - self.stage4_cfg, num_channels) + self.stage4_cfg, num_channels, multiscale_output=multiscale_output) + + self._freeze_stages() @property def norm1(self): @@ -430,6 +493,16 @@ def _make_layer(self, block, inplanes, planes, blocks, stride=1): build_norm_layer(self.norm_cfg, planes * 
block.expansion)[1]) layers = [] + block_init_cfg = None + if self.pretrained is None and not hasattr( + self, 'init_cfg') and self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm3')) + layers.append( block( inplanes, @@ -438,7 +511,8 @@ def _make_layer(self, block, inplanes, planes, blocks, stride=1): downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) + conv_cfg=self.conv_cfg, + init_cfg=block_init_cfg)) inplanes = planes * block.expansion for i in range(1, blocks): layers.append( @@ -447,9 +521,10 @@ def _make_layer(self, block, inplanes, planes, blocks, stride=1): planes, with_cp=self.with_cp, norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) + conv_cfg=self.conv_cfg, + init_cfg=block_init_cfg)) - return nn.Sequential(*layers) + return Sequential(*layers) def _make_stage(self, layer_config, in_channels, multiscale_output=True): """Make each stage.""" @@ -460,6 +535,16 @@ def _make_stage(self, layer_config, in_channels, multiscale_output=True): block = self.blocks_dict[layer_config['block']] hr_modules = [] + block_init_cfg = None + if self.pretrained is None and not hasattr( + self, 'init_cfg') and self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm3')) + for i in range(num_modules): # multi_scale_output is only used for the last module if not multiscale_output and i == num_modules - 1: @@ -477,35 +562,36 @@ def _make_stage(self, layer_config, in_channels, multiscale_output=True): reset_multiscale_output, with_cp=self.with_cp, norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) - - return nn.Sequential(*hr_modules), in_channels - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. 
- """ - if isinstance(pretrained, str): - logger = get_root_logger() - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, (_BatchNorm, nn.GroupNorm)): - constant_init(m, 1) - - if self.zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - constant_init(m.norm3, 0) - elif isinstance(m, BasicBlock): - constant_init(m.norm2, 0) - else: - raise TypeError('pretrained must be a str or None') + conv_cfg=self.conv_cfg, + block_init_cfg=block_init_cfg)) + + return Sequential(*hr_modules), in_channels + + def _freeze_stages(self): + """Freeze stages param and norm stats.""" + if self.frozen_stages >= 0: + + self.norm1.eval() + self.norm2.eval() + for m in [self.conv1, self.norm1, self.conv2, self.norm2]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + if i == 1: + m = getattr(self, f'layer{i}') + t = getattr(self, f'transition{i}') + elif i == 4: + m = getattr(self, f'stage{i}') + else: + m = getattr(self, f'stage{i}') + t = getattr(self, f'transition{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + t.eval() + for param in t.parameters(): + param.requires_grad = False def forward(self, x): """Forward function.""" @@ -545,9 +631,10 @@ def forward(self, x): return y_list def train(self, mode=True): - """Convert the model into training mode whill keeping the normalization + """Convert the model into training mode will keeping the normalization layer freezed.""" - super(HRNet, self).train(mode) + super().train(mode) + self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): # trick: eval have effect on BatchNorm only diff --git a/mmseg/models/backbones/icnet.py b/mmseg/models/backbones/icnet.py new file mode 100644 index 0000000000..8ff3448569 --- /dev/null +++ b/mmseg/models/backbones/icnet.py @@ -0,0 +1,166 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmseg.registry import MODELS +from ..decode_heads.psp_head import PPM +from ..utils import resize + + +@MODELS.register_module() +class ICNet(BaseModule): + """ICNet for Real-Time Semantic Segmentation on High-Resolution Images. + + This backbone is the implementation of + `ICNet `_. + + Args: + backbone_cfg (dict): Config dict to build backbone. Usually it is + ResNet but it can also be other backbones. + in_channels (int): The number of input image channels. Default: 3. + layer_channels (Sequence[int]): The numbers of feature channels at + layer 2 and layer 4 in ResNet. It can also be other backbones. + Default: (512, 2048). + light_branch_middle_channels (int): The number of channels of the + middle layer in light branch. Default: 32. + psp_out_channels (int): The number of channels of the output of PSP + module. Default: 512. + out_channels (Sequence[int]): The numbers of output feature channels + at each branches. Default: (64, 256, 256). + pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module. Default: (1, 2, 3, 6). + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN'). + act_cfg (dict): Dictionary to construct and config act layer. + Default: dict(type='ReLU'). 
+ align_corners (bool): align_corners argument of F.interpolate. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + backbone_cfg, + in_channels=3, + layer_channels=(512, 2048), + light_branch_middle_channels=32, + psp_out_channels=512, + out_channels=(64, 256, 256), + pool_scales=(1, 2, 3, 6), + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU'), + align_corners=False, + init_cfg=None): + if backbone_cfg is None: + raise TypeError('backbone_cfg must be passed from config file!') + if init_cfg is None: + init_cfg = [ + dict(type='Kaiming', mode='fan_out', layer='Conv2d'), + dict(type='Constant', val=1, layer='_BatchNorm'), + dict(type='Normal', mean=0.01, layer='Linear') + ] + super().__init__(init_cfg=init_cfg) + self.align_corners = align_corners + self.backbone = MODELS.build(backbone_cfg) + + # Note: Default `ceil_mode` is false in nn.MaxPool2d, set + # `ceil_mode=True` to keep information in the corner of feature map. + self.backbone.maxpool = nn.MaxPool2d( + kernel_size=3, stride=2, padding=1, ceil_mode=True) + + self.psp_modules = PPM( + pool_scales=pool_scales, + in_channels=layer_channels[1], + channels=psp_out_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + align_corners=align_corners) + + self.psp_bottleneck = ConvModule( + layer_channels[1] + len(pool_scales) * psp_out_channels, + psp_out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.conv_sub1 = nn.Sequential( + ConvModule( + in_channels=in_channels, + out_channels=light_branch_middle_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + ConvModule( + in_channels=light_branch_middle_channels, + out_channels=light_branch_middle_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + ConvModule( + in_channels=light_branch_middle_channels, + out_channels=out_channels[0], + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) + + self.conv_sub2 = ConvModule( + layer_channels[0], + out_channels[1], + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + + self.conv_sub4 = ConvModule( + psp_out_channels, + out_channels[2], + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + + def forward(self, x): + output = [] + + # sub 1 + output.append(self.conv_sub1(x)) + + # sub 2 + x = resize( + x, + scale_factor=0.5, + mode='bilinear', + align_corners=self.align_corners) + x = self.backbone.stem(x) + x = self.backbone.maxpool(x) + x = self.backbone.layer1(x) + x = self.backbone.layer2(x) + output.append(self.conv_sub2(x)) + + # sub 4 + x = resize( + x, + scale_factor=0.5, + mode='bilinear', + align_corners=self.align_corners) + x = self.backbone.layer3(x) + x = self.backbone.layer4(x) + psp_outs = self.psp_modules(x) + [x] + psp_outs = torch.cat(psp_outs, dim=1) + x = self.psp_bottleneck(psp_outs) + + output.append(self.conv_sub4(x)) + + return output diff --git a/mmseg/models/backbones/mae.py b/mmseg/models/backbones/mae.py new file mode 100644 index 0000000000..a1f243f085 --- /dev/null +++ b/mmseg/models/backbones/mae.py @@ -0,0 +1,260 @@ +# Copyright (c) OpenMMLab. 
All rights reserved.import math +import math + +import torch +import torch.nn as nn +from mmengine.model import ModuleList +from mmengine.model.weight_init import (constant_init, kaiming_init, + trunc_normal_) +from mmengine.runner.checkpoint import _load_checkpoint +from torch.nn.modules.batchnorm import _BatchNorm + +from mmseg.registry import MODELS +from .beit import BEiT, BEiTAttention, BEiTTransformerEncoderLayer + + +class MAEAttention(BEiTAttention): + """Multi-head self-attention with relative position bias used in MAE. + + This module is different from ``BEiTAttention`` by initializing the + relative bias table with zeros. + """ + + def init_weights(self): + """Initialize relative position bias with zeros.""" + + # As MAE initializes relative position bias as zeros and this class + # inherited from BEiT which initializes relative position bias + # with `trunc_normal`, `init_weights` here does + # nothing and just passes directly + + pass + + +class MAETransformerEncoderLayer(BEiTTransformerEncoderLayer): + """Implements one encoder layer in Vision Transformer. + + This module is different from ``BEiTTransformerEncoderLayer`` by replacing + ``BEiTAttention`` with ``MAEAttention``. + """ + + def build_attn(self, attn_cfg): + self.attn = MAEAttention(**attn_cfg) + + +@MODELS.register_module() +class MAE(BEiT): + """VisionTransformer with support for patch. + + Args: + img_size (int | tuple): Input image size. Default: 224. + patch_size (int): The patch size. Default: 16. + in_channels (int): Number of input channels. Default: 3. + embed_dims (int): embedding dimension. Default: 768. + num_layers (int): depth of transformer. Default: 12. + num_heads (int): number of attention heads. Default: 12. + mlp_ratio (int): ratio of mlp hidden dim to embedding dim. + Default: 4. + out_indices (list | tuple | int): Output from which stages. + Default: -1. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0 + drop_path_rate (float): stochastic depth rate. Default 0.0. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + patch_norm (bool): Whether to add a norm in PatchEmbed Block. + Default: False. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Default: False. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + pretrained (str, optional): model pretrained path. Default: None. + init_values (float): Initialize the values of Attention and FFN + with learnable scaling. Defaults to 0.1. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + img_size=224, + patch_size=16, + in_channels=3, + embed_dims=768, + num_layers=12, + num_heads=12, + mlp_ratio=4, + out_indices=-1, + attn_drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN'), + act_cfg=dict(type='GELU'), + patch_norm=False, + final_norm=False, + num_fcs=2, + norm_eval=False, + pretrained=None, + init_values=0.1, + init_cfg=None): + super().__init__( + img_size=img_size, + patch_size=patch_size, + in_channels=in_channels, + embed_dims=embed_dims, + num_layers=num_layers, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + out_indices=out_indices, + qv_bias=False, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + patch_norm=patch_norm, + final_norm=final_norm, + num_fcs=num_fcs, + norm_eval=norm_eval, + pretrained=pretrained, + init_values=init_values, + init_cfg=init_cfg) + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims)) + + self.num_patches = self.patch_shape[0] * self.patch_shape[1] + self.pos_embed = nn.Parameter( + torch.zeros(1, self.num_patches + 1, embed_dims)) + + def _build_layers(self): + dpr = [ + x.item() + for x in torch.linspace(0, self.drop_path_rate, self.num_layers) + ] + self.layers = ModuleList() + for i in range(self.num_layers): + self.layers.append( + MAETransformerEncoderLayer( + embed_dims=self.embed_dims, + num_heads=self.num_heads, + feedforward_channels=self.mlp_ratio * self.embed_dims, + attn_drop_rate=self.attn_drop_rate, + drop_path_rate=dpr[i], + num_fcs=self.num_fcs, + bias=True, + act_cfg=self.act_cfg, + norm_cfg=self.norm_cfg, + window_size=self.patch_shape, + init_values=self.init_values)) + + def fix_init_weight(self): + """Rescale the initialization according to layer id. + + This function is copied from https://github.com/microsoft/unilm/blob/master/beit/modeling_pretrain.py. 
# noqa: E501 + Copyright (c) Microsoft Corporation + Licensed under the MIT License + """ + + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.layers): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.ffn.layers[1].weight.data, layer_id + 1) + + def init_weights(self): + + def _init_weights(m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + self.apply(_init_weights) + self.fix_init_weight() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg.get('type') == 'Pretrained'): + checkpoint = _load_checkpoint( + self.init_cfg['checkpoint'], logger=None, map_location='cpu') + state_dict = self.resize_rel_pos_embed(checkpoint) + state_dict = self.resize_abs_pos_embed(state_dict) + self.load_state_dict(state_dict, False) + elif self.init_cfg is not None: + super().init_weights() + else: + # We only implement the 'jax_impl' initialization implemented at + # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353 # noqa: E501 + # Copyright 2019 Ross Wightman + # Licensed under the Apache License, Version 2.0 (the "License") + trunc_normal_(self.cls_token, std=.02) + for n, m in self.named_modules(): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + if 'ffn' in n: + nn.init.normal_(m.bias, mean=0., std=1e-6) + else: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv2d): + kaiming_init(m, mode='fan_in', bias=0.) + elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)): + constant_init(m, val=1.0, bias=0.) 
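The `resize_abs_pos_embed` helper defined next adapts the checkpoint's absolute position embeddings to the current patch grid via bicubic interpolation. A minimal, self-contained sketch of that interpolation step in plain PyTorch follows; the shapes (a 14x14 checkpoint grid resized to 16x16, one cls token) and variable names are illustrative assumptions, not part of the patch.

# Sketch of resizing a square grid of absolute position embeddings, as done
# by `resize_abs_pos_embed` below. No mmseg dependency; shapes are examples.
import torch
import torch.nn.functional as F

embed_dims = 768
old_grid, new_grid = 14, 16
pos_embed = torch.zeros(1, old_grid * old_grid + 1, embed_dims)  # [cls | patch tokens]

cls_token_embed = pos_embed[:, :1]                        # extra token kept unchanged
patch_embed_grid = pos_embed[:, 1:].reshape(1, old_grid, old_grid, embed_dims)
patch_embed_grid = patch_embed_grid.permute(0, 3, 1, 2)   # NHWC -> NCHW for interpolate
patch_embed_grid = F.interpolate(
    patch_embed_grid,
    size=(new_grid, new_grid),
    mode='bicubic',
    align_corners=False)
patch_embed_grid = patch_embed_grid.permute(0, 2, 3, 1).flatten(1, 2)  # back to (1, N, C)
new_pos_embed = torch.cat((cls_token_embed, patch_embed_grid), dim=1)
assert new_pos_embed.shape == (1, new_grid * new_grid + 1, embed_dims)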
+ + def resize_abs_pos_embed(self, state_dict): + if 'pos_embed' in state_dict: + pos_embed_checkpoint = state_dict['pos_embed'] + embedding_size = pos_embed_checkpoint.shape[-1] + num_extra_tokens = self.pos_embed.shape[-2] - self.num_patches + # height (== width) for the checkpoint position embedding + orig_size = int( + (pos_embed_checkpoint.shape[-2] - num_extra_tokens)**0.5) + # height (== width) for the new position embedding + new_size = int(self.num_patches**0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, + embedding_size).permute( + 0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, + size=(new_size, new_size), + mode='bicubic', + align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + state_dict['pos_embed'] = new_pos_embed + return state_dict + + def forward(self, inputs): + B = inputs.shape[0] + + x, hw_shape = self.patch_embed(inputs) + + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + x = x + self.pos_embed + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i == len(self.layers) - 1: + if self.final_norm: + x = self.norm1(x) + if i in self.out_indices: + out = x[:, 1:] + B, _, C = out.shape + out = out.reshape(B, hw_shape[0], hw_shape[1], + C).permute(0, 3, 1, 2).contiguous() + outs.append(out) + + return tuple(outs) diff --git a/mmseg/models/backbones/mit.py b/mmseg/models/backbones/mit.py new file mode 100644 index 0000000000..66556bdfca --- /dev/null +++ b/mmseg/models/backbones/mit.py @@ -0,0 +1,450 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import warnings + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import Conv2d, build_activation_layer, build_norm_layer +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.transformer import MultiheadAttention +from mmengine.model import BaseModule, ModuleList, Sequential +from mmengine.model.weight_init import (constant_init, normal_init, + trunc_normal_init) + +from mmseg.registry import MODELS +from ..utils import PatchEmbed, nchw_to_nlc, nlc_to_nchw + + +class MixFFN(BaseModule): + """An implementation of MixFFN of Segformer. + + The differences between MixFFN & FFN: + 1. Use 1X1 Conv to replace Linear layer. + 2. Introduce 3X3 Conv to encode positional information. + Args: + embed_dims (int): The feature dimension. Same as + `MultiheadAttention`. Defaults: 256. + feedforward_channels (int): The hidden dimension of FFNs. + Defaults: 1024. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='ReLU') + ffn_drop (float, optional): Probability of an element to be + zeroed in FFN. Default 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. 
+ """ + + def __init__(self, + embed_dims, + feedforward_channels, + act_cfg=dict(type='GELU'), + ffn_drop=0., + dropout_layer=None, + init_cfg=None): + super().__init__(init_cfg) + + self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.act_cfg = act_cfg + self.activate = build_activation_layer(act_cfg) + + in_channels = embed_dims + fc1 = Conv2d( + in_channels=in_channels, + out_channels=feedforward_channels, + kernel_size=1, + stride=1, + bias=True) + # 3x3 depth wise conv to provide positional encode information + pe_conv = Conv2d( + in_channels=feedforward_channels, + out_channels=feedforward_channels, + kernel_size=3, + stride=1, + padding=(3 - 1) // 2, + bias=True, + groups=feedforward_channels) + fc2 = Conv2d( + in_channels=feedforward_channels, + out_channels=in_channels, + kernel_size=1, + stride=1, + bias=True) + drop = nn.Dropout(ffn_drop) + layers = [fc1, pe_conv, self.activate, drop, fc2, drop] + self.layers = Sequential(*layers) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else torch.nn.Identity() + + def forward(self, x, hw_shape, identity=None): + out = nlc_to_nchw(x, hw_shape) + out = self.layers(out) + out = nchw_to_nlc(out) + if identity is None: + identity = x + return identity + self.dropout_layer(out) + + +class EfficientMultiheadAttention(MultiheadAttention): + """An implementation of Efficient Multi-head Attention of Segformer. + + This module is modified from MultiheadAttention which is a module from + mmcv.cnn.bricks.transformer. + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. Default: None. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default: False. + qkv_bias (bool): enable bias for qkv if True. Default True. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (int): The ratio of spatial reduction of Efficient Multi-head + Attention of Segformer. Default: 1. + """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=None, + init_cfg=None, + batch_first=True, + qkv_bias=False, + norm_cfg=dict(type='LN'), + sr_ratio=1): + super().__init__( + embed_dims, + num_heads, + attn_drop, + proj_drop, + dropout_layer=dropout_layer, + init_cfg=init_cfg, + batch_first=batch_first, + bias=qkv_bias) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = Conv2d( + in_channels=embed_dims, + out_channels=embed_dims, + kernel_size=sr_ratio, + stride=sr_ratio) + # The ret[0] of build_norm_layer is norm name. + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + + # handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa + from mmseg import digit_version, mmcv_version + if mmcv_version < digit_version('1.3.17'): + warnings.warn('The legacy version of forward function in' + 'EfficientMultiheadAttention is deprecated in' + 'mmcv>=1.3.17 and will no longer support in the' + 'future. 
Please upgrade your mmcv.') + self.forward = self.legacy_forward + + def forward(self, x, hw_shape, identity=None): + + x_q = x + if self.sr_ratio > 1: + x_kv = nlc_to_nchw(x, hw_shape) + x_kv = self.sr(x_kv) + x_kv = nchw_to_nlc(x_kv) + x_kv = self.norm(x_kv) + else: + x_kv = x + + if identity is None: + identity = x_q + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + x_q = x_q.transpose(0, 1) + x_kv = x_kv.transpose(0, 1) + + out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out)) + + def legacy_forward(self, x, hw_shape, identity=None): + """multi head attention forward in mmcv version < 1.3.17.""" + + x_q = x + if self.sr_ratio > 1: + x_kv = nlc_to_nchw(x, hw_shape) + x_kv = self.sr(x_kv) + x_kv = nchw_to_nlc(x_kv) + x_kv = self.norm(x_kv) + else: + x_kv = x + + if identity is None: + identity = x_q + + # `need_weights=True` will let nn.MultiHeadAttention + # `return attn_output, attn_output_weights.sum(dim=1) / num_heads` + # The `attn_output_weights.sum(dim=1)` may cause cuda error. So, we set + # `need_weights=False` to ignore `attn_output_weights.sum(dim=1)`. + # This issue - `https://github.com/pytorch/pytorch/issues/37583` report + # the error that large scale tensor sum operation may cause cuda error. + out = self.attn(query=x_q, key=x_kv, value=x_kv, need_weights=False)[0] + + return identity + self.dropout_layer(self.proj_drop(out)) + + +class TransformerEncoderLayer(BaseModule): + """Implements one encoder layer in Segformer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed. + after the feed forward layer. Default 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0. + drop_path_rate (float): stochastic depth rate. Default 0.0. + qkv_bias (bool): enable bias for qkv if True. + Default: True. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default: False. + init_cfg (dict, optional): Initialization config dict. + Default:None. + sr_ratio (int): The ratio of spatial reduction of Efficient Multi-head + Attention of Segformer. Default: 1. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + batch_first=True, + sr_ratio=1, + with_cp=False): + super().__init__() + + # The ret[0] of build_norm_layer is norm name. 
+ self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.attn = EfficientMultiheadAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + batch_first=batch_first, + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + sr_ratio=sr_ratio) + + # The ret[0] of build_norm_layer is norm name. + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.ffn = MixFFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + self.with_cp = with_cp + + def forward(self, x, hw_shape): + + def _inner_forward(x): + x = self.attn(self.norm1(x), hw_shape, identity=x) + x = self.ffn(self.norm2(x), hw_shape, identity=x) + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + return x + + +@MODELS.register_module() +class MixVisionTransformer(BaseModule): + """The backbone of Segformer. + + This backbone is the implementation of `SegFormer: Simple and + Efficient Design for Semantic Segmentation with + Transformers `_. + Args: + in_channels (int): Number of input channels. Default: 3. + embed_dims (int): Embedding dimension. Default: 768. + num_stags (int): The num of stages. Default: 4. + num_layers (Sequence[int]): The layer number of each transformer encode + layer. Default: [3, 4, 6, 3]. + num_heads (Sequence[int]): The attention heads of each transformer + encode layer. Default: [1, 2, 4, 8]. + patch_sizes (Sequence[int]): The patch_size of each overlapped patch + embedding. Default: [7, 3, 3, 3]. + strides (Sequence[int]): The stride of each overlapped patch embedding. + Default: [4, 2, 2, 2]. + sr_ratios (Sequence[int]): The spatial reduction rate of each + transformer encode layer. Default: [8, 4, 2, 1]. + out_indices (Sequence[int] | int): Output from which stages. + Default: (0, 1, 2, 3). + mlp_ratio (int): ratio of mlp hidden dim to embedding dim. + Default: 4. + qkv_bias (bool): Enable bias for qkv if True. Default: True. + drop_rate (float): Probability of an element to be zeroed. + Default 0.0 + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0 + drop_path_rate (float): stochastic depth rate. Default 0.0 + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + pretrained (str, optional): model pretrained path. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. Default: False. 
+ """ + + def __init__(self, + in_channels=3, + embed_dims=64, + num_stages=4, + num_layers=[3, 4, 6, 3], + num_heads=[1, 2, 4, 8], + patch_sizes=[7, 3, 3, 3], + strides=[4, 2, 2, 2], + sr_ratios=[8, 4, 2, 1], + out_indices=(0, 1, 2, 3), + mlp_ratio=4, + qkv_bias=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN', eps=1e-6), + pretrained=None, + init_cfg=None, + with_cp=False): + super().__init__(init_cfg=init_cfg) + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be set at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is not None: + raise TypeError('pretrained must be a str or None') + + self.embed_dims = embed_dims + self.num_stages = num_stages + self.num_layers = num_layers + self.num_heads = num_heads + self.patch_sizes = patch_sizes + self.strides = strides + self.sr_ratios = sr_ratios + self.with_cp = with_cp + assert num_stages == len(num_layers) == len(num_heads) \ + == len(patch_sizes) == len(strides) == len(sr_ratios) + + self.out_indices = out_indices + assert max(out_indices) < self.num_stages + + # transformer encoder + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, sum(num_layers)) + ] # stochastic num_layer decay rule + + cur = 0 + self.layers = ModuleList() + for i, num_layer in enumerate(num_layers): + embed_dims_i = embed_dims * num_heads[i] + patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims_i, + kernel_size=patch_sizes[i], + stride=strides[i], + padding=patch_sizes[i] // 2, + norm_cfg=norm_cfg) + layer = ModuleList([ + TransformerEncoderLayer( + embed_dims=embed_dims_i, + num_heads=num_heads[i], + feedforward_channels=mlp_ratio * embed_dims_i, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[cur + idx], + qkv_bias=qkv_bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp, + sr_ratio=sr_ratios[i]) for idx in range(num_layer) + ]) + in_channels = embed_dims_i + # The ret[0] of build_norm_layer is norm name. + norm = build_norm_layer(norm_cfg, embed_dims_i)[1] + self.layers.append(ModuleList([patch_embed, layer, norm])) + cur += num_layer + + def init_weights(self): + if self.init_cfg is None: + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=.02, bias=0.) + elif isinstance(m, nn.LayerNorm): + constant_init(m, val=1.0, bias=0.) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[ + 1] * m.out_channels + fan_out //= m.groups + normal_init( + m, mean=0, std=math.sqrt(2.0 / fan_out), bias=0) + else: + super().init_weights() + + def forward(self, x): + outs = [] + + for i, layer in enumerate(self.layers): + x, hw_shape = layer[0](x) + for block in layer[1]: + x = block(x, hw_shape) + x = layer[2](x) + x = nlc_to_nchw(x, hw_shape) + if i in self.out_indices: + outs.append(x) + + return outs diff --git a/mmseg/models/backbones/mobilenet_v2.py b/mmseg/models/backbones/mobilenet_v2.py new file mode 100644 index 0000000000..1c21b5df97 --- /dev/null +++ b/mmseg/models/backbones/mobilenet_v2.py @@ -0,0 +1,197 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import warnings + +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmseg.registry import MODELS +from ..utils import InvertedResidual, make_divisible + + +@MODELS.register_module() +class MobileNetV2(BaseModule): + """MobileNetV2 backbone. + + This backbone is the implementation of + `MobileNetV2: Inverted Residuals and Linear Bottlenecks + `_. + + Args: + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Default: 1.0. + strides (Sequence[int], optional): Strides of the first block of each + layer. If not specified, default config in ``arch_setting`` will + be used. + dilations (Sequence[int]): Dilation of each layer. + out_indices (None or Sequence[int]): Output from which stages. + Default: (7, ). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + # Parameters to build layers. 3 parameters are needed to construct a + # layer, from left to right: expand_ratio, channel, num_blocks. + arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4], + [6, 96, 3], [6, 160, 3], [6, 320, 1]] + + def __init__(self, + widen_factor=1., + strides=(1, 2, 2, 2, 1, 2, 1), + dilations=(1, 1, 1, 1, 1, 1, 1), + out_indices=(1, 2, 4, 6), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + norm_eval=False, + with_cp=False, + pretrained=None, + init_cfg=None): + super().__init__(init_cfg) + + self.pretrained = pretrained + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + self.widen_factor = widen_factor + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == len(self.arch_settings) + self.out_indices = out_indices + for index in out_indices: + if index not in range(0, 7): + raise ValueError('the item in out_indices must in ' + f'range(0, 7). But received {index}') + + if frozen_stages not in range(-1, 7): + raise ValueError('frozen_stages must be in range(-1, 7). 
' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.in_channels = make_divisible(32 * widen_factor, 8) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.layers = [] + + for i, layer_cfg in enumerate(self.arch_settings): + expand_ratio, channel, num_blocks = layer_cfg + stride = self.strides[i] + dilation = self.dilations[i] + out_channels = make_divisible(channel * widen_factor, 8) + inverted_res_layer = self.make_layer( + out_channels=out_channels, + num_blocks=num_blocks, + stride=stride, + dilation=dilation, + expand_ratio=expand_ratio) + layer_name = f'layer{i + 1}' + self.add_module(layer_name, inverted_res_layer) + self.layers.append(layer_name) + + def make_layer(self, out_channels, num_blocks, stride, dilation, + expand_ratio): + """Stack InvertedResidual blocks to build a layer for MobileNetV2. + + Args: + out_channels (int): out_channels of block. + num_blocks (int): Number of blocks. + stride (int): Stride of the first block. + dilation (int): Dilation of the first block. + expand_ratio (int): Expand the number of channels of the + hidden layer in InvertedResidual by this ratio. + """ + layers = [] + for i in range(num_blocks): + layers.append( + InvertedResidual( + self.in_channels, + out_channels, + stride if i == 0 else 1, + expand_ratio=expand_ratio, + dilation=dilation if i == 0 else 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + + if len(outs) == 1: + return outs[0] + else: + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmseg/models/backbones/mobilenet_v3.py b/mmseg/models/backbones/mobilenet_v3.py new file mode 100644 index 0000000000..1efb6e0974 --- /dev/null +++ b/mmseg/models/backbones/mobilenet_v3.py @@ -0,0 +1,267 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +from mmcv.cnn import ConvModule +from mmcv.cnn.bricks import Conv2dAdaptivePadding +from mmengine.model import BaseModule +from mmengine.utils import is_tuple_of +from torch.nn.modules.batchnorm import _BatchNorm + +from mmseg.registry import MODELS +from ..utils import InvertedResidualV3 as InvertedResidual + + +@MODELS.register_module() +class MobileNetV3(BaseModule): + """MobileNetV3 backbone. + + This backbone is the improved implementation of `Searching for MobileNetV3 + `_. + + Args: + arch (str): Architecture of mobilnetv3, from {'small', 'large'}. + Default: 'small'. + conv_cfg (dict): Config dict for convolution layer. 
+ Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + out_indices (tuple[int]): Output from which layer. + Default: (0, 1, 12). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Default: False. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + # Parameters to build each block: + # [kernel size, mid channels, out channels, with_se, act type, stride] + arch_settings = { + 'small': [[3, 16, 16, True, 'ReLU', 2], # block0 layer1 os=4 + [3, 72, 24, False, 'ReLU', 2], # block1 layer2 os=8 + [3, 88, 24, False, 'ReLU', 1], + [5, 96, 40, True, 'HSwish', 2], # block2 layer4 os=16 + [5, 240, 40, True, 'HSwish', 1], + [5, 240, 40, True, 'HSwish', 1], + [5, 120, 48, True, 'HSwish', 1], # block3 layer7 os=16 + [5, 144, 48, True, 'HSwish', 1], + [5, 288, 96, True, 'HSwish', 2], # block4 layer9 os=32 + [5, 576, 96, True, 'HSwish', 1], + [5, 576, 96, True, 'HSwish', 1]], + 'large': [[3, 16, 16, False, 'ReLU', 1], # block0 layer1 os=2 + [3, 64, 24, False, 'ReLU', 2], # block1 layer2 os=4 + [3, 72, 24, False, 'ReLU', 1], + [5, 72, 40, True, 'ReLU', 2], # block2 layer4 os=8 + [5, 120, 40, True, 'ReLU', 1], + [5, 120, 40, True, 'ReLU', 1], + [3, 240, 80, False, 'HSwish', 2], # block3 layer7 os=16 + [3, 200, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 480, 112, True, 'HSwish', 1], # block4 layer11 os=16 + [3, 672, 112, True, 'HSwish', 1], + [5, 672, 160, True, 'HSwish', 2], # block5 layer13 os=32 + [5, 960, 160, True, 'HSwish', 1], + [5, 960, 160, True, 'HSwish', 1]] + } # yapf: disable + + def __init__(self, + arch='small', + conv_cfg=None, + norm_cfg=dict(type='BN'), + out_indices=(0, 1, 12), + frozen_stages=-1, + reduction_factor=1, + norm_eval=False, + with_cp=False, + pretrained=None, + init_cfg=None): + super().__init__(init_cfg) + + self.pretrained = pretrained + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + assert arch in self.arch_settings + assert isinstance(reduction_factor, int) and reduction_factor > 0 + assert is_tuple_of(out_indices, int) + for index in out_indices: + if index not in range(0, len(self.arch_settings[arch]) + 2): + raise ValueError( + 'the item in out_indices must in ' + f'range(0, {len(self.arch_settings[arch])+2}). ' + f'But received {index}') + + if frozen_stages not in range(-1, len(self.arch_settings[arch]) + 2): + raise ValueError('frozen_stages must be in range(-1, ' + f'{len(self.arch_settings[arch])+2}). 
' + f'But received {frozen_stages}') + self.arch = arch + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.reduction_factor = reduction_factor + self.norm_eval = norm_eval + self.with_cp = with_cp + self.layers = self._make_layer() + + def _make_layer(self): + layers = [] + + # build the first layer (layer0) + in_channels = 16 + layer = ConvModule( + in_channels=3, + out_channels=in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=dict(type='Conv2dAdaptivePadding'), + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + self.add_module('layer0', layer) + layers.append('layer0') + + layer_setting = self.arch_settings[self.arch] + for i, params in enumerate(layer_setting): + (kernel_size, mid_channels, out_channels, with_se, act, + stride) = params + + if self.arch == 'large' and i >= 12 or self.arch == 'small' and \ + i >= 8: + mid_channels = mid_channels // self.reduction_factor + out_channels = out_channels // self.reduction_factor + + if with_se: + se_cfg = dict( + channels=mid_channels, + ratio=4, + act_cfg=(dict(type='ReLU'), + dict(type='HSigmoid', bias=3.0, divisor=6.0))) + else: + se_cfg = None + + layer = InvertedResidual( + in_channels=in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + with_expand_conv=(in_channels != mid_channels), + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type=act), + with_cp=self.with_cp) + in_channels = out_channels + layer_name = f'layer{i + 1}' + self.add_module(layer_name, layer) + layers.append(layer_name) + + # build the last layer + # block5 layer12 os=32 for small model + # block6 layer16 os=32 for large model + layer = ConvModule( + in_channels=in_channels, + out_channels=576 if self.arch == 'small' else 960, + kernel_size=1, + stride=1, + dilation=4, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + layer_name = f'layer{len(layer_setting) + 1}' + self.add_module(layer_name, layer) + layers.append(layer_name) + + # next, convert backbone MobileNetV3 to a semantic segmentation version + if self.arch == 'small': + self.layer4.depthwise_conv.conv.stride = (1, 1) + self.layer9.depthwise_conv.conv.stride = (1, 1) + for i in range(4, len(layers)): + layer = getattr(self, layers[i]) + if isinstance(layer, InvertedResidual): + modified_module = layer.depthwise_conv.conv + else: + modified_module = layer.conv + + if i < 9: + modified_module.dilation = (2, 2) + pad = 2 + else: + modified_module.dilation = (4, 4) + pad = 4 + + if not isinstance(modified_module, Conv2dAdaptivePadding): + # Adjust padding + pad *= (modified_module.kernel_size[0] - 1) // 2 + modified_module.padding = (pad, pad) + else: + self.layer7.depthwise_conv.conv.stride = (1, 1) + self.layer13.depthwise_conv.conv.stride = (1, 1) + for i in range(7, len(layers)): + layer = getattr(self, layers[i]) + if isinstance(layer, InvertedResidual): + modified_module = layer.depthwise_conv.conv + else: + modified_module = layer.conv + + if i < 13: + modified_module.dilation = (2, 2) + pad = 2 + else: + modified_module.dilation = (4, 4) + pad = 4 + + if not isinstance(modified_module, Conv2dAdaptivePadding): + # Adjust padding + pad *= (modified_module.kernel_size[0] - 1) // 2 + modified_module.padding = (pad, pad) + + return layers + + def forward(self, x): + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = 
layer(x) + if i in self.out_indices: + outs.append(x) + return outs + + def _freeze_stages(self): + for i in range(self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmseg/models/backbones/resnest.py b/mmseg/models/backbones/resnest.py new file mode 100644 index 0000000000..3cc380b446 --- /dev/null +++ b/mmseg/models/backbones/resnest.py @@ -0,0 +1,318 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer + +from mmseg.registry import MODELS +from ..utils import ResLayer +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNetV1d + + +class RSoftmax(nn.Module): + """Radix Softmax module in ``SplitAttentionConv2d``. + + Args: + radix (int): Radix of input. + groups (int): Groups of input. + """ + + def __init__(self, radix, groups): + super().__init__() + self.radix = radix + self.groups = groups + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttentionConv2d(nn.Module): + """Split-Attention Conv2d in ResNeSt. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int | tuple[int]): Same as nn.Conv2d. + stride (int | tuple[int]): Same as nn.Conv2d. + padding (int | tuple[int]): Same as nn.Conv2d. + dilation (int | tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of inter_channels. Default: 4. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. Default: None. + dcn (dict): Config dict for DCN. Default: None. 
+ """ + + def __init__(self, + in_channels, + channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + radix=2, + reduction_factor=4, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None): + super().__init__() + inter_channels = max(in_channels * radix // reduction_factor, 32) + self.radix = radix + self.groups = groups + self.channels = channels + self.with_dcn = dcn is not None + self.dcn = dcn + fallback_on_stride = False + if self.with_dcn: + fallback_on_stride = self.dcn.pop('fallback_on_stride', False) + if self.with_dcn and not fallback_on_stride: + assert conv_cfg is None, 'conv_cfg must be None for DCN' + conv_cfg = dcn + self.conv = build_conv_layer( + conv_cfg, + in_channels, + channels * radix, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups * radix, + bias=False) + self.norm0_name, norm0 = build_norm_layer( + norm_cfg, channels * radix, postfix=0) + self.add_module(self.norm0_name, norm0) + self.relu = nn.ReLU(inplace=True) + self.fc1 = build_conv_layer( + None, channels, inter_channels, 1, groups=self.groups) + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, inter_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.fc2 = build_conv_layer( + None, inter_channels, channels * radix, 1, groups=self.groups) + self.rsoftmax = RSoftmax(radix, groups) + + @property + def norm0(self): + """nn.Module: the normalization layer named "norm0" """ + return getattr(self, self.norm0_name) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def forward(self, x): + x = self.conv(x) + x = self.norm0(x) + x = self.relu(x) + + batch, rchannel = x.shape[:2] + batch = x.size(0) + if self.radix > 1: + splits = x.view(batch, self.radix, -1, *x.shape[2:]) + gap = splits.sum(dim=1) + else: + gap = x + gap = F.adaptive_avg_pool2d(gap, 1) + gap = self.fc1(gap) + + gap = self.norm1(gap) + gap = self.relu(gap) + + atten = self.fc2(gap) + atten = self.rsoftmax(atten).view(batch, -1, 1, 1) + + if self.radix > 1: + attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) + out = torch.sum(attens * splits, dim=1) + else: + out = atten * x + return out.contiguous() + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeSt. + + Args: + inplane (int): Input planes of this block. + planes (int): Middle planes of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of inter_channels in + SplitAttentionConv2d. Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + kwargs (dict): Key word arguments for base class. 
+ """ + expansion = 4 + + def __init__(self, + inplanes, + planes, + groups=1, + base_width=4, + base_channels=64, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + """Bottleneck block for ResNeSt.""" + super().__init__(inplanes, planes, **kwargs) + + if groups == 1: + width = self.planes + else: + width = math.floor(self.planes * + (base_width / base_channels)) * groups + + self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.inplanes, + width, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.with_modulated_dcn = False + self.conv2 = SplitAttentionConv2d( + width, + width, + kernel_size=3, + stride=1 if self.avg_down_stride else self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + radix=radix, + reduction_factor=reduction_factor, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=self.dcn) + delattr(self, self.norm2_name) + + if self.avg_down_stride: + self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) + + self.conv3 = build_conv_layer( + self.conv_cfg, + width, + self.planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv1_plugin_names) + + out = self.conv2(out) + + if self.avg_down_stride: + out = self.avd_layer(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv2_plugin_names) + + out = self.conv3(out) + out = self.norm3(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv3_plugin_names) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@MODELS.register_module() +class ResNeSt(ResNetV1d): + """ResNeSt backbone. + + This backbone is the implementation of `ResNeSt: + Split-Attention Networks `_. + + Args: + groups (int): Number of groups of Bottleneck. Default: 1 + base_width (int): Base width of Bottleneck. Default: 4 + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of inter_channels in + SplitAttentionConv2d. Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + kwargs (dict): Keyword arguments for ResNet. 
+ """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)), + 200: (Bottleneck, (3, 24, 36, 3)) + } + + def __init__(self, + groups=1, + base_width=4, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + self.groups = groups + self.base_width = base_width + self.radix = radix + self.reduction_factor = reduction_factor + self.avg_down_stride = avg_down_stride + super().__init__(**kwargs) + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``.""" + return ResLayer( + groups=self.groups, + base_width=self.base_width, + base_channels=self.base_channels, + radix=self.radix, + reduction_factor=self.reduction_factor, + avg_down_stride=self.avg_down_stride, + **kwargs) diff --git a/mmseg/models/backbones/resnet.py b/mmseg/models/backbones/resnet.py index 4e90c67778..9226c90d85 100644 --- a/mmseg/models/backbones/resnet.py +++ b/mmseg/models/backbones/resnet.py @@ -1,16 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + import torch.nn as nn import torch.utils.checkpoint as cp -from mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer, - constant_init, kaiming_init) -from mmcv.runner import load_checkpoint -from mmcv.utils.parrots_wrapper import _BatchNorm +from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer +from mmengine.model import BaseModule +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmseg.utils import get_root_logger -from ..builder import BACKBONES +from mmseg.registry import MODELS from ..utils import ResLayer -class BasicBlock(nn.Module): +class BasicBlock(BaseModule): """Basic block for ResNet.""" expansion = 1 @@ -26,8 +27,9 @@ def __init__(self, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, - plugins=None): - super(BasicBlock, self).__init__() + plugins=None, + init_cfg=None): + super().__init__(init_cfg) assert dcn is None, 'Not implemented yet.' assert plugins is None, 'Not implemented yet.' @@ -94,7 +96,7 @@ def _inner_forward(x): return out -class Bottleneck(nn.Module): +class Bottleneck(BaseModule): """Bottleneck block for ResNet. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is @@ -114,8 +116,9 @@ def __init__(self, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, - plugins=None): - super(Bottleneck, self).__init__() + plugins=None, + init_cfg=None): + super().__init__(init_cfg) assert style in ['pytorch', 'caffe'] assert dcn is None or isinstance(dcn, dict) assert plugins is None or isinstance(plugins, list) @@ -304,45 +307,68 @@ def _inner_forward(x): return out -@BACKBONES.register_module() -class ResNet(nn.Module): +@MODELS.register_module() +class ResNet(BaseModule): """ResNet backbone. + This backbone is the improved implementation of `Deep Residual Learning + for Image Recognition `_. + Args: depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. - in_channels (int): Number of input image channels. Default" 3. + in_channels (int): Number of input image channels. Default: 3. stem_channels (int): Number of stem channels. Default: 64. base_channels (int): Number of base channels of res layer. Default: 64. - num_stages (int): Resnet stages, normally 4. + num_stages (int): Resnet stages, normally 4. Default: 4. strides (Sequence[int]): Strides of the first block of each stage. + Default: (1, 2, 2, 2). dilations (Sequence[int]): Dilation of each stage. + Default: (1, 1, 1, 1). 
out_indices (Sequence[int]): Output from which stages. + Default: (0, 1, 2, 3). style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv + the first 1x1 conv layer. Default: 'pytorch'. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. + downsampling in the bottleneck. Default: False. frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): Dictionary to construct and config conv layer. + When conv_cfg is None, cfg will be set to dict(type='Conv2d'). + Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. + and its variants only. Default: False. + dcn (dict | None): Dictionary to construct and config DCN conv layer. + When dcn is not None, conv_cfg must be None. Default: None. + stage_with_dcn (Sequence[bool]): Whether to set DCN conv for each + stage. The length of stage_with_dcn is equal to num_stages. + Default: (False, False, False, False). plugins (list[dict]): List of plugins for stages, each dict contains: - cfg (dict, required): Cfg dict to build plugin. - position (str, required): Position inside block to insert plugin, - options: 'after_conv1', 'after_conv2', 'after_conv3'. - stages (tuple[bool], optional): Stages to apply plugin, length - should be same as 'num_stages' + + - cfg (dict, required): Cfg dict to build plugin. + + - position (str, required): Position inside block to insert plugin, + options: 'after_conv1', 'after_conv2', 'after_conv3'. + + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + Default: None. multi_grid (Sequence[int]|None): Multi grid dilation rates of last - stage. Default: None + stage. Default: None. contract_dilation (bool): Whether contract first dilation of each layer - Default: False + Default: False. with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. + memory while slowing down the training speed. Default: False. zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. + in resblocks to let them behave as identity. Default: True. + pretrained (str, optional): model pretrained path. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
Example: >>> from mmseg.models import ResNet @@ -389,10 +415,46 @@ def __init__(self, multi_grid=None, contract_dilation=False, with_cp=False, - zero_init_residual=True): - super(ResNet, self).__init__() + zero_init_residual=True, + pretrained=None, + init_cfg=None): + super().__init__(init_cfg) if depth not in self.arch_settings: raise KeyError(f'invalid depth {depth} for resnet') + + self.pretrained = pretrained + self.zero_init_residual = zero_init_residual + block_init_cfg = None + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + block = self.arch_settings[depth][0] + if self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm3')) + else: + raise TypeError('pretrained must be a str or None') + self.depth = depth self.stem_channels = stem_channels self.base_channels = base_channels @@ -418,7 +480,6 @@ def __init__(self, self.plugins = plugins self.multi_grid = multi_grid self.contract_dilation = contract_dilation - self.zero_init_residual = zero_init_residual self.block, stage_blocks = self.arch_settings[depth] self.stage_blocks = stage_blocks[:num_stages] self.inplanes = stem_channels @@ -453,7 +514,8 @@ def __init__(self, dcn=dcn, plugins=stage_plugins, multi_grid=stage_multi_grid, - contract_dilation=contract_dilation) + contract_dilation=contract_dilation, + init_cfg=block_init_cfg) self.inplanes = planes * self.block.expansion layer_name = f'layer{i+1}' self.add_module(layer_name, res_layer) @@ -594,38 +656,6 @@ def _freeze_stages(self): for param in m.parameters(): param.requires_grad = False - def init_weights(self, pretrained=None): - """Initialize the weights in backbone. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. 
- """ - if isinstance(pretrained, str): - logger = get_root_logger() - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, (_BatchNorm, nn.GroupNorm)): - constant_init(m, 1) - - if self.dcn is not None: - for m in self.modules(): - if isinstance(m, Bottleneck) and hasattr( - m, 'conv2_offset'): - constant_init(m.conv2_offset, 0) - - if self.zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - constant_init(m.norm3, 0) - elif isinstance(m, BasicBlock): - constant_init(m.norm2, 0) - else: - raise TypeError('pretrained must be a str or None') - def forward(self, x): """Forward function.""" if self.deep_stem: @@ -646,7 +676,7 @@ def forward(self, x): def train(self, mode=True): """Convert the model into training mode while keep normalization layer freezed.""" - super(ResNet, self).train(mode) + super().train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): @@ -655,35 +685,28 @@ def train(self, mode=True): m.eval() -@BACKBONES.register_module() +@MODELS.register_module() class ResNetV1c(ResNet): """ResNetV1c variant described in [1]_. - Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv - in the input stem with three 3x3 convs. - - References: - .. [1] https://arxiv.org/pdf/1812.01187.pdf + Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv in + the input stem with three 3x3 convs. For more details please refer to `Bag + of Tricks for Image Classification with Convolutional Neural Networks + `_. """ def __init__(self, **kwargs): - super(ResNetV1c, self).__init__( - deep_stem=True, avg_down=False, **kwargs) + super().__init__(deep_stem=True, avg_down=False, **kwargs) -@BACKBONES.register_module() +@MODELS.register_module() class ResNetV1d(ResNet): """ResNetV1d variant described in [1]_. - Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv - in the input stem with three 3x3 convs. And in the downsampling block, - a 2x2 avg_pool with stride 2 is added before conv, whose stride is - changed to 1. - - References: - .. [1] https://arxiv.org/pdf/1812.01187.pdf + Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in + the input stem with three 3x3 convs. And in the downsampling block, a 2x2 + avg_pool with stride 2 is added before conv, whose stride is changed to 1. """ def __init__(self, **kwargs): - super(ResNetV1d, self).__init__( - deep_stem=True, avg_down=True, **kwargs) + super().__init__(deep_stem=True, avg_down=True, **kwargs) diff --git a/mmseg/models/backbones/resnext.py b/mmseg/models/backbones/resnext.py index fa8149ce2f..67a244a12f 100644 --- a/mmseg/models/backbones/resnext.py +++ b/mmseg/models/backbones/resnext.py @@ -1,8 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. import math from mmcv.cnn import build_conv_layer, build_norm_layer -from ..builder import BACKBONES +from mmseg.registry import MODELS from ..utils import ResLayer from .resnet import Bottleneck as _Bottleneck from .resnet import ResNet @@ -22,7 +23,7 @@ def __init__(self, base_width=4, base_channels=64, **kwargs): - super(Bottleneck, self).__init__(inplanes, planes, **kwargs) + super().__init__(inplanes, planes, **kwargs) if groups == 1: width = self.planes @@ -83,10 +84,14 @@ def __init__(self, self.add_module(self.norm3_name, norm3) -@BACKBONES.register_module() +@MODELS.register_module() class ResNeXt(ResNet): """ResNeXt backbone. 
+ This backbone is the implementation of `Aggregated + Residual Transformations for Deep Neural + Networks `_. + Args: depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. in_channels (int): Number of input image channels. Normally 3. @@ -134,7 +139,7 @@ class ResNeXt(ResNet): def __init__(self, groups=1, base_width=4, **kwargs): self.groups = groups self.base_width = base_width - super(ResNeXt, self).__init__(**kwargs) + super().__init__(**kwargs) def make_res_layer(self, **kwargs): """Pack all blocks in a stage into a ``ResLayer``""" diff --git a/mmseg/models/backbones/stdc.py b/mmseg/models/backbones/stdc.py new file mode 100644 index 0000000000..758a3c92e0 --- /dev/null +++ b/mmseg/models/backbones/stdc.py @@ -0,0 +1,422 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Modified from https://github.com/MichaelFan01/STDC-Seg.""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule, ModuleList, Sequential + +from mmseg.registry import MODELS +from ..utils import resize +from .bisenetv1 import AttentionRefinementModule + + +class STDCModule(BaseModule): + """STDCModule. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels before scaling. + stride (int): The number of stride for the first conv layer. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): The activation config for conv layers. + num_convs (int): Numbers of conv layers. + fusion_type (str): Type of fusion operation. Default: 'add'. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + stride, + norm_cfg=None, + act_cfg=None, + num_convs=4, + fusion_type='add', + init_cfg=None): + super().__init__(init_cfg=init_cfg) + assert num_convs > 1 + assert fusion_type in ['add', 'cat'] + self.stride = stride + self.with_downsample = True if self.stride == 2 else False + self.fusion_type = fusion_type + + self.layers = ModuleList() + conv_0 = ConvModule( + in_channels, out_channels // 2, kernel_size=1, norm_cfg=norm_cfg) + + if self.with_downsample: + self.downsample = ConvModule( + out_channels // 2, + out_channels // 2, + kernel_size=3, + stride=2, + padding=1, + groups=out_channels // 2, + norm_cfg=norm_cfg, + act_cfg=None) + + if self.fusion_type == 'add': + self.layers.append(nn.Sequential(conv_0, self.downsample)) + self.skip = Sequential( + ConvModule( + in_channels, + in_channels, + kernel_size=3, + stride=2, + padding=1, + groups=in_channels, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + in_channels, + out_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=None)) + else: + self.layers.append(conv_0) + self.skip = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) + else: + self.layers.append(conv_0) + + for i in range(1, num_convs): + out_factor = 2**(i + 1) if i != num_convs - 1 else 2**i + self.layers.append( + ConvModule( + out_channels // 2**i, + out_channels // out_factor, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, inputs): + if self.fusion_type == 'add': + out = self.forward_add(inputs) + else: + out = self.forward_cat(inputs) + return out + + def forward_add(self, inputs): + layer_outputs = [] + x = inputs.clone() + for layer in self.layers: + x = layer(x) + layer_outputs.append(x) + if self.with_downsample: + inputs = self.skip(inputs) + + return torch.cat(layer_outputs, 
dim=1) + inputs + + def forward_cat(self, inputs): + x0 = self.layers[0](inputs) + layer_outputs = [x0] + for i, layer in enumerate(self.layers[1:]): + if i == 0: + if self.with_downsample: + x = layer(self.downsample(x0)) + else: + x = layer(x0) + else: + x = layer(x) + layer_outputs.append(x) + if self.with_downsample: + layer_outputs[0] = self.skip(x0) + return torch.cat(layer_outputs, dim=1) + + +class FeatureFusionModule(BaseModule): + """Feature Fusion Module. This module is different from FeatureFusionModule + in BiSeNetV1. It uses two ConvModules in `self.attention` whose inter + channel number is calculated by given `scale_factor`, while + FeatureFusionModule in BiSeNetV1 only uses one ConvModule in + `self.conv_atten`. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + scale_factor (int): The number of channel scale factor. + Default: 4. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): The activation config for conv layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + scale_factor=4, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + channels = out_channels // scale_factor + self.conv0 = ConvModule( + in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=act_cfg) + self.attention = nn.Sequential( + nn.AdaptiveAvgPool2d((1, 1)), + ConvModule( + out_channels, + channels, + 1, + norm_cfg=None, + bias=False, + act_cfg=act_cfg), + ConvModule( + channels, + out_channels, + 1, + norm_cfg=None, + bias=False, + act_cfg=None), nn.Sigmoid()) + + def forward(self, spatial_inputs, context_inputs): + inputs = torch.cat([spatial_inputs, context_inputs], dim=1) + x = self.conv0(inputs) + attn = self.attention(x) + x_attn = x * attn + return x_attn + x + + +@MODELS.register_module() +class STDCNet(BaseModule): + """This backbone is the implementation of `Rethinking BiSeNet For Real-time + Semantic Segmentation `_. + + Args: + stdc_type (int): The type of backbone structure, + `STDCNet1` and`STDCNet2` denotes two main backbones in paper, + whose FLOPs is 813M and 1446M, respectively. + in_channels (int): The num of input_channels. + channels (tuple[int]): The output channels for each stage. + bottleneck_type (str): The type of STDC Module type, the value must + be 'add' or 'cat'. + norm_cfg (dict): Config dict for normalization layer. + act_cfg (dict): The activation config for conv layers. + num_convs (int): Numbers of conv layer at each STDC Module. + Default: 4. + with_final_conv (bool): Whether add a conv layer at the Module output. + Default: True. + pretrained (str, optional): Model pretrained path. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + + Example: + >>> import torch + >>> stdc_type = 'STDCNet1' + >>> in_channels = 3 + >>> channels = (32, 64, 256, 512, 1024) + >>> bottleneck_type = 'cat' + >>> inputs = torch.rand(1, 3, 1024, 2048) + >>> self = STDCNet(stdc_type, in_channels, + ... channels, bottleneck_type).eval() + >>> outputs = self.forward(inputs) + >>> for i in range(len(outputs)): + ... 
print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 256, 128, 256]) + outputs[1].shape = torch.Size([1, 512, 64, 128]) + outputs[2].shape = torch.Size([1, 1024, 32, 64]) + """ + + arch_settings = { + 'STDCNet1': [(2, 1), (2, 1), (2, 1)], + 'STDCNet2': [(2, 1, 1, 1), (2, 1, 1, 1, 1), (2, 1, 1)] + } + + def __init__(self, + stdc_type, + in_channels, + channels, + bottleneck_type, + norm_cfg, + act_cfg, + num_convs=4, + with_final_conv=False, + pretrained=None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + assert stdc_type in self.arch_settings, \ + f'invalid structure {stdc_type} for STDCNet.' + assert bottleneck_type in ['add', 'cat'],\ + f'bottleneck_type must be `add` or `cat`, got {bottleneck_type}' + + assert len(channels) == 5,\ + f'invalid channels length {len(channels)} for STDCNet.' + + self.in_channels = in_channels + self.channels = channels + self.stage_strides = self.arch_settings[stdc_type] + self.prtrained = pretrained + self.num_convs = num_convs + self.with_final_conv = with_final_conv + + self.stages = ModuleList([ + ConvModule( + self.in_channels, + self.channels[0], + kernel_size=3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + self.channels[0], + self.channels[1], + kernel_size=3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + ]) + # `self.num_shallow_features` is the number of shallow modules in + # `STDCNet`, which is noted as `Stage1` and `Stage2` in original paper. + # They are both not used for following modules like Attention + # Refinement Module and Feature Fusion Module. + # Thus they would be cut from `outs`. Please refer to Figure 4 + # of original paper for more details. + self.num_shallow_features = len(self.stages) + + for strides in self.stage_strides: + idx = len(self.stages) - 1 + self.stages.append( + self._make_stage(self.channels[idx], self.channels[idx + 1], + strides, norm_cfg, act_cfg, bottleneck_type)) + # After appending, `self.stages` is a ModuleList including several + # shallow modules and STDCModules. + # (len(self.stages) == + # self.num_shallow_features + len(self.stage_strides)) + if self.with_final_conv: + self.final_conv = ConvModule( + self.channels[-1], + max(1024, self.channels[-1]), + 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def _make_stage(self, in_channels, out_channels, strides, norm_cfg, + act_cfg, bottleneck_type): + layers = [] + for i, stride in enumerate(strides): + layers.append( + STDCModule( + in_channels if i == 0 else out_channels, + out_channels, + stride, + norm_cfg, + act_cfg, + num_convs=self.num_convs, + fusion_type=bottleneck_type)) + return Sequential(*layers) + + def forward(self, x): + outs = [] + for stage in self.stages: + x = stage(x) + outs.append(x) + if self.with_final_conv: + outs[-1] = self.final_conv(outs[-1]) + outs = outs[self.num_shallow_features:] + return tuple(outs) + + +@MODELS.register_module() +class STDCContextPathNet(BaseModule): + """STDCNet with Context Path. The `outs` below is a list of three feature + maps from deep to shallow, whose height and width is from small to big, + respectively. The biggest feature map of `outs` is outputted for + `STDCHead`, where Detail Loss would be calculated by Detail Ground-truth. + The other two feature maps are used for Attention Refinement Module, + respectively. Besides, the biggest feature map of `outs` and the last + output of Attention Refinement Module are concatenated for Feature Fusion + Module. 
Then, this fusion feature map `feat_fuse` would be outputted for + `decode_head`. More details please refer to Figure 4 of original paper. + + Args: + backbone_cfg (dict): Config dict for stdc backbone. + last_in_channels (tuple(int)), The number of channels of last + two feature maps from stdc backbone. Default: (1024, 512). + out_channels (int): The channels of output feature maps. + Default: 128. + ffm_cfg (dict): Config dict for Feature Fusion Module. Default: + `dict(in_channels=512, out_channels=256, scale_factor=4)`. + upsample_mode (str): Algorithm used for upsampling: + ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | + ``'trilinear'``. Default: ``'nearest'``. + align_corners (str): align_corners argument of F.interpolate. It + must be `None` if upsample_mode is ``'nearest'``. Default: None. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + + Return: + outputs (tuple): The tuple of list of output feature map for + auxiliary heads and decoder head. + """ + + def __init__(self, + backbone_cfg, + last_in_channels=(1024, 512), + out_channels=128, + ffm_cfg=dict( + in_channels=512, out_channels=256, scale_factor=4), + upsample_mode='nearest', + align_corners=None, + norm_cfg=dict(type='BN'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.backbone = MODELS.build(backbone_cfg) + self.arms = ModuleList() + self.convs = ModuleList() + for channels in last_in_channels: + self.arms.append(AttentionRefinementModule(channels, out_channels)) + self.convs.append( + ConvModule( + out_channels, + out_channels, + 3, + padding=1, + norm_cfg=norm_cfg)) + self.conv_avg = ConvModule( + last_in_channels[0], out_channels, 1, norm_cfg=norm_cfg) + + self.ffm = FeatureFusionModule(**ffm_cfg) + + self.upsample_mode = upsample_mode + self.align_corners = align_corners + + def forward(self, x): + outs = list(self.backbone(x)) + avg = F.adaptive_avg_pool2d(outs[-1], 1) + avg_feat = self.conv_avg(avg) + + feature_up = resize( + avg_feat, + size=outs[-1].shape[2:], + mode=self.upsample_mode, + align_corners=self.align_corners) + arms_out = [] + for i in range(len(self.arms)): + x_arm = self.arms[i](outs[len(outs) - 1 - i]) + feature_up + feature_up = resize( + x_arm, + size=outs[len(outs) - 1 - i - 1].shape[2:], + mode=self.upsample_mode, + align_corners=self.align_corners) + feature_up = self.convs[i](feature_up) + arms_out.append(feature_up) + + feat_fuse = self.ffm(outs[0], arms_out[1]) + + # The `outputs` has four feature maps. + # `outs[0]` is outputted for `STDCHead` auxiliary head. + # Two feature maps of `arms_out` are outputted for auxiliary head. + # `feat_fuse` is outputted for decoder head. + outputs = [outs[0]] + list(arms_out) + [feat_fuse] + return tuple(outputs) diff --git a/mmseg/models/backbones/swin.py b/mmseg/models/backbones/swin.py new file mode 100644 index 0000000000..c0ace3c139 --- /dev/null +++ b/mmseg/models/backbones/swin.py @@ -0,0 +1,755 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
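+# This module implements the Swin Transformer backbone: window-based
+# multi-head self-attention (WindowMSA), its shifted-window variant
+# (ShiftWindowMSA), the SwinBlock/SwinBlockSequence stages, and the
+# SwinTransformer class registered below.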
+import warnings +from collections import OrderedDict +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, build_dropout +from mmengine.logging import print_log +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import (constant_init, trunc_normal_, + trunc_normal_init) +from mmengine.runner import CheckpointLoader +from mmengine.utils import to_2tuple + +from mmseg.registry import MODELS +from ..utils.embed import PatchEmbed, PatchMerging + + +class WindowMSA(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int]): The height and width of the window. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + init_cfg (dict | None, optional): The Config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # About 2x faster than original impl + Wh, Ww = self.window_size + rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) + rel_position_index = rel_index_coords + rel_index_coords.T + rel_position_index = rel_position_index.flip(1).contiguous() + self.register_buffer('relative_position_index', rel_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + + self.softmax = nn.Softmax(dim=-1) + + def init_weights(self): + trunc_normal_(self.relative_position_bias_table, std=0.02) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (num_windows*B, N, C) + mask (tensor | None, Optional): mask with shape of (num_windows, + Wh*Ww, Wh*Ww), value should be between (-inf, 0]. 
+ """ + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + # make torchscript happy (cannot use tensor as tuple) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @staticmethod + def double_step_seq(step1, len1, step2, len2): + seq1 = torch.arange(0, step1 * len1, step1) + seq2 = torch.arange(0, step2 * len2, step2) + return (seq1[:, None] + seq2[None, :]).reshape(1, -1) + + +class ShiftWindowMSA(BaseModule): + """Shifted Window Multihead Self-Attention Module. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. + shift_size (int, optional): The shift step of each window towards + right-bottom. If zero, act as regular window-msa. Defaults to 0. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Defaults: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Defaults: 0. + proj_drop_rate (float, optional): Dropout ratio of output. + Defaults: 0. + dropout_layer (dict, optional): The dropout_layer used before output. + Defaults: dict(type='DropPath', drop_prob=0.). + init_cfg (dict, optional): The extra config for initialization. + Default: None. 
+ """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + shift_size=0, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0, + proj_drop_rate=0, + dropout_layer=dict(type='DropPath', drop_prob=0.), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.window_size = window_size + self.shift_size = shift_size + assert 0 <= self.shift_size < self.window_size + + self.w_msa = WindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=to_2tuple(window_size), + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=proj_drop_rate, + init_cfg=None) + + self.drop = build_dropout(dropout_layer) + + def forward(self, query, hw_shape): + B, L, C = query.shape + H, W = hw_shape + assert L == H * W, 'input feature has wrong size' + query = query.view(B, H, W, C) + + # pad feature maps to multiples of window size + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) + H_pad, W_pad = query.shape[1], query.shape[2] + + # cyclic shift + if self.shift_size > 0: + shifted_query = torch.roll( + query, + shifts=(-self.shift_size, -self.shift_size), + dims=(1, 2)) + + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device) + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + # nW, window_size, window_size, 1 + mask_windows = self.window_partition(img_mask) + mask_windows = mask_windows.view( + -1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, + float(-100.0)).masked_fill( + attn_mask == 0, float(0.0)) + else: + shifted_query = query + attn_mask = None + + # nW*B, window_size, window_size, C + query_windows = self.window_partition(shifted_query) + # nW*B, window_size*window_size, C + query_windows = query_windows.view(-1, self.window_size**2, C) + + # W-MSA/SW-MSA (nW*B, window_size*window_size, C) + attn_windows = self.w_msa(query_windows, mask=attn_mask) + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, + self.window_size, C) + + # B H' W' C + shifted_x = self.window_reverse(attn_windows, H_pad, W_pad) + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll( + shifted_x, + shifts=(self.shift_size, self.shift_size), + dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + x = self.drop(x) + return x + + def window_reverse(self, windows, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + window_size = self.window_size + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, + window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + def window_partition(self, x): + """ + Args: + x: (B, H, W, C) + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + window_size = 
self.window_size + x = x.view(B, H // window_size, window_size, W // window_size, + window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() + windows = windows.view(-1, window_size, window_size, C) + return windows + + +class SwinBlock(BaseModule): + """" + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + window_size (int, optional): The local window scale. Default: 7. + shift (bool, optional): whether to shift window or not. Default False. + qkv_bias (bool, optional): enable bias for qkv if True. Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout rate. Default: 0. + attn_drop_rate (float, optional): Attention dropout rate. Default: 0. + drop_path_rate (float, optional): Stochastic depth rate. Default: 0. + act_cfg (dict, optional): The config dict of activation function. + Default: dict(type='GELU'). + norm_cfg (dict, optional): The config dict of normalization. + Default: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + init_cfg (dict | list | None, optional): The init config. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + window_size=7, + shift=False, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + + self.with_cp = with_cp + + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = ShiftWindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=window_size, + shift_size=window_size // 2 if shift else 0, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + init_cfg=None) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=2, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=True, + init_cfg=None) + + def forward(self, x, hw_shape): + + def _inner_forward(x): + identity = x + x = self.norm1(x) + x = self.attn(x, hw_shape) + + x = x + identity + + identity = x + x = self.norm2(x) + x = self.ffn(x, identity=identity) + + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +class SwinBlockSequence(BaseModule): + """Implements one stage in Swin Transformer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + depth (int): The number of blocks in this stage. + window_size (int, optional): The local window scale. Default: 7. + qkv_bias (bool, optional): enable bias for qkv if True. Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout rate. Default: 0. + attn_drop_rate (float, optional): Attention dropout rate. Default: 0. + drop_path_rate (float | list[float], optional): Stochastic depth + rate. Default: 0. 
+ downsample (BaseModule | None, optional): The downsample operation + module. Default: None. + act_cfg (dict, optional): The config dict of activation function. + Default: dict(type='GELU'). + norm_cfg (dict, optional): The config dict of normalization. + Default: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + init_cfg (dict | list | None, optional): The init config. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + depth, + window_size=7, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + downsample=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(drop_path_rate, list): + drop_path_rates = drop_path_rate + assert len(drop_path_rates) == depth + else: + drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)] + + self.blocks = ModuleList() + for i in range(depth): + block = SwinBlock( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=feedforward_channels, + window_size=window_size, + shift=False if i % 2 == 0 else True, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rates[i], + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp, + init_cfg=None) + self.blocks.append(block) + + self.downsample = downsample + + def forward(self, x, hw_shape): + for block in self.blocks: + x = block(x, hw_shape) + + if self.downsample: + x_down, down_hw_shape = self.downsample(x, hw_shape) + return x_down, down_hw_shape, x, hw_shape + else: + return x, hw_shape, x, hw_shape + + +@MODELS.register_module() +class SwinTransformer(BaseModule): + """Swin Transformer backbone. + + This backbone is the implementation of `Swin Transformer: + Hierarchical Vision Transformer using Shifted + Windows `_. + Inspiration from https://github.com/microsoft/Swin-Transformer. + + Args: + pretrain_img_size (int | tuple[int]): The size of input image when + pretrain. Defaults: 224. + in_channels (int): The num of input channels. + Defaults: 3. + embed_dims (int): The feature dimension. Default: 96. + patch_size (int | tuple[int]): Patch size. Default: 4. + window_size (int): Window size. Default: 7. + mlp_ratio (int | float): Ratio of mlp hidden dim to embedding dim. + Default: 4. + depths (tuple[int]): Depths of each Swin Transformer stage. + Default: (2, 2, 6, 2). + num_heads (tuple[int]): Parallel attention heads of each Swin + Transformer stage. Default: (3, 6, 12, 24). + strides (tuple[int]): The patch merging or patch embedding stride of + each Swin Transformer stage. (In swin, we set kernel size equal to + stride.) Default: (4, 2, 2, 2). + out_indices (tuple[int]): Output from which stages. + Default: (0, 1, 2, 3). + qkv_bias (bool, optional): If True, add a learnable bias to query, key, + value. Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + patch_norm (bool): If add a norm layer for patch embed and patch + merging. Default: True. + drop_rate (float): Dropout rate. Defaults: 0. + attn_drop_rate (float): Attention dropout rate. Default: 0. + drop_path_rate (float): Stochastic depth rate. Defaults: 0.1. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults: False. 
+ act_cfg (dict): Config dict for activation layer. + Default: dict(type='LN'). + norm_cfg (dict): Config dict for normalization layer at + output of backone. Defaults: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + pretrained (str, optional): model pretrained path. Default: None. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + pretrain_img_size=224, + in_channels=3, + embed_dims=96, + patch_size=4, + window_size=7, + mlp_ratio=4, + depths=(2, 2, 6, 2), + num_heads=(3, 6, 12, 24), + strides=(4, 2, 2, 2), + out_indices=(0, 1, 2, 3), + qkv_bias=True, + qk_scale=None, + patch_norm=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + use_abs_pos_embed=False, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + pretrained=None, + frozen_stages=-1, + init_cfg=None): + self.frozen_stages = frozen_stages + + if isinstance(pretrain_img_size, int): + pretrain_img_size = to_2tuple(pretrain_img_size) + elif isinstance(pretrain_img_size, tuple): + if len(pretrain_img_size) == 1: + pretrain_img_size = to_2tuple(pretrain_img_size[0]) + assert len(pretrain_img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(pretrain_img_size)}' + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + init_cfg = init_cfg + else: + raise TypeError('pretrained must be a str or None') + + super().__init__(init_cfg=init_cfg) + + num_layers = len(depths) + self.out_indices = out_indices + self.use_abs_pos_embed = use_abs_pos_embed + + assert strides[0] == patch_size, 'Use non-overlapping patch embed.' 
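+        # The stem below cuts the input into non-overlapping patch_size x
+        # patch_size patches (kernel size equals stride) and projects each
+        # patch to embed_dims channels, e.g. a 224x224 image with
+        # patch_size=4 and embed_dims=96 becomes 56*56 tokens of dimension 96.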
+ + self.patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=strides[0], + padding='corner', + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None) + + if self.use_abs_pos_embed: + patch_row = pretrain_img_size[0] // patch_size + patch_col = pretrain_img_size[1] // patch_size + num_patches = patch_row * patch_col + self.absolute_pos_embed = nn.Parameter( + torch.zeros((1, num_patches, embed_dims))) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + # set stochastic depth decay rule + total_depth = sum(depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] + + self.stages = ModuleList() + in_channels = embed_dims + for i in range(num_layers): + if i < num_layers - 1: + downsample = PatchMerging( + in_channels=in_channels, + out_channels=2 * in_channels, + stride=strides[i + 1], + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None) + else: + downsample = None + + stage = SwinBlockSequence( + embed_dims=in_channels, + num_heads=num_heads[i], + feedforward_channels=int(mlp_ratio * in_channels), + depth=depths[i], + window_size=window_size, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])], + downsample=downsample, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp, + init_cfg=None) + self.stages.append(stage) + if downsample: + in_channels = downsample.out_channels + + self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)] + # Add a norm layer for each output + for i in out_indices: + layer = build_norm_layer(norm_cfg, self.num_features[i])[1] + layer_name = f'norm{i}' + self.add_module(layer_name, layer) + + def train(self, mode=True): + """Convert the model into training mode while keep layers freezed.""" + super().train(mode) + self._freeze_stages() + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + if self.use_abs_pos_embed: + self.absolute_pos_embed.requires_grad = False + self.drop_after_pos.eval() + + for i in range(1, self.frozen_stages + 1): + + if (i - 1) in self.out_indices: + norm_layer = getattr(self, f'norm{i-1}') + norm_layer.eval() + for param in norm_layer.parameters(): + param.requires_grad = False + + m = self.stages[i - 1] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + if self.init_cfg is None: + print_log(f'No pre-trained weights for ' + f'{self.__class__.__name__}, ' + f'training start from scratch') + if self.use_abs_pos_embed: + trunc_normal_(self.absolute_pos_embed, std=0.02) + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=.02, bias=0.) + elif isinstance(m, nn.LayerNorm): + constant_init(m, val=1.0, bias=0.) 
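+        # Otherwise a checkpoint is given in init_cfg: the branch below loads
+        # it, strips 'backbone.'/'module.' prefixes, reshapes the absolute
+        # position embedding and bicubically interpolates the relative
+        # position bias tables when the stored size differs from the current
+        # model, then loads the remapped state_dict non-strictly.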
+ else: + assert 'checkpoint' in self.init_cfg, f'Only support ' \ + f'specify `Pretrained` in ' \ + f'`init_cfg` in ' \ + f'{self.__class__.__name__} ' + ckpt = CheckpointLoader.load_checkpoint( + self.init_cfg['checkpoint'], logger=None, map_location='cpu') + if 'state_dict' in ckpt: + _state_dict = ckpt['state_dict'] + elif 'model' in ckpt: + _state_dict = ckpt['model'] + else: + _state_dict = ckpt + + state_dict = OrderedDict() + for k, v in _state_dict.items(): + if k.startswith('backbone.'): + state_dict[k[9:]] = v + else: + state_dict[k] = v + + # strip prefix of state_dict + if list(state_dict.keys())[0].startswith('module.'): + state_dict = {k[7:]: v for k, v in state_dict.items()} + + # reshape absolute position embedding + if state_dict.get('absolute_pos_embed') is not None: + absolute_pos_embed = state_dict['absolute_pos_embed'] + N1, L, C1 = absolute_pos_embed.size() + N2, C2, H, W = self.absolute_pos_embed.size() + if N1 != N2 or C1 != C2 or L != H * W: + print_log('Error in loading absolute_pos_embed, pass') + else: + state_dict['absolute_pos_embed'] = absolute_pos_embed.view( + N2, H, W, C2).permute(0, 3, 1, 2).contiguous() + + # interpolate position bias table if needed + relative_position_bias_table_keys = [ + k for k in state_dict.keys() + if 'relative_position_bias_table' in k + ] + for table_key in relative_position_bias_table_keys: + table_pretrained = state_dict[table_key] + table_current = self.state_dict()[table_key] + L1, nH1 = table_pretrained.size() + L2, nH2 = table_current.size() + if nH1 != nH2: + print_log(f'Error in loading {table_key}, pass') + elif L1 != L2: + S1 = int(L1**0.5) + S2 = int(L2**0.5) + table_pretrained_resized = F.interpolate( + table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1), + size=(S2, S2), + mode='bicubic') + state_dict[table_key] = table_pretrained_resized.view( + nH2, L2).permute(1, 0).contiguous() + + # load state_dict + self.load_state_dict(state_dict, strict=False) + + def forward(self, x): + x, hw_shape = self.patch_embed(x) + + if self.use_abs_pos_embed: + x = x + self.absolute_pos_embed + x = self.drop_after_pos(x) + + outs = [] + for i, stage in enumerate(self.stages): + x, hw_shape, out, out_hw_shape = stage(x, hw_shape) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(out) + out = out.view(-1, *out_hw_shape, + self.num_features[i]).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + + return outs diff --git a/mmseg/models/backbones/timm_backbone.py b/mmseg/models/backbones/timm_backbone.py new file mode 100644 index 0000000000..1eef302bdd --- /dev/null +++ b/mmseg/models/backbones/timm_backbone.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +try: + import timm +except ImportError: + timm = None + +from mmengine.model import BaseModule +from mmengine.registry import MODELS as MMENGINE_MODELS + +from mmseg.registry import MODELS + + +@MODELS.register_module() +class TIMMBackbone(BaseModule): + """Wrapper to use backbones from timm library. More details can be found in + `timm `_ . + + Args: + model_name (str): Name of timm model to instantiate. + pretrained (bool): Load pretrained weights if True. + checkpoint_path (str): Path of checkpoint to load after + model is initialized. + in_channels (int): Number of input image channels. Default: 3. + init_cfg (dict, optional): Initialization config dict + **kwargs: Other timm & model specific arguments. 
+ """ + + def __init__( + self, + model_name, + features_only=True, + pretrained=True, + checkpoint_path='', + in_channels=3, + init_cfg=None, + **kwargs, + ): + if timm is None: + raise RuntimeError('timm is not installed') + super().__init__(init_cfg) + if 'norm_layer' in kwargs: + kwargs['norm_layer'] = MMENGINE_MODELS.get(kwargs['norm_layer']) + self.timm_model = timm.create_model( + model_name=model_name, + features_only=features_only, + pretrained=pretrained, + in_chans=in_channels, + checkpoint_path=checkpoint_path, + **kwargs, + ) + + # Make unused parameters None + self.timm_model.global_pool = None + self.timm_model.fc = None + self.timm_model.classifier = None + + # Hack to use pretrained weights from timm + if pretrained or checkpoint_path: + self._is_init = True + + def forward(self, x): + features = self.timm_model(x) + return features diff --git a/mmseg/models/backbones/twins.py b/mmseg/models/backbones/twins.py new file mode 100644 index 0000000000..b6a6eea795 --- /dev/null +++ b/mmseg/models/backbones/twins.py @@ -0,0 +1,588 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.transformer import FFN +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import (constant_init, normal_init, + trunc_normal_init) +from torch.nn.modules.batchnorm import _BatchNorm + +from mmseg.models.backbones.mit import EfficientMultiheadAttention +from mmseg.registry import MODELS +from ..utils.embed import PatchEmbed + + +class GlobalSubsampledAttention(EfficientMultiheadAttention): + """Global Sub-sampled Attention (Spatial Reduction Attention) + + This module is modified from EfficientMultiheadAttention, + which is a module from mmseg.models.backbones.mit.py. + Specifically, there is no difference between + `GlobalSubsampledAttention` and `EfficientMultiheadAttention`, + `GlobalSubsampledAttention` is built as a brand new class + because it is renamed as `Global sub-sampled attention (GSA)` + in paper. + + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. Default: None. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dims) + or (n, batch, embed_dims). Default: False. + qkv_bias (bool): enable bias for qkv if True. Default: True. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (int): The ratio of spatial reduction of GSA of PCPVT. + Default: 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=None, + batch_first=True, + qkv_bias=True, + norm_cfg=dict(type='LN'), + sr_ratio=1, + init_cfg=None): + super().__init__( + embed_dims, + num_heads, + attn_drop=attn_drop, + proj_drop=proj_drop, + dropout_layer=dropout_layer, + batch_first=batch_first, + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + sr_ratio=sr_ratio, + init_cfg=init_cfg) + + +class GSAEncoderLayer(BaseModule): + """Implements one encoder layer with GSA. + + Args: + embed_dims (int): The feature dimension. 
+ num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default: 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default: 0.0. + drop_path_rate (float): Stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qkv_bias (bool): Enable bias for qkv if True. Default: True + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (float): Kernel_size of conv in Attention modules. Default: 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + sr_ratio=1., + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.norm1 = build_norm_layer(norm_cfg, embed_dims, postfix=1)[1] + self.attn = GlobalSubsampledAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + sr_ratio=sr_ratio) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims, postfix=2)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=False) + + self.drop_path = build_dropout( + dict(type='DropPath', drop_prob=drop_path_rate) + ) if drop_path_rate > 0. else nn.Identity() + + def forward(self, x, hw_shape): + x = x + self.drop_path(self.attn(self.norm1(x), hw_shape, identity=0.)) + x = x + self.drop_path(self.ffn(self.norm2(x))) + return x + + +class LocallyGroupedSelfAttention(BaseModule): + """Locally-grouped Self Attention (LSA) module. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. Default: 8 + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: False. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + window_size(int): Window size of LSA. Default: 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + window_size=1, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + assert embed_dims % num_heads == 0, f'dim {embed_dims} should be ' \ + f'divided by num_heads ' \ + f'{num_heads}.' 
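+        # LSA computes ordinary multi-head self-attention independently
+        # inside each window_size x window_size group: the feature map is
+        # padded to a multiple of window_size, split into groups, attended,
+        # and padded positions are suppressed by the attention mask (see
+        # forward). For example, with window_size=7 a 56x56 map forms
+        # 8*8=64 groups of 49 tokens each.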
+ self.embed_dims = embed_dims + self.num_heads = num_heads + head_dim = embed_dims // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + self.window_size = window_size + + def forward(self, x, hw_shape): + b, n, c = x.shape + h, w = hw_shape + x = x.view(b, h, w, c) + + # pad feature maps to multiples of Local-groups + pad_l = pad_t = 0 + pad_r = (self.window_size - w % self.window_size) % self.window_size + pad_b = (self.window_size - h % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + + # calculate attention mask for LSA + Hp, Wp = x.shape[1:-1] + _h, _w = Hp // self.window_size, Wp // self.window_size + mask = torch.zeros((1, Hp, Wp), device=x.device) + mask[:, -pad_b:, :].fill_(1) + mask[:, :, -pad_r:].fill_(1) + + # [B, _h, _w, window_size, window_size, C] + x = x.reshape(b, _h, self.window_size, _w, self.window_size, + c).transpose(2, 3) + mask = mask.reshape(1, _h, self.window_size, _w, + self.window_size).transpose(2, 3).reshape( + 1, _h * _w, + self.window_size * self.window_size) + # [1, _h*_w, window_size*window_size, window_size*window_size] + attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) + attn_mask = attn_mask.masked_fill(attn_mask != 0, + float(-1000.0)).masked_fill( + attn_mask == 0, float(0.0)) + + # [3, B, _w*_h, nhead, window_size*window_size, dim] + qkv = self.qkv(x).reshape(b, _h * _w, + self.window_size * self.window_size, 3, + self.num_heads, c // self.num_heads).permute( + 3, 0, 1, 4, 2, 5) + q, k, v = qkv[0], qkv[1], qkv[2] + # [B, _h*_w, n_head, window_size*window_size, window_size*window_size] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn + attn_mask.unsqueeze(2) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + attn = (attn @ v).transpose(2, 3).reshape(b, _h, _w, self.window_size, + self.window_size, c) + x = attn.transpose(2, 3).reshape(b, _h * self.window_size, + _w * self.window_size, c) + if pad_r > 0 or pad_b > 0: + x = x[:, :h, :w, :].contiguous() + + x = x.reshape(b, n, c) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LSAEncoderLayer(BaseModule): + """Implements one encoder layer in Twins-SVT. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default: 0.0. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + drop_path_rate (float): Stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qkv_bias (bool): Enable bias for qkv if True. Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + window_size (int): Window size of LSA. Default: 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
+ """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + qk_scale=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + window_size=1, + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + + self.norm1 = build_norm_layer(norm_cfg, embed_dims, postfix=1)[1] + self.attn = LocallyGroupedSelfAttention(embed_dims, num_heads, + qkv_bias, qk_scale, + attn_drop_rate, drop_rate, + window_size) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims, postfix=2)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=False) + + self.drop_path = build_dropout( + dict(type='DropPath', drop_prob=drop_path_rate) + ) if drop_path_rate > 0. else nn.Identity() + + def forward(self, x, hw_shape): + x = x + self.drop_path(self.attn(self.norm1(x), hw_shape)) + x = x + self.drop_path(self.ffn(self.norm2(x))) + return x + + +class ConditionalPositionEncoding(BaseModule): + """The Conditional Position Encoding (CPE) module. + + The CPE is the implementation of 'Conditional Positional Encodings + for Vision Transformers '_. + + Args: + in_channels (int): Number of input channels. + embed_dims (int): The feature dimension. Default: 768. + stride (int): Stride of conv layer. Default: 1. + """ + + def __init__(self, in_channels, embed_dims=768, stride=1, init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.proj = nn.Conv2d( + in_channels, + embed_dims, + kernel_size=3, + stride=stride, + padding=1, + bias=True, + groups=embed_dims) + self.stride = stride + + def forward(self, x, hw_shape): + b, n, c = x.shape + h, w = hw_shape + feat_token = x + cnn_feat = feat_token.transpose(1, 2).view(b, c, h, w) + if self.stride == 1: + x = self.proj(cnn_feat) + cnn_feat + else: + x = self.proj(cnn_feat) + x = x.flatten(2).transpose(1, 2) + return x + + +@MODELS.register_module() +class PCPVT(BaseModule): + """The backbone of Twins-PCPVT. + + This backbone is the implementation of `Twins: Revisiting the Design + of Spatial Attention in Vision Transformers + `_. + + Args: + in_channels (int): Number of input channels. Default: 3. + embed_dims (list): Embedding dimension. Default: [64, 128, 256, 512]. + patch_sizes (list): The patch sizes. Default: [4, 2, 2, 2]. + strides (list): The strides. Default: [4, 2, 2, 2]. + num_heads (int): Number of attention heads. Default: [1, 2, 4, 8]. + mlp_ratios (int): Ratio of mlp hidden dim to embedding dim. + Default: [4, 4, 4, 4]. + out_indices (tuple[int]): Output from which stages. + Default: (0, 1, 2, 3). + qkv_bias (bool): Enable bias for qkv if True. Default: False. + drop_rate (float): Probability of an element to be zeroed. + Default 0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0 + drop_path_rate (float): Stochastic depth rate. Default 0.0 + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + depths (list): Depths of each stage. Default [3, 4, 6, 3] + sr_ratios (list): Kernel_size of conv in each Attn module in + Transformer encoder layer. Default: [8, 4, 2, 1]. + norm_after_stage(bool): Add extra norm. Default False. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
+ """ + + def __init__(self, + in_channels=3, + embed_dims=[64, 128, 256, 512], + patch_sizes=[4, 2, 2, 2], + strides=[4, 2, 2, 2], + num_heads=[1, 2, 4, 8], + mlp_ratios=[4, 4, 4, 4], + out_indices=(0, 1, 2, 3), + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN'), + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + norm_after_stage=False, + pretrained=None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be set at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is not None: + raise TypeError('pretrained must be a str or None') + self.depths = depths + + # patch_embed + self.patch_embeds = ModuleList() + self.position_encoding_drops = ModuleList() + self.layers = ModuleList() + + for i in range(len(depths)): + self.patch_embeds.append( + PatchEmbed( + in_channels=in_channels if i == 0 else embed_dims[i - 1], + embed_dims=embed_dims[i], + conv_type='Conv2d', + kernel_size=patch_sizes[i], + stride=strides[i], + padding='corner', + norm_cfg=norm_cfg)) + + self.position_encoding_drops.append(nn.Dropout(p=drop_rate)) + + self.position_encodings = ModuleList([ + ConditionalPositionEncoding(embed_dim, embed_dim) + for embed_dim in embed_dims + ]) + + # transformer encoder + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + cur = 0 + + for k in range(len(depths)): + _block = ModuleList([ + GSAEncoderLayer( + embed_dims=embed_dims[k], + num_heads=num_heads[k], + feedforward_channels=mlp_ratios[k] * embed_dims[k], + attn_drop_rate=attn_drop_rate, + drop_rate=drop_rate, + drop_path_rate=dpr[cur + i], + num_fcs=2, + qkv_bias=qkv_bias, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + sr_ratio=sr_ratios[k]) for i in range(depths[k]) + ]) + self.layers.append(_block) + cur += depths[k] + + self.norm_name, norm = build_norm_layer( + norm_cfg, embed_dims[-1], postfix=1) + + self.out_indices = out_indices + self.norm_after_stage = norm_after_stage + if self.norm_after_stage: + self.norm_list = ModuleList() + for dim in embed_dims: + self.norm_list.append(build_norm_layer(norm_cfg, dim)[1]) + + def init_weights(self): + if self.init_cfg is not None: + super().init_weights() + else: + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=.02, bias=0.) + elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)): + constant_init(m, val=1.0, bias=0.) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[ + 1] * m.out_channels + fan_out //= m.groups + normal_init( + m, mean=0, std=math.sqrt(2.0 / fan_out), bias=0) + + def forward(self, x): + outputs = list() + + b = x.shape[0] + + for i in range(len(self.depths)): + x, hw_shape = self.patch_embeds[i](x) + h, w = hw_shape + x = self.position_encoding_drops[i](x) + for j, blk in enumerate(self.layers[i]): + x = blk(x, hw_shape) + if j == 0: + x = self.position_encodings[i](x, hw_shape) + if self.norm_after_stage: + x = self.norm_list[i](x) + x = x.reshape(b, h, w, -1).permute(0, 3, 1, 2).contiguous() + + if i in self.out_indices: + outputs.append(x) + + return tuple(outputs) + + +@MODELS.register_module() +class SVT(PCPVT): + """The backbone of Twins-SVT. 
+ + This backbone is the implementation of `Twins: Revisiting the Design + of Spatial Attention in Vision Transformers + `_. + + Args: + in_channels (int): Number of input channels. Default: 3. + embed_dims (list): Embedding dimension. Default: [64, 128, 256, 512]. + patch_sizes (list): The patch sizes. Default: [4, 2, 2, 2]. + strides (list): The strides. Default: [4, 2, 2, 2]. + num_heads (int): Number of attention heads. Default: [1, 2, 4]. + mlp_ratios (int): Ratio of mlp hidden dim to embedding dim. + Default: [4, 4, 4]. + out_indices (tuple[int]): Output from which stages. + Default: (0, 1, 2, 3). + qkv_bias (bool): Enable bias for qkv if True. Default: False. + drop_rate (float): Dropout rate. Default 0. + attn_drop_rate (float): Dropout ratio of attention weight. + Default 0.0 + drop_path_rate (float): Stochastic depth rate. Default 0.2. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + depths (list): Depths of each stage. Default [4, 4, 4]. + sr_ratios (list): Kernel_size of conv in each Attn module in + Transformer encoder layer. Default: [4, 2, 1]. + windiow_sizes (list): Window size of LSA. Default: [7, 7, 7], + input_features_slice(bool): Input features need slice. Default: False. + norm_after_stage(bool): Add extra norm. Default False. + strides (list): Strides in patch-Embedding modules. Default: (2, 2, 2) + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + in_channels=3, + embed_dims=[64, 128, 256], + patch_sizes=[4, 2, 2, 2], + strides=[4, 2, 2, 2], + num_heads=[1, 2, 4], + mlp_ratios=[4, 4, 4], + out_indices=(0, 1, 2, 3), + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + norm_cfg=dict(type='LN'), + depths=[4, 4, 4], + sr_ratios=[4, 2, 1], + windiow_sizes=[7, 7, 7], + norm_after_stage=True, + pretrained=None, + init_cfg=None): + super().__init__(in_channels, embed_dims, patch_sizes, strides, + num_heads, mlp_ratios, out_indices, qkv_bias, + drop_rate, attn_drop_rate, drop_path_rate, norm_cfg, + depths, sr_ratios, norm_after_stage, pretrained, + init_cfg) + # transformer encoder + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + + for k in range(len(depths)): + for i in range(depths[k]): + if i % 2 == 0: + self.layers[k][i] = \ + LSAEncoderLayer( + embed_dims=embed_dims[k], + num_heads=num_heads[k], + feedforward_channels=mlp_ratios[k] * embed_dims[k], + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[sum(depths[:k])+i], + qkv_bias=qkv_bias, + window_size=windiow_sizes[k]) diff --git a/mmseg/models/backbones/unet.py b/mmseg/models/backbones/unet.py new file mode 100644 index 0000000000..545921db8e --- /dev/null +++ b/mmseg/models/backbones/unet.py @@ -0,0 +1,436 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer +from mmengine.model import BaseModule +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmseg.registry import MODELS +from ..utils import UpConvBlock, Upsample + + +class BasicConvBlock(nn.Module): + """Basic convolutional block for UNet. + + This module consists of several plain convolutional layers. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + num_convs (int): Number of convolutional layers. Default: 2. 
+ stride (int): Whether use stride convolution to downsample + the input feature map. If stride=2, it only uses stride convolution + in the first convolutional layer to downsample the input feature + map. Options are 1 or 2. Default: 1. + dilation (int): Whether use dilated convolution to expand the + receptive field. Set dilation rate of each convolutional layer and + the dilation rate of the first convolutional layer is always 1. + Default: 1. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + dcn (bool): Use deformable convolution in convolutional layer or not. + Default: None. + plugins (dict): plugins for convolutional layers. Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + num_convs=2, + stride=1, + dilation=1, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + dcn=None, + plugins=None): + super().__init__() + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' + + self.with_cp = with_cp + convs = [] + for i in range(num_convs): + convs.append( + ConvModule( + in_channels=in_channels if i == 0 else out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride if i == 0 else 1, + dilation=1 if i == 0 else dilation, + padding=1 if i == 0 else dilation, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + self.convs = nn.Sequential(*convs) + + def forward(self, x): + """Forward function.""" + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.convs, x) + else: + out = self.convs(x) + return out + + +@MODELS.register_module() +class DeconvModule(nn.Module): + """Deconvolution upsample module in decoder for UNet (2X upsample). + + This module uses deconvolution to upsample feature map in the decoder + of UNet. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + kernel_size (int): Kernel size of the convolutional layer. Default: 4. + """ + + def __init__(self, + in_channels, + out_channels, + with_cp=False, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + *, + kernel_size=4, + scale_factor=2): + super().__init__() + + assert (kernel_size - scale_factor >= 0) and\ + (kernel_size - scale_factor) % 2 == 0,\ + f'kernel_size should be greater than or equal to scale_factor '\ + f'and (kernel_size - scale_factor) should be even numbers, '\ + f'while the kernel size is {kernel_size} and scale_factor is '\ + f'{scale_factor}.' 
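+        # With stride = scale_factor and padding = (kernel_size -
+        # scale_factor) // 2, the transposed conv scales the input exactly
+        # by scale_factor, e.g. kernel_size=4, scale_factor=2 gives
+        # padding=1 and H_out = (H - 1) * 2 - 2 * 1 + 4 = 2 * H.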
+ + stride = scale_factor + padding = (kernel_size - scale_factor) // 2 + self.with_cp = with_cp + deconv = nn.ConvTranspose2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding) + + norm_name, norm = build_norm_layer(norm_cfg, out_channels) + activate = build_activation_layer(act_cfg) + self.deconv_upsamping = nn.Sequential(deconv, norm, activate) + + def forward(self, x): + """Forward function.""" + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.deconv_upsamping, x) + else: + out = self.deconv_upsamping(x) + return out + + +@MODELS.register_module() +class InterpConv(nn.Module): + """Interpolation upsample module in decoder for UNet. + + This module uses interpolation to upsample feature map in the decoder + of UNet. It consists of one interpolation upsample layer and one + convolutional layer. It can be one interpolation upsample layer followed + by one convolutional layer (conv_first=False) or one convolutional layer + followed by one interpolation upsample layer (conv_first=True). + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + conv_first (bool): Whether convolutional layer or interpolation + upsample layer first. Default: False. It means interpolation + upsample layer followed by one convolutional layer. + kernel_size (int): Kernel size of the convolutional layer. Default: 1. + stride (int): Stride of the convolutional layer. Default: 1. + padding (int): Padding of the convolutional layer. Default: 1. + upsample_cfg (dict): Interpolation config of the upsample layer. + Default: dict( + scale_factor=2, mode='bilinear', align_corners=False). + """ + + def __init__(self, + in_channels, + out_channels, + with_cp=False, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + *, + conv_cfg=None, + conv_first=False, + kernel_size=1, + stride=1, + padding=0, + upsample_cfg=dict( + scale_factor=2, mode='bilinear', align_corners=False)): + super().__init__() + + self.with_cp = with_cp + conv = ConvModule( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + upsample = Upsample(**upsample_cfg) + if conv_first: + self.interp_upsample = nn.Sequential(conv, upsample) + else: + self.interp_upsample = nn.Sequential(upsample, conv) + + def forward(self, x): + """Forward function.""" + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.interp_upsample, x) + else: + out = self.interp_upsample(x) + return out + + +@MODELS.register_module() +class UNet(BaseModule): + """UNet backbone. + + This backbone is the implementation of `U-Net: Convolutional Networks + for Biomedical Image Segmentation `_. + + Args: + in_channels (int): Number of input image channels. Default" 3. + base_channels (int): Number of base channels of each stage. + The output channels of the first stage. Default: 64. + num_stages (int): Number of stages in encoder, normally 5. Default: 5. + strides (Sequence[int 1 | 2]): Strides of each stage in encoder. + len(strides) is equal to num_stages. 
Normally the stride of the + first stage in encoder is 1. If strides[i]=2, it uses stride + convolution to downsample in the correspondence encoder stage. + Default: (1, 1, 1, 1, 1). + enc_num_convs (Sequence[int]): Number of convolutional layers in the + convolution block of the correspondence encoder stage. + Default: (2, 2, 2, 2, 2). + dec_num_convs (Sequence[int]): Number of convolutional layers in the + convolution block of the correspondence decoder stage. + Default: (2, 2, 2, 2). + downsamples (Sequence[int]): Whether use MaxPool to downsample the + feature map after the first stage of encoder + (stages: [1, num_stages)). If the correspondence encoder stage use + stride convolution (strides[i]=2), it will never use MaxPool to + downsample, even downsamples[i-1]=True. + Default: (True, True, True, True). + enc_dilations (Sequence[int]): Dilation rate of each stage in encoder. + Default: (1, 1, 1, 1, 1). + dec_dilations (Sequence[int]): Dilation rate of each stage in decoder. + Default: (1, 1, 1, 1). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + upsample_cfg (dict): The upsample config of the upsample module in + decoder. Default: dict(type='InterpConv'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + dcn (bool): Use deformable convolution in convolutional layer or not. + Default: None. + plugins (dict): plugins for convolutional layers. Default: None. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + Notice: + The input image size should be divisible by the whole downsample rate + of the encoder. More detail of the whole downsample rate can be found + in UNet._check_input_divisible. + """ + + def __init__(self, + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + norm_eval=False, + dcn=None, + plugins=None, + pretrained=None, + init_cfg=None): + super().__init__(init_cfg) + + self.pretrained = pretrained + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' 
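+        # Encoder: num_stages BasicConvBlocks with output channels
+        # base_channels * 2**i; every later stage downsamples either via a
+        # 2x MaxPool (strides[i] == 1 and downsamples[i - 1]) or via a
+        # stride-2 first conv (strides[i] == 2), so with the defaults each
+        # of them halves the resolution.
+        # Decoder: num_stages - 1 UpConvBlocks that upsample and fuse the
+        # matching encoder feature through a skip connection.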
+ assert len(strides) == num_stages, \ + 'The length of strides should be equal to num_stages, '\ + f'while the strides is {strides}, the length of '\ + f'strides is {len(strides)}, and the num_stages is '\ + f'{num_stages}.' + assert len(enc_num_convs) == num_stages, \ + 'The length of enc_num_convs should be equal to num_stages, '\ + f'while the enc_num_convs is {enc_num_convs}, the length of '\ + f'enc_num_convs is {len(enc_num_convs)}, and the num_stages is '\ + f'{num_stages}.' + assert len(dec_num_convs) == (num_stages-1), \ + 'The length of dec_num_convs should be equal to (num_stages-1), '\ + f'while the dec_num_convs is {dec_num_convs}, the length of '\ + f'dec_num_convs is {len(dec_num_convs)}, and the num_stages is '\ + f'{num_stages}.' + assert len(downsamples) == (num_stages-1), \ + 'The length of downsamples should be equal to (num_stages-1), '\ + f'while the downsamples is {downsamples}, the length of '\ + f'downsamples is {len(downsamples)}, and the num_stages is '\ + f'{num_stages}.' + assert len(enc_dilations) == num_stages, \ + 'The length of enc_dilations should be equal to num_stages, '\ + f'while the enc_dilations is {enc_dilations}, the length of '\ + f'enc_dilations is {len(enc_dilations)}, and the num_stages is '\ + f'{num_stages}.' + assert len(dec_dilations) == (num_stages-1), \ + 'The length of dec_dilations should be equal to (num_stages-1), '\ + f'while the dec_dilations is {dec_dilations}, the length of '\ + f'dec_dilations is {len(dec_dilations)}, and the num_stages is '\ + f'{num_stages}.' + self.num_stages = num_stages + self.strides = strides + self.downsamples = downsamples + self.norm_eval = norm_eval + self.base_channels = base_channels + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + for i in range(num_stages): + enc_conv_block = [] + if i != 0: + if strides[i] == 1 and downsamples[i - 1]: + enc_conv_block.append(nn.MaxPool2d(kernel_size=2)) + upsample = (strides[i] != 1 or downsamples[i - 1]) + self.decoder.append( + UpConvBlock( + conv_block=BasicConvBlock, + in_channels=base_channels * 2**i, + skip_channels=base_channels * 2**(i - 1), + out_channels=base_channels * 2**(i - 1), + num_convs=dec_num_convs[i - 1], + stride=1, + dilation=dec_dilations[i - 1], + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + upsample_cfg=upsample_cfg if upsample else None, + dcn=None, + plugins=None)) + + enc_conv_block.append( + BasicConvBlock( + in_channels=in_channels, + out_channels=base_channels * 2**i, + num_convs=enc_num_convs[i], + stride=strides[i], + dilation=enc_dilations[i], + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dcn=None, + plugins=None)) + self.encoder.append(nn.Sequential(*enc_conv_block)) + in_channels = base_channels * 2**i + + def forward(self, x): + self._check_input_divisible(x) + enc_outs = [] + for enc in self.encoder: + x = enc(x) + enc_outs.append(x) + dec_outs = [x] + for i in reversed(range(len(self.decoder))): + x = self.decoder[i](enc_outs[i], x) + dec_outs.append(x) + + return dec_outs + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super().train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def _check_input_divisible(self, x): + h, w = x.shape[-2:] + whole_downsample_rate = 1 + for i in range(1, self.num_stages): + if self.strides[i] == 2 or self.downsamples[i - 
1]: + whole_downsample_rate *= 2 + assert (h % whole_downsample_rate == 0) \ + and (w % whole_downsample_rate == 0),\ + f'The input image size {(h, w)} should be divisible by the whole '\ + f'downsample rate {whole_downsample_rate}, when num_stages is '\ + f'{self.num_stages}, strides is {self.strides}, and downsamples '\ + f'is {self.downsamples}.' diff --git a/mmseg/models/backbones/vit.py b/mmseg/models/backbones/vit.py new file mode 100644 index 0000000000..3c96f65493 --- /dev/null +++ b/mmseg/models/backbones/vit.py @@ -0,0 +1,438 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import warnings + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention +from mmengine.logging import print_log +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import (constant_init, kaiming_init, + trunc_normal_) +from mmengine.runner.checkpoint import CheckpointLoader, load_state_dict +from torch.nn.modules.batchnorm import _BatchNorm +from torch.nn.modules.utils import _pair as to_2tuple + +from mmseg.registry import MODELS +from ..utils import PatchEmbed, resize + + +class TransformerEncoderLayer(BaseModule): + """Implements one encoder layer in Vision Transformer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default: 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default: 0.0. + drop_path_rate (float): stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qkv_bias (bool): enable bias for qkv if True. Default: True + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default: True. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. Default: False. 
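+
+    A minimal usage sketch (the ViT-Base sizes and the 197-token sequence,
+    i.e. 14 * 14 patches plus a cls token, are assumptions for
+    illustration)::
+
+        >>> import torch
+        >>> layer = TransformerEncoderLayer(
+        ...     embed_dims=768, num_heads=12, feedforward_channels=3072)
+        >>> tokens = torch.randn(1, 197, 768)  # (B, num_tokens, C)
+        >>> out = layer(tokens)                # shape is preserved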
+ """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + batch_first=True, + attn_cfg=dict(), + ffn_cfg=dict(), + with_cp=False): + super().__init__() + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + attn_cfg.update( + dict( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + batch_first=batch_first, + bias=qkv_bias)) + + self.build_attn(attn_cfg) + + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, embed_dims, postfix=2) + self.add_module(self.norm2_name, norm2) + + ffn_cfg.update( + dict( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate) + if drop_path_rate > 0 else None, + act_cfg=act_cfg)) + self.build_ffn(ffn_cfg) + self.with_cp = with_cp + + def build_attn(self, attn_cfg): + self.attn = MultiheadAttention(**attn_cfg) + + def build_ffn(self, ffn_cfg): + self.ffn = FFN(**ffn_cfg) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def forward(self, x): + + def _inner_forward(x): + x = self.attn(self.norm1(x), identity=x) + x = self.ffn(self.norm2(x), identity=x) + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + return x + + +@MODELS.register_module() +class VisionTransformer(BaseModule): + """Vision Transformer. + + This backbone is the implementation of `An Image is Worth 16x16 Words: + Transformers for Image Recognition at + Scale `_. + + Args: + img_size (int | tuple): Input image size. Default: 224. + patch_size (int): The patch size. Default: 16. + in_channels (int): Number of input channels. Default: 3. + embed_dims (int): embedding dimension. Default: 768. + num_layers (int): depth of transformer. Default: 12. + num_heads (int): number of attention heads. Default: 12. + mlp_ratio (int): ratio of mlp hidden dim to embedding dim. + Default: 4. + out_indices (list | tuple | int): Output from which stages. + Default: -1. + qkv_bias (bool): enable bias for qkv if True. Default: True. + drop_rate (float): Probability of an element to be zeroed. + Default 0.0 + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0 + drop_path_rate (float): stochastic depth rate. Default 0.0 + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Default: True. + output_cls_token (bool): Whether output the cls_token. If set True, + `with_cls_token` must be True. Default: False. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + patch_norm (bool): Whether to add a norm in PatchEmbed Block. + Default: False. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Default: False. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Default: bicubic. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). 
Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. Default: False. + pretrained (str, optional): model pretrained path. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_channels=3, + embed_dims=768, + num_layers=12, + num_heads=12, + mlp_ratio=4, + out_indices=-1, + qkv_bias=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + with_cls_token=True, + output_cls_token=False, + norm_cfg=dict(type='LN'), + act_cfg=dict(type='GELU'), + patch_norm=False, + final_norm=False, + interpolate_mode='bicubic', + num_fcs=2, + norm_eval=False, + with_cp=False, + pretrained=None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(img_size, int): + img_size = to_2tuple(img_size) + elif isinstance(img_size, tuple): + if len(img_size) == 1: + img_size = to_2tuple(img_size[0]) + assert len(img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(img_size)}' + + if output_cls_token: + assert with_cls_token is True, f'with_cls_token must be True if' \ + f'set output_cls_token to True, but got {with_cls_token}' + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be set at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is not None: + raise TypeError('pretrained must be a str or None') + + self.img_size = img_size + self.patch_size = patch_size + self.interpolate_mode = interpolate_mode + self.norm_eval = norm_eval + self.with_cp = with_cp + self.pretrained = pretrained + + self.patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + padding='corner', + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None, + ) + + num_patches = (img_size[0] // patch_size) * \ + (img_size[1] // patch_size) + + self.with_cls_token = with_cls_token + self.output_cls_token = output_cls_token + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims)) + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches + 1, embed_dims)) + self.drop_after_pos = nn.Dropout(p=drop_rate) + + if isinstance(out_indices, int): + if out_indices == -1: + out_indices = num_layers - 1 + self.out_indices = [out_indices] + elif isinstance(out_indices, list) or isinstance(out_indices, tuple): + self.out_indices = out_indices + else: + raise TypeError('out_indices must be type of int, list or tuple') + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, num_layers) + ] # stochastic depth decay rule + + self.layers = ModuleList() + for i in range(num_layers): + self.layers.append( + TransformerEncoderLayer( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=mlp_ratio * embed_dims, + attn_drop_rate=attn_drop_rate, + drop_rate=drop_rate, + drop_path_rate=dpr[i], + num_fcs=num_fcs, + qkv_bias=qkv_bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp, + batch_first=True)) + + self.final_norm = final_norm + if final_norm: + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + @property + def norm1(self): + return 
getattr(self, self.norm1_name) + + def init_weights(self): + if (isinstance(self.init_cfg, dict) + and self.init_cfg.get('type') == 'Pretrained'): + checkpoint = CheckpointLoader.load_checkpoint( + self.init_cfg['checkpoint'], logger=None, map_location='cpu') + + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + if 'pos_embed' in state_dict.keys(): + if self.pos_embed.shape != state_dict['pos_embed'].shape: + print_log(msg=f'Resize the pos_embed shape from ' + f'{state_dict["pos_embed"].shape} to ' + f'{self.pos_embed.shape}') + h, w = self.img_size + pos_size = int( + math.sqrt(state_dict['pos_embed'].shape[1] - 1)) + state_dict['pos_embed'] = self.resize_pos_embed( + state_dict['pos_embed'], + (h // self.patch_size, w // self.patch_size), + (pos_size, pos_size), self.interpolate_mode) + + load_state_dict(self, state_dict, strict=False, logger=None) + elif self.init_cfg is not None: + super().init_weights() + else: + # We only implement the 'jax_impl' initialization implemented at + # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353 # noqa: E501 + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + for n, m in self.named_modules(): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + if 'ffn' in n: + nn.init.normal_(m.bias, mean=0., std=1e-6) + else: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv2d): + kaiming_init(m, mode='fan_in', bias=0.) + elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)): + constant_init(m, val=1.0, bias=0.) + + def _pos_embeding(self, patched_img, hw_shape, pos_embed): + """Positioning embeding method. + + Resize the pos_embed, if the input image size doesn't match + the training size. + Args: + patched_img (torch.Tensor): The patched image, it should be + shape of [B, L1, C]. + hw_shape (tuple): The downsampled image resolution. + pos_embed (torch.Tensor): The pos_embed weighs, it should be + shape of [B, L2, c]. + Return: + torch.Tensor: The pos encoded image feature. + """ + assert patched_img.ndim == 3 and pos_embed.ndim == 3, \ + 'the shapes of patched_img and pos_embed must be [B, L, C]' + x_len, pos_len = patched_img.shape[1], pos_embed.shape[1] + if x_len != pos_len: + if pos_len == (self.img_size[0] // self.patch_size) * ( + self.img_size[1] // self.patch_size) + 1: + pos_h = self.img_size[0] // self.patch_size + pos_w = self.img_size[1] // self.patch_size + else: + raise ValueError( + 'Unexpected shape of pos_embed, got {}.'.format( + pos_embed.shape)) + pos_embed = self.resize_pos_embed(pos_embed, hw_shape, + (pos_h, pos_w), + self.interpolate_mode) + return self.drop_after_pos(patched_img + pos_embed) + + @staticmethod + def resize_pos_embed(pos_embed, input_shpae, pos_shape, mode): + """Resize pos_embed weights. + + Resize pos_embed using bicubic interpolate method. + Args: + pos_embed (torch.Tensor): Position embedding weights. + input_shpae (tuple): Tuple for (downsampled input image height, + downsampled input image width). + pos_shape (tuple): The resolution of downsampled origin training + image. + mode (str): Algorithm used for upsampling: + ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | + ``'trilinear'``. 
Default: ``'nearest'`` + Return: + torch.Tensor: The resized pos_embed of shape [B, L_new, C] + """ + assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]' + pos_h, pos_w = pos_shape + cls_token_weight = pos_embed[:, 0] + pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):] + pos_embed_weight = pos_embed_weight.reshape( + 1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2) + pos_embed_weight = resize( + pos_embed_weight, size=input_shpae, align_corners=False, mode=mode) + cls_token_weight = cls_token_weight.unsqueeze(1) + pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2) + pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1) + return pos_embed + + def forward(self, inputs): + B = inputs.shape[0] + + x, hw_shape = self.patch_embed(inputs) + + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + x = self._pos_embeding(x, hw_shape, self.pos_embed) + + if not self.with_cls_token: + # Remove class token for transformer encoder input + x = x[:, 1:] + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i == len(self.layers) - 1: + if self.final_norm: + x = self.norm1(x) + if i in self.out_indices: + if self.with_cls_token: + # Remove class token and reshape token for decoder head + out = x[:, 1:] + else: + out = x + B, _, C = out.shape + out = out.reshape(B, hw_shape[0], hw_shape[1], + C).permute(0, 3, 1, 2).contiguous() + if self.output_cls_token: + out = [out, x[:, 0]] + outs.append(out) + + return tuple(outs) + + def train(self, mode=True): + super().train(mode) + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.LayerNorm): + m.eval() diff --git a/mmseg/models/builder.py b/mmseg/models/builder.py index f4b84dd60f..081c646b49 100644 --- a/mmseg/models/builder.py +++ b/mmseg/models/builder.py @@ -1,56 +1,52 @@ -from mmcv.utils import Registry, build_from_cfg -from torch import nn +# Copyright (c) OpenMMLab. All rights reserved. +import warnings -BACKBONES = Registry('backbone') -NECKS = Registry('neck') -HEADS = Registry('head') -LOSSES = Registry('loss') -SEGMENTORS = Registry('segmentor') +from mmseg.registry import MODELS - -def build(cfg, registry, default_args=None): - """Build a module. - - Args: - cfg (dict, list[dict]): The config of modules, is is either a dict - or a list of configs. - registry (:obj:`Registry`): A registry the module belongs to. - default_args (dict, optional): Default arguments to build the module. - Defaults to None. - - Returns: - nn.Module: A built nn module. 
- """ - - if isinstance(cfg, list): - modules = [ - build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg - ] - return nn.Sequential(*modules) - else: - return build_from_cfg(cfg, registry, default_args) +BACKBONES = MODELS +NECKS = MODELS +HEADS = MODELS +LOSSES = MODELS +SEGMENTORS = MODELS def build_backbone(cfg): """Build backbone.""" - return build(cfg, BACKBONES) + warnings.warn('``build_backbone`` would be deprecated soon, please use ' + '``mmseg.registry.MODELS.build()`` ') + return BACKBONES.build(cfg) def build_neck(cfg): """Build neck.""" - return build(cfg, NECKS) + warnings.warn('``build_neck`` would be deprecated soon, please use ' + '``mmseg.registry.MODELS.build()`` ') + return NECKS.build(cfg) def build_head(cfg): """Build head.""" - return build(cfg, HEADS) + warnings.warn('``build_head`` would be deprecated soon, please use ' + '``mmseg.registry.MODELS.build()`` ') + return HEADS.build(cfg) def build_loss(cfg): """Build loss.""" - return build(cfg, LOSSES) + warnings.warn('``build_loss`` would be deprecated soon, please use ' + '``mmseg.registry.MODELS.build()`` ') + return LOSSES.build(cfg) def build_segmentor(cfg, train_cfg=None, test_cfg=None): """Build segmentor.""" - return build(cfg, SEGMENTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg)) + if train_cfg is not None or test_cfg is not None: + warnings.warn( + 'train_cfg and test_cfg is deprecated, ' + 'please specify them in model', UserWarning) + assert cfg.get('train_cfg') is None or train_cfg is None, \ + 'train_cfg specified in both outer field and model field ' + assert cfg.get('test_cfg') is None or test_cfg is None, \ + 'test_cfg specified in both outer field and model field ' + return SEGMENTORS.build( + cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) diff --git a/mmseg/models/data_preprocessor.py b/mmseg/models/data_preprocessor.py new file mode 100644 index 0000000000..deef365a9e --- /dev/null +++ b/mmseg/models/data_preprocessor.py @@ -0,0 +1,151 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from numbers import Number +from typing import Any, Dict, List, Optional, Sequence + +import torch +from mmengine.model import BaseDataPreprocessor + +from mmseg.registry import MODELS +from mmseg.utils import stack_batch + + +@MODELS.register_module() +class SegDataPreProcessor(BaseDataPreprocessor): + """Image pre-processor for segmentation tasks. + + Comparing with the :class:`mmengine.ImgDataPreprocessor`, + + 1. It won't do normalization if ``mean`` is not specified. + 2. It does normalization and color space conversion after stacking batch. + 3. It supports batch augmentations like mixup and cutmix. + + + It provides the data pre-processing as follows + + - Collate and move data to the target device. + - Pad inputs to the input size with defined ``pad_val``, and pad seg map + with defined ``seg_pad_val``. + - Stack inputs to batch_inputs. + - Convert inputs from bgr to rgb if the shape of input is (3, H, W). + - Normalize image with defined std and mean. + - Do batch augmentations like Mixup and Cutmix during training. + + Args: + mean (Sequence[Number], optional): The pixel mean of R, G, B channels. + Defaults to None. + std (Sequence[Number], optional): The pixel standard deviation of + R, G, B channels. Defaults to None. + size (tuple, optional): Fixed padding size. + size_divisor (int, optional): The divisor of padded size. + pad_val (float, optional): Padding value. Default: 0. + seg_pad_val (float, optional): Padding value of segmentation map. + Default: 255. 
+ padding_mode (str): Type of padding. Default: constant. + - constant: pads with a constant value, this value is specified + with pad_val. + bgr_to_rgb (bool): whether to convert image from BGR to RGB. + Defaults to False. + rgb_to_bgr (bool): whether to convert image from RGB to RGB. + Defaults to False. + batch_augments (list[dict], optional): Batch-level augmentations + test_cfg (dict, optional): The padding size config in testing, if not + specify, will use `size` and `size_divisor` params as default. + Defaults to None, only supports keys `size` or `size_divisor`. + """ + + def __init__( + self, + mean: Sequence[Number] = None, + std: Sequence[Number] = None, + size: Optional[tuple] = None, + size_divisor: Optional[int] = None, + pad_val: Number = 0, + seg_pad_val: Number = 255, + bgr_to_rgb: bool = False, + rgb_to_bgr: bool = False, + batch_augments: Optional[List[dict]] = None, + test_cfg: dict = None, + ): + super().__init__() + self.size = size + self.size_divisor = size_divisor + self.pad_val = pad_val + self.seg_pad_val = seg_pad_val + + assert not (bgr_to_rgb and rgb_to_bgr), ( + '`bgr2rgb` and `rgb2bgr` cannot be set to True at the same time') + self.channel_conversion = rgb_to_bgr or bgr_to_rgb + + if mean is not None: + assert std is not None, 'To enable the normalization in ' \ + 'preprocessing, please specify both ' \ + '`mean` and `std`.' + # Enable the normalization in preprocessing. + self._enable_normalize = True + self.register_buffer('mean', + torch.tensor(mean).view(-1, 1, 1), False) + self.register_buffer('std', + torch.tensor(std).view(-1, 1, 1), False) + else: + self._enable_normalize = False + + # TODO: support batch augmentations. + self.batch_augments = batch_augments + + # Support different padding methods in testing + self.test_cfg = test_cfg + + def forward(self, data: dict, training: bool = False) -> Dict[str, Any]: + """Perform normalization、padding and bgr2rgb conversion based on + ``BaseDataPreprocessor``. + + Args: + data (dict): data sampled from dataloader. + training (bool): Whether to enable training time augmentation. + + Returns: + Dict: Data in the same format as the model input. + """ + data = self.cast_data(data) # type: ignore + inputs = data['inputs'] + data_samples = data.get('data_samples', None) + # TODO: whether normalize should be after stack_batch + if self.channel_conversion and inputs[0].size(0) == 3: + inputs = [_input[[2, 1, 0], ...] 
for _input in inputs] + + inputs = [_input.float() for _input in inputs] + if self._enable_normalize: + inputs = [(_input - self.mean) / self.std for _input in inputs] + + if training: + assert data_samples is not None, ('During training, ', + '`data_samples` must be define.') + inputs, data_samples = stack_batch( + inputs=inputs, + data_samples=data_samples, + size=self.size, + size_divisor=self.size_divisor, + pad_val=self.pad_val, + seg_pad_val=self.seg_pad_val) + + if self.batch_augments is not None: + inputs, data_samples = self.batch_augments( + inputs, data_samples) + else: + assert len(inputs) == 1, ( + 'Batch inference is not support currently, ' + 'as the image size might be different in a batch') + # pad images when testing + if self.test_cfg: + inputs, padded_samples = stack_batch( + inputs=inputs, + size=self.test_cfg.get('size', None), + size_divisor=self.test_cfg.get('size_divisor', None), + pad_val=self.pad_val, + seg_pad_val=self.seg_pad_val) + for data_sample, pad_info in zip(data_samples, padded_samples): + data_sample.set_metainfo({**pad_info}) + else: + inputs = torch.stack(inputs, dim=0) + + return dict(inputs=inputs, data_samples=data_samples) diff --git a/mmseg/models/decode_heads/__init__.py b/mmseg/models/decode_heads/__init__.py index fda4309436..b18152d7d9 100644 --- a/mmseg/models/decode_heads/__init__.py +++ b/mmseg/models/decode_heads/__init__.py @@ -1,19 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. from .ann_head import ANNHead +from .apc_head import APCHead from .aspp_head import ASPPHead from .cc_head import CCHead from .da_head import DAHead +from .dm_head import DMHead +from .dnl_head import DNLHead +from .dpt_head import DPTHead +from .ema_head import EMAHead from .enc_head import EncHead from .fcn_head import FCNHead +from .fpn_head import FPNHead from .gc_head import GCHead +from .isa_head import ISAHead +from .knet_head import IterativeDecodeHead, KernelUpdateHead, KernelUpdator +from .lraspp_head import LRASPPHead +from .mask2former_head import Mask2FormerHead +from .maskformer_head import MaskFormerHead from .nl_head import NLHead from .ocr_head import OCRHead +from .point_head import PointHead from .psa_head import PSAHead from .psp_head import PSPHead +from .segformer_head import SegformerHead +from .segmenter_mask_head import SegmenterMaskTransformerHead from .sep_aspp_head import DepthwiseSeparableASPPHead +from .sep_fcn_head import DepthwiseSeparableFCNHead +from .setr_mla_head import SETRMLAHead +from .setr_up_head import SETRUPHead +from .stdc_head import STDCHead from .uper_head import UPerHead __all__ = [ 'FCNHead', 'PSPHead', 'ASPPHead', 'PSAHead', 'NLHead', 'GCHead', 'CCHead', 'UPerHead', 'DepthwiseSeparableASPPHead', 'ANNHead', 'DAHead', 'OCRHead', - 'EncHead' + 'EncHead', 'DepthwiseSeparableFCNHead', 'FPNHead', 'EMAHead', 'DNLHead', + 'PointHead', 'APCHead', 'DMHead', 'LRASPPHead', 'SETRUPHead', + 'SETRMLAHead', 'DPTHead', 'SETRMLAHead', 'SegmenterMaskTransformerHead', + 'SegformerHead', 'ISAHead', 'STDCHead', 'IterativeDecodeHead', + 'KernelUpdateHead', 'KernelUpdator', 'MaskFormerHead', 'Mask2FormerHead' ] diff --git a/mmseg/models/decode_heads/ann_head.py b/mmseg/models/decode_heads/ann_head.py index 396c54e150..2b40ef5aa1 100644 --- a/mmseg/models/decode_heads/ann_head.py +++ b/mmseg/models/decode_heads/ann_head.py @@ -1,8 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
import torch import torch.nn as nn from mmcv.cnn import ConvModule -from ..builder import HEADS +from mmseg.registry import MODELS from ..utils import SelfAttentionBlock as _SelfAttentionBlock from .decode_head import BaseDecodeHead @@ -16,7 +17,7 @@ class PPMConcat(nn.ModuleList): """ def __init__(self, pool_scales=(1, 3, 6, 8)): - super(PPMConcat, self).__init__( + super().__init__( [nn.AdaptiveAvgPool2d(pool_scale) for pool_scale in pool_scales]) def forward(self, feats): @@ -57,7 +58,7 @@ def __init__(self, low_in_channels, high_in_channels, channels, query_downsample = nn.MaxPool2d(kernel_size=query_scale) else: query_downsample = None - super(SelfAttentionBlock, self).__init__( + super().__init__( key_in_channels=low_in_channels, query_in_channels=high_in_channels, channels=channels, @@ -99,7 +100,7 @@ class AFNB(nn.Module): def __init__(self, low_in_channels, high_in_channels, channels, out_channels, query_scales, key_pool_scales, conv_cfg, norm_cfg, act_cfg): - super(AFNB, self).__init__() + super().__init__() self.stages = nn.ModuleList() for query_scale in query_scales: self.stages.append( @@ -149,7 +150,7 @@ class APNB(nn.Module): def __init__(self, in_channels, channels, out_channels, query_scales, key_pool_scales, conv_cfg, norm_cfg, act_cfg): - super(APNB, self).__init__() + super().__init__() self.stages = nn.ModuleList() for query_scale in query_scales: self.stages.append( @@ -180,7 +181,7 @@ def forward(self, feats): return output -@HEADS.register_module() +@MODELS.register_module() class ANNHead(BaseDecodeHead): """Asymmetric Non-local Neural Networks for Semantic Segmentation. @@ -200,8 +201,7 @@ def __init__(self, query_scales=(1, ), key_pool_scales=(1, 3, 6, 8), **kwargs): - super(ANNHead, self).__init__( - input_transform='multiple_select', **kwargs) + super().__init__(input_transform='multiple_select', **kwargs) assert len(self.in_channels) == 2 low_in_channels, high_in_channels = self.in_channels self.project_channels = project_channels diff --git a/mmseg/models/decode_heads/apc_head.py b/mmseg/models/decode_heads/apc_head.py new file mode 100644 index 0000000000..728f39659c --- /dev/null +++ b/mmseg/models/decode_heads/apc_head.py @@ -0,0 +1,159 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule + +from mmseg.registry import MODELS +from ..utils import resize +from .decode_head import BaseDecodeHead + + +class ACM(nn.Module): + """Adaptive Context Module used in APCNet. + + Args: + pool_scale (int): Pooling scale used in Adaptive Context + Module to extract region features. + fusion (bool): Add one conv to fuse residual feature. + in_channels (int): Input channels. + channels (int): Channels after modules, before conv_seg. + conv_cfg (dict | None): Config of conv layers. + norm_cfg (dict | None): Config of norm layers. + act_cfg (dict): Config of activation layers. 
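+
+    A minimal usage sketch (the channel numbers and the 32x32 feature map
+    are assumptions for illustration)::
+
+        >>> import torch
+        >>> acm = ACM(pool_scale=2, fusion=True, in_channels=2048,
+        ...           channels=512, conv_cfg=None, norm_cfg=None,
+        ...           act_cfg=dict(type='ReLU'))
+        >>> x = torch.randn(1, 2048, 32, 32)
+        >>> out = acm(x)   # (1, 512, 32, 32)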
+ """ + + def __init__(self, pool_scale, fusion, in_channels, channels, conv_cfg, + norm_cfg, act_cfg): + super().__init__() + self.pool_scale = pool_scale + self.fusion = fusion + self.in_channels = in_channels + self.channels = channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.pooled_redu_conv = ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.input_redu_conv = ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.global_info = ConvModule( + self.channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.gla = nn.Conv2d(self.channels, self.pool_scale**2, 1, 1, 0) + + self.residual_conv = ConvModule( + self.channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + if self.fusion: + self.fusion_conv = ConvModule( + self.channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, x): + """Forward function.""" + pooled_x = F.adaptive_avg_pool2d(x, self.pool_scale) + # [batch_size, channels, h, w] + x = self.input_redu_conv(x) + # [batch_size, channels, pool_scale, pool_scale] + pooled_x = self.pooled_redu_conv(pooled_x) + batch_size = x.size(0) + # [batch_size, pool_scale * pool_scale, channels] + pooled_x = pooled_x.view(batch_size, self.channels, + -1).permute(0, 2, 1).contiguous() + # [batch_size, h * w, pool_scale * pool_scale] + affinity_matrix = self.gla(x + resize( + self.global_info(F.adaptive_avg_pool2d(x, 1)), size=x.shape[2:]) + ).permute(0, 2, 3, 1).reshape( + batch_size, -1, self.pool_scale**2) + affinity_matrix = F.sigmoid(affinity_matrix) + # [batch_size, h * w, channels] + z_out = torch.matmul(affinity_matrix, pooled_x) + # [batch_size, channels, h * w] + z_out = z_out.permute(0, 2, 1).contiguous() + # [batch_size, channels, h, w] + z_out = z_out.view(batch_size, self.channels, x.size(2), x.size(3)) + z_out = self.residual_conv(z_out) + z_out = F.relu(z_out + x) + if self.fusion: + z_out = self.fusion_conv(z_out) + + return z_out + + +@MODELS.register_module() +class APCHead(BaseDecodeHead): + """Adaptive Pyramid Context Network for Semantic Segmentation. + + This head is the implementation of + `APCNet `_. + + Args: + pool_scales (tuple[int]): Pooling scales used in Adaptive Context + Module. Default: (1, 2, 3, 6). + fusion (bool): Add one conv to fuse residual feature. 
+ """ + + def __init__(self, pool_scales=(1, 2, 3, 6), fusion=True, **kwargs): + super().__init__(**kwargs) + assert isinstance(pool_scales, (list, tuple)) + self.pool_scales = pool_scales + self.fusion = fusion + acm_modules = [] + for pool_scale in self.pool_scales: + acm_modules.append( + ACM(pool_scale, + self.fusion, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.acm_modules = nn.ModuleList(acm_modules) + self.bottleneck = ConvModule( + self.in_channels + len(pool_scales) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + acm_outs = [x] + for acm_module in self.acm_modules: + acm_outs.append(acm_module(x)) + acm_outs = torch.cat(acm_outs, dim=1) + output = self.bottleneck(acm_outs) + output = self.cls_seg(output) + return output diff --git a/mmseg/models/decode_heads/aspp_head.py b/mmseg/models/decode_heads/aspp_head.py index 6332ab120c..6d7185d7de 100644 --- a/mmseg/models/decode_heads/aspp_head.py +++ b/mmseg/models/decode_heads/aspp_head.py @@ -1,9 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import ConvModule -from mmseg.ops import resize -from ..builder import HEADS +from mmseg.registry import MODELS +from ..utils import resize from .decode_head import BaseDecodeHead @@ -21,7 +22,7 @@ class ASPPModule(nn.ModuleList): def __init__(self, dilations, in_channels, channels, conv_cfg, norm_cfg, act_cfg): - super(ASPPModule, self).__init__() + super().__init__() self.dilations = dilations self.in_channels = in_channels self.channels = channels @@ -49,7 +50,7 @@ def forward(self, x): return aspp_outs -@HEADS.register_module() +@MODELS.register_module() class ASPPHead(BaseDecodeHead): """Rethinking Atrous Convolution for Semantic Image Segmentation. @@ -62,7 +63,7 @@ class ASPPHead(BaseDecodeHead): """ def __init__(self, dilations=(1, 6, 12, 18), **kwargs): - super(ASPPHead, self).__init__(**kwargs) + super().__init__(**kwargs) assert isinstance(dilations, (list, tuple)) self.dilations = dilations self.image_pool = nn.Sequential( @@ -90,8 +91,17 @@ def __init__(self, dilations=(1, 6, 12, 18), **kwargs): norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) - def forward(self, inputs): - """Forward function.""" + def _forward_feature(self, inputs): + """Forward function for feature maps before classifying each pixel with + ``self.cls_seg`` fc. + + Args: + inputs (list[Tensor]): List of multi-level img features. + + Returns: + feats (Tensor): A tensor of shape (batch_size, self.channels, + H, W) which is feature map for last layer of decoder head. + """ x = self._transform_inputs(inputs) aspp_outs = [ resize( @@ -102,6 +112,11 @@ def forward(self, inputs): ] aspp_outs.extend(self.aspp_modules(x)) aspp_outs = torch.cat(aspp_outs, dim=1) - output = self.bottleneck(aspp_outs) + feats = self.bottleneck(aspp_outs) + return feats + + def forward(self, inputs): + """Forward function.""" + output = self._forward_feature(inputs) output = self.cls_seg(output) return output diff --git a/mmseg/models/decode_heads/cascade_decode_head.py b/mmseg/models/decode_heads/cascade_decode_head.py index d02122ca0e..fe2bcb9302 100644 --- a/mmseg/models/decode_heads/cascade_decode_head.py +++ b/mmseg/models/decode_heads/cascade_decode_head.py @@ -1,5 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
from abc import ABCMeta, abstractmethod +from typing import List +from torch import Tensor + +from mmseg.utils import ConfigType from .decode_head import BaseDecodeHead @@ -8,50 +13,50 @@ class BaseCascadeDecodeHead(BaseDecodeHead, metaclass=ABCMeta): :class:`CascadeEncoderDecoder.""" def __init__(self, *args, **kwargs): - super(BaseCascadeDecodeHead, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) @abstractmethod def forward(self, inputs, prev_output): """Placeholder of forward function.""" pass - def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, - train_cfg): + def loss(self, inputs: List[Tensor], prev_output: Tensor, + batch_data_samples: List[dict], train_cfg: ConfigType) -> Tensor: """Forward function for training. + Args: - inputs (list[Tensor]): List of multi-level img features. + inputs (List[Tensor]): List of multi-level img features. prev_output (Tensor): The output of previous decode head. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - gt_semantic_seg (Tensor): Semantic segmentation masks - used if the architecture supports semantic segmentation task. + batch_data_samples (List[:obj:`SegDataSample`]): The seg + data samples. It usually includes information such + as `metainfo` and `gt_sem_seg`. train_cfg (dict): The training config. Returns: dict[str, Tensor]: a dictionary of loss components """ seg_logits = self.forward(inputs, prev_output) - losses = self.losses(seg_logits, gt_semantic_seg) + losses = self.loss_by_feat(seg_logits, batch_data_samples) return losses - def forward_test(self, inputs, prev_output, img_metas, test_cfg): + def predict(self, inputs: List[Tensor], prev_output: Tensor, + batch_img_metas: List[dict], test_cfg: ConfigType): """Forward function for testing. Args: - inputs (list[Tensor]): List of multi-level img features. + inputs (List[Tensor]): List of multi-level img features. prev_output (Tensor): The output of previous decode head. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + batch_img_metas (List[dict]): List of image info dicts, where each dict may also + contain: 'img_shape', 'scale_factor', 'flip', 'img_path', + 'ori_shape', and 'pad_shape'. For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. + `mmseg/datasets/pipelines/formatting.py:PackSegInputs`. test_cfg (dict): The testing config. Returns: Tensor: Output segmentation map. """ - return self.forward(inputs, prev_output) + seg_logits = self.forward(inputs, prev_output) + + return self.predict_by_feat(seg_logits, batch_img_metas) diff --git a/mmseg/models/decode_heads/cc_head.py b/mmseg/models/decode_heads/cc_head.py index 95c2706a5d..e9075a2648 100644 --- a/mmseg/models/decode_heads/cc_head.py +++ b/mmseg/models/decode_heads/cc_head.py @@ -1,6 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch -from ..builder import HEADS +from mmseg.registry import MODELS from .fcn_head import FCNHead try: @@ -9,7 +10,7 @@ CrissCrossAttention = None -@HEADS.register_module() +@MODELS.register_module() class CCHead(FCNHead): """CCNet: Criss-Cross Attention for Semantic Segmentation.
@@ -25,7 +26,7 @@ def __init__(self, recurrence=2, **kwargs): if CrissCrossAttention is None: raise RuntimeError('Please install mmcv-full for ' 'CrissCrossAttention ops') - super(CCHead, self).__init__(num_convs=2, **kwargs) + super().__init__(num_convs=2, **kwargs) self.recurrence = recurrence self.cca = CrissCrossAttention(self.channels) diff --git a/mmseg/models/decode_heads/da_head.py b/mmseg/models/decode_heads/da_head.py index 8ee0e08c3d..d87214365d 100644 --- a/mmseg/models/decode_heads/da_head.py +++ b/mmseg/models/decode_heads/da_head.py @@ -1,10 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + import torch import torch.nn.functional as F from mmcv.cnn import ConvModule, Scale -from torch import nn +from torch import Tensor, nn -from mmseg.core import add_prefix -from ..builder import HEADS +from mmseg.registry import MODELS +from mmseg.utils import SampleList, add_prefix from ..utils import SelfAttentionBlock as _SelfAttentionBlock from .decode_head import BaseDecodeHead @@ -18,7 +21,7 @@ class PAM(_SelfAttentionBlock): """ def __init__(self, in_channels, channels): - super(PAM, self).__init__( + super().__init__( key_in_channels=in_channels, query_in_channels=in_channels, channels=channels, @@ -40,7 +43,7 @@ def __init__(self, in_channels, channels): def forward(self, x): """Forward function.""" - out = super(PAM, self).forward(x, x) + out = super().forward(x, x) out = self.gamma(out) + x return out @@ -50,7 +53,7 @@ class CAM(nn.Module): """Channel Attention Module (CAM)""" def __init__(self): - super(CAM, self).__init__() + super().__init__() self.gamma = Scale(0) def forward(self, x): @@ -71,7 +74,7 @@ def forward(self, x): return out -@HEADS.register_module() +@MODELS.register_module() class DAHead(BaseDecodeHead): """Dual Attention Network for Scene Segmentation. 
@@ -83,7 +86,7 @@ class DAHead(BaseDecodeHead): """ def __init__(self, pam_channels, **kwargs): - super(DAHead, self).__init__(**kwargs) + super().__init__(**kwargs) self.pam_channels = pam_channels self.pam_in_conv = ConvModule( self.in_channels, @@ -157,22 +160,25 @@ def forward(self, inputs): return pam_cam_out, pam_out, cam_out - def forward_test(self, inputs, img_metas, test_cfg): + def predict(self, inputs, batch_img_metas: List[dict], test_cfg, + **kwargs) -> List[Tensor]: """Forward function for testing, only ``pam_cam`` is used.""" - return self.forward(inputs)[0] + seg_logits = self.forward(inputs)[0] + return self.predict_by_feat(seg_logits, batch_img_metas, **kwargs) - def losses(self, seg_logit, seg_label): + def loss_by_feat(self, seg_logit: Tuple[Tensor], + batch_data_samples: SampleList, **kwargs) -> dict: """Compute ``pam_cam``, ``pam``, ``cam`` loss.""" pam_cam_seg_logit, pam_seg_logit, cam_seg_logit = seg_logit loss = dict() loss.update( add_prefix( - super(DAHead, self).losses(pam_cam_seg_logit, seg_label), + super().loss_by_feat(pam_cam_seg_logit, batch_data_samples), 'pam_cam')) loss.update( - add_prefix( - super(DAHead, self).losses(pam_seg_logit, seg_label), 'pam')) + add_prefix(super().loss_by_feat(pam_seg_logit, batch_data_samples), + 'pam')) loss.update( - add_prefix( - super(DAHead, self).losses(cam_seg_logit, seg_label), 'cam')) + add_prefix(super().loss_by_feat(cam_seg_logit, batch_data_samples), + 'cam')) return loss diff --git a/mmseg/models/decode_heads/decode_head.py b/mmseg/models/decode_heads/decode_head.py index d4c8748722..0803715f82 100644 --- a/mmseg/models/decode_heads/decode_head.py +++ b/mmseg/models/decode_heads/decode_head.py @@ -1,23 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings from abc import ABCMeta, abstractmethod +from typing import List, Tuple import torch import torch.nn as nn -from mmcv.cnn import normal_init +from mmengine.model import BaseModule +from torch import Tensor -from mmseg.core import build_pixel_sampler -from mmseg.ops import resize +from mmseg.structures import build_pixel_sampler +from mmseg.utils import ConfigType, SampleList from ..builder import build_loss from ..losses import accuracy +from ..utils import resize -class BaseDecodeHead(nn.Module, metaclass=ABCMeta): +class BaseDecodeHead(BaseModule, metaclass=ABCMeta): """Base class for BaseDecodeHead. + 1. The ``init_weights`` method is used to initialize decode_head's + model parameters. After segmentor initialization, ``init_weights`` + is triggered when ``segmentor.init_weights()`` is called externally. + + 2. The ``loss`` method is used to calculate the loss of decode_head, + which includes two steps: (1) the decode_head model performs forward + propagation to obtain the feature maps (2) The ``loss_by_feat`` method + is called based on the feature maps to calculate the loss. + + .. code:: text + + loss(): forward() -> loss_by_feat() + + 3. The ``predict`` method is used to predict segmentation results, + which includes two steps: (1) the decode_head model performs forward + propagation to obtain the feature maps (2) The ``predict_by_feat`` method + is called based on the feature maps to predict segmentation results + including post-processing. + + .. code:: text + + predict(): forward() -> predict_by_feat() + Args: in_channels (int|Sequence[int]): Input channels. channels (int): Channels after modules, before conv_seg. num_classes (int): Number of classes. - drop_out_ratio (float): Ratio of dropout layer. Default: 0.1. 
+ out_channels (int): Output channels of conv_seg. + threshold (float): Threshold for binary segmentation in the case of + `num_classes==1`. Default: None. + dropout_ratio (float): Ratio of dropout layer. Default: 0.1. conv_cfg (dict|None): Config of conv layers. Default: None. norm_cfg (dict|None): Config of norm layers. Default: None. act_cfg (dict): Config of activation layers. @@ -32,13 +63,22 @@ class BaseDecodeHead(nn.Module, metaclass=ABCMeta): a list and passed into decode head. None: Only one select feature map is allowed. Default: None. - loss_decode (dict): Config of decode loss. + loss_decode (dict | Sequence[dict]): Config of decode loss. + The `loss_name` is property of corresponding loss function which + could be shown in training log. If you want this loss + item to be included into the backward graph, `loss_` must be the + prefix of the name. Defaults to 'loss_ce'. + e.g. dict(type='CrossEntropyLoss'), + [dict(type='CrossEntropyLoss', loss_name='loss_ce'), + dict(type='DiceLoss', loss_name='loss_dice')] Default: dict(type='CrossEntropyLoss'). - ignore_index (int): The label index to be ignored. Default: 255 + ignore_index (int | None): The label index to be ignored. When using + masked BCE loss, ignore_index should be set to None. Default: 255. sampler (dict|None): The config of segmentation map sampler. Default: None. align_corners (bool): align_corners argument of F.interpolate. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, @@ -46,7 +86,9 @@ def __init__(self, channels, *, num_classes, - drop_out_ratio=0.1, + out_channels=None, + threshold=None, + dropout_ratio=0.1, conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), @@ -58,27 +100,63 @@ def __init__(self, loss_weight=1.0), ignore_index=255, sampler=None, - align_corners=False): - super(BaseDecodeHead, self).__init__() + align_corners=False, + init_cfg=dict( + type='Normal', std=0.01, override=dict(name='conv_seg'))): + super().__init__(init_cfg) self._init_inputs(in_channels, in_index, input_transform) self.channels = channels - self.num_classes = num_classes - self.drop_out_ratio = drop_out_ratio + self.dropout_ratio = dropout_ratio self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.in_index = in_index - self.loss_decode = build_loss(loss_decode) + self.ignore_index = ignore_index self.align_corners = align_corners + + if out_channels is None: + if num_classes == 2: + warnings.warn('For binary segmentation, we suggest using' + '`out_channels = 1` to define the output' + 'channels of segmentor, and use `threshold`' + 'to convert `seg_logits` into a prediction' + 'applying a threshold') + out_channels = num_classes + + if out_channels != num_classes and out_channels != 1: + raise ValueError( + 'out_channels should be equal to num_classes,' + 'except binary segmentation set out_channels == 1 and' + f'num_classes == 2, but got out_channels={out_channels}' + f'and num_classes={num_classes}') + + if out_channels == 1 and threshold is None: + threshold = 0.3 + warnings.warn('threshold is not defined for binary, and defaults' + 'to 0.3') + self.num_classes = num_classes + self.out_channels = out_channels + self.threshold = threshold + + if isinstance(loss_decode, dict): + self.loss_decode = build_loss(loss_decode) + elif isinstance(loss_decode, (list, tuple)): + self.loss_decode = nn.ModuleList() + for loss in loss_decode: + self.loss_decode.append(build_loss(loss)) + else: + raise TypeError(f'loss_decode must be a dict or sequence 
of dict,\ + but got {type(loss_decode)}') + if sampler is not None: - self.sampler = build_pixel_sampler(sampler) + self.sampler = build_pixel_sampler(sampler, context=self) else: self.sampler = None - self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1) - if drop_out_ratio > 0: - self.dropout = nn.Dropout2d(drop_out_ratio) + self.conv_seg = nn.Conv2d(channels, self.out_channels, kernel_size=1) + if dropout_ratio > 0: + self.dropout = nn.Dropout2d(dropout_ratio) else: self.dropout = None @@ -127,10 +205,6 @@ def _init_inputs(self, in_channels, in_index, input_transform): assert isinstance(in_index, int) self.in_channels = in_channels - def init_weights(self): - """Initialize weights of classification layer.""" - normal_init(self.conv_seg, mean=0, std=0.01) - def _transform_inputs(self, inputs): """Transform inputs for decoder. @@ -163,67 +237,122 @@ def forward(self, inputs): """Placeholder of forward function.""" pass - def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg): + def cls_seg(self, feat): + """Classify each pixel.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.conv_seg(feat) + return output + + def loss(self, inputs: Tuple[Tensor], batch_data_samples: SampleList, + train_cfg: ConfigType) -> dict: """Forward function for training. + Args: - inputs (list[Tensor]): List of multi-level img features. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - gt_semantic_seg (Tensor): Semantic segmentation masks - used if the architecture supports semantic segmentation task. + inputs (Tuple[Tensor]): List of multi-level img features. + batch_data_samples (list[:obj:`SegDataSample`]): The seg + data samples. It usually includes information such + as `img_metas` or `gt_semantic_seg`. train_cfg (dict): The training config. Returns: dict[str, Tensor]: a dictionary of loss components """ seg_logits = self.forward(inputs) - losses = self.losses(seg_logits, gt_semantic_seg) + losses = self.loss_by_feat(seg_logits, batch_data_samples) return losses - def forward_test(self, inputs, img_metas, test_cfg): - """Forward function for testing. + def predict(self, inputs: Tuple[Tensor], batch_img_metas: List[dict], + test_cfg: ConfigType) -> List[Tensor]: + """Forward function for prediction. Args: - inputs (list[Tensor]): List of multi-level img features. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + inputs (Tuple[Tensor]): List of multi-level img features. + batch_img_metas (dict): List Image info where each dict may also + contain: 'img_shape', 'scale_factor', 'flip', 'img_path', + 'ori_shape', and 'pad_shape'. For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. + `mmseg/datasets/pipelines/formatting.py:PackSegInputs`. test_cfg (dict): The testing config. Returns: - Tensor: Output segmentation map. + List[Tensor]: Outputs segmentation logits map. 
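+
+        Example:
+            A minimal sketch of the ``forward() -> predict_by_feat()``
+            flow, assuming a concrete subclass such as ``FCNHead`` and
+            toy shapes (both are assumptions, not values from a shipped
+            config):
+
+            >>> import torch
+            >>> from mmseg.models.decode_heads import FCNHead
+            >>> head = FCNHead(in_channels=32, channels=16, num_classes=19)
+            >>> inputs = [torch.rand(2, 32, 64, 64)]
+            >>> batch_img_metas = [dict(img_shape=(128, 128))]
+            >>> head.predict(inputs, batch_img_metas, test_cfg=None).shape
+            torch.Size([2, 19, 128, 128])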
""" - return self.forward(inputs) + seg_logits = self.forward(inputs) - def cls_seg(self, feat): - """Classify each pixel.""" - if self.dropout is not None: - feat = self.dropout(feat) - output = self.conv_seg(feat) - return output + return self.predict_by_feat(seg_logits, batch_img_metas) + + def _stack_batch_gt(self, batch_data_samples: SampleList) -> Tensor: + gt_semantic_segs = [ + data_sample.gt_sem_seg.data for data_sample in batch_data_samples + ] + return torch.stack(gt_semantic_segs, dim=0) + + def loss_by_feat(self, seg_logits: Tensor, + batch_data_samples: SampleList) -> dict: + """Compute segmentation loss. + + Args: + seg_logits (Tensor): The output from decode head forward function. + batch_data_samples (List[:obj:`SegDataSample`]): The seg + data samples. It usually includes information such + as `metainfo` and `gt_sem_seg`. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ - def losses(self, seg_logit, seg_label): - """Compute segmentation loss.""" + seg_label = self._stack_batch_gt(batch_data_samples) loss = dict() - seg_logit = resize( - input=seg_logit, + seg_logits = resize( + input=seg_logits, size=seg_label.shape[2:], mode='bilinear', align_corners=self.align_corners) if self.sampler is not None: - seg_weight = self.sampler.sample(seg_logit, seg_label) + seg_weight = self.sampler.sample(seg_logits, seg_label) else: seg_weight = None seg_label = seg_label.squeeze(1) - loss['loss_seg'] = self.loss_decode( - seg_logit, - seg_label, - weight=seg_weight, - ignore_index=self.ignore_index) - loss['acc_seg'] = accuracy(seg_logit, seg_label) + + if not isinstance(self.loss_decode, nn.ModuleList): + losses_decode = [self.loss_decode] + else: + losses_decode = self.loss_decode + for loss_decode in losses_decode: + if loss_decode.loss_name not in loss: + loss[loss_decode.loss_name] = loss_decode( + seg_logits, + seg_label, + weight=seg_weight, + ignore_index=self.ignore_index) + else: + loss[loss_decode.loss_name] += loss_decode( + seg_logits, + seg_label, + weight=seg_weight, + ignore_index=self.ignore_index) + + loss['acc_seg'] = accuracy( + seg_logits, seg_label, ignore_index=self.ignore_index) return loss + + def predict_by_feat(self, seg_logits: Tensor, + batch_img_metas: List[dict]) -> Tensor: + """Transform a batch of output seg_logits to the input shape. + + Args: + seg_logits (Tensor): The output from decode head forward function. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + + Returns: + Tensor: Outputs segmentation logits map. + """ + + seg_logits = resize( + input=seg_logits, + size=batch_img_metas[0]['img_shape'], + mode='bilinear', + align_corners=self.align_corners) + return seg_logits diff --git a/mmseg/models/decode_heads/dm_head.py b/mmseg/models/decode_heads/dm_head.py new file mode 100644 index 0000000000..7694abd8ac --- /dev/null +++ b/mmseg/models/decode_heads/dm_head.py @@ -0,0 +1,141 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer + +from mmseg.registry import MODELS +from .decode_head import BaseDecodeHead + + +class DCM(nn.Module): + """Dynamic Convolutional Module used in DMNet. + + Args: + filter_size (int): The filter size of generated convolution kernel + used in Dynamic Convolutional Module. + fusion (bool): Add one conv to fuse DCM output feature. + in_channels (int): Input channels. 
+ channels (int): Channels after modules, before conv_seg. + conv_cfg (dict | None): Config of conv layers. + norm_cfg (dict | None): Config of norm layers. + act_cfg (dict): Config of activation layers. + """ + + def __init__(self, filter_size, fusion, in_channels, channels, conv_cfg, + norm_cfg, act_cfg): + super().__init__() + self.filter_size = filter_size + self.fusion = fusion + self.in_channels = in_channels + self.channels = channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.filter_gen_conv = nn.Conv2d(self.in_channels, self.channels, 1, 1, + 0) + + self.input_redu_conv = ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + if self.norm_cfg is not None: + self.norm = build_norm_layer(self.norm_cfg, self.channels)[1] + else: + self.norm = None + self.activate = build_activation_layer(self.act_cfg) + + if self.fusion: + self.fusion_conv = ConvModule( + self.channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, x): + """Forward function.""" + generated_filter = self.filter_gen_conv( + F.adaptive_avg_pool2d(x, self.filter_size)) + x = self.input_redu_conv(x) + b, c, h, w = x.shape + # [1, b * c, h, w], c = self.channels + x = x.view(1, b * c, h, w) + # [b * c, 1, filter_size, filter_size] + generated_filter = generated_filter.view(b * c, 1, self.filter_size, + self.filter_size) + pad = (self.filter_size - 1) // 2 + if (self.filter_size - 1) % 2 == 0: + p2d = (pad, pad, pad, pad) + else: + p2d = (pad + 1, pad, pad + 1, pad) + x = F.pad(input=x, pad=p2d, mode='constant', value=0) + # [1, b * c, h, w] + output = F.conv2d(input=x, weight=generated_filter, groups=b * c) + # [b, c, h, w] + output = output.view(b, c, h, w) + if self.norm is not None: + output = self.norm(output) + output = self.activate(output) + + if self.fusion: + output = self.fusion_conv(output) + + return output + + +@MODELS.register_module() +class DMHead(BaseDecodeHead): + """Dynamic Multi-scale Filters for Semantic Segmentation. + + This head is the implementation of + `DMNet `_. + + Args: + filter_sizes (tuple[int]): The size of generated convolutional filters + used in Dynamic Convolutional Module. Default: (1, 3, 5, 7). + fusion (bool): Add one conv to fuse DCM output feature. 
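+
+    Example:
+        An illustrative sketch; the channel sizes, feature resolution and
+        ``num_classes`` below are assumed toy values:
+
+        >>> import torch
+        >>> head = DMHead(in_channels=64, channels=32, num_classes=19)
+        >>> inputs = [torch.rand(2, 64, 32, 32)]
+        >>> head(inputs).shape
+        torch.Size([2, 19, 32, 32])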
+ """ + + def __init__(self, filter_sizes=(1, 3, 5, 7), fusion=False, **kwargs): + super().__init__(**kwargs) + assert isinstance(filter_sizes, (list, tuple)) + self.filter_sizes = filter_sizes + self.fusion = fusion + dcm_modules = [] + for filter_size in self.filter_sizes: + dcm_modules.append( + DCM(filter_size, + self.fusion, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.dcm_modules = nn.ModuleList(dcm_modules) + self.bottleneck = ConvModule( + self.in_channels + len(filter_sizes) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + dcm_outs = [x] + for dcm_module in self.dcm_modules: + dcm_outs.append(dcm_module(x)) + dcm_outs = torch.cat(dcm_outs, dim=1) + output = self.bottleneck(dcm_outs) + output = self.cls_seg(output) + return output diff --git a/mmseg/models/decode_heads/dnl_head.py b/mmseg/models/decode_heads/dnl_head.py new file mode 100644 index 0000000000..248c118141 --- /dev/null +++ b/mmseg/models/decode_heads/dnl_head.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.cnn import NonLocal2d +from torch import nn + +from mmseg.registry import MODELS +from .fcn_head import FCNHead + + +class DisentangledNonLocal2d(NonLocal2d): + """Disentangled Non-Local Blocks. + + Args: + temperature (float): Temperature to adjust attention. Default: 0.05 + """ + + def __init__(self, *arg, temperature, **kwargs): + super().__init__(*arg, **kwargs) + self.temperature = temperature + self.conv_mask = nn.Conv2d(self.in_channels, 1, kernel_size=1) + + def embedded_gaussian(self, theta_x, phi_x): + """Embedded gaussian with temperature.""" + + # NonLocal2d pairwise_weight: [N, HxW, HxW] + pairwise_weight = torch.matmul(theta_x, phi_x) + if self.use_scale: + # theta_x.shape[-1] is `self.inter_channels` + pairwise_weight /= torch.tensor( + theta_x.shape[-1], + dtype=torch.float, + device=pairwise_weight.device)**torch.tensor( + 0.5, device=pairwise_weight.device) + pairwise_weight /= torch.tensor( + self.temperature, device=pairwise_weight.device) + pairwise_weight = pairwise_weight.softmax(dim=-1) + return pairwise_weight + + def forward(self, x): + # x: [N, C, H, W] + n = x.size(0) + + # g_x: [N, HxW, C] + g_x = self.g(x).view(n, self.inter_channels, -1) + g_x = g_x.permute(0, 2, 1) + + # theta_x: [N, HxW, C], phi_x: [N, C, HxW] + if self.mode == 'gaussian': + theta_x = x.view(n, self.in_channels, -1) + theta_x = theta_x.permute(0, 2, 1) + if self.sub_sample: + phi_x = self.phi(x).view(n, self.in_channels, -1) + else: + phi_x = x.view(n, self.in_channels, -1) + elif self.mode == 'concatenation': + theta_x = self.theta(x).view(n, self.inter_channels, -1, 1) + phi_x = self.phi(x).view(n, self.inter_channels, 1, -1) + else: + theta_x = self.theta(x).view(n, self.inter_channels, -1) + theta_x = theta_x.permute(0, 2, 1) + phi_x = self.phi(x).view(n, self.inter_channels, -1) + + # subtract mean + theta_x -= theta_x.mean(dim=-2, keepdim=True) + phi_x -= phi_x.mean(dim=-1, keepdim=True) + + pairwise_func = getattr(self, self.mode) + # pairwise_weight: [N, HxW, HxW] + pairwise_weight = pairwise_func(theta_x, phi_x) + + # y: [N, HxW, C] + y = torch.matmul(pairwise_weight, g_x) + # y: [N, C, H, W] + y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels, + *x.size()[2:]) + + # unary_mask: [N, 1, HxW] + 
unary_mask = self.conv_mask(x) + unary_mask = unary_mask.view(n, 1, -1) + unary_mask = unary_mask.softmax(dim=-1) + # unary_x: [N, 1, C] + unary_x = torch.matmul(unary_mask, g_x) + # unary_x: [N, C, 1, 1] + unary_x = unary_x.permute(0, 2, 1).contiguous().reshape( + n, self.inter_channels, 1, 1) + + output = x + self.conv_out(y + unary_x) + + return output + + +@MODELS.register_module() +class DNLHead(FCNHead): + """Disentangled Non-Local Neural Networks. + + This head is the implementation of `DNLNet + `_. + + Args: + reduction (int): Reduction factor of projection transform. Default: 2. + use_scale (bool): Whether to scale pairwise_weight by + sqrt(1/inter_channels). Default: False. + mode (str): The nonlocal mode. Options are 'embedded_gaussian', + 'dot_product'. Default: 'embedded_gaussian.'. + temperature (float): Temperature to adjust attention. Default: 0.05 + """ + + def __init__(self, + reduction=2, + use_scale=True, + mode='embedded_gaussian', + temperature=0.05, + **kwargs): + super().__init__(num_convs=2, **kwargs) + self.reduction = reduction + self.use_scale = use_scale + self.mode = mode + self.temperature = temperature + self.dnl_block = DisentangledNonLocal2d( + in_channels=self.channels, + reduction=self.reduction, + use_scale=self.use_scale, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + mode=self.mode, + temperature=self.temperature) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + output = self.convs[0](x) + output = self.dnl_block(output) + output = self.convs[1](output) + if self.concat_input: + output = self.conv_cat(torch.cat([x, output], dim=1)) + output = self.cls_seg(output) + return output diff --git a/mmseg/models/decode_heads/dpt_head.py b/mmseg/models/decode_heads/dpt_head.py new file mode 100644 index 0000000000..d2cfd89daa --- /dev/null +++ b/mmseg/models/decode_heads/dpt_head.py @@ -0,0 +1,294 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, Linear, build_activation_layer +from mmengine.model import BaseModule + +from mmseg.registry import MODELS +from ..utils import resize +from .decode_head import BaseDecodeHead + + +class ReassembleBlocks(BaseModule): + """ViTPostProcessBlock, process cls_token in ViT backbone output and + rearrange the feature vector to feature map. + + Args: + in_channels (int): ViT feature channels. Default: 768. + out_channels (List): output channels of each stage. + Default: [96, 192, 384, 768]. + readout_type (str): Type of readout operation. Default: 'ignore'. + patch_size (int): The patch size. Default: 16. + init_cfg (dict, optional): Initialization config dict. Default: None. 
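+
+    Example:
+        An illustrative sketch; the batch size and the 32x32 ViT token
+        grid below are assumptions:
+
+        >>> import torch
+        >>> block = ReassembleBlocks()
+        >>> vit_outs = [[torch.rand(1, 768, 32, 32), torch.rand(1, 768)]
+        ...             for _ in range(4)]
+        >>> [tuple(o.shape) for o in block(vit_outs)]
+        [(1, 96, 128, 128), (1, 192, 64, 64), (1, 384, 32, 32), (1, 768, 16, 16)]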
+ """ + + def __init__(self, + in_channels=768, + out_channels=[96, 192, 384, 768], + readout_type='ignore', + patch_size=16, + init_cfg=None): + super().__init__(init_cfg) + + assert readout_type in ['ignore', 'add', 'project'] + self.readout_type = readout_type + self.patch_size = patch_size + + self.projects = nn.ModuleList([ + ConvModule( + in_channels=in_channels, + out_channels=out_channel, + kernel_size=1, + act_cfg=None, + ) for out_channel in out_channels + ]) + + self.resize_layers = nn.ModuleList([ + nn.ConvTranspose2d( + in_channels=out_channels[0], + out_channels=out_channels[0], + kernel_size=4, + stride=4, + padding=0), + nn.ConvTranspose2d( + in_channels=out_channels[1], + out_channels=out_channels[1], + kernel_size=2, + stride=2, + padding=0), + nn.Identity(), + nn.Conv2d( + in_channels=out_channels[3], + out_channels=out_channels[3], + kernel_size=3, + stride=2, + padding=1) + ]) + if self.readout_type == 'project': + self.readout_projects = nn.ModuleList() + for _ in range(len(self.projects)): + self.readout_projects.append( + nn.Sequential( + Linear(2 * in_channels, in_channels), + build_activation_layer(dict(type='GELU')))) + + def forward(self, inputs): + assert isinstance(inputs, list) + out = [] + for i, x in enumerate(inputs): + assert len(x) == 2 + x, cls_token = x[0], x[1] + feature_shape = x.shape + if self.readout_type == 'project': + x = x.flatten(2).permute((0, 2, 1)) + readout = cls_token.unsqueeze(1).expand_as(x) + x = self.readout_projects[i](torch.cat((x, readout), -1)) + x = x.permute(0, 2, 1).reshape(feature_shape) + elif self.readout_type == 'add': + x = x.flatten(2) + cls_token.unsqueeze(-1) + x = x.reshape(feature_shape) + else: + pass + x = self.projects[i](x) + x = self.resize_layers[i](x) + out.append(x) + return out + + +class PreActResidualConvUnit(BaseModule): + """ResidualConvUnit, pre-activate residual unit. + + Args: + in_channels (int): number of channels in the input feature map. + act_cfg (dict): dictionary to construct and config activation layer. + norm_cfg (dict): dictionary to construct and config norm layer. + stride (int): stride of the first block. Default: 1 + dilation (int): dilation rate for convs layers. Default: 1. + init_cfg (dict, optional): Initialization config dict. Default: None. + """ + + def __init__(self, + in_channels, + act_cfg, + norm_cfg, + stride=1, + dilation=1, + init_cfg=None): + super().__init__(init_cfg) + + self.conv1 = ConvModule( + in_channels, + in_channels, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + bias=False, + order=('act', 'conv', 'norm')) + + self.conv2 = ConvModule( + in_channels, + in_channels, + 3, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + bias=False, + order=('act', 'conv', 'norm')) + + def forward(self, inputs): + inputs_ = inputs.clone() + x = self.conv1(inputs) + x = self.conv2(x) + return x + inputs_ + + +class FeatureFusionBlock(BaseModule): + """FeatureFusionBlock, merge feature map from different stages. + + Args: + in_channels (int): Input channels. + act_cfg (dict): The activation config for ResidualConvUnit. + norm_cfg (dict): Config dict for normalization layer. + expand (bool): Whether expand the channels in post process block. + Default: False. + align_corners (bool): align_corner setting for bilinear upsample. + Default: True. + init_cfg (dict, optional): Initialization config dict. Default: None. 
+ """ + + def __init__(self, + in_channels, + act_cfg, + norm_cfg, + expand=False, + align_corners=True, + init_cfg=None): + super().__init__(init_cfg) + + self.in_channels = in_channels + self.expand = expand + self.align_corners = align_corners + + self.out_channels = in_channels + if self.expand: + self.out_channels = in_channels // 2 + + self.project = ConvModule( + self.in_channels, + self.out_channels, + kernel_size=1, + act_cfg=None, + bias=True) + + self.res_conv_unit1 = PreActResidualConvUnit( + in_channels=self.in_channels, act_cfg=act_cfg, norm_cfg=norm_cfg) + self.res_conv_unit2 = PreActResidualConvUnit( + in_channels=self.in_channels, act_cfg=act_cfg, norm_cfg=norm_cfg) + + def forward(self, *inputs): + x = inputs[0] + if len(inputs) == 2: + if x.shape != inputs[1].shape: + res = resize( + inputs[1], + size=(x.shape[2], x.shape[3]), + mode='bilinear', + align_corners=False) + else: + res = inputs[1] + x = x + self.res_conv_unit1(res) + x = self.res_conv_unit2(x) + x = resize( + x, + scale_factor=2, + mode='bilinear', + align_corners=self.align_corners) + x = self.project(x) + return x + + +@MODELS.register_module() +class DPTHead(BaseDecodeHead): + """Vision Transformers for Dense Prediction. + + This head is implemented of `DPT `_. + + Args: + embed_dims (int): The embed dimension of the ViT backbone. + Default: 768. + post_process_channels (List): Out channels of post process conv + layers. Default: [96, 192, 384, 768]. + readout_type (str): Type of readout operation. Default: 'ignore'. + patch_size (int): The patch size. Default: 16. + expand_channels (bool): Whether expand the channels in post process + block. Default: False. + act_cfg (dict): The activation config for residual conv unit. + Default dict(type='ReLU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). 
+ """ + + def __init__(self, + embed_dims=768, + post_process_channels=[96, 192, 384, 768], + readout_type='ignore', + patch_size=16, + expand_channels=False, + act_cfg=dict(type='ReLU'), + norm_cfg=dict(type='BN'), + **kwargs): + super().__init__(**kwargs) + + self.in_channels = self.in_channels + self.expand_channels = expand_channels + self.reassemble_blocks = ReassembleBlocks(embed_dims, + post_process_channels, + readout_type, patch_size) + + self.post_process_channels = [ + channel * math.pow(2, i) if expand_channels else channel + for i, channel in enumerate(post_process_channels) + ] + self.convs = nn.ModuleList() + for channel in self.post_process_channels: + self.convs.append( + ConvModule( + channel, + self.channels, + kernel_size=3, + padding=1, + act_cfg=None, + bias=False)) + self.fusion_blocks = nn.ModuleList() + for _ in range(len(self.convs)): + self.fusion_blocks.append( + FeatureFusionBlock(self.channels, act_cfg, norm_cfg)) + self.fusion_blocks[0].res_conv_unit1 = None + self.project = ConvModule( + self.channels, + self.channels, + kernel_size=3, + padding=1, + norm_cfg=norm_cfg) + self.num_fusion_blocks = len(self.fusion_blocks) + self.num_reassemble_blocks = len(self.reassemble_blocks.resize_layers) + self.num_post_process_channels = len(self.post_process_channels) + assert self.num_fusion_blocks == self.num_reassemble_blocks + assert self.num_reassemble_blocks == self.num_post_process_channels + + def forward(self, inputs): + assert len(inputs) == self.num_reassemble_blocks + x = self._transform_inputs(inputs) + x = self.reassemble_blocks(x) + x = [self.convs[i](feature) for i, feature in enumerate(x)] + out = self.fusion_blocks[0](x[-1]) + for i in range(1, len(self.fusion_blocks)): + out = self.fusion_blocks[i](out, x[-(i + 1)]) + out = self.project(out) + out = self.cls_seg(out) + return out diff --git a/mmseg/models/decode_heads/ema_head.py b/mmseg/models/decode_heads/ema_head.py new file mode 100644 index 0000000000..ab8dbb0c29 --- /dev/null +++ b/mmseg/models/decode_heads/ema_head.py @@ -0,0 +1,169 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule + +from mmseg.registry import MODELS +from .decode_head import BaseDecodeHead + + +def reduce_mean(tensor): + """Reduce mean when distributed training.""" + if not (dist.is_available() and dist.is_initialized()): + return tensor + tensor = tensor.clone() + dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM) + return tensor + + +class EMAModule(nn.Module): + """Expectation Maximization Attention Module used in EMANet. + + Args: + channels (int): Channels of the whole module. + num_bases (int): Number of bases. + num_stages (int): Number of the EM iterations. + """ + + def __init__(self, channels, num_bases, num_stages, momentum): + super().__init__() + assert num_stages >= 1, 'num_stages must be at least 1!' + self.num_bases = num_bases + self.num_stages = num_stages + self.momentum = momentum + + bases = torch.zeros(1, channels, self.num_bases) + bases.normal_(0, math.sqrt(2. 
/ self.num_bases)) + # [1, channels, num_bases] + bases = F.normalize(bases, dim=1, p=2) + self.register_buffer('bases', bases) + + def forward(self, feats): + """Forward function.""" + batch_size, channels, height, width = feats.size() + # [batch_size, channels, height*width] + feats = feats.view(batch_size, channels, height * width) + # [batch_size, channels, num_bases] + bases = self.bases.repeat(batch_size, 1, 1) + + with torch.no_grad(): + for i in range(self.num_stages): + # [batch_size, height*width, num_bases] + attention = torch.einsum('bcn,bck->bnk', feats, bases) + attention = F.softmax(attention, dim=2) + # l1 norm + attention_normed = F.normalize(attention, dim=1, p=1) + # [batch_size, channels, num_bases] + bases = torch.einsum('bcn,bnk->bck', feats, attention_normed) + # l2 norm + bases = F.normalize(bases, dim=1, p=2) + + feats_recon = torch.einsum('bck,bnk->bcn', bases, attention) + feats_recon = feats_recon.view(batch_size, channels, height, width) + + if self.training: + bases = bases.mean(dim=0, keepdim=True) + bases = reduce_mean(bases) + # l2 norm + bases = F.normalize(bases, dim=1, p=2) + self.bases = (1 - + self.momentum) * self.bases + self.momentum * bases + + return feats_recon + + +@MODELS.register_module() +class EMAHead(BaseDecodeHead): + """Expectation Maximization Attention Networks for Semantic Segmentation. + + This head is the implementation of `EMANet + `_. + + Args: + ema_channels (int): EMA module channels + num_bases (int): Number of bases. + num_stages (int): Number of the EM iterations. + concat_input (bool): Whether concat the input and output of convs + before classification layer. Default: True + momentum (float): Momentum to update the base. Default: 0.1. + """ + + def __init__(self, + ema_channels, + num_bases, + num_stages, + concat_input=True, + momentum=0.1, + **kwargs): + super().__init__(**kwargs) + self.ema_channels = ema_channels + self.num_bases = num_bases + self.num_stages = num_stages + self.concat_input = concat_input + self.momentum = momentum + self.ema_module = EMAModule(self.ema_channels, self.num_bases, + self.num_stages, self.momentum) + + self.ema_in_conv = ConvModule( + self.in_channels, + self.ema_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + # project (0, inf) -> (-inf, inf) + self.ema_mid_conv = ConvModule( + self.ema_channels, + self.ema_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=None, + act_cfg=None) + for param in self.ema_mid_conv.parameters(): + param.requires_grad = False + + self.ema_out_conv = ConvModule( + self.ema_channels, + self.ema_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=None) + self.bottleneck = ConvModule( + self.ema_channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + if self.concat_input: + self.conv_cat = ConvModule( + self.in_channels + self.channels, + self.channels, + kernel_size=3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + feats = self.ema_in_conv(x) + identity = feats + feats = self.ema_mid_conv(feats) + recon = self.ema_module(feats) + recon = F.relu(recon, inplace=True) + recon = self.ema_out_conv(recon) + output = F.relu(identity + recon, inplace=True) + output = self.bottleneck(output) + if self.concat_input: + output = self.conv_cat(torch.cat([x, output], dim=1)) + output = 
self.cls_seg(output) + return output diff --git a/mmseg/models/decode_heads/enc_head.py b/mmseg/models/decode_heads/enc_head.py index 0c11994cf6..ef48fb6995 100644 --- a/mmseg/models/decode_heads/enc_head.py +++ b/mmseg/models/decode_heads/enc_head.py @@ -1,10 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, build_norm_layer +from torch import Tensor -from mmseg.ops import Encoding, resize -from ..builder import HEADS, build_loss +from mmseg.registry import MODELS +from mmseg.utils import ConfigType, SampleList +from ..builder import build_loss +from ..utils import Encoding, resize from .decode_head import BaseDecodeHead @@ -20,7 +26,7 @@ class EncModule(nn.Module): """ def __init__(self, in_channels, num_codes, conv_cfg, norm_cfg, act_cfg): - super(EncModule, self).__init__() + super().__init__() self.encoding_project = ConvModule( in_channels, in_channels, @@ -58,7 +64,7 @@ def forward(self, x): return encoding_feat, output -@HEADS.register_module() +@MODELS.register_module() class EncHead(BaseDecodeHead): """Context Encoding for Semantic Segmentation. @@ -84,8 +90,7 @@ def __init__(self, use_sigmoid=True, loss_weight=0.2), **kwargs): - super(EncHead, self).__init__( - input_transform='multiple_select', **kwargs) + super().__init__(input_transform='multiple_select', **kwargs) self.use_se_loss = use_se_loss self.add_lateral = add_lateral self.num_codes = num_codes @@ -148,12 +153,14 @@ def forward(self, inputs): else: return output - def forward_test(self, inputs, img_metas, test_cfg): + def predict(self, inputs: Tuple[Tensor], batch_img_metas: List[dict], + test_cfg: ConfigType): """Forward function for testing, ignore se_loss.""" if self.use_se_loss: - return self.forward(inputs)[0] + seg_logits = self.forward(inputs)[0] else: - return self.forward(inputs) + seg_logits = self.forward(inputs) + return self.predict_by_feat(seg_logits, batch_img_metas) @staticmethod def _convert_to_onehot_labels(seg_label, num_classes): @@ -175,11 +182,14 @@ def _convert_to_onehot_labels(seg_label, num_classes): onehot_labels[i] = hist > 0 return onehot_labels - def losses(self, seg_logit, seg_label): + def loss_by_feat(self, seg_logit: Tuple[Tensor], + batch_data_samples: SampleList, **kwargs) -> dict: """Compute segmentation and semantic encoding loss.""" seg_logit, se_seg_logit = seg_logit loss = dict() - loss.update(super(EncHead, self).losses(seg_logit, seg_label)) + loss.update(super().loss_by_feat(seg_logit, batch_data_samples)) + + seg_label = self._stack_batch_gt(batch_data_samples) se_loss = self.loss_se_decode( se_seg_logit, self._convert_to_onehot_labels(seg_label, self.num_classes)) diff --git a/mmseg/models/decode_heads/fcn_head.py b/mmseg/models/decode_heads/fcn_head.py index e586a2e0d4..3418018883 100644 --- a/mmseg/models/decode_heads/fcn_head.py +++ b/mmseg/models/decode_heads/fcn_head.py @@ -1,12 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import ConvModule -from ..builder import HEADS +from mmseg.registry import MODELS from .decode_head import BaseDecodeHead -@HEADS.register_module() +@MODELS.register_module() class FCNHead(BaseDecodeHead): """Fully Convolution Networks for Semantic Segmentation. @@ -17,24 +18,32 @@ class FCNHead(BaseDecodeHead): kernel_size (int): The kernel size for convs in the head. Default: 3. 
concat_input (bool): Whether concat the input and output of convs before classification layer. + dilation (int): The dilation rate for convs in the head. Default: 1. """ def __init__(self, num_convs=2, kernel_size=3, concat_input=True, + dilation=1, **kwargs): - assert num_convs > 0 + assert num_convs >= 0 and dilation > 0 and isinstance(dilation, int) self.num_convs = num_convs self.concat_input = concat_input - super(FCNHead, self).__init__(**kwargs) + self.kernel_size = kernel_size + super().__init__(**kwargs) + if num_convs == 0: + assert self.in_channels == self.channels + + conv_padding = (kernel_size // 2) * dilation convs = [] convs.append( ConvModule( self.in_channels, self.channels, kernel_size=kernel_size, - padding=kernel_size // 2, + padding=conv_padding, + dilation=dilation, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) @@ -44,11 +53,15 @@ def __init__(self, self.channels, self.channels, kernel_size=kernel_size, - padding=kernel_size // 2, + padding=conv_padding, + dilation=dilation, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) - self.convs = nn.Sequential(*convs) + if num_convs == 0: + self.convs = nn.Identity() + else: + self.convs = nn.Sequential(*convs) if self.concat_input: self.conv_cat = ConvModule( self.in_channels + self.channels, @@ -59,11 +72,25 @@ def __init__(self, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) - def forward(self, inputs): - """Forward function.""" + def _forward_feature(self, inputs): + """Forward function for feature maps before classifying each pixel with + ``self.cls_seg`` fc. + + Args: + inputs (list[Tensor]): List of multi-level img features. + + Returns: + feats (Tensor): A tensor of shape (batch_size, self.channels, + H, W) which is feature map for last layer of decoder head. + """ x = self._transform_inputs(inputs) - output = self.convs(x) + feats = self.convs(x) if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) + feats = self.conv_cat(torch.cat([x, feats], dim=1)) + return feats + + def forward(self, inputs): + """Forward function.""" + output = self._forward_feature(inputs) output = self.cls_seg(output) return output diff --git a/mmseg/models/decode_heads/fpn_head.py b/mmseg/models/decode_heads/fpn_head.py new file mode 100644 index 0000000000..25f481fe81 --- /dev/null +++ b/mmseg/models/decode_heads/fpn_head.py @@ -0,0 +1,68 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch.nn as nn +from mmcv.cnn import ConvModule + +from mmseg.registry import MODELS +from ..utils import Upsample, resize +from .decode_head import BaseDecodeHead + + +@MODELS.register_module() +class FPNHead(BaseDecodeHead): + """Panoptic Feature Pyramid Networks. + + This head is the implementation of `Semantic FPN + `_. + + Args: + feature_strides (tuple[int]): The strides for input feature maps. + stack_lateral. All strides suppose to be power of 2. The first + one is of largest resolution. 
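+
+    Example:
+        An illustrative sketch; the strides, channel sizes and feature
+        resolutions below are assumed toy values:
+
+        >>> import torch
+        >>> head = FPNHead(feature_strides=(4, 8, 16, 32),
+        ...                in_channels=(32, 32, 32, 32),
+        ...                in_index=(0, 1, 2, 3),
+        ...                channels=16,
+        ...                num_classes=19)
+        >>> feats = [torch.rand(2, 32, 64, 64), torch.rand(2, 32, 32, 32),
+        ...          torch.rand(2, 32, 16, 16), torch.rand(2, 32, 8, 8)]
+        >>> head(feats).shape
+        torch.Size([2, 19, 64, 64])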
+ """ + + def __init__(self, feature_strides, **kwargs): + super().__init__(input_transform='multiple_select', **kwargs) + assert len(feature_strides) == len(self.in_channels) + assert min(feature_strides) == feature_strides[0] + self.feature_strides = feature_strides + + self.scale_heads = nn.ModuleList() + for i in range(len(feature_strides)): + head_length = max( + 1, + int(np.log2(feature_strides[i]) - np.log2(feature_strides[0]))) + scale_head = [] + for k in range(head_length): + scale_head.append( + ConvModule( + self.in_channels[i] if k == 0 else self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + if feature_strides[i] != feature_strides[0]: + scale_head.append( + Upsample( + scale_factor=2, + mode='bilinear', + align_corners=self.align_corners)) + self.scale_heads.append(nn.Sequential(*scale_head)) + + def forward(self, inputs): + + x = self._transform_inputs(inputs) + + output = self.scale_heads[0](x[0]) + for i in range(1, len(self.feature_strides)): + # non inplace + output = output + resize( + self.scale_heads[i](x[i]), + size=output.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + + output = self.cls_seg(output) + return output diff --git a/mmseg/models/decode_heads/gc_head.py b/mmseg/models/decode_heads/gc_head.py index 3368663750..14f0ef021c 100644 --- a/mmseg/models/decode_heads/gc_head.py +++ b/mmseg/models/decode_heads/gc_head.py @@ -1,11 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.cnn import ContextBlock -from ..builder import HEADS +from mmseg.registry import MODELS from .fcn_head import FCNHead -@HEADS.register_module() +@MODELS.register_module() class GCHead(FCNHead): """GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond. @@ -17,7 +18,7 @@ class GCHead(FCNHead): pooling_type (str): The pooling type of context aggregation. Options are 'att', 'avg'. Default: 'avg'. fusion_types (tuple[str]): The fusion type for feature fusion. - Options are 'channel_add', 'channel_mul'. Defautl: ('channel_add',) + Options are 'channel_add', 'channel_mul'. Default: ('channel_add',) """ def __init__(self, @@ -25,7 +26,7 @@ def __init__(self, pooling_type='att', fusion_types=('channel_add', ), **kwargs): - super(GCHead, self).__init__(num_convs=2, **kwargs) + super().__init__(num_convs=2, **kwargs) self.ratio = ratio self.pooling_type = pooling_type self.fusion_types = fusion_types diff --git a/mmseg/models/decode_heads/isa_head.py b/mmseg/models/decode_heads/isa_head.py new file mode 100644 index 0000000000..355f215f39 --- /dev/null +++ b/mmseg/models/decode_heads/isa_head.py @@ -0,0 +1,143 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn.functional as F +from mmcv.cnn import ConvModule + +from mmseg.registry import MODELS +from ..utils import SelfAttentionBlock as _SelfAttentionBlock +from .decode_head import BaseDecodeHead + + +class SelfAttentionBlock(_SelfAttentionBlock): + """Self-Attention Module. + + Args: + in_channels (int): Input channels of key/query feature. + channels (int): Output channels of key/query transform. + conv_cfg (dict | None): Config of conv layers. + norm_cfg (dict | None): Config of norm layers. + act_cfg (dict | None): Config of activation layers. 
+ """ + + def __init__(self, in_channels, channels, conv_cfg, norm_cfg, act_cfg): + super().__init__( + key_in_channels=in_channels, + query_in_channels=in_channels, + channels=channels, + out_channels=in_channels, + share_key_query=False, + query_downsample=None, + key_downsample=None, + key_query_num_convs=2, + key_query_norm=True, + value_out_num_convs=1, + value_out_norm=False, + matmul_norm=True, + with_out=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.output_project = self.build_project( + in_channels, + in_channels, + num_convs=1, + use_conv_module=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + """Forward function.""" + context = super().forward(x, x) + return self.output_project(context) + + +@MODELS.register_module() +class ISAHead(BaseDecodeHead): + """Interlaced Sparse Self-Attention for Semantic Segmentation. + + This head is the implementation of `ISA + `_. + + Args: + isa_channels (int): The channels of ISA Module. + down_factor (tuple[int]): The local group size of ISA. + """ + + def __init__(self, isa_channels, down_factor=(8, 8), **kwargs): + super().__init__(**kwargs) + self.down_factor = down_factor + + self.in_conv = ConvModule( + self.in_channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.global_relation = SelfAttentionBlock( + self.channels, + isa_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.local_relation = SelfAttentionBlock( + self.channels, + isa_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.out_conv = ConvModule( + self.channels * 2, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x_ = self._transform_inputs(inputs) + x = self.in_conv(x_) + residual = x + + n, c, h, w = x.size() + loc_h, loc_w = self.down_factor # size of local group in H- and W-axes + glb_h, glb_w = math.ceil(h / loc_h), math.ceil(w / loc_w) + pad_h, pad_w = glb_h * loc_h - h, glb_w * loc_w - w + if pad_h > 0 or pad_w > 0: # pad if the size is not divisible + padding = (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2) + x = F.pad(x, padding) + + # global relation + x = x.view(n, c, glb_h, loc_h, glb_w, loc_w) + # do permutation to gather global group + x = x.permute(0, 3, 5, 1, 2, 4) # (n, loc_h, loc_w, c, glb_h, glb_w) + x = x.reshape(-1, c, glb_h, glb_w) + # apply attention within each global group + x = self.global_relation(x) # (n * loc_h * loc_w, c, glb_h, glb_w) + + # local relation + x = x.view(n, loc_h, loc_w, c, glb_h, glb_w) + # do permutation to gather local group + x = x.permute(0, 4, 5, 3, 1, 2) # (n, glb_h, glb_w, c, loc_h, loc_w) + x = x.reshape(-1, c, loc_h, loc_w) + # apply attention within each local group + x = self.local_relation(x) # (n * glb_h * glb_w, c, loc_h, loc_w) + + # permute each pixel back to its original position + x = x.view(n, glb_h, glb_w, c, loc_h, loc_w) + x = x.permute(0, 3, 1, 4, 2, 5) # (n, c, glb_h, loc_h, glb_w, loc_w) + x = x.reshape(n, c, glb_h * loc_h, glb_w * loc_w) + if pad_h > 0 or pad_w > 0: # remove padding + x = x[:, :, pad_h // 2:pad_h // 2 + h, pad_w // 2:pad_w // 2 + w] + + x = self.out_conv(torch.cat([x, residual], dim=1)) + out = self.cls_seg(x) + + return out diff --git a/mmseg/models/decode_heads/knet_head.py b/mmseg/models/decode_heads/knet_head.py new file mode 100644 
index 0000000000..82d3a28076 --- /dev/null +++ b/mmseg/models/decode_heads/knet_head.py @@ -0,0 +1,461 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer +from mmcv.cnn.bricks.transformer import (FFN, MultiheadAttention, + build_transformer_layer) +from mmengine.logging import print_log +from torch import Tensor + +from mmseg.models.decode_heads.decode_head import BaseDecodeHead +from mmseg.registry import MODELS +from mmseg.utils import SampleList + + +@MODELS.register_module() +class KernelUpdator(nn.Module): + """Dynamic Kernel Updator in Kernel Update Head. + + Args: + in_channels (int): The number of channels of input feature map. + Default: 256. + feat_channels (int): The number of middle-stage channels in + the kernel updator. Default: 64. + out_channels (int): The number of output channels. + gate_sigmoid (bool): Whether use sigmoid function in gate + mechanism. Default: True. + gate_norm_act (bool): Whether add normalization and activation + layer in gate mechanism. Default: False. + activate_out: Whether add activation after gate mechanism. + Default: False. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='LN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + """ + + def __init__( + self, + in_channels=256, + feat_channels=64, + out_channels=None, + gate_sigmoid=True, + gate_norm_act=False, + activate_out=False, + norm_cfg=dict(type='LN'), + act_cfg=dict(type='ReLU', inplace=True), + ): + super().__init__() + self.in_channels = in_channels + self.feat_channels = feat_channels + self.out_channels_raw = out_channels + self.gate_sigmoid = gate_sigmoid + self.gate_norm_act = gate_norm_act + self.activate_out = activate_out + self.act_cfg = act_cfg + self.norm_cfg = norm_cfg + self.out_channels = out_channels if out_channels else in_channels + + self.num_params_in = self.feat_channels + self.num_params_out = self.feat_channels + self.dynamic_layer = nn.Linear( + self.in_channels, self.num_params_in + self.num_params_out) + self.input_layer = nn.Linear(self.in_channels, + self.num_params_in + self.num_params_out, + 1) + self.input_gate = nn.Linear(self.in_channels, self.feat_channels, 1) + self.update_gate = nn.Linear(self.in_channels, self.feat_channels, 1) + if self.gate_norm_act: + self.gate_norm = build_norm_layer(norm_cfg, self.feat_channels)[1] + + self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1] + self.norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1] + self.input_norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1] + self.input_norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1] + + self.activation = build_activation_layer(act_cfg) + + self.fc_layer = nn.Linear(self.feat_channels, self.out_channels, 1) + self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1] + + def forward(self, update_feature, input_feature): + """Forward function of KernelUpdator. + + Args: + update_feature (torch.Tensor): Feature map assembled from + each group. It would be reshaped with last dimension + shape: `self.in_channels`. + input_feature (torch.Tensor): Intermediate feature + with shape: (N, num_classes, conv_kernel_size**2, channels). 
+ Returns: + Tensor: The output tensor of shape (N*C1/C2, K*K, C2), where N is + the number of classes, C1 and C2 are the feature map channels of + KernelUpdateHead and KernelUpdator, respectively. + """ + + update_feature = update_feature.reshape(-1, self.in_channels) + num_proposals = update_feature.size(0) + # dynamic_layer works for + # phi_1 and psi_3 in Eq.(4) and (5) of K-Net paper + parameters = self.dynamic_layer(update_feature) + param_in = parameters[:, :self.num_params_in].view( + -1, self.feat_channels) + param_out = parameters[:, -self.num_params_out:].view( + -1, self.feat_channels) + + # input_layer works for + # phi_2 and psi_4 in Eq.(4) and (5) of K-Net paper + input_feats = self.input_layer( + input_feature.reshape(num_proposals, -1, self.feat_channels)) + input_in = input_feats[..., :self.num_params_in] + input_out = input_feats[..., -self.num_params_out:] + + # `gate_feats` is F^G in K-Net paper + gate_feats = input_in * param_in.unsqueeze(-2) + if self.gate_norm_act: + gate_feats = self.activation(self.gate_norm(gate_feats)) + + input_gate = self.input_norm_in(self.input_gate(gate_feats)) + update_gate = self.norm_in(self.update_gate(gate_feats)) + if self.gate_sigmoid: + input_gate = input_gate.sigmoid() + update_gate = update_gate.sigmoid() + param_out = self.norm_out(param_out) + input_out = self.input_norm_out(input_out) + + if self.activate_out: + param_out = self.activation(param_out) + input_out = self.activation(input_out) + + # Gate mechanism. Eq.(5) in original paper. + # param_out has shape (batch_size, feat_channels, out_channels) + features = update_gate * param_out.unsqueeze( + -2) + input_gate * input_out + + features = self.fc_layer(features) + features = self.fc_norm(features) + features = self.activation(features) + + return features + + +@MODELS.register_module() +class KernelUpdateHead(nn.Module): + """Kernel Update Head in K-Net. + + Args: + num_classes (int): Number of classes. Default: 150. + num_ffn_fcs (int): The number of fully-connected layers in + FFNs. Default: 2. + num_heads (int): The number of parallel attention heads. + Default: 8. + num_mask_fcs (int): The number of fully connected layers for + mask prediction. Default: 3. + feedforward_channels (int): The hidden dimension of FFNs. + Defaults: 2048. + in_channels (int): The number of channels of input feature map. + Default: 256. + out_channels (int): The number of output channels. + Default: 256. + dropout (float): The Probability of an element to be + zeroed in MultiheadAttention and FFN. Default 0.0. + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + ffn_act_cfg (dict): Config of activation layers in FFN. + Default: dict(type='ReLU'). + conv_kernel_size (int): The kernel size of convolution in + Kernel Update Head for dynamic kernel updation. + Default: 1. + feat_transform_cfg (dict | None): Config of feature transform. + Default: None. + kernel_init (bool): Whether initiate mask kernel in mask head. + Default: False. + with_ffn (bool): Whether add FFN in kernel update head. + Default: True. + feat_gather_stride (int): Stride of convolution in feature transform. + Default: 1. + mask_transform_stride (int): Stride of mask transform. + Default: 1. + kernel_updator_cfg (dict): Config of kernel updator. + Default: dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN')). 
+ """ + + def __init__(self, + num_classes=150, + num_ffn_fcs=2, + num_heads=8, + num_mask_fcs=3, + feedforward_channels=2048, + in_channels=256, + out_channels=256, + dropout=0.0, + act_cfg=dict(type='ReLU', inplace=True), + ffn_act_cfg=dict(type='ReLU', inplace=True), + conv_kernel_size=1, + feat_transform_cfg=None, + kernel_init=False, + with_ffn=True, + feat_gather_stride=1, + mask_transform_stride=1, + kernel_updator_cfg=dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'))): + super().__init__() + self.num_classes = num_classes + self.in_channels = in_channels + self.out_channels = out_channels + self.fp16_enabled = False + self.dropout = dropout + self.num_heads = num_heads + self.kernel_init = kernel_init + self.with_ffn = with_ffn + self.conv_kernel_size = conv_kernel_size + self.feat_gather_stride = feat_gather_stride + self.mask_transform_stride = mask_transform_stride + + self.attention = MultiheadAttention(in_channels * conv_kernel_size**2, + num_heads, dropout) + self.attention_norm = build_norm_layer( + dict(type='LN'), in_channels * conv_kernel_size**2)[1] + self.kernel_update_conv = build_transformer_layer(kernel_updator_cfg) + + if feat_transform_cfg is not None: + kernel_size = feat_transform_cfg.pop('kernel_size', 1) + transform_channels = in_channels + self.feat_transform = ConvModule( + transform_channels, + in_channels, + kernel_size, + stride=feat_gather_stride, + padding=int(feat_gather_stride // 2), + **feat_transform_cfg) + else: + self.feat_transform = None + + if self.with_ffn: + self.ffn = FFN( + in_channels, + feedforward_channels, + num_ffn_fcs, + act_cfg=ffn_act_cfg, + dropout=dropout) + self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1] + + self.mask_fcs = nn.ModuleList() + for _ in range(num_mask_fcs): + self.mask_fcs.append( + nn.Linear(in_channels, in_channels, bias=False)) + self.mask_fcs.append( + build_norm_layer(dict(type='LN'), in_channels)[1]) + self.mask_fcs.append(build_activation_layer(act_cfg)) + + self.fc_mask = nn.Linear(in_channels, out_channels) + + def init_weights(self): + """Use xavier initialization for all weight parameter and set + classification head bias as a specific value when use focal loss.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + else: + # adopt the default initialization for + # the weight and bias of the layer norm + pass + if self.kernel_init: + print_log( + 'mask kernel in mask head is normal initialized by std 0.01') + nn.init.normal_(self.fc_mask.weight, mean=0, std=0.01) + + def forward(self, x, proposal_feat, mask_preds, mask_shape=None): + """Forward function of Dynamic Instance Interactive Head. + + Args: + x (Tensor): Feature map from FPN with shape + (batch_size, feature_dimensions, H , W). + proposal_feat (Tensor): Intermediate feature get from + diihead in last stage, has shape + (batch_size, num_proposals, feature_dimensions) + mask_preds (Tensor): mask prediction from the former stage in shape + (batch_size, num_proposals, H, W). + + Returns: + Tuple: The first tensor is predicted mask with shape + (N, num_classes, H, W), the second tensor is dynamic kernel + with shape (N, num_classes, channels, K, K). 
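A quick shape sketch of the "group feature assembling" step the forward pass below starts with (Eq.(3) of the K-Net paper); the sizes are illustrative assumptions only, not values taken from this diff:

    import torch

    B, N, C, H, W = 2, 150, 256, 32, 32          # batch, kernels/classes, channels, spatial
    feats = torch.randn(B, C, H, W)              # feature map ``x`` after feat_transform
    mask_preds = torch.randn(B, N, H, W)         # previous-stage mask logits
    sigmoid_masks = mask_preds.softmax(dim=1)    # soft assignment of pixels to kernels
    x_feat = torch.einsum('bnhw,bchw->bnc', sigmoid_masks, feats)
    assert x_feat.shape == (B, N, C)             # one assembled feature per kernel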
+ """ + N, num_proposals = proposal_feat.shape[:2] + if self.feat_transform is not None: + x = self.feat_transform(x) + + C, H, W = x.shape[-3:] + + mask_h, mask_w = mask_preds.shape[-2:] + if mask_h != H or mask_w != W: + gather_mask = F.interpolate( + mask_preds, (H, W), align_corners=False, mode='bilinear') + else: + gather_mask = mask_preds + + sigmoid_masks = gather_mask.softmax(dim=1) + + # Group Feature Assembling. Eq.(3) in original paper. + # einsum is faster than bmm by 30% + x_feat = torch.einsum('bnhw,bchw->bnc', sigmoid_masks, x) + + # obj_feat in shape [B, N, C, K, K] -> [B, N, C, K*K] -> [B, N, K*K, C] + proposal_feat = proposal_feat.reshape(N, num_proposals, + self.in_channels, + -1).permute(0, 1, 3, 2) + obj_feat = self.kernel_update_conv(x_feat, proposal_feat) + + # [B, N, K*K, C] -> [B, N, K*K*C] -> [N, B, K*K*C] + obj_feat = obj_feat.reshape(N, num_proposals, -1).permute(1, 0, 2) + obj_feat = self.attention_norm(self.attention(obj_feat)) + # [N, B, K*K*C] -> [B, N, K*K*C] + obj_feat = obj_feat.permute(1, 0, 2) + + # obj_feat in shape [B, N, K*K*C] -> [B, N, K*K, C] + obj_feat = obj_feat.reshape(N, num_proposals, -1, self.in_channels) + + # FFN + if self.with_ffn: + obj_feat = self.ffn_norm(self.ffn(obj_feat)) + + mask_feat = obj_feat + + for reg_layer in self.mask_fcs: + mask_feat = reg_layer(mask_feat) + + # [B, N, K*K, C] -> [B, N, C, K*K] + mask_feat = self.fc_mask(mask_feat).permute(0, 1, 3, 2) + + if (self.mask_transform_stride == 2 and self.feat_gather_stride == 1): + mask_x = F.interpolate( + x, scale_factor=0.5, mode='bilinear', align_corners=False) + H, W = mask_x.shape[-2:] + else: + mask_x = x + # group conv is 5x faster than unfold and uses about 1/5 memory + # Group conv vs. unfold vs. concat batch, 2.9ms :13.5ms :3.8ms + # Group conv vs. unfold vs. concat batch, 278 : 1420 : 369 + # but in real training group conv is slower than concat batch + # so we keep using concat batch. + # fold_x = F.unfold( + # mask_x, + # self.conv_kernel_size, + # padding=int(self.conv_kernel_size // 2)) + # mask_feat = mask_feat.reshape(N, num_proposals, -1) + # new_mask_preds = torch.einsum('bnc,bcl->bnl', mask_feat, fold_x) + # [B, N, C, K*K] -> [B*N, C, K, K] + mask_feat = mask_feat.reshape(N, num_proposals, C, + self.conv_kernel_size, + self.conv_kernel_size) + # [B, C, H, W] -> [1, B*C, H, W] + new_mask_preds = [] + for i in range(N): + new_mask_preds.append( + F.conv2d( + mask_x[i:i + 1], + mask_feat[i], + padding=int(self.conv_kernel_size // 2))) + + new_mask_preds = torch.cat(new_mask_preds, dim=0) + new_mask_preds = new_mask_preds.reshape(N, num_proposals, H, W) + if self.mask_transform_stride == 2: + new_mask_preds = F.interpolate( + new_mask_preds, + scale_factor=2, + mode='bilinear', + align_corners=False) + + if mask_shape is not None and mask_shape[0] != H: + new_mask_preds = F.interpolate( + new_mask_preds, + mask_shape, + align_corners=False, + mode='bilinear') + + return new_mask_preds, obj_feat.permute(0, 1, 3, 2).reshape( + N, num_proposals, self.in_channels, self.conv_kernel_size, + self.conv_kernel_size) + + +@MODELS.register_module() +class IterativeDecodeHead(BaseDecodeHead): + """K-Net: Towards Unified Image Segmentation. + + This head is the implementation of + `K-Net: `_. + + Args: + num_stages (int): The number of stages (kernel update heads) + in IterativeDecodeHead. Default: 3. + kernel_generate_head:(dict): Config of kernel generate head which + generate mask predictions, dynamic kernels and class predictions + for next kernel update heads. 
+ kernel_update_head (dict): Config of kernel update head which refine + dynamic kernels and class predictions iteratively. + + """ + + def __init__(self, num_stages, kernel_generate_head, kernel_update_head, + **kwargs): + # ``IterativeDecodeHead`` would skip initialization of + # ``BaseDecodeHead`` which would be called when building + # ``self.kernel_generate_head``. + super(BaseDecodeHead, self).__init__(**kwargs) + assert num_stages == len(kernel_update_head) + self.num_stages = num_stages + self.kernel_generate_head = MODELS.build(kernel_generate_head) + self.kernel_update_head = nn.ModuleList() + self.align_corners = self.kernel_generate_head.align_corners + self.num_classes = self.kernel_generate_head.num_classes + self.input_transform = self.kernel_generate_head.input_transform + self.ignore_index = self.kernel_generate_head.ignore_index + self.out_channels = self.num_classes + + for head_cfg in kernel_update_head: + self.kernel_update_head.append(MODELS.build(head_cfg)) + + def forward(self, inputs): + """Forward function.""" + feats = self.kernel_generate_head._forward_feature(inputs) + sem_seg = self.kernel_generate_head.cls_seg(feats) + seg_kernels = self.kernel_generate_head.conv_seg.weight.clone() + seg_kernels = seg_kernels[None].expand( + feats.size(0), *seg_kernels.size()) + + stage_segs = [sem_seg] + for i in range(self.num_stages): + sem_seg, seg_kernels = self.kernel_update_head[i](feats, + seg_kernels, + sem_seg) + stage_segs.append(sem_seg) + if self.training: + return stage_segs + # only return the prediction of the last stage during testing + return stage_segs[-1] + + def loss_by_feat(self, seg_logits: List[Tensor], + batch_data_samples: SampleList, **kwargs) -> dict: + losses = dict() + for i, logit in enumerate(seg_logits): + loss = self.kernel_generate_head.loss_by_feat( + logit, batch_data_samples) + for k, v in loss.items(): + losses[f'{k}.s{i}'] = v + + return losses diff --git a/mmseg/models/decode_heads/lraspp_head.py b/mmseg/models/decode_heads/lraspp_head.py new file mode 100644 index 0000000000..ba2465f275 --- /dev/null +++ b/mmseg/models/decode_heads/lraspp_head.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.utils import is_tuple_of + +from mmseg.registry import MODELS +from ..utils import resize +from .decode_head import BaseDecodeHead + + +@MODELS.register_module() +class LRASPPHead(BaseDecodeHead): + """Lite R-ASPP (LRASPP) head is proposed in Searching for MobileNetV3. + + This head is the improved implementation of `Searching for MobileNetV3 + `_. + + Args: + branch_channels (tuple[int]): The number of output channels in every + each branch. Default: (32, 64). + """ + + def __init__(self, branch_channels=(32, 64), **kwargs): + super().__init__(**kwargs) + if self.input_transform != 'multiple_select': + raise ValueError('in Lite R-ASPP (LRASPP) head, input_transform ' + f'must be \'multiple_select\'. 
But received ' + f'\'{self.input_transform}\'') + assert is_tuple_of(branch_channels, int) + assert len(branch_channels) == len(self.in_channels) - 1 + self.branch_channels = branch_channels + + self.convs = nn.Sequential() + self.conv_ups = nn.Sequential() + for i in range(len(branch_channels)): + self.convs.add_module( + f'conv{i}', + nn.Conv2d( + self.in_channels[i], branch_channels[i], 1, bias=False)) + self.conv_ups.add_module( + f'conv_up{i}', + ConvModule( + self.channels + branch_channels[i], + self.channels, + 1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=False)) + + self.conv_up_input = nn.Conv2d(self.channels, self.channels, 1) + + self.aspp_conv = ConvModule( + self.in_channels[-1], + self.channels, + 1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=False) + self.image_pool = nn.Sequential( + nn.AvgPool2d(kernel_size=49, stride=(16, 20)), + ConvModule( + self.in_channels[2], + self.channels, + 1, + act_cfg=dict(type='Sigmoid'), + bias=False)) + + def forward(self, inputs): + """Forward function.""" + inputs = self._transform_inputs(inputs) + + x = inputs[-1] + + x = self.aspp_conv(x) * resize( + self.image_pool(x), + size=x.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + x = self.conv_up_input(x) + + for i in range(len(self.branch_channels) - 1, -1, -1): + x = resize( + x, + size=inputs[i].size()[2:], + mode='bilinear', + align_corners=self.align_corners) + x = torch.cat([x, self.convs[i](inputs[i])], 1) + x = self.conv_ups[i](x) + + return self.cls_seg(x) diff --git a/mmseg/models/decode_heads/mask2former_head.py b/mmseg/models/decode_heads/mask2former_head.py new file mode 100644 index 0000000000..0ea7424307 --- /dev/null +++ b/mmseg/models/decode_heads/mask2former_head.py @@ -0,0 +1,162 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +try: + from mmdet.models.dense_heads import \ + Mask2FormerHead as MMDET_Mask2FormerHead +except ModuleNotFoundError: + MMDET_Mask2FormerHead = None + +from mmengine.structures import InstanceData +from torch import Tensor + +from mmseg.registry import MODELS +from mmseg.structures.seg_data_sample import SegDataSample +from mmseg.utils import ConfigType, SampleList + + +@MODELS.register_module() +class Mask2FormerHead(MMDET_Mask2FormerHead): + """Implements the Mask2Former head. + + See `Mask2Former: Masked-attention Mask Transformer for Universal Image + Segmentation `_ for details. + + Args: + num_classes (int): Number of classes. Default: 150. + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + ignore_index (int): The label index to be ignored. Default: 255. + """ + + def __init__(self, + num_classes, + align_corners=False, + ignore_index=255, + **kwargs): + super().__init__(**kwargs) + + self.num_classes = num_classes + self.align_corners = align_corners + self.out_channels = num_classes + self.ignore_index = ignore_index + + feat_channels = kwargs['feat_channels'] + self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) + + def _seg_data_to_instance_data(self, batch_data_samples: SampleList): + """Perform forward propagation to convert paradigm from MMSegmentation + to MMDetection to ensure ``MMDET_Mask2FormerHead`` could be called + normally. Specifically, ``batch_gt_instances`` would be added. + + Args: + batch_data_samples (List[:obj:`SegDataSample`]): The Data + Samples. It usually includes information such as + `gt_sem_seg`. 
+ + Returns: + tuple[Tensor]: A tuple contains two lists. + + - batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``labels``, each is + unique ground truth label id of images, with + shape (num_gt, ) and ``masks``, each is ground truth + masks of each instances of a image, shape (num_gt, h, w). + - batch_img_metas (list[dict]): List of image meta information. + """ + batch_img_metas = [] + batch_gt_instances = [] + + for data_sample in batch_data_samples: + batch_img_metas.append(data_sample.metainfo) + gt_sem_seg = data_sample.gt_sem_seg.data + classes = torch.unique( + gt_sem_seg, + sorted=False, + return_inverse=False, + return_counts=False) + + # remove ignored region + gt_labels = classes[classes != self.ignore_index] + + masks = [] + for class_id in gt_labels: + masks.append(gt_sem_seg == class_id) + + if len(masks) == 0: + gt_masks = torch.zeros( + (0, gt_sem_seg.shape[-2], + gt_sem_seg.shape[-1])).to(gt_sem_seg).long() + else: + gt_masks = torch.stack(masks).squeeze(1).long() + + instance_data = InstanceData(labels=gt_labels, masks=gt_masks) + batch_gt_instances.append(instance_data) + return batch_gt_instances, batch_img_metas + + def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList, + train_cfg: ConfigType) -> dict: + """Perform forward propagation and loss calculation of the decoder head + on the features of the upstream network. + + Args: + x (tuple[Tensor]): Multi-level features from the upstream + network, each is a 4D-tensor. + batch_data_samples (List[:obj:`SegDataSample`]): The Data + Samples. It usually includes information such as + `gt_sem_seg`. + train_cfg (ConfigType): Training config. + + Returns: + dict[str, Tensor]: a dictionary of loss components. + """ + # batch SegDataSample to InstanceDataSample + batch_gt_instances, batch_img_metas = self._seg_data_to_instance_data( + batch_data_samples) + + # forward + all_cls_scores, all_mask_preds = self(x, batch_data_samples) + + # loss + losses = self.loss_by_feat(all_cls_scores, all_mask_preds, + batch_gt_instances, batch_img_metas) + + return losses + + def predict(self, x: Tuple[Tensor], batch_img_metas: List[dict], + test_cfg: ConfigType) -> Tuple[Tensor]: + """Test without augmentaton. + + Args: + x (tuple[Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + batch_img_metas (List[:obj:`SegDataSample`]): The Data + Samples. It usually includes information such as + `gt_sem_seg`. + test_cfg (ConfigType): Test config. + + Returns: + Tensor: A tensor of segmentation mask. + """ + batch_data_samples = [ + SegDataSample(metainfo=metainfo) for metainfo in batch_img_metas + ] + + all_cls_scores, all_mask_preds = self(x, batch_data_samples) + mask_cls_results = all_cls_scores[-1] + mask_pred_results = all_mask_preds[-1] + if 'pad_shape' in batch_img_metas[0]: + size = batch_img_metas[0]['pad_shape'] + else: + size = batch_img_metas[0]['img_shape'] + # upsample mask + mask_pred_results = F.interpolate( + mask_pred_results, size=size, mode='bilinear', align_corners=False) + cls_score = F.softmax(mask_cls_results, dim=-1)[..., :-1] + mask_pred = mask_pred_results.sigmoid() + seg_logits = torch.einsum('bqc, bqhw->bchw', cls_score, mask_pred) + return seg_logits diff --git a/mmseg/models/decode_heads/maskformer_head.py b/mmseg/models/decode_heads/maskformer_head.py new file mode 100644 index 0000000000..98ca92b996 --- /dev/null +++ b/mmseg/models/decode_heads/maskformer_head.py @@ -0,0 +1,173 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
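The semantic-inference step that closes ``Mask2FormerHead.predict`` above (class softmax, mask sigmoid, then an einsum) is reused almost verbatim by ``MaskFormerHead`` below; a toy-shape sketch, with sizes that are assumptions for illustration only:

    import torch
    import torch.nn.functional as F

    B, Q, K, H, W = 2, 100, 150, 128, 128              # batch, queries, classes, spatial
    mask_cls_results = torch.randn(B, Q, K + 1)        # per-query class logits (+1 "no object" bin)
    mask_pred_results = torch.randn(B, Q, H, W)        # per-query mask logits

    cls_score = F.softmax(mask_cls_results, dim=-1)[..., :-1]   # drop the "no object" bin
    mask_pred = mask_pred_results.sigmoid()
    seg_logits = torch.einsum('bqc,bqhw->bchw', cls_score, mask_pred)
    assert seg_logits.shape == (B, K, H, W)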
+from typing import List, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +try: + from mmdet.models.dense_heads import MaskFormerHead as MMDET_MaskFormerHead +except ModuleNotFoundError: + MMDET_MaskFormerHead = None + +from mmengine.structures import InstanceData +from torch import Tensor + +from mmseg.registry import MODELS +from mmseg.structures.seg_data_sample import SegDataSample +from mmseg.utils import ConfigType, SampleList + + +@MODELS.register_module() +class MaskFormerHead(MMDET_MaskFormerHead): + """Implements the MaskFormer head. + + See `Per-Pixel Classification is Not All You Need for Semantic Segmentation + `_ for details. + + Args: + num_classes (int): Number of classes. Default: 150. + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + ignore_index (int): The label index to be ignored. Default: 255. + """ + + def __init__(self, + num_classes: int = 150, + align_corners: bool = False, + ignore_index: int = 255, + **kwargs) -> None: + super().__init__(**kwargs) + + self.out_channels = kwargs['out_channels'] + self.align_corners = True + self.num_classes = num_classes + self.align_corners = align_corners + self.out_channels = num_classes + self.ignore_index = ignore_index + + feat_channels = kwargs['feat_channels'] + self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) + + def _seg_data_to_instance_data(self, batch_data_samples: SampleList): + """Perform forward propagation to convert paradigm from MMSegmentation + to MMDetection to ensure ``MMDET_MaskFormerHead`` could be called + normally. Specifically, ``batch_gt_instances`` would be added. + + Args: + batch_data_samples (List[:obj:`SegDataSample`]): The Data + Samples. It usually includes information such as + `gt_sem_seg`. + + Returns: + tuple[Tensor]: A tuple contains two lists. + + - batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``labels``, each is + unique ground truth label id of images, with + shape (num_gt, ) and ``masks``, each is ground truth + masks of each instances of a image, shape (num_gt, h, w). + - batch_img_metas (list[dict]): List of image meta information. + """ + batch_img_metas = [] + batch_gt_instances = [] + for data_sample in batch_data_samples: + # Add `batch_input_shape` in metainfo of data_sample, which would + # be used in MaskFormerHead of MMDetection. + metainfo = data_sample.metainfo + metainfo['batch_input_shape'] = metainfo['img_shape'] + data_sample.set_metainfo(metainfo) + batch_img_metas.append(data_sample.metainfo) + gt_sem_seg = data_sample.gt_sem_seg.data + classes = torch.unique( + gt_sem_seg, + sorted=False, + return_inverse=False, + return_counts=False) + + # remove ignored region + gt_labels = classes[classes != self.ignore_index] + + masks = [] + for class_id in gt_labels: + masks.append(gt_sem_seg == class_id) + + if len(masks) == 0: + gt_masks = torch.zeros((0, gt_sem_seg.shape[-2], + gt_sem_seg.shape[-1])).to(gt_sem_seg) + else: + gt_masks = torch.stack(masks).squeeze(1) + + instance_data = InstanceData( + labels=gt_labels, masks=gt_masks.long()) + batch_gt_instances.append(instance_data) + return batch_gt_instances, batch_img_metas + + def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList, + train_cfg: ConfigType) -> dict: + """Perform forward propagation and loss calculation of the decoder head + on the features of the upstream network. + + Args: + x (tuple[Tensor]): Multi-level features from the upstream + network, each is a 4D-tensor. 
+ batch_data_samples (List[:obj:`SegDataSample`]): The Data + Samples. It usually includes information such as + `gt_sem_seg`. + train_cfg (ConfigType): Training config. + + Returns: + dict[str, Tensor]: a dictionary of loss components. + """ + # batch SegDataSample to InstanceDataSample + batch_gt_instances, batch_img_metas = self._seg_data_to_instance_data( + batch_data_samples) + + # forward + all_cls_scores, all_mask_preds = self(x, batch_data_samples) + + # loss + losses = self.loss_by_feat(all_cls_scores, all_mask_preds, + batch_gt_instances, batch_img_metas) + + return losses + + def predict(self, x: Tuple[Tensor], batch_img_metas: List[dict], + test_cfg: ConfigType) -> Tuple[Tensor]: + """Test without augmentaton. + + Args: + x (tuple[Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + batch_img_metas (List[:obj:`SegDataSample`]): The Data + Samples. It usually includes information such as + `gt_sem_seg`. + test_cfg (ConfigType): Test config. + + Returns: + Tensor: A tensor of segmentation mask. + """ + + batch_data_samples = [] + for metainfo in batch_img_metas: + metainfo['batch_input_shape'] = metainfo['img_shape'] + batch_data_samples.append(SegDataSample(metainfo=metainfo)) + # Forward function of MaskFormerHead from MMDetection needs + # 'batch_data_samples' as inputs, which is image shape actually. + all_cls_scores, all_mask_preds = self(x, batch_data_samples) + mask_cls_results = all_cls_scores[-1] + mask_pred_results = all_mask_preds[-1] + + # upsample masks + img_shape = batch_img_metas[0]['batch_input_shape'] + mask_pred_results = F.interpolate( + mask_pred_results, + size=img_shape, + mode='bilinear', + align_corners=False) + + # semantic inference + cls_score = F.softmax(mask_cls_results, dim=-1)[..., :-1] + mask_pred = mask_pred_results.sigmoid() + seg_logits = torch.einsum('bqc,bqhw->bchw', cls_score, mask_pred) + return seg_logits diff --git a/mmseg/models/decode_heads/nl_head.py b/mmseg/models/decode_heads/nl_head.py index 31658755a6..0ffcc2a2f0 100644 --- a/mmseg/models/decode_heads/nl_head.py +++ b/mmseg/models/decode_heads/nl_head.py @@ -1,11 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.cnn import NonLocal2d -from ..builder import HEADS +from mmseg.registry import MODELS from .fcn_head import FCNHead -@HEADS.register_module() +@MODELS.register_module() class NLHead(FCNHead): """Non-local Neural Networks. @@ -25,7 +26,7 @@ def __init__(self, use_scale=True, mode='embedded_gaussian', **kwargs): - super(NLHead, self).__init__(num_convs=2, **kwargs) + super().__init__(num_convs=2, **kwargs) self.reduction = reduction self.use_scale = use_scale self.mode = mode diff --git a/mmseg/models/decode_heads/ocr_head.py b/mmseg/models/decode_heads/ocr_head.py index e180e10276..9afe37bebd 100644 --- a/mmseg/models/decode_heads/ocr_head.py +++ b/mmseg/models/decode_heads/ocr_head.py @@ -1,11 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
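Both mask-transformer heads above turn a semantic label map into instance-style targets via ``torch.unique`` plus per-class binary masks; a minimal sketch of that conversion, using hypothetical toy labels and ignore index 255:

    import torch

    gt_sem_seg = torch.randint(0, 4, (1, 16, 16))       # (1, H, W) label map
    gt_sem_seg[:, :2, :] = 255                           # some ignored pixels
    classes = torch.unique(gt_sem_seg, sorted=False)
    gt_labels = classes[classes != 255]                  # drop the ignore index
    gt_masks = torch.stack([gt_sem_seg == c for c in gt_labels]).squeeze(1).long()
    # gt_masks: (num_gt, H, W) binary masks, gt_labels: (num_gt,)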
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule -from mmseg.ops import resize -from ..builder import HEADS +from mmseg.registry import MODELS from ..utils import SelfAttentionBlock as _SelfAttentionBlock +from ..utils import resize from .cascade_decode_head import BaseCascadeDecodeHead @@ -17,7 +18,7 @@ class SpatialGatherModule(nn.Module): """ def __init__(self, scale): - super(SpatialGatherModule, self).__init__() + super().__init__() self.scale = scale def forward(self, feats, probs): @@ -45,7 +46,7 @@ def __init__(self, in_channels, channels, scale, conv_cfg, norm_cfg, query_downsample = nn.MaxPool2d(kernel_size=scale) else: query_downsample = None - super(ObjectAttentionBlock, self).__init__( + super().__init__( key_in_channels=in_channels, query_in_channels=in_channels, channels=channels, @@ -72,8 +73,7 @@ def __init__(self, in_channels, channels, scale, conv_cfg, norm_cfg, def forward(self, query_feats, key_feats): """Forward function.""" - context = super(ObjectAttentionBlock, - self).forward(query_feats, key_feats) + context = super().forward(query_feats, key_feats) output = self.bottleneck(torch.cat([context, query_feats], dim=1)) if self.query_downsample is not None: output = resize(query_feats) @@ -81,7 +81,7 @@ def forward(self, query_feats, key_feats): return output -@HEADS.register_module() +@MODELS.register_module() class OCRHead(BaseCascadeDecodeHead): """Object-Contextual Representations for Semantic Segmentation. @@ -95,7 +95,7 @@ class OCRHead(BaseCascadeDecodeHead): """ def __init__(self, ocr_channels, scale=1, **kwargs): - super(OCRHead, self).__init__(**kwargs) + super().__init__(**kwargs) self.ocr_channels = ocr_channels self.scale = scale self.object_context_block = ObjectAttentionBlock( diff --git a/mmseg/models/decode_heads/point_head.py b/mmseg/models/decode_heads/point_head.py new file mode 100644 index 0000000000..e8e433d662 --- /dev/null +++ b/mmseg/models/decode_heads/point_head.py @@ -0,0 +1,367 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule + +try: + from mmcv.ops import point_sample +except ModuleNotFoundError: + point_sample = None + +from typing import List + +from mmseg.registry import MODELS +from mmseg.utils import SampleList +from ..losses import accuracy +from ..utils import resize +from .cascade_decode_head import BaseCascadeDecodeHead + + +def calculate_uncertainty(seg_logits): + """Estimate uncertainty based on seg logits. + + For each location of the prediction ``seg_logits`` we estimate + uncertainty as the difference between top first and top second + predicted logits. + + Args: + seg_logits (Tensor): Semantic segmentation logits, + shape (batch_size, num_classes, height, width). + + Returns: + scores (Tensor): T uncertainty scores with the most uncertain + locations having the highest uncertainty score, shape ( + batch_size, 1, height, width) + """ + top2_scores = torch.topk(seg_logits, k=2, dim=1)[0] + return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1) + + +@MODELS.register_module() +class PointHead(BaseCascadeDecodeHead): + """A mask point head use in PointRend. + + This head is implemented of `PointRend: Image Segmentation as + Rendering `_. + ``PointHead`` use shared multi-layer perceptron (equivalent to + nn.Conv1d) to predict the logit of input points. 
The fine-grained feature + and coarse feature will be concatenate together for predication. + + Args: + num_fcs (int): Number of fc layers in the head. Default: 3. + in_channels (int): Number of input channels. Default: 256. + fc_channels (int): Number of fc channels. Default: 256. + num_classes (int): Number of classes for logits. Default: 80. + class_agnostic (bool): Whether use class agnostic classification. + If so, the output channels of logits will be 1. Default: False. + coarse_pred_each_layer (bool): Whether concatenate coarse feature with + the output of each fc layer. Default: True. + conv_cfg (dict|None): Dictionary to construct and config conv layer. + Default: dict(type='Conv1d')) + norm_cfg (dict|None): Dictionary to construct and config norm layer. + Default: None. + loss_point (dict): Dictionary to construct and config loss layer of + point head. Default: dict(type='CrossEntropyLoss', use_mask=True, + loss_weight=1.0). + """ + + def __init__(self, + num_fcs=3, + coarse_pred_each_layer=True, + conv_cfg=dict(type='Conv1d'), + norm_cfg=None, + act_cfg=dict(type='ReLU', inplace=False), + **kwargs): + super().__init__( + input_transform='multiple_select', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=dict( + type='Normal', std=0.01, override=dict(name='fc_seg')), + **kwargs) + if point_sample is None: + raise RuntimeError('Please install mmcv-full for ' + 'point_sample ops') + + self.num_fcs = num_fcs + self.coarse_pred_each_layer = coarse_pred_each_layer + + fc_in_channels = sum(self.in_channels) + self.num_classes + fc_channels = self.channels + self.fcs = nn.ModuleList() + for k in range(num_fcs): + fc = ConvModule( + fc_in_channels, + fc_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.fcs.append(fc) + fc_in_channels = fc_channels + fc_in_channels += self.num_classes if self.coarse_pred_each_layer \ + else 0 + self.fc_seg = nn.Conv1d( + fc_in_channels, + self.num_classes, + kernel_size=1, + stride=1, + padding=0) + if self.dropout_ratio > 0: + self.dropout = nn.Dropout(self.dropout_ratio) + delattr(self, 'conv_seg') + + def cls_seg(self, feat): + """Classify each pixel with fc.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.fc_seg(feat) + return output + + def forward(self, fine_grained_point_feats, coarse_point_feats): + x = torch.cat([fine_grained_point_feats, coarse_point_feats], dim=1) + for fc in self.fcs: + x = fc(x) + if self.coarse_pred_each_layer: + x = torch.cat((x, coarse_point_feats), dim=1) + return self.cls_seg(x) + + def _get_fine_grained_point_feats(self, x, points): + """Sample from fine grained features. + + Args: + x (list[Tensor]): Feature pyramid from by neck or backbone. + points (Tensor): Point coordinates, shape (batch_size, + num_points, 2). + + Returns: + fine_grained_feats (Tensor): Sampled fine grained feature, + shape (batch_size, sum(channels of x), num_points). + """ + + fine_grained_feats_list = [ + point_sample(_, points, align_corners=self.align_corners) + for _ in x + ] + if len(fine_grained_feats_list) > 1: + fine_grained_feats = torch.cat(fine_grained_feats_list, dim=1) + else: + fine_grained_feats = fine_grained_feats_list[0] + + return fine_grained_feats + + def _get_coarse_point_feats(self, prev_output, points): + """Sample from fine grained features. + + Args: + prev_output (list[Tensor]): Prediction of previous decode head. + points (Tensor): Point coordinates, shape (batch_size, + num_points, 2). 
+ + Returns: + coarse_feats (Tensor): Sampled coarse feature, shape (batch_size, + num_classes, num_points). + """ + + coarse_feats = point_sample( + prev_output, points, align_corners=self.align_corners) + + return coarse_feats + + def loss(self, inputs, prev_output, batch_data_samples: SampleList, + train_cfg, **kwargs): + """Forward function for training. + Args: + inputs (list[Tensor]): List of multi-level img features. + prev_output (Tensor): The output of previous decode head. + batch_data_samples (list[:obj:`SegDataSample`]): The seg + data samples. It usually includes information such + as `img_metas` or `gt_semantic_seg`. + train_cfg (dict): The training config. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + x = self._transform_inputs(inputs) + with torch.no_grad(): + points = self.get_points_train( + prev_output, calculate_uncertainty, cfg=train_cfg) + fine_grained_point_feats = self._get_fine_grained_point_feats( + x, points) + coarse_point_feats = self._get_coarse_point_feats(prev_output, points) + point_logits = self.forward(fine_grained_point_feats, + coarse_point_feats) + + losses = self.loss_by_feat(point_logits, points, batch_data_samples) + + return losses + + def predict(self, inputs, prev_output, batch_img_metas: List[dict], + test_cfg, **kwargs): + """Forward function for testing. + + Args: + inputs (list[Tensor]): List of multi-level img features. + prev_output (Tensor): The output of previous decode head. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + test_cfg (dict): The testing config. + + Returns: + Tensor: Output segmentation map. 
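In rough strokes, each subdivision step below upsamples the coarse logits, re-predicts the most uncertain points, and scatters the new logits back into the map; a toy-size sketch of that scatter (shapes are assumptions for illustration):

    import torch

    B, C, H, W = 1, 19, 64, 64
    refined = torch.randn(B, C, H, W)                          # upsampled coarse logits
    num_points = 32
    point_indices = torch.randint(0, H * W, (B, num_points))   # most uncertain locations
    point_logits = torch.randn(B, C, num_points)               # freshly predicted logits there

    point_indices = point_indices.unsqueeze(1).expand(-1, C, -1)
    refined = refined.reshape(B, C, H * W)
    refined = refined.scatter_(2, point_indices, point_logits)
    refined = refined.view(B, C, H, W)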
+ """ + + x = self._transform_inputs(inputs) + refined_seg_logits = prev_output.clone() + for _ in range(test_cfg.subdivision_steps): + refined_seg_logits = resize( + refined_seg_logits, + scale_factor=test_cfg.scale_factor, + mode='bilinear', + align_corners=self.align_corners) + batch_size, channels, height, width = refined_seg_logits.shape + point_indices, points = self.get_points_test( + refined_seg_logits, calculate_uncertainty, cfg=test_cfg) + fine_grained_point_feats = self._get_fine_grained_point_feats( + x, points) + coarse_point_feats = self._get_coarse_point_feats( + prev_output, points) + point_logits = self.forward(fine_grained_point_feats, + coarse_point_feats) + + point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) + refined_seg_logits = refined_seg_logits.reshape( + batch_size, channels, height * width) + refined_seg_logits = refined_seg_logits.scatter_( + 2, point_indices, point_logits) + refined_seg_logits = refined_seg_logits.view( + batch_size, channels, height, width) + + return self.predict_by_feat(refined_seg_logits, batch_img_metas, + **kwargs) + + def loss_by_feat(self, point_logits, points, batch_data_samples, **kwargs): + """Compute segmentation loss.""" + gt_semantic_seg = self._stack_batch_gt(batch_data_samples) + point_label = point_sample( + gt_semantic_seg.float(), + points, + mode='nearest', + align_corners=self.align_corners) + point_label = point_label.squeeze(1).long() + + loss = dict() + if not isinstance(self.loss_decode, nn.ModuleList): + losses_decode = [self.loss_decode] + else: + losses_decode = self.loss_decode + for loss_module in losses_decode: + loss['point' + loss_module.loss_name] = loss_module( + point_logits, point_label, ignore_index=self.ignore_index) + + loss['acc_point'] = accuracy( + point_logits, point_label, ignore_index=self.ignore_index) + return loss + + def get_points_train(self, seg_logits, uncertainty_func, cfg): + """Sample points for training. + + Sample points in [0, 1] x [0, 1] coordinate space based on their + uncertainty. The uncertainties are calculated for each point using + 'uncertainty_func' function that takes point's logit prediction as + input. + + Args: + seg_logits (Tensor): Semantic segmentation logits, shape ( + batch_size, num_classes, height, width). + uncertainty_func (func): uncertainty calculation function. + cfg (dict): Training config of point head. + + Returns: + point_coords (Tensor): A tensor of shape (batch_size, num_points, + 2) that contains the coordinates of ``num_points`` sampled + points. + """ + num_points = cfg.num_points + oversample_ratio = cfg.oversample_ratio + importance_sample_ratio = cfg.importance_sample_ratio + assert oversample_ratio >= 1 + assert 0 <= importance_sample_ratio <= 1 + batch_size = seg_logits.shape[0] + num_sampled = int(num_points * oversample_ratio) + point_coords = torch.rand( + batch_size, num_sampled, 2, device=seg_logits.device) + point_logits = point_sample(seg_logits, point_coords) + # It is crucial to calculate uncertainty based on the sampled + # prediction value for the points. Calculating uncertainties of the + # coarse predictions first and sampling them for points leads to + # incorrect results. To illustrate this: assume uncertainty func( + # logits)=-abs(logits), a sampled point between two coarse + # predictions with -1 and 1 logits has 0 logits, and therefore 0 + # uncertainty value. However, if we calculate uncertainties for the + # coarse predictions first, both will have -1 uncertainty, + # and sampled point will get -1 uncertainty. 
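        # For a concrete feel of the sampling budget (numbers are illustrative,
        # not from any config in this diff): with num_points=2048,
        # oversample_ratio=3 and importance_sample_ratio=0.75 we first draw
        # 2048 * 3 = 6144 random points, keep the 0.75 * 2048 = 1536 most
        # uncertain of them, and fill the remaining 2048 - 1536 = 512 slots
        # with freshly drawn random points.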
+ point_uncertainties = uncertainty_func(point_logits) + num_uncertain_points = int(importance_sample_ratio * num_points) + num_random_points = num_points - num_uncertain_points + idx = torch.topk( + point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] + shift = num_sampled * torch.arange( + batch_size, dtype=torch.long, device=seg_logits.device) + idx += shift[:, None] + point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( + batch_size, num_uncertain_points, 2) + if num_random_points > 0: + rand_point_coords = torch.rand( + batch_size, num_random_points, 2, device=seg_logits.device) + point_coords = torch.cat((point_coords, rand_point_coords), dim=1) + return point_coords + + def get_points_test(self, seg_logits, uncertainty_func, cfg): + """Sample points for testing. + + Find ``num_points`` most uncertain points from ``uncertainty_map``. + + Args: + seg_logits (Tensor): A tensor of shape (batch_size, num_classes, + height, width) for class-specific or class-agnostic prediction. + uncertainty_func (func): uncertainty calculation function. + cfg (dict): Testing config of point head. + + Returns: + point_indices (Tensor): A tensor of shape (batch_size, num_points) + that contains indices from [0, height x width) of the most + uncertain points. + point_coords (Tensor): A tensor of shape (batch_size, num_points, + 2) that contains [0, 1] x [0, 1] normalized coordinates of the + most uncertain points from the ``height x width`` grid . + """ + + num_points = cfg.subdivision_num_points + uncertainty_map = uncertainty_func(seg_logits) + batch_size, _, height, width = uncertainty_map.shape + h_step = 1.0 / height + w_step = 1.0 / width + + uncertainty_map = uncertainty_map.view(batch_size, height * width) + num_points = min(height * width, num_points) + point_indices = uncertainty_map.topk(num_points, dim=1)[1] + point_coords = torch.zeros( + batch_size, + num_points, + 2, + dtype=torch.float, + device=seg_logits.device) + point_coords[:, :, 0] = w_step / 2.0 + (point_indices % + width).float() * w_step + point_coords[:, :, 1] = h_step / 2.0 + (point_indices // + width).float() * h_step + return point_indices, point_coords diff --git a/mmseg/models/decode_heads/psa_head.py b/mmseg/models/decode_heads/psa_head.py index 8d915e57f4..13ee5c58a5 100644 --- a/mmseg/models/decode_heads/psa_head.py +++ b/mmseg/models/decode_heads/psa_head.py @@ -1,10 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule -from mmseg.ops import resize -from ..builder import HEADS +from mmseg.registry import MODELS +from ..utils import resize from .decode_head import BaseDecodeHead try: @@ -13,7 +14,7 @@ PSAMask = None -@HEADS.register_module() +@MODELS.register_module() class PSAHead(BaseDecodeHead): """Point-wise Spatial Attention Network for Scene Parsing. @@ -42,7 +43,7 @@ def __init__(self, **kwargs): if PSAMask is None: raise RuntimeError('Please install mmcv-full for PSAMask ops') - super(PSAHead, self).__init__(**kwargs) + super().__init__(**kwargs) assert psa_type in ['collect', 'distribute', 'bi-direction'] self.psa_type = psa_type self.compact = compact diff --git a/mmseg/models/decode_heads/psp_head.py b/mmseg/models/decode_heads/psp_head.py index bdbe2c8ac8..a40ec41dec 100644 --- a/mmseg/models/decode_heads/psp_head.py +++ b/mmseg/models/decode_heads/psp_head.py @@ -1,9 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
import torch import torch.nn as nn from mmcv.cnn import ConvModule -from mmseg.ops import resize -from ..builder import HEADS +from mmseg.registry import MODELS +from ..utils import resize from .decode_head import BaseDecodeHead @@ -22,8 +23,8 @@ class PPM(nn.ModuleList): """ def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg, - act_cfg, align_corners): - super(PPM, self).__init__() + act_cfg, align_corners, **kwargs): + super().__init__() self.pool_scales = pool_scales self.align_corners = align_corners self.in_channels = in_channels @@ -41,7 +42,8 @@ def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg))) + act_cfg=self.act_cfg, + **kwargs))) def forward(self, x): """Forward function.""" @@ -57,7 +59,7 @@ def forward(self, x): return ppm_outs -@HEADS.register_module() +@MODELS.register_module() class PSPHead(BaseDecodeHead): """Pyramid Scene Parsing Network. @@ -70,7 +72,7 @@ class PSPHead(BaseDecodeHead): """ def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): - super(PSPHead, self).__init__(**kwargs) + super().__init__(**kwargs) assert isinstance(pool_scales, (list, tuple)) self.pool_scales = pool_scales self.psp_modules = PPM( @@ -90,12 +92,26 @@ def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) - def forward(self, inputs): - """Forward function.""" + def _forward_feature(self, inputs): + """Forward function for feature maps before classifying each pixel with + ``self.cls_seg`` fc. + + Args: + inputs (list[Tensor]): List of multi-level img features. + + Returns: + feats (Tensor): A tensor of shape (batch_size, self.channels, + H, W) which is feature map for last layer of decoder head. + """ x = self._transform_inputs(inputs) psp_outs = [x] psp_outs.extend(self.psp_modules(x)) psp_outs = torch.cat(psp_outs, dim=1) - output = self.bottleneck(psp_outs) + feats = self.bottleneck(psp_outs) + return feats + + def forward(self, inputs): + """Forward function.""" + output = self._forward_feature(inputs) output = self.cls_seg(output) return output diff --git a/mmseg/models/decode_heads/segformer_head.py b/mmseg/models/decode_heads/segformer_head.py new file mode 100644 index 0000000000..f9eb0b320b --- /dev/null +++ b/mmseg/models/decode_heads/segformer_head.py @@ -0,0 +1,66 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule + +from mmseg.models.decode_heads.decode_head import BaseDecodeHead +from mmseg.registry import MODELS +from ..utils import resize + + +@MODELS.register_module() +class SegformerHead(BaseDecodeHead): + """The all mlp Head of segformer. + + This head is the implementation of + `Segformer ` _. + + Args: + interpolate_mode: The interpolate mode of MLP head upsample operation. + Default: 'bilinear'. 
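A shape-level sketch of the fusion this head performs on the four backbone stages; the channel widths below are illustrative assumptions, not values from any config in this diff:

    import torch
    import torch.nn.functional as F

    # four stages at strides 4, 8, 16, 32, already projected to a common width
    # of 256 channels by the per-stage 1x1 ConvModules
    projected = [torch.randn(2, 256, 64 // s, 64 // s) for s in (1, 2, 4, 8)]
    aligned = [
        F.interpolate(p, size=projected[0].shape[2:], mode='bilinear',
                      align_corners=False) for p in projected
    ]
    fused = torch.cat(aligned, dim=1)          # (2, 1024, 64, 64)
    # fusion_conv (1x1) then maps 1024 -> 256 channels before cls_seg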
+ """ + + def __init__(self, interpolate_mode='bilinear', **kwargs): + super().__init__(input_transform='multiple_select', **kwargs) + + self.interpolate_mode = interpolate_mode + num_inputs = len(self.in_channels) + + assert num_inputs == len(self.in_index) + + self.convs = nn.ModuleList() + for i in range(num_inputs): + self.convs.append( + ConvModule( + in_channels=self.in_channels[i], + out_channels=self.channels, + kernel_size=1, + stride=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + self.fusion_conv = ConvModule( + in_channels=self.channels * num_inputs, + out_channels=self.channels, + kernel_size=1, + norm_cfg=self.norm_cfg) + + def forward(self, inputs): + # Receive 4 stage backbone feature map: 1/4, 1/8, 1/16, 1/32 + inputs = self._transform_inputs(inputs) + outs = [] + for idx in range(len(inputs)): + x = inputs[idx] + conv = self.convs[idx] + outs.append( + resize( + input=conv(x), + size=inputs[0].shape[2:], + mode=self.interpolate_mode, + align_corners=self.align_corners)) + + out = self.fusion_conv(torch.cat(outs, dim=1)) + + out = self.cls_seg(out) + + return out diff --git a/mmseg/models/decode_heads/segmenter_mask_head.py b/mmseg/models/decode_heads/segmenter_mask_head.py new file mode 100644 index 0000000000..85d27735ba --- /dev/null +++ b/mmseg/models/decode_heads/segmenter_mask_head.py @@ -0,0 +1,132 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_norm_layer +from mmengine.model import ModuleList +from mmengine.model.weight_init import (constant_init, trunc_normal_, + trunc_normal_init) + +from mmseg.models.backbones.vit import TransformerEncoderLayer +from mmseg.registry import MODELS +from .decode_head import BaseDecodeHead + + +@MODELS.register_module() +class SegmenterMaskTransformerHead(BaseDecodeHead): + """Segmenter: Transformer for Semantic Segmentation. + + This head is the implementation of + `Segmenter: `_. + + Args: + backbone_cfg:(dict): Config of backbone of + Context Path. + in_channels (int): The number of channels of input image. + num_layers (int): The depth of transformer. + num_heads (int): The number of attention heads. + embed_dims (int): The number of embedding dimension. + mlp_ratio (int): ratio of mlp hidden dim to embedding dim. + Default: 4. + drop_path_rate (float): stochastic depth rate. Default 0.1. + drop_rate (float): Probability of an element to be zeroed. + Default 0.0 + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0 + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qkv_bias (bool): Enable bias for qkv if True. Default: True. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + init_std (float): The value of std in weight initialization. + Default: 0.02. 
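The decoding step implemented in ``forward`` below boils down to a cosine similarity between patch embeddings and learned class embeddings; a toy-shape sketch (sizes are assumptions for illustration):

    import torch
    import torch.nn.functional as F

    B, L, K, D = 2, 1024, 150, 192           # batch, patch tokens, classes, embed dim
    patches = F.normalize(torch.randn(B, L, D), dim=2, p=2)
    cls_seg_feat = F.normalize(torch.randn(B, K, D), dim=2, p=2)
    masks = patches @ cls_seg_feat.transpose(1, 2)   # (B, L, K), one logit per class
    # mask_norm then normalizes over classes and the result is reshaped to (B, K, h, w)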
+ """ + + def __init__( + self, + in_channels, + num_layers, + num_heads, + embed_dims, + mlp_ratio=4, + drop_path_rate=0.1, + drop_rate=0.0, + attn_drop_rate=0.0, + num_fcs=2, + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_std=0.02, + **kwargs, + ): + super().__init__(in_channels=in_channels, **kwargs) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_layers)] + self.layers = ModuleList() + for i in range(num_layers): + self.layers.append( + TransformerEncoderLayer( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=mlp_ratio * embed_dims, + attn_drop_rate=attn_drop_rate, + drop_rate=drop_rate, + drop_path_rate=dpr[i], + num_fcs=num_fcs, + qkv_bias=qkv_bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + batch_first=True, + )) + + self.dec_proj = nn.Linear(in_channels, embed_dims) + + self.cls_emb = nn.Parameter( + torch.randn(1, self.num_classes, embed_dims)) + self.patch_proj = nn.Linear(embed_dims, embed_dims, bias=False) + self.classes_proj = nn.Linear(embed_dims, embed_dims, bias=False) + + self.decoder_norm = build_norm_layer( + norm_cfg, embed_dims, postfix=1)[1] + self.mask_norm = build_norm_layer( + norm_cfg, self.num_classes, postfix=2)[1] + + self.init_std = init_std + + delattr(self, 'conv_seg') + + def init_weights(self): + trunc_normal_(self.cls_emb, std=self.init_std) + trunc_normal_init(self.patch_proj, std=self.init_std) + trunc_normal_init(self.classes_proj, std=self.init_std) + for n, m in self.named_modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=self.init_std, bias=0) + elif isinstance(m, nn.LayerNorm): + constant_init(m, val=1.0, bias=0.0) + + def forward(self, inputs): + x = self._transform_inputs(inputs) + b, c, h, w = x.shape + x = x.permute(0, 2, 3, 1).contiguous().view(b, -1, c) + + x = self.dec_proj(x) + cls_emb = self.cls_emb.expand(x.size(0), -1, -1) + x = torch.cat((x, cls_emb), 1) + for layer in self.layers: + x = layer(x) + x = self.decoder_norm(x) + + patches = self.patch_proj(x[:, :-self.num_classes]) + cls_seg_feat = self.classes_proj(x[:, -self.num_classes:]) + + patches = F.normalize(patches, dim=2, p=2) + cls_seg_feat = F.normalize(cls_seg_feat, dim=2, p=2) + + masks = patches @ cls_seg_feat.transpose(1, 2) + masks = self.mask_norm(masks) + masks = masks.permute(0, 2, 1).contiguous().view(b, -1, h, w) + + return masks diff --git a/mmseg/models/decode_heads/sep_aspp_head.py b/mmseg/models/decode_heads/sep_aspp_head.py index 71881890bd..9dba68c9ec 100644 --- a/mmseg/models/decode_heads/sep_aspp_head.py +++ b/mmseg/models/decode_heads/sep_aspp_head.py @@ -1,9 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
import torch import torch.nn as nn -from mmcv.cnn import ConvModule +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule -from mmseg.ops import DepthwiseSeparableConvModule, resize -from ..builder import HEADS +from mmseg.registry import MODELS +from ..utils import resize from .aspp_head import ASPPHead, ASPPModule @@ -12,7 +13,7 @@ class DepthwiseSeparableASPPModule(ASPPModule): conv.""" def __init__(self, **kwargs): - super(DepthwiseSeparableASPPModule, self).__init__(**kwargs) + super().__init__(**kwargs) for i, dilation in enumerate(self.dilations): if dilation > 1: self[i] = DepthwiseSeparableConvModule( @@ -25,7 +26,7 @@ def __init__(self, **kwargs): act_cfg=self.act_cfg) -@HEADS.register_module() +@MODELS.register_module() class DepthwiseSeparableASPPHead(ASPPHead): """Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation. @@ -40,7 +41,7 @@ class DepthwiseSeparableASPPHead(ASPPHead): """ def __init__(self, c1_in_channels, c1_channels, **kwargs): - super(DepthwiseSeparableASPPHead, self).__init__(**kwargs) + super().__init__(**kwargs) assert c1_in_channels >= 0 self.aspp_modules = DepthwiseSeparableASPPModule( dilations=self.dilations, diff --git a/mmseg/models/decode_heads/sep_fcn_head.py b/mmseg/models/decode_heads/sep_fcn_head.py new file mode 100644 index 0000000000..3b15983bce --- /dev/null +++ b/mmseg/models/decode_heads/sep_fcn_head.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import DepthwiseSeparableConvModule + +from mmseg.registry import MODELS +from .fcn_head import FCNHead + + +@MODELS.register_module() +class DepthwiseSeparableFCNHead(FCNHead): + """Depthwise-Separable Fully Convolutional Network for Semantic + Segmentation. + + This head is implemented according to `Fast-SCNN: Fast Semantic + Segmentation Network `_. + + Args: + in_channels(int): Number of output channels of FFM. + channels(int): Number of middle-stage channels in the decode head. + concat_input(bool): Whether to concatenate original decode input into + the result of several consecutive convolution layers. + Default: True. + num_classes(int): Used to determine the dimension of + final prediction tensor. + in_index(int): Correspond with 'out_indices' in FastSCNN backbone. + norm_cfg (dict | None): Config of norm layers. + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + loss_decode(dict): Config of loss type and some + relevant additional options. + dw_act_cfg (dict):Activation config of depthwise ConvModule. If it is + 'default', it will be the same as `act_cfg`. Default: None. 
+ """ + + def __init__(self, dw_act_cfg=None, **kwargs): + super().__init__(**kwargs) + self.convs[0] = DepthwiseSeparableConvModule( + self.in_channels, + self.channels, + kernel_size=self.kernel_size, + padding=self.kernel_size // 2, + norm_cfg=self.norm_cfg, + dw_act_cfg=dw_act_cfg) + + for i in range(1, self.num_convs): + self.convs[i] = DepthwiseSeparableConvModule( + self.channels, + self.channels, + kernel_size=self.kernel_size, + padding=self.kernel_size // 2, + norm_cfg=self.norm_cfg, + dw_act_cfg=dw_act_cfg) + + if self.concat_input: + self.conv_cat = DepthwiseSeparableConvModule( + self.in_channels + self.channels, + self.channels, + kernel_size=self.kernel_size, + padding=self.kernel_size // 2, + norm_cfg=self.norm_cfg, + dw_act_cfg=dw_act_cfg) diff --git a/mmseg/models/decode_heads/setr_mla_head.py b/mmseg/models/decode_heads/setr_mla_head.py new file mode 100644 index 0000000000..1975991a60 --- /dev/null +++ b/mmseg/models/decode_heads/setr_mla_head.py @@ -0,0 +1,62 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule + +from mmseg.registry import MODELS +from ..utils import Upsample +from .decode_head import BaseDecodeHead + + +@MODELS.register_module() +class SETRMLAHead(BaseDecodeHead): + """Multi level feature aggretation head of SETR. + + MLA head of `SETR `_. + + Args: + mlahead_channels (int): Channels of conv-conv-4x of multi-level feature + aggregation. Default: 128. + up_scale (int): The scale factor of interpolate. Default:4. + """ + + def __init__(self, mla_channels=128, up_scale=4, **kwargs): + super().__init__(input_transform='multiple_select', **kwargs) + self.mla_channels = mla_channels + + num_inputs = len(self.in_channels) + + # Refer to self.cls_seg settings of BaseDecodeHead + assert self.channels == num_inputs * mla_channels + + self.up_convs = nn.ModuleList() + for i in range(num_inputs): + self.up_convs.append( + nn.Sequential( + ConvModule( + in_channels=self.in_channels[i], + out_channels=mla_channels, + kernel_size=3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + in_channels=mla_channels, + out_channels=mla_channels, + kernel_size=3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + Upsample( + scale_factor=up_scale, + mode='bilinear', + align_corners=self.align_corners))) + + def forward(self, inputs): + inputs = self._transform_inputs(inputs) + outs = [] + for x, up_conv in zip(inputs, self.up_convs): + outs.append(up_conv(x)) + out = torch.cat(outs, dim=1) + out = self.cls_seg(out) + return out diff --git a/mmseg/models/decode_heads/setr_up_head.py b/mmseg/models/decode_heads/setr_up_head.py new file mode 100644 index 0000000000..9c796d8161 --- /dev/null +++ b/mmseg/models/decode_heads/setr_up_head.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule, build_norm_layer + +from mmseg.registry import MODELS +from ..utils import Upsample +from .decode_head import BaseDecodeHead + + +@MODELS.register_module() +class SETRUPHead(BaseDecodeHead): + """Naive upsampling head and Progressive upsampling head of SETR. + + Naive or PUP head of `SETR `_. + + Args: + norm_layer (dict): Config dict for input normalization. + Default: norm_layer=dict(type='LN', eps=1e-6, requires_grad=True). + num_convs (int): Number of decoder convolutions. Default: 1. + up_scale (int): The scale factor of interpolate. Default:4. 
+ kernel_size (int): The kernel size of convolution when decoding + feature information from backbone. Default: 3. + init_cfg (dict | list[dict] | None): Initialization config dict. + Default: dict( + type='Constant', val=1.0, bias=0, layer='LayerNorm'). + """ + + def __init__(self, + norm_layer=dict(type='LN', eps=1e-6, requires_grad=True), + num_convs=1, + up_scale=4, + kernel_size=3, + init_cfg=[ + dict(type='Constant', val=1.0, bias=0, layer='LayerNorm'), + dict( + type='Normal', + std=0.01, + override=dict(name='conv_seg')) + ], + **kwargs): + + assert kernel_size in [1, 3], 'kernel_size must be 1 or 3.' + + super().__init__(init_cfg=init_cfg, **kwargs) + + assert isinstance(self.in_channels, int) + + _, self.norm = build_norm_layer(norm_layer, self.in_channels) + + self.up_convs = nn.ModuleList() + in_channels = self.in_channels + out_channels = self.channels + for _ in range(num_convs): + self.up_convs.append( + nn.Sequential( + ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=1, + padding=int(kernel_size - 1) // 2, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + Upsample( + scale_factor=up_scale, + mode='bilinear', + align_corners=self.align_corners))) + in_channels = out_channels + + def forward(self, x): + x = self._transform_inputs(x) + + n, c, h, w = x.shape + x = x.reshape(n, c, h * w).transpose(2, 1).contiguous() + x = self.norm(x) + x = x.transpose(1, 2).reshape(n, c, h, w).contiguous() + + for up_conv in self.up_convs: + x = up_conv(x) + out = self.cls_seg(x) + return out diff --git a/mmseg/models/decode_heads/stdc_head.py b/mmseg/models/decode_heads/stdc_head.py new file mode 100644 index 0000000000..1c1c21e308 --- /dev/null +++ b/mmseg/models/decode_heads/stdc_head.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn.functional as F +from mmengine.structures import PixelData +from torch import Tensor + +from mmseg.registry import MODELS +from mmseg.structures import SegDataSample +from mmseg.utils import SampleList +from .fcn_head import FCNHead + + +@MODELS.register_module() +class STDCHead(FCNHead): + """This head is the implementation of `Rethinking BiSeNet For Real-time + Semantic Segmentation `_. + + Args: + boundary_threshold (float): The threshold of calculating boundary. + Default: 0.1. + """ + + def __init__(self, boundary_threshold=0.1, **kwargs): + super().__init__(**kwargs) + self.boundary_threshold = boundary_threshold + # Using register buffer to make laplacian kernel on the same + # device of `seg_label`. + self.register_buffer( + 'laplacian_kernel', + torch.tensor([-1, -1, -1, -1, 8, -1, -1, -1, -1], + dtype=torch.float32, + requires_grad=False).reshape((1, 1, 3, 3))) + self.fusion_kernel = torch.nn.Parameter( + torch.tensor([[6. / 10], [3. / 10], [1. / 10]], + dtype=torch.float32).reshape(1, 3, 1, 1), + requires_grad=False) + + def loss_by_feat(self, seg_logits: Tensor, + batch_data_samples: SampleList) -> dict: + """Compute Detail Aggregation Loss.""" + # Note: The paper claims `fusion_kernel` is a trainable 1x1 conv + # parameters. However, it is a constant in original repo and other + # codebase because it would not be added into computation graph + # after threshold operation. 
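        # In short (illustrative summary of the code below, not new behaviour):
        # the 3x3 Laplacian kernel registered in __init__ responds only where
        # neighbouring labels differ, so conv2d(seg_label, laplacian_kernel) is
        # ~0 inside uniform regions and non-zero on class boundaries.
        # Thresholding that response at `boundary_threshold` yields binary
        # boundary maps at strides 1, 2 and 4, which are fused by the fixed
        # 0.6/0.3/0.1 `fusion_kernel` into the final detail-aggregation target.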
+ seg_label = self._stack_batch_gt(batch_data_samples).to( + self.laplacian_kernel) + boundary_targets = F.conv2d( + seg_label, self.laplacian_kernel, padding=1) + boundary_targets = boundary_targets.clamp(min=0) + boundary_targets[boundary_targets > self.boundary_threshold] = 1 + boundary_targets[boundary_targets <= self.boundary_threshold] = 0 + + boundary_targets_x2 = F.conv2d( + seg_label, self.laplacian_kernel, stride=2, padding=1) + boundary_targets_x2 = boundary_targets_x2.clamp(min=0) + + boundary_targets_x4 = F.conv2d( + seg_label, self.laplacian_kernel, stride=4, padding=1) + boundary_targets_x4 = boundary_targets_x4.clamp(min=0) + + boundary_targets_x4_up = F.interpolate( + boundary_targets_x4, boundary_targets.shape[2:], mode='nearest') + boundary_targets_x2_up = F.interpolate( + boundary_targets_x2, boundary_targets.shape[2:], mode='nearest') + + boundary_targets_x2_up[ + boundary_targets_x2_up > self.boundary_threshold] = 1 + boundary_targets_x2_up[ + boundary_targets_x2_up <= self.boundary_threshold] = 0 + + boundary_targets_x4_up[ + boundary_targets_x4_up > self.boundary_threshold] = 1 + boundary_targets_x4_up[ + boundary_targets_x4_up <= self.boundary_threshold] = 0 + + boundary_targets_pyramids = torch.stack( + (boundary_targets, boundary_targets_x2_up, boundary_targets_x4_up), + dim=1) + + boundary_targets_pyramids = boundary_targets_pyramids.squeeze(2) + boudary_targets_pyramid = F.conv2d(boundary_targets_pyramids, + self.fusion_kernel) + + boudary_targets_pyramid[ + boudary_targets_pyramid > self.boundary_threshold] = 1 + boudary_targets_pyramid[ + boudary_targets_pyramid <= self.boundary_threshold] = 0 + + seg_labels = boudary_targets_pyramid.long() + batch_sample_list = [] + for label in seg_labels: + seg_data_sample = SegDataSample() + seg_data_sample.gt_sem_seg = PixelData(data=label) + batch_sample_list.append(seg_data_sample) + + loss = super().loss_by_feat(seg_logits, batch_sample_list) + return loss diff --git a/mmseg/models/decode_heads/uper_head.py b/mmseg/models/decode_heads/uper_head.py index bb617f6b13..b1ccc3173c 100644 --- a/mmseg/models/decode_heads/uper_head.py +++ b/mmseg/models/decode_heads/uper_head.py @@ -1,14 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import ConvModule -from mmseg.ops import resize -from ..builder import HEADS +from mmseg.registry import MODELS +from ..utils import resize from .decode_head import BaseDecodeHead from .psp_head import PPM -@HEADS.register_module() +@MODELS.register_module() class UPerHead(BaseDecodeHead): """Unified Perceptual Parsing for Scene Understanding. @@ -21,8 +22,7 @@ class UPerHead(BaseDecodeHead): """ def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): - super(UPerHead, self).__init__( - input_transform='multiple_select', **kwargs) + super().__init__(input_transform='multiple_select', **kwargs) # PSP Module self.psp_modules = PPM( pool_scales, @@ -83,9 +83,17 @@ def psp_forward(self, inputs): return output - def forward(self, inputs): - """Forward function.""" + def _forward_feature(self, inputs): + """Forward function for feature maps before classifying each pixel with + ``self.cls_seg`` fc. + + Args: + inputs (list[Tensor]): List of multi-level img features. + Returns: + feats (Tensor): A tensor of shape (batch_size, self.channels, + H, W) which is feature map for last layer of decoder head. 
+ """ inputs = self._transform_inputs(inputs) # build laterals @@ -100,7 +108,7 @@ def forward(self, inputs): used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): prev_shape = laterals[i - 1].shape[2:] - laterals[i - 1] += resize( + laterals[i - 1] = laterals[i - 1] + resize( laterals[i], size=prev_shape, mode='bilinear', @@ -121,6 +129,11 @@ def forward(self, inputs): mode='bilinear', align_corners=self.align_corners) fpn_outs = torch.cat(fpn_outs, dim=1) - output = self.fpn_bottleneck(fpn_outs) + feats = self.fpn_bottleneck(fpn_outs) + return feats + + def forward(self, inputs): + """Forward function.""" + output = self._forward_feature(inputs) output = self.cls_seg(output) return output diff --git a/mmseg/models/losses/__init__.py b/mmseg/models/losses/__init__.py index 225bdde393..d7e019747d 100644 --- a/mmseg/models/losses/__init__.py +++ b/mmseg/models/losses/__init__.py @@ -1,10 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. from .accuracy import Accuracy, accuracy from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, cross_entropy, mask_cross_entropy) +from .dice_loss import DiceLoss +from .focal_loss import FocalLoss +from .lovasz_loss import LovaszLoss +from .tversky_loss import TverskyLoss from .utils import reduce_loss, weight_reduce_loss, weighted_loss __all__ = [ 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', 'mask_cross_entropy', 'CrossEntropyLoss', 'reduce_loss', - 'weight_reduce_loss', 'weighted_loss' + 'weight_reduce_loss', 'weighted_loss', 'LovaszLoss', 'DiceLoss', + 'FocalLoss', 'TverskyLoss' ] diff --git a/mmseg/models/losses/accuracy.py b/mmseg/models/losses/accuracy.py index e45f9ec485..1d9e2d7701 100644 --- a/mmseg/models/losses/accuracy.py +++ b/mmseg/models/losses/accuracy.py @@ -1,12 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch import torch.nn as nn -def accuracy(pred, target, topk=1, thresh=None): +def accuracy(pred, target, topk=1, thresh=None, ignore_index=None): """Calculate accuracy according to the prediction and target. Args: pred (torch.Tensor): The model prediction, shape (N, num_class, ...) target (torch.Tensor): The target of each prediction, shape (N, , ...) + ignore_index (int | None): The label index to be ignored. Default: None topk (int | tuple[int], optional): If the predictions in ``topk`` matches the target, the predictions will be regarded as correct ones. Defaults to 1. @@ -42,17 +45,26 @@ def accuracy(pred, target, topk=1, thresh=None): if thresh is not None: # Only prediction values larger than thresh are counted as correct correct = correct & (pred_value > thresh).t() + if ignore_index is not None: + correct = correct[:, target != ignore_index] res = [] + eps = torch.finfo(torch.float32).eps for k in topk: - correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) - res.append(correct_k.mul_(100.0 / target.numel())) + # Avoid causing ZeroDivisionError when all pixels + # of an image are ignored + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + eps + if ignore_index is not None: + total_num = target[target != ignore_index].numel() + eps + else: + total_num = target.numel() + eps + res.append(correct_k.mul_(100.0 / total_num)) return res[0] if return_single else res class Accuracy(nn.Module): """Accuracy calculation module.""" - def __init__(self, topk=(1, ), thresh=None): + def __init__(self, topk=(1, ), thresh=None, ignore_index=None): """Module to calculate the accuracy. 
Args: @@ -64,6 +76,7 @@ def __init__(self, topk=(1, ), thresh=None): super().__init__() self.topk = topk self.thresh = thresh + self.ignore_index = ignore_index def forward(self, pred, target): """Forward function to calculate accuracy. @@ -75,4 +88,5 @@ def forward(self, pred, target): Returns: tuple[float]: The accuracies under different topk criterions. """ - return accuracy(pred, target, self.topk, self.thresh) + return accuracy(pred, target, self.topk, self.thresh, + self.ignore_index) diff --git a/mmseg/models/losses/cross_entropy_loss.py b/mmseg/models/losses/cross_entropy_loss.py index dcd9f1c894..770b997486 100644 --- a/mmseg/models/losses/cross_entropy_loss.py +++ b/mmseg/models/losses/cross_entropy_loss.py @@ -1,9 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + import torch import torch.nn as nn import torch.nn.functional as F -from ..builder import LOSSES -from .utils import weight_reduce_loss +from mmseg.registry import MODELS +from .utils import get_class_weight, weight_reduce_loss def cross_entropy(pred, @@ -12,8 +15,31 @@ def cross_entropy(pred, class_weight=None, reduction='mean', avg_factor=None, - ignore_index=-100): - """The wrapper function for :func:`F.cross_entropy`""" + ignore_index=-100, + avg_non_ignore=False): + """cross_entropy. The wrapper function for :func:`F.cross_entropy` + + Args: + pred (torch.Tensor): The prediction with shape (N, 1). + label (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + Default: None. + class_weight (list[float], optional): The weight for each class. + Default: None. + reduction (str, optional): The method used to reduce the loss. + Options are 'none', 'mean' and 'sum'. Default: 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Default: None. + ignore_index (int): Specifies a target value that is ignored and + does not contribute to the input gradients. When + ``avg_non_ignore `` is ``True``, and the ``reduction`` is + ``''mean''``, the loss is averaged over non-ignored targets. + Defaults: -100. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. + `New in version 0.23.0.` + """ + # class_weight is a manual rescaling weight given to each class. 
# If given, has to be a Tensor of size C element-wise losses loss = F.cross_entropy( @@ -24,6 +50,11 @@ def cross_entropy(pred, ignore_index=ignore_index) # apply weights and do the reduction + # average loss over non-ignored elements + # pytorch's official cross_entropy average loss over non-ignored elements + # refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660 # noqa + if (avg_factor is None) and avg_non_ignore and reduction == 'mean': + avg_factor = label.numel() - (label == ignore_index).sum().item() if weight is not None: weight = weight.float() loss = weight_reduce_loss( @@ -32,18 +63,27 @@ def cross_entropy(pred, return loss -def _expand_onehot_labels(labels, label_weights, label_channels): +def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index): """Expand onehot labels to match the size of prediction.""" - bin_labels = labels.new_full((labels.size(0), label_channels), 0) - inds = torch.nonzero(labels >= 1, as_tuple=False).squeeze() - if inds.numel() > 0: - bin_labels[inds, labels[inds] - 1] = 1 + bin_labels = labels.new_zeros(target_shape) + valid_mask = (labels >= 0) & (labels != ignore_index) + inds = torch.nonzero(valid_mask, as_tuple=True) + + if inds[0].numel() > 0: + if labels.dim() == 3: + bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1 + else: + bin_labels[inds[0], labels[valid_mask]] = 1 + + valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float() + if label_weights is None: - bin_label_weights = None + bin_label_weights = valid_mask else: - bin_label_weights = label_weights.view(-1, 1).expand( - label_weights.size(0), label_channels) - return bin_labels, bin_label_weights + bin_label_weights = label_weights.unsqueeze(1).expand(target_shape) + bin_label_weights = bin_label_weights * valid_mask + + return bin_labels, bin_label_weights, valid_mask def binary_cross_entropy(pred, @@ -51,30 +91,62 @@ def binary_cross_entropy(pred, weight=None, reduction='mean', avg_factor=None, - class_weight=None): + class_weight=None, + ignore_index=-100, + avg_non_ignore=False, + **kwargs): """Calculate the binary CrossEntropy loss. Args: pred (torch.Tensor): The prediction with shape (N, 1). label (torch.Tensor): The learning label of the prediction. + Note: In bce loss, label < 0 is invalid. weight (torch.Tensor, optional): Sample-wise loss weight. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (list[float], optional): The weight for each class. + ignore_index (int): The label index to be ignored. Default: -100. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. + `New in version 0.23.0.` Returns: torch.Tensor: The calculated loss """ + if pred.size(1) == 1: + # For binary class segmentation, the shape of pred is + # [N, 1, H, W] and that of label is [N, H, W]. 
+ # As the ignore_index often set as 255, so the + # binary class label check should mask out + # ignore_index + assert label[label != ignore_index].max() <= 1, \ + 'For pred with shape [N, 1, H, W], its label must have at ' \ + 'most 2 classes' + pred = pred.squeeze() if pred.dim() != label.dim(): - label, weight = _expand_onehot_labels(label, weight, pred.size(-1)) + assert (pred.dim() == 2 and label.dim() == 1) or ( + pred.dim() == 4 and label.dim() == 3), \ + 'Only pred shape [N, C], label shape [N] or pred shape [N, C, ' \ + 'H, W], label shape [N, H, W] are supported' + # `weight` returned from `_expand_onehot_labels` + # has been treated for valid (non-ignore) pixels + label, weight, valid_mask = _expand_onehot_labels( + label, weight, pred.shape, ignore_index) + else: + # should mask out the ignored elements + valid_mask = ((label >= 0) & (label != ignore_index)).float() + if weight is not None: + weight = weight * valid_mask + else: + weight = valid_mask + # average loss over non-ignored and valid elements + if reduction == 'mean' and avg_factor is None and avg_non_ignore: + avg_factor = valid_mask.sum().item() - # weighted element-wise losses - if weight is not None: - weight = weight.float() loss = F.binary_cross_entropy_with_logits( - pred, label.float(), weight=class_weight, reduction='none') + pred, label.float(), pos_weight=class_weight, reduction='none') # do the reduction for the weighted loss loss = weight_reduce_loss( loss, weight, reduction=reduction, avg_factor=avg_factor) @@ -87,7 +159,9 @@ def mask_cross_entropy(pred, label, reduction='mean', avg_factor=None, - class_weight=None): + class_weight=None, + ignore_index=None, + **kwargs): """Calculate the CrossEntropy loss for masks. Args: @@ -103,10 +177,13 @@ def mask_cross_entropy(pred, avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (list[float], optional): The weight for each class. + ignore_index (None): Placeholder, to be consistent with other loss. + Default: None. Returns: torch.Tensor: The calculated loss """ + assert ignore_index is None, 'BCE loss does not support ignore_index' # TODO: handle these two reserved arguments assert reduction == 'mean' and avg_factor is None num_rois = pred.size()[0] @@ -116,7 +193,7 @@ def mask_cross_entropy(pred, pred_slice, target, weight=class_weight, reduction='mean')[None] -@LOSSES.register_module() +@MODELS.register_module() class CrossEntropyLoss(nn.Module): """CrossEntropyLoss. @@ -127,9 +204,15 @@ class CrossEntropyLoss(nn.Module): Defaults to False. reduction (str, optional): . Defaults to 'mean'. Options are "none", "mean" and "sum". - class_weight (list[float], optional): Weight of each class. - Defaults to None. + class_weight (list[float] | str, optional): Weight of each class. If in + str format, read them from a file. Defaults to None. loss_weight (float, optional): Weight of the loss. Defaults to 1.0. + loss_name (str, optional): Name of the loss item. If you want this loss + item to be included into the backward graph, `loss_` must be the + prefix of the name. Defaults to 'loss_ce'. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. 
+ `New in version 0.23.0.` """ def __init__(self, @@ -137,14 +220,23 @@ def __init__(self, use_mask=False, reduction='mean', class_weight=None, - loss_weight=1.0): - super(CrossEntropyLoss, self).__init__() + loss_weight=1.0, + loss_name='loss_ce', + avg_non_ignore=False): + super().__init__() assert (use_sigmoid is False) or (use_mask is False) self.use_sigmoid = use_sigmoid self.use_mask = use_mask self.reduction = reduction self.loss_weight = loss_weight - self.class_weight = class_weight + self.class_weight = get_class_weight(class_weight) + self.avg_non_ignore = avg_non_ignore + if not self.avg_non_ignore and self.reduction == 'mean': + warnings.warn( + 'Default ``avg_non_ignore`` is False, if you would like to ' + 'ignore the certain label and average loss over non-ignore ' + 'labels, which is the same with PyTorch official ' + 'cross_entropy, set ``avg_non_ignore=True``.') if self.use_sigmoid: self.cls_criterion = binary_cross_entropy @@ -152,6 +244,12 @@ def __init__(self, self.cls_criterion = mask_cross_entropy else: self.cls_criterion = cross_entropy + self._loss_name = loss_name + + def extra_repr(self): + """Extra repr.""" + s = f'avg_non_ignore={self.avg_non_ignore}' + return s def forward(self, cls_score, @@ -159,6 +257,7 @@ def forward(self, weight=None, avg_factor=None, reduction_override=None, + ignore_index=-100, **kwargs): """Forward function.""" assert reduction_override in (None, 'none', 'mean', 'sum') @@ -168,6 +267,7 @@ def forward(self, class_weight = cls_score.new_tensor(self.class_weight) else: class_weight = None + # Note: for BCE loss, label < 0 is invalid. loss_cls = self.loss_weight * self.cls_criterion( cls_score, label, @@ -175,5 +275,22 @@ def forward(self, class_weight=class_weight, reduction=reduction, avg_factor=avg_factor, + avg_non_ignore=self.avg_non_ignore, + ignore_index=ignore_index, **kwargs) return loss_cls + + @property + def loss_name(self): + """Loss Name. + + This function must be implemented and will return the name of this + loss function. This name will be used to combine different loss items + by simple sum operation. In addition, if you want this loss item to be + included into the backward graph, `loss_` must be the prefix of the + name. + + Returns: + str: The name of this loss item. + """ + return self._loss_name diff --git a/mmseg/models/losses/dice_loss.py b/mmseg/models/losses/dice_loss.py new file mode 100644 index 0000000000..2ee89a81f4 --- /dev/null +++ b/mmseg/models/losses/dice_loss.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+"""Modified from https://github.com/LikeLy-Journey/SegmenTron/blob/master/ +segmentron/solver/loss.py (Apache-2.0 License)""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmseg.registry import MODELS +from .utils import get_class_weight, weighted_loss + + +@weighted_loss +def dice_loss(pred, + target, + valid_mask, + smooth=1, + exponent=2, + class_weight=None, + ignore_index=255): + assert pred.shape[0] == target.shape[0] + total_loss = 0 + num_classes = pred.shape[1] + for i in range(num_classes): + if i != ignore_index: + dice_loss = binary_dice_loss( + pred[:, i], + target[..., i], + valid_mask=valid_mask, + smooth=smooth, + exponent=exponent) + if class_weight is not None: + dice_loss *= class_weight[i] + total_loss += dice_loss + return total_loss / num_classes + + +@weighted_loss +def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards): + assert pred.shape[0] == target.shape[0] + pred = pred.reshape(pred.shape[0], -1) + target = target.reshape(target.shape[0], -1) + valid_mask = valid_mask.reshape(valid_mask.shape[0], -1) + + num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth + den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth + + return 1 - num / den + + +@MODELS.register_module() +class DiceLoss(nn.Module): + """DiceLoss. + + This loss is proposed in `V-Net: Fully Convolutional Neural Networks for + Volumetric Medical Image Segmentation `_. + + Args: + smooth (float): A float number to smooth loss, and avoid NaN error. + Default: 1 + exponent (float): An float number to calculate denominator + value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2. + reduction (str, optional): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_image is True. Default: 'mean'. + class_weight (list[float] | str, optional): Weight of each class. If in + str format, read them from a file. Defaults to None. + loss_weight (float, optional): Weight of the loss. Default to 1.0. + ignore_index (int | None): The label index to be ignored. Default: 255. + loss_name (str, optional): Name of the loss item. If you want this loss + item to be included into the backward graph, `loss_` must be the + prefix of the name. Defaults to 'loss_dice'. 
+ """ + + def __init__(self, + smooth=1, + exponent=2, + reduction='mean', + class_weight=None, + loss_weight=1.0, + ignore_index=255, + loss_name='loss_dice', + **kwards): + super().__init__() + self.smooth = smooth + self.exponent = exponent + self.reduction = reduction + self.class_weight = get_class_weight(class_weight) + self.loss_weight = loss_weight + self.ignore_index = ignore_index + self._loss_name = loss_name + + def forward(self, + pred, + target, + avg_factor=None, + reduction_override=None, + **kwards): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.class_weight is not None: + class_weight = pred.new_tensor(self.class_weight) + else: + class_weight = None + + pred = F.softmax(pred, dim=1) + num_classes = pred.shape[1] + one_hot_target = F.one_hot( + torch.clamp(target.long(), 0, num_classes - 1), + num_classes=num_classes) + valid_mask = (target != self.ignore_index).long() + + loss = self.loss_weight * dice_loss( + pred, + one_hot_target, + valid_mask=valid_mask, + reduction=reduction, + avg_factor=avg_factor, + smooth=self.smooth, + exponent=self.exponent, + class_weight=class_weight, + ignore_index=self.ignore_index) + return loss + + @property + def loss_name(self): + """Loss Name. + + This function must be implemented and will return the name of this + loss function. This name will be used to combine different loss items + by simple sum operation. In addition, if you want this loss item to be + included into the backward graph, `loss_` must be the prefix of the + name. + Returns: + str: The name of this loss item. + """ + return self._loss_name diff --git a/mmseg/models/losses/focal_loss.py b/mmseg/models/losses/focal_loss.py new file mode 100644 index 0000000000..104d6602c8 --- /dev/null +++ b/mmseg/models/losses/focal_loss.py @@ -0,0 +1,327 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from https://github.com/open-mmlab/mmdetection +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss + +from mmseg.registry import MODELS +from .utils import weight_reduce_loss + + +# This method is used when cuda is not available +def py_sigmoid_focal_loss(pred, + target, + one_hot_target=None, + weight=None, + gamma=2.0, + alpha=0.5, + class_weight=None, + valid_mask=None, + reduction='mean', + avg_factor=None): + """PyTorch version of `Focal Loss `_. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the + number of classes + target (torch.Tensor): The learning label of the prediction with + shape (N, C) + one_hot_target (None): Placeholder. It should be None. + weight (torch.Tensor, optional): Sample-wise loss weight. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float | list[float], optional): A balanced form for Focal Loss. + Defaults to 0.5. + class_weight (list[float], optional): Weight of each class. + Defaults to None. + valid_mask (torch.Tensor, optional): A mask uses 1 to mark the valid + samples and uses 0 to mark the ignored samples. Default: None. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. 
+ """ + if isinstance(alpha, list): + alpha = pred.new_tensor(alpha) + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + one_minus_pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * one_minus_pt.pow(gamma) + + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + final_weight = torch.ones(1, pred.size(1)).type_as(loss) + if weight is not None: + if weight.shape != loss.shape and weight.size(0) == loss.size(0): + # For most cases, weight is of shape (N, ), + # which means it does not have the second axis num_class + weight = weight.view(-1, 1) + assert weight.dim() == loss.dim() + final_weight = final_weight * weight + if class_weight is not None: + final_weight = final_weight * pred.new_tensor(class_weight) + if valid_mask is not None: + final_weight = final_weight * valid_mask + loss = weight_reduce_loss(loss, final_weight, reduction, avg_factor) + return loss + + +def sigmoid_focal_loss(pred, + target, + one_hot_target, + weight=None, + gamma=2.0, + alpha=0.5, + class_weight=None, + valid_mask=None, + reduction='mean', + avg_factor=None): + r"""A wrapper of cuda version `Focal Loss + `_. + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + target (torch.Tensor): The learning label of the prediction. It's shape + should be (N, ) + one_hot_target (torch.Tensor): The learning label with shape (N, C) + weight (torch.Tensor, optional): Sample-wise loss weight. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float | list[float], optional): A balanced form for Focal Loss. + Defaults to 0.5. + class_weight (list[float], optional): Weight of each class. + Defaults to None. + valid_mask (torch.Tensor, optional): A mask uses 1 to mark the valid + samples and uses 0 to mark the ignored samples. Default: None. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + # Function.apply does not accept keyword arguments, so the decorator + # "weighted_loss" is not applicable + final_weight = torch.ones(1, pred.size(1)).type_as(pred) + if isinstance(alpha, list): + # _sigmoid_focal_loss doesn't accept alpha of list type. Therefore, if + # a list is given, we set the input alpha as 0.5. This means setting + # equal weight for foreground class and background class. By + # multiplying the loss by 2, the effect of setting alpha as 0.5 is + # undone. The alpha of type list is used to regulate the loss in the + # post-processing process. 
+ loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), + gamma, 0.5, None, 'none') * 2 + alpha = pred.new_tensor(alpha) + final_weight = final_weight * ( + alpha * one_hot_target + (1 - alpha) * (1 - one_hot_target)) + else: + loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), + gamma, alpha, None, 'none') + if weight is not None: + if weight.shape != loss.shape and weight.size(0) == loss.size(0): + # For most cases, weight is of shape (N, ), + # which means it does not have the second axis num_class + weight = weight.view(-1, 1) + assert weight.dim() == loss.dim() + final_weight = final_weight * weight + if class_weight is not None: + final_weight = final_weight * pred.new_tensor(class_weight) + if valid_mask is not None: + final_weight = final_weight * valid_mask + loss = weight_reduce_loss(loss, final_weight, reduction, avg_factor) + return loss + + +@MODELS.register_module() +class FocalLoss(nn.Module): + + def __init__(self, + use_sigmoid=True, + gamma=2.0, + alpha=0.5, + reduction='mean', + class_weight=None, + loss_weight=1.0, + loss_name='loss_focal'): + """`Focal Loss `_ + Args: + use_sigmoid (bool, optional): Whether to the prediction is + used for sigmoid or softmax. Defaults to True. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float | list[float], optional): A balanced form for Focal + Loss. Defaults to 0.5. When a list is provided, the length + of the list should be equal to the number of classes. + Please be careful that this parameter is not the + class-wise weight but the weight of a binary classification + problem. This binary classification problem regards the + pixels which belong to one class as the foreground + and the other pixels as the background, each element in + the list is the weight of the corresponding foreground class. + The value of alpha or each element of alpha should be a float + in the interval [0, 1]. If you want to specify the class-wise + weight, please use `class_weight` parameter. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. Options are "none", "mean" and + "sum". + class_weight (list[float], optional): Weight of each class. + Defaults to None. + loss_weight (float, optional): Weight of loss. Defaults to 1.0. + loss_name (str, optional): Name of the loss item. If you want this + loss item to be included into the backward graph, `loss_` must + be the prefix of the name. Defaults to 'loss_focal'. + """ + super().__init__() + assert use_sigmoid is True, \ + 'AssertionError: Only sigmoid focal loss supported now.' 
+ assert reduction in ('none', 'mean', 'sum'), \ + "AssertionError: reduction should be 'none', 'mean' or " \ + "'sum'" + assert isinstance(alpha, (float, list)), \ + 'AssertionError: alpha should be of type float' + assert isinstance(gamma, float), \ + 'AssertionError: gamma should be of type float' + assert isinstance(loss_weight, float), \ + 'AssertionError: loss_weight should be of type float' + assert isinstance(loss_name, str), \ + 'AssertionError: loss_name should be of type str' + assert isinstance(class_weight, list) or class_weight is None, \ + 'AssertionError: class_weight must be None or of type list' + self.use_sigmoid = use_sigmoid + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + self.class_weight = class_weight + self.loss_weight = loss_weight + self._loss_name = loss_name + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + ignore_index=255, + **kwargs): + """Forward function. + + Args: + pred (torch.Tensor): The prediction with shape + (N, C) where C = number of classes, or + (N, C, d_1, d_2, ..., d_K) with K≥1 in the + case of K-dimensional loss. + target (torch.Tensor): The ground truth. If containing class + indices, shape (N) where each value is 0≤targets[i]≤C−1, + or (N, d_1, d_2, ..., d_K) with K≥1 in the case of + K-dimensional loss. If containing class probabilities, + same shape as the input. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to + average the loss. Defaults to None. + reduction_override (str, optional): The reduction method used + to override the original reduction method of the loss. + Options are "none", "mean" and "sum". + ignore_index (int, optional): The label index to be ignored. + Default: 255 + Returns: + torch.Tensor: The calculated loss + """ + assert isinstance(ignore_index, int), \ + 'ignore_index must be of type int' + assert reduction_override in (None, 'none', 'mean', 'sum'), \ + "AssertionError: reduction should be 'none', 'mean' or " \ + "'sum'" + assert pred.shape == target.shape or \ + (pred.size(0) == target.size(0) and + pred.shape[2:] == target.shape[1:]), \ + "The shape of pred doesn't match the shape of target" + + original_shape = pred.shape + + # [B, C, d_1, d_2, ..., d_k] -> [C, B, d_1, d_2, ..., d_k] + pred = pred.transpose(0, 1) + # [C, B, d_1, d_2, ..., d_k] -> [C, N] + pred = pred.reshape(pred.size(0), -1) + # [C, N] -> [N, C] + pred = pred.transpose(0, 1).contiguous() + + if original_shape == target.shape: + # target with shape [B, C, d_1, d_2, ...] + # transform it's shape into [N, C] + # [B, C, d_1, d_2, ...] -> [C, B, d_1, d_2, ..., d_k] + target = target.transpose(0, 1) + # [C, B, d_1, d_2, ..., d_k] -> [C, N] + target = target.reshape(target.size(0), -1) + # [C, N] -> [N, C] + target = target.transpose(0, 1).contiguous() + else: + # target with shape [B, d_1, d_2, ...] 
+ # transform it's shape into [N, ] + target = target.view(-1).contiguous() + valid_mask = (target != ignore_index).view(-1, 1) + # avoid raising error when using F.one_hot() + target = torch.where(target == ignore_index, target.new_tensor(0), + target) + + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.use_sigmoid: + num_classes = pred.size(1) + if torch.cuda.is_available() and pred.is_cuda: + if target.dim() == 1: + one_hot_target = F.one_hot(target, num_classes=num_classes) + else: + one_hot_target = target + target = target.argmax(dim=1) + valid_mask = (target != ignore_index).view(-1, 1) + calculate_loss_func = sigmoid_focal_loss + else: + one_hot_target = None + if target.dim() == 1: + target = F.one_hot(target, num_classes=num_classes) + else: + valid_mask = (target.argmax(dim=1) != ignore_index).view( + -1, 1) + calculate_loss_func = py_sigmoid_focal_loss + + loss_cls = self.loss_weight * calculate_loss_func( + pred, + target, + one_hot_target, + weight, + gamma=self.gamma, + alpha=self.alpha, + class_weight=self.class_weight, + valid_mask=valid_mask, + reduction=reduction, + avg_factor=avg_factor) + + if reduction == 'none': + # [N, C] -> [C, N] + loss_cls = loss_cls.transpose(0, 1) + # [C, N] -> [C, B, d1, d2, ...] + # original_shape: [B, C, d1, d2, ...] + loss_cls = loss_cls.reshape(original_shape[1], + original_shape[0], + *original_shape[2:]) + # [C, B, d1, d2, ...] -> [B, C, d1, d2, ...] + loss_cls = loss_cls.transpose(0, 1).contiguous() + else: + raise NotImplementedError + return loss_cls + + @property + def loss_name(self): + """Loss Name. + + This function must be implemented and will return the name of this + loss function. This name will be used to combine different loss items + by simple sum operation. In addition, if you want this loss item to be + included into the backward graph, `loss_` must be the prefix of the + name. + Returns: + str: The name of this loss item. + """ + return self._loss_name diff --git a/mmseg/models/losses/lovasz_loss.py b/mmseg/models/losses/lovasz_loss.py new file mode 100644 index 0000000000..b47f9d8a15 --- /dev/null +++ b/mmseg/models/losses/lovasz_loss.py @@ -0,0 +1,323 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Modified from https://github.com/bermanmaxim/LovaszSoftmax/blob/master/pytor +ch/lovasz_losses.py Lovasz-Softmax and Jaccard hinge loss in PyTorch Maxim +Berman 2018 ESAT-PSI KU Leuven (MIT License)""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine.utils import is_list_of + +from mmseg.registry import MODELS +from .utils import get_class_weight, weight_reduce_loss + + +def lovasz_grad(gt_sorted): + """Computes gradient of the Lovasz extension w.r.t sorted errors. + + See Alg. 1 in paper. + """ + p = len(gt_sorted) + gts = gt_sorted.sum() + intersection = gts - gt_sorted.float().cumsum(0) + union = gts + (1 - gt_sorted).float().cumsum(0) + jaccard = 1. 
- intersection / union + if p > 1: # cover 1-pixel case + jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] + return jaccard + + +def flatten_binary_logits(logits, labels, ignore_index=None): + """Flattens predictions in the batch (binary case) Remove labels equal to + 'ignore_index'.""" + logits = logits.view(-1) + labels = labels.view(-1) + if ignore_index is None: + return logits, labels + valid = (labels != ignore_index) + vlogits = logits[valid] + vlabels = labels[valid] + return vlogits, vlabels + + +def flatten_probs(probs, labels, ignore_index=None): + """Flattens predictions in the batch.""" + if probs.dim() == 3: + # assumes output of a sigmoid layer + B, H, W = probs.size() + probs = probs.view(B, 1, H, W) + B, C, H, W = probs.size() + probs = probs.permute(0, 2, 3, 1).contiguous().view(-1, C) # B*H*W, C=P,C + labels = labels.view(-1) + if ignore_index is None: + return probs, labels + valid = (labels != ignore_index) + vprobs = probs[valid.nonzero().squeeze()] + vlabels = labels[valid] + return vprobs, vlabels + + +def lovasz_hinge_flat(logits, labels): + """Binary Lovasz hinge loss. + + Args: + logits (torch.Tensor): [P], logits at each prediction + (between -infty and +infty). + labels (torch.Tensor): [P], binary ground truth labels (0 or 1). + + Returns: + torch.Tensor: The calculated loss. + """ + if len(labels) == 0: + # only void pixels, the gradients should be 0 + return logits.sum() * 0. + signs = 2. * labels.float() - 1. + errors = (1. - logits * signs) + errors_sorted, perm = torch.sort(errors, dim=0, descending=True) + perm = perm.data + gt_sorted = labels[perm] + grad = lovasz_grad(gt_sorted) + loss = torch.dot(F.relu(errors_sorted), grad) + return loss + + +def lovasz_hinge(logits, + labels, + classes='present', + per_image=False, + class_weight=None, + reduction='mean', + avg_factor=None, + ignore_index=255): + """Binary Lovasz hinge loss. + + Args: + logits (torch.Tensor): [B, H, W], logits at each pixel + (between -infty and +infty). + labels (torch.Tensor): [B, H, W], binary ground truth masks (0 or 1). + classes (str | list[int], optional): Placeholder, to be consistent with + other loss. Default: None. + per_image (bool, optional): If per_image is True, compute the loss per + image instead of per batch. Default: False. + class_weight (list[float], optional): Placeholder, to be consistent + with other loss. Default: None. + reduction (str, optional): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_image is True. Default: 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. This parameter only works when per_image is True. + Default: None. + ignore_index (int | None): The label index to be ignored. Default: 255. + + Returns: + torch.Tensor: The calculated loss. + """ + if per_image: + loss = [ + lovasz_hinge_flat(*flatten_binary_logits( + logit.unsqueeze(0), label.unsqueeze(0), ignore_index)) + for logit, label in zip(logits, labels) + ] + loss = weight_reduce_loss( + torch.stack(loss), None, reduction, avg_factor) + else: + loss = lovasz_hinge_flat( + *flatten_binary_logits(logits, labels, ignore_index)) + return loss + + +def lovasz_softmax_flat(probs, labels, classes='present', class_weight=None): + """Multi-class Lovasz-Softmax loss. + + Args: + probs (torch.Tensor): [P, C], class probabilities at each prediction + (between 0 and 1). + labels (torch.Tensor): [P], ground truth labels (between 0 and C - 1). 
+ classes (str | list[int], optional): Classes chosen to calculate loss. + 'all' for all classes, 'present' for classes present in labels, or + a list of classes to average. Default: 'present'. + class_weight (list[float], optional): The weight for each class. + Default: None. + + Returns: + torch.Tensor: The calculated loss. + """ + if probs.numel() == 0: + # only void pixels, the gradients should be 0 + return probs * 0. + C = probs.size(1) + losses = [] + class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes + for c in class_to_sum: + fg = (labels == c).float() # foreground for class c + if (classes == 'present' and fg.sum() == 0): + continue + if C == 1: + if len(classes) > 1: + raise ValueError('Sigmoid output possible only with 1 class') + class_pred = probs[:, 0] + else: + class_pred = probs[:, c] + errors = (fg - class_pred).abs() + errors_sorted, perm = torch.sort(errors, 0, descending=True) + perm = perm.data + fg_sorted = fg[perm] + loss = torch.dot(errors_sorted, lovasz_grad(fg_sorted)) + if class_weight is not None: + loss *= class_weight[c] + losses.append(loss) + return torch.stack(losses).mean() + + +def lovasz_softmax(probs, + labels, + classes='present', + per_image=False, + class_weight=None, + reduction='mean', + avg_factor=None, + ignore_index=255): + """Multi-class Lovasz-Softmax loss. + + Args: + probs (torch.Tensor): [B, C, H, W], class probabilities at each + prediction (between 0 and 1). + labels (torch.Tensor): [B, H, W], ground truth labels (between 0 and + C - 1). + classes (str | list[int], optional): Classes chosen to calculate loss. + 'all' for all classes, 'present' for classes present in labels, or + a list of classes to average. Default: 'present'. + per_image (bool, optional): If per_image is True, compute the loss per + image instead of per batch. Default: False. + class_weight (list[float], optional): The weight for each class. + Default: None. + reduction (str, optional): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_image is True. Default: 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. This parameter only works when per_image is True. + Default: None. + ignore_index (int | None): The label index to be ignored. Default: 255. + + Returns: + torch.Tensor: The calculated loss. + """ + + if per_image: + loss = [ + lovasz_softmax_flat( + *flatten_probs( + prob.unsqueeze(0), label.unsqueeze(0), ignore_index), + classes=classes, + class_weight=class_weight) + for prob, label in zip(probs, labels) + ] + loss = weight_reduce_loss( + torch.stack(loss), None, reduction, avg_factor) + else: + loss = lovasz_softmax_flat( + *flatten_probs(probs, labels, ignore_index), + classes=classes, + class_weight=class_weight) + return loss + + +@MODELS.register_module() +class LovaszLoss(nn.Module): + """LovaszLoss. + + This loss is proposed in `The Lovasz-Softmax loss: A tractable surrogate + for the optimization of the intersection-over-union measure in neural + networks `_. + + Args: + loss_type (str, optional): Binary or multi-class loss. + Default: 'multi_class'. Options are "binary" and "multi_class". + classes (str | list[int], optional): Classes chosen to calculate loss. + 'all' for all classes, 'present' for classes present in labels, or + a list of classes to average. Default: 'present'. + per_image (bool, optional): If per_image is True, compute the loss per + image instead of per batch. Default: False. 
+ reduction (str, optional): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_image is True. Default: 'mean'. + class_weight (list[float] | str, optional): Weight of each class. If in + str format, read them from a file. Defaults to None. + loss_weight (float, optional): Weight of the loss. Defaults to 1.0. + loss_name (str, optional): Name of the loss item. If you want this loss + item to be included into the backward graph, `loss_` must be the + prefix of the name. Defaults to 'loss_lovasz'. + """ + + def __init__(self, + loss_type='multi_class', + classes='present', + per_image=False, + reduction='mean', + class_weight=None, + loss_weight=1.0, + loss_name='loss_lovasz'): + super().__init__() + assert loss_type in ('binary', 'multi_class'), "loss_type should be \ + 'binary' or 'multi_class'." + + if loss_type == 'binary': + self.cls_criterion = lovasz_hinge + else: + self.cls_criterion = lovasz_softmax + assert classes in ('all', 'present') or is_list_of(classes, int) + if not per_image: + assert reduction == 'none', "reduction should be 'none' when \ + per_image is False." + + self.classes = classes + self.per_image = per_image + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = get_class_weight(class_weight) + self._loss_name = loss_name + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + """Forward function.""" + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.class_weight is not None: + class_weight = cls_score.new_tensor(self.class_weight) + else: + class_weight = None + + # if multi-class loss, transform logits to probs + if self.cls_criterion == lovasz_softmax: + cls_score = F.softmax(cls_score, dim=1) + + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + self.classes, + self.per_image, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_cls + + @property + def loss_name(self): + """Loss Name. + + This function must be implemented and will return the name of this + loss function. This name will be used to combine different loss items + by simple sum operation. In addition, if you want this loss item to be + included into the backward graph, `loss_` must be the prefix of the + name. + Returns: + str: The name of this loss item. + """ + return self._loss_name diff --git a/mmseg/models/losses/tversky_loss.py b/mmseg/models/losses/tversky_loss.py new file mode 100644 index 0000000000..bfca1af666 --- /dev/null +++ b/mmseg/models/losses/tversky_loss.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+"""Modified from +https://github.com/JunMa11/SegLoss/blob/master/losses_pytorch/dice_loss.py#L333 +(Apache-2.0 License)""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import get_class_weight, weighted_loss + + +@weighted_loss +def tversky_loss(pred, + target, + valid_mask, + alpha=0.3, + beta=0.7, + smooth=1, + class_weight=None, + ignore_index=255): + assert pred.shape[0] == target.shape[0] + total_loss = 0 + num_classes = pred.shape[1] + for i in range(num_classes): + if i != ignore_index: + tversky_loss = binary_tversky_loss( + pred[:, i], + target[..., i], + valid_mask=valid_mask, + alpha=alpha, + beta=beta, + smooth=smooth) + if class_weight is not None: + tversky_loss *= class_weight[i] + total_loss += tversky_loss + return total_loss / num_classes + + +@weighted_loss +def binary_tversky_loss(pred, + target, + valid_mask, + alpha=0.3, + beta=0.7, + smooth=1): + assert pred.shape[0] == target.shape[0] + pred = pred.reshape(pred.shape[0], -1) + target = target.reshape(target.shape[0], -1) + valid_mask = valid_mask.reshape(valid_mask.shape[0], -1) + + TP = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) + FP = torch.sum(torch.mul(pred, 1 - target) * valid_mask, dim=1) + FN = torch.sum(torch.mul(1 - pred, target) * valid_mask, dim=1) + tversky = (TP + smooth) / (TP + alpha * FP + beta * FN + smooth) + + return 1 - tversky + + +@LOSSES.register_module() +class TverskyLoss(nn.Module): + """TverskyLoss. This loss is proposed in `Tversky loss function for image + segmentation using 3D fully convolutional deep networks. + + `_. + Args: + smooth (float): A float number to smooth loss, and avoid NaN error. + Default: 1. + class_weight (list[float] | str, optional): Weight of each class. If in + str format, read them from a file. Defaults to None. + loss_weight (float, optional): Weight of the loss. Default to 1.0. + ignore_index (int | None): The label index to be ignored. Default: 255. + alpha(float, in [0, 1]): + The coefficient of false positives. Default: 0.3. + beta (float, in [0, 1]): + The coefficient of false negatives. Default: 0.7. + Note: alpha + beta = 1. + loss_name (str, optional): Name of the loss item. If you want this loss + item to be included into the backward graph, `loss_` must be the + prefix of the name. Defaults to 'loss_tversky'. + """ + + def __init__(self, + smooth=1, + class_weight=None, + loss_weight=1.0, + ignore_index=255, + alpha=0.3, + beta=0.7, + loss_name='loss_tversky'): + super().__init__() + self.smooth = smooth + self.class_weight = get_class_weight(class_weight) + self.loss_weight = loss_weight + self.ignore_index = ignore_index + assert (alpha + beta == 1.0), 'Sum of alpha and beta but be 1.0!' + self.alpha = alpha + self.beta = beta + self._loss_name = loss_name + + def forward(self, pred, target, **kwargs): + if self.class_weight is not None: + class_weight = pred.new_tensor(self.class_weight) + else: + class_weight = None + + pred = F.softmax(pred, dim=1) + num_classes = pred.shape[1] + one_hot_target = F.one_hot( + torch.clamp(target.long(), 0, num_classes - 1), + num_classes=num_classes) + valid_mask = (target != self.ignore_index).long() + + loss = self.loss_weight * tversky_loss( + pred, + one_hot_target, + valid_mask=valid_mask, + alpha=self.alpha, + beta=self.beta, + smooth=self.smooth, + class_weight=class_weight, + ignore_index=self.ignore_index) + return loss + + @property + def loss_name(self): + """Loss Name. 
+ + This function must be implemented and will return the name of this + loss function. This name will be used to combine different loss items + by simple sum operation. In addition, if you want this loss item to be + included into the backward graph, `loss_` must be the prefix of the + name. + Returns: + str: The name of this loss item. + """ + return self._loss_name diff --git a/mmseg/models/losses/utils.py b/mmseg/models/losses/utils.py index a1153fa9f3..f74efcf35c 100644 --- a/mmseg/models/losses/utils.py +++ b/mmseg/models/losses/utils.py @@ -1,6 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. import functools +import numpy as np +import torch import torch.nn.functional as F +from mmengine.fileio import load + + +def get_class_weight(class_weight): + """Get class weight for loss function. + + Args: + class_weight (list[float] | str | None): If class_weight is a str, + take it as a file name and read from it. + """ + if isinstance(class_weight, str): + # take it as a file path + if class_weight.endswith('.npy'): + class_weight = np.load(class_weight) + else: + # pkl, json or yaml + class_weight = load(class_weight) + + return class_weight def reduce_loss(loss, reduction): @@ -30,7 +52,7 @@ def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. - avg_factor (float): Avarage factor when computing the mean of losses. + avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. @@ -48,7 +70,10 @@ def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): else: # if reduction is mean, then average the loss by avg_factor if reduction == 'mean': - loss = loss.sum() / avg_factor + # Avoid causing ZeroDivisionError when avg_factor is 0.0, + # i.e., all labels of an image belong to ignore index. + eps = torch.finfo(torch.float32).eps + loss = loss.sum() / (avg_factor + eps) # if reduction is 'none', then do nothing, otherwise raise an error elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') diff --git a/mmseg/models/necks/__init__.py b/mmseg/models/necks/__init__.py new file mode 100644 index 0000000000..ff03186a92 --- /dev/null +++ b/mmseg/models/necks/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .featurepyramid import Feature2Pyramid +from .fpn import FPN +from .ic_neck import ICNeck +from .jpu import JPU +from .mla_neck import MLANeck +from .multilevel_neck import MultiLevelNeck + +__all__ = [ + 'FPN', 'MultiLevelNeck', 'MLANeck', 'ICNeck', 'JPU', 'Feature2Pyramid' +] diff --git a/mmseg/models/necks/featurepyramid.py b/mmseg/models/necks/featurepyramid.py new file mode 100644 index 0000000000..dc1250d39d --- /dev/null +++ b/mmseg/models/necks/featurepyramid.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import build_norm_layer + +from mmseg.registry import MODELS + + +@MODELS.register_module() +class Feature2Pyramid(nn.Module): + """Feature2Pyramid. + + A neck structure connect ViT backbone and decoder_heads. + + Args: + embed_dims (int): Embedding dimension. + rescales (list[float]): Different sampling multiples were + used to obtain pyramid features. Default: [4, 2, 1, 0.5]. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='SyncBN', requires_grad=True). 
+ """ + + def __init__(self, + embed_dim, + rescales=[4, 2, 1, 0.5], + norm_cfg=dict(type='SyncBN', requires_grad=True)): + super().__init__() + self.rescales = rescales + self.upsample_4x = None + for k in self.rescales: + if k == 4: + self.upsample_4x = nn.Sequential( + nn.ConvTranspose2d( + embed_dim, embed_dim, kernel_size=2, stride=2), + build_norm_layer(norm_cfg, embed_dim)[1], + nn.GELU(), + nn.ConvTranspose2d( + embed_dim, embed_dim, kernel_size=2, stride=2), + ) + elif k == 2: + self.upsample_2x = nn.Sequential( + nn.ConvTranspose2d( + embed_dim, embed_dim, kernel_size=2, stride=2)) + elif k == 1: + self.identity = nn.Identity() + elif k == 0.5: + self.downsample_2x = nn.MaxPool2d(kernel_size=2, stride=2) + elif k == 0.25: + self.downsample_4x = nn.MaxPool2d(kernel_size=4, stride=4) + else: + raise KeyError(f'invalid {k} for feature2pyramid') + + def forward(self, inputs): + assert len(inputs) == len(self.rescales) + outputs = [] + if self.upsample_4x is not None: + ops = [ + self.upsample_4x, self.upsample_2x, self.identity, + self.downsample_2x + ] + else: + ops = [ + self.upsample_2x, self.identity, self.downsample_2x, + self.downsample_4x + ] + for i in range(len(inputs)): + outputs.append(ops[i](inputs[i])) + return tuple(outputs) diff --git a/mmseg/models/necks/fpn.py b/mmseg/models/necks/fpn.py new file mode 100644 index 0000000000..ddab74c00a --- /dev/null +++ b/mmseg/models/necks/fpn.py @@ -0,0 +1,212 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmseg.registry import MODELS +from ..utils import resize + + +@MODELS.register_module() +class FPN(BaseModule): + """Feature Pyramid Network. + + This neck is the implementation of `Feature Pyramid Networks for Object + Detection `_. + + Args: + in_channels (list[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + num_outs (int): Number of output scales. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. + add_extra_convs (bool | str): If bool, it decides whether to add conv + layers on top of the original feature maps. Default to False. + If True, its actual mode is specified by `extra_convs_on_inputs`. + If str, it specifies the source feature map of the extra convs. + Only the following options are allowed + + - 'on_input': Last feat map of neck inputs (i.e. backbone feature). + - 'on_lateral': Last feature map after lateral convs. + - 'on_output': The last output feature map after fpn convs. + extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs + on the original feature from the backbone. If True, + it is equivalent to `add_extra_convs='on_input'`. If False, it is + equivalent to set `add_extra_convs='on_output'`. Default to True. + relu_before_extra_convs (bool): Whether to apply relu before the extra + conv. Default: False. + no_norm_on_lateral (bool): Whether to apply norm on lateral. + Default: False. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer in ConvModule. + Default: None. + upsample_cfg (dict): Config dict for interpolate layer. 
+ Default: dict(mode='nearest'). + init_cfg (dict or list[dict], optional): Initialization config dict. + + Example: + >>> import torch + >>> in_channels = [2, 3, 5, 7] + >>> scales = [340, 170, 84, 43] + >>> inputs = [torch.rand(1, c, s, s) + ... for c, s in zip(in_channels, scales)] + >>> self = FPN(in_channels, 11, len(in_channels)).eval() + >>> outputs = self.forward(inputs) + >>> for i in range(len(outputs)): + ... print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 11, 340, 340]) + outputs[1].shape = torch.Size([1, 11, 170, 170]) + outputs[2].shape = torch.Size([1, 11, 84, 84]) + outputs[3].shape = torch.Size([1, 11, 43, 43]) + """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + extra_convs_on_inputs=False, + relu_before_extra_convs=False, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + upsample_cfg=dict(mode='nearest'), + init_cfg=dict( + type='Xavier', layer='Conv2d', distribution='uniform')): + super().__init__(init_cfg) + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.relu_before_extra_convs = relu_before_extra_convs + self.no_norm_on_lateral = no_norm_on_lateral + self.fp16_enabled = False + self.upsample_cfg = upsample_cfg.copy() + + if end_level == -1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level < inputs, no extra level is allowed + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + assert num_outs == end_level - start_level + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + assert isinstance(add_extra_convs, (str, bool)) + if isinstance(add_extra_convs, str): + # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' + assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') + elif add_extra_convs: # True + if extra_convs_on_inputs: + # For compatibility with previous release + # TODO: deprecate `extra_convs_on_inputs` + self.add_extra_convs = 'on_input' + else: + self.add_extra_convs = 'on_output' + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, + act_cfg=act_cfg, + inplace=False) + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if self.add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.add_extra_convs == 'on_input': + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + def forward(self, inputs): + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, 
lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + # In some cases, fixing `scale factor` (e.g. 2) is preferred, but + # it cannot co-exist with `size` in `F.interpolate`. + if 'scale_factor' in self.upsample_cfg: + laterals[i - 1] = laterals[i - 1] + resize( + laterals[i], **self.upsample_cfg) + else: + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] = laterals[i - 1] + resize( + laterals[i], size=prev_shape, **self.upsample_cfg) + + # build outputs + # part 1: from original levels + outs = [ + self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) + ] + # part 2: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if self.add_extra_convs == 'on_input': + extra_source = inputs[self.backbone_end_level - 1] + elif self.add_extra_convs == 'on_lateral': + extra_source = laterals[-1] + elif self.add_extra_convs == 'on_output': + extra_source = outs[-1] + else: + raise NotImplementedError + outs.append(self.fpn_convs[used_backbone_levels](extra_source)) + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return tuple(outs) diff --git a/mmseg/models/necks/ic_neck.py b/mmseg/models/necks/ic_neck.py new file mode 100644 index 0000000000..9763541e09 --- /dev/null +++ b/mmseg/models/necks/ic_neck.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmseg.registry import MODELS +from ..utils import resize + + +class CascadeFeatureFusion(BaseModule): + """Cascade Feature Fusion Unit in ICNet. + + Args: + low_channels (int): The number of input channels for + low resolution feature map. + high_channels (int): The number of input channels for + high resolution feature map. + out_channels (int): The number of output channels. + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN'). + act_cfg (dict): Dictionary to construct and config act layer. + Default: dict(type='ReLU'). + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + + Returns: + x (Tensor): The output tensor of shape (N, out_channels, H, W). + x_low (Tensor): The output tensor of shape (N, out_channels, H, W) + for Cascade Label Guidance in auxiliary heads. 
+ """ + + def __init__(self, + low_channels, + high_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.align_corners = align_corners + self.conv_low = ConvModule( + low_channels, + out_channels, + 3, + padding=2, + dilation=2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv_high = ConvModule( + high_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x_low, x_high): + x_low = resize( + x_low, + size=x_high.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + # Note: Different from original paper, `x_low` is underwent + # `self.conv_low` rather than another 1x1 conv classifier + # before being used for auxiliary head. + x_low = self.conv_low(x_low) + x_high = self.conv_high(x_high) + x = x_low + x_high + x = F.relu(x, inplace=True) + return x, x_low + + +@MODELS.register_module() +class ICNeck(BaseModule): + """ICNet for Real-Time Semantic Segmentation on High-Resolution Images. + + This head is the implementation of `ICHead + `_. + + Args: + in_channels (int): The number of input image channels. Default: 3. + out_channels (int): The numbers of output feature channels. + Default: 128. + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN'). + act_cfg (dict): Dictionary to construct and config act layer. + Default: dict(type='ReLU'). + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + in_channels=(64, 256, 256), + out_channels=128, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + assert len(in_channels) == 3, 'Length of input channels \ + must be 3!' + + self.in_channels = in_channels + self.out_channels = out_channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.align_corners = align_corners + self.cff_24 = CascadeFeatureFusion( + self.in_channels[2], + self.in_channels[1], + self.out_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + + self.cff_12 = CascadeFeatureFusion( + self.out_channels, + self.in_channels[0], + self.out_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + + def forward(self, inputs): + assert len(inputs) == 3, 'Length of input feature \ + maps must be 3!' + + x_sub1, x_sub2, x_sub4 = inputs + x_cff_24, x_24 = self.cff_24(x_sub4, x_sub2) + x_cff_12, x_12 = self.cff_12(x_cff_24, x_sub1) + # Note: `x_cff_12` is used for decode_head, + # `x_24` and `x_12` are used for auxiliary head. + return x_24, x_12, x_cff_12 diff --git a/mmseg/models/necks/jpu.py b/mmseg/models/necks/jpu.py new file mode 100644 index 0000000000..3ea0fe2183 --- /dev/null +++ b/mmseg/models/necks/jpu.py @@ -0,0 +1,131 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmengine.model import BaseModule + +from mmseg.registry import MODELS +from ..utils import resize + + +@MODELS.register_module() +class JPU(BaseModule): + """FastFCN: Rethinking Dilated Convolution in the Backbone + for Semantic Segmentation. + + This Joint Pyramid Upsampling (JPU) neck is the implementation of + `FastFCN `_. + + Args: + in_channels (Tuple[int], optional): The number of input channels + for each convolution operations before upsampling. + Default: (512, 1024, 2048). + mid_channels (int): The number of output channels of JPU. + Default: 512. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. + dilations (tuple[int]): Dilation rate of each Depthwise + Separable ConvModule. Default: (1, 2, 4, 8). + align_corners (bool, optional): The align_corners argument of + resize operation. Default: False. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + in_channels=(512, 1024, 2048), + mid_channels=512, + start_level=0, + end_level=-1, + dilations=(1, 2, 4, 8), + align_corners=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + assert isinstance(in_channels, tuple) + assert isinstance(dilations, tuple) + self.in_channels = in_channels + self.mid_channels = mid_channels + self.start_level = start_level + self.num_ins = len(in_channels) + if end_level == -1: + self.backbone_end_level = self.num_ins + else: + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + + self.dilations = dilations + self.align_corners = align_corners + + self.conv_layers = nn.ModuleList() + self.dilation_layers = nn.ModuleList() + for i in range(self.start_level, self.backbone_end_level): + conv_layer = nn.Sequential( + ConvModule( + self.in_channels[i], + self.mid_channels, + kernel_size=3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.conv_layers.append(conv_layer) + for i in range(len(dilations)): + dilation_layer = nn.Sequential( + DepthwiseSeparableConvModule( + in_channels=(self.backbone_end_level - self.start_level) * + self.mid_channels, + out_channels=self.mid_channels, + kernel_size=3, + stride=1, + padding=dilations[i], + dilation=dilations[i], + dw_norm_cfg=norm_cfg, + dw_act_cfg=None, + pw_norm_cfg=norm_cfg, + pw_act_cfg=act_cfg)) + self.dilation_layers.append(dilation_layer) + + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels), 'Length of inputs must \ + be the same with self.in_channels!' 
+ + feats = [ + self.conv_layers[i - self.start_level](inputs[i]) + for i in range(self.start_level, self.backbone_end_level) + ] + + h, w = feats[0].shape[2:] + for i in range(1, len(feats)): + feats[i] = resize( + feats[i], + size=(h, w), + mode='bilinear', + align_corners=self.align_corners) + + feat = torch.cat(feats, dim=1) + concat_feat = torch.cat([ + self.dilation_layers[i](feat) for i in range(len(self.dilations)) + ], + dim=1) + + outs = [] + + # Default: outs[2] is the output of JPU for decoder head, outs[1] is + # the feature map from backbone for auxiliary head. Additionally, + # outs[0] can also be used for auxiliary head. + for i in range(self.start_level, self.backbone_end_level - 1): + outs.append(inputs[i]) + outs.append(concat_feat) + return tuple(outs) diff --git a/mmseg/models/necks/mla_neck.py b/mmseg/models/necks/mla_neck.py new file mode 100644 index 0000000000..db250aefbf --- /dev/null +++ b/mmseg/models/necks/mla_neck.py @@ -0,0 +1,118 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule, build_norm_layer + +from mmseg.registry import MODELS + + +class MLAModule(nn.Module): + + def __init__(self, + in_channels=[1024, 1024, 1024, 1024], + out_channels=256, + norm_cfg=None, + act_cfg=None): + super().__init__() + self.channel_proj = nn.ModuleList() + for i in range(len(in_channels)): + self.channel_proj.append( + ConvModule( + in_channels=in_channels[i], + out_channels=out_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.feat_extract = nn.ModuleList() + for i in range(len(in_channels)): + self.feat_extract.append( + ConvModule( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, inputs): + + # feat_list -> [p2, p3, p4, p5] + feat_list = [] + for x, conv in zip(inputs, self.channel_proj): + feat_list.append(conv(x)) + + # feat_list -> [p5, p4, p3, p2] + # mid_list -> [m5, m4, m3, m2] + feat_list = feat_list[::-1] + mid_list = [] + for feat in feat_list: + if len(mid_list) == 0: + mid_list.append(feat) + else: + mid_list.append(mid_list[-1] + feat) + + # mid_list -> [m5, m4, m3, m2] + # out_list -> [o2, o3, o4, o5] + out_list = [] + for mid, conv in zip(mid_list, self.feat_extract): + out_list.append(conv(mid)) + + return tuple(out_list) + + +@MODELS.register_module() +class MLANeck(nn.Module): + """Multi-level Feature Aggregation. + + This neck is `The Multi-level Feature Aggregation construction of + SETR `_. + + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + norm_layer (dict): Config dict for input normalization. + Default: norm_layer=dict(type='LN', eps=1e-6, requires_grad=True). + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer in ConvModule. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + norm_layer=dict(type='LN', eps=1e-6, requires_grad=True), + norm_cfg=None, + act_cfg=None): + super().__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + + # In order to build general vision transformer backbone, we have to + # move MLA to neck. 
+ self.norm = nn.ModuleList([ + build_norm_layer(norm_layer, in_channels[i])[1] + for i in range(len(in_channels)) + ]) + + self.mla = MLAModule( + in_channels=in_channels, + out_channels=out_channels, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, inputs): + assert len(inputs) == len(self.in_channels) + + # Convert from nchw to nlc + outs = [] + for i in range(len(inputs)): + x = inputs[i] + n, c, h, w = x.shape + x = x.reshape(n, c, h * w).transpose(2, 1).contiguous() + x = self.norm[i](x) + x = x.transpose(1, 2).reshape(n, c, h, w).contiguous() + outs.append(x) + + outs = self.mla(outs) + return tuple(outs) diff --git a/mmseg/models/necks/multilevel_neck.py b/mmseg/models/necks/multilevel_neck.py new file mode 100644 index 0000000000..c997125f24 --- /dev/null +++ b/mmseg/models/necks/multilevel_neck.py @@ -0,0 +1,79 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.model.weight_init import xavier_init + +from mmseg.registry import MODELS +from ..utils import resize + + +@MODELS.register_module() +class MultiLevelNeck(nn.Module): + """MultiLevelNeck. + + A neck structure connect vit backbone and decoder_heads. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + scales (List[float]): Scale factors for each input feature map. + Default: [0.5, 1, 2, 4] + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer in ConvModule. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + scales=[0.5, 1, 2, 4], + norm_cfg=None, + act_cfg=None): + super().__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.scales = scales + self.num_outs = len(scales) + self.lateral_convs = nn.ModuleList() + self.convs = nn.ModuleList() + for in_channel in in_channels: + self.lateral_convs.append( + ConvModule( + in_channel, + out_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + for _ in range(self.num_outs): + self.convs.append( + ConvModule( + out_channels, + out_channels, + kernel_size=3, + padding=1, + stride=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + # default init_weights for conv(msra) and norm in ConvModule + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m, distribution='uniform') + + def forward(self, inputs): + assert len(inputs) == len(self.in_channels) + inputs = [ + lateral_conv(inputs[i]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + # for len(inputs) not equal to self.num_outs + if len(inputs) == 1: + inputs = [inputs[0] for _ in range(self.num_outs)] + outs = [] + for i in range(self.num_outs): + x_resize = resize( + inputs[i], scale_factor=self.scales[i], mode='bilinear') + outs.append(self.convs[i](x_resize)) + return tuple(outs) diff --git a/mmseg/models/segmentors/__init__.py b/mmseg/models/segmentors/__init__.py index 3f600ecb9f..387c858bd7 100644 --- a/mmseg/models/segmentors/__init__.py +++ b/mmseg/models/segmentors/__init__.py @@ -1,4 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
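Before the segmentor refactor below, a minimal usage sketch of the ``MultiLevelNeck`` defined above (a hedged illustration, not part of the patch; channel counts and input sizes are arbitrary):

    >>> import torch
    >>> from mmseg.models.necks.multilevel_neck import MultiLevelNeck
    >>> neck = MultiLevelNeck(
    ...     in_channels=[256, 256, 256, 256], out_channels=256, scales=[0.5, 1, 2, 4])
    >>> inputs = [torch.rand(1, 256, 32, 32) for _ in range(4)]
    >>> outs = neck(inputs)
    >>> [tuple(o.shape[2:]) for o in outs]   # spatial sizes follow `scales`
    [(16, 16), (32, 32), (64, 64), (128, 128)]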
+from .base import BaseSegmentor from .cascade_encoder_decoder import CascadeEncoderDecoder from .encoder_decoder import EncoderDecoder -__all__ = ['EncoderDecoder', 'CascadeEncoderDecoder'] +__all__ = ['BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder'] diff --git a/mmseg/models/segmentors/base.py b/mmseg/models/segmentors/base.py index 4f31127210..1625addf6c 100644 --- a/mmseg/models/segmentors/base.py +++ b/mmseg/models/segmentors/base.py @@ -1,267 +1,195 @@ -import logging -import warnings +# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod -from collections import OrderedDict +from typing import List, Tuple -import mmcv -import numpy as np -import torch -import torch.distributed as dist -import torch.nn as nn +from mmengine.model import BaseModel +from mmengine.structures import PixelData +from torch import Tensor +from mmseg.structures import SegDataSample +from mmseg.utils import (ForwardResults, OptConfigType, OptMultiConfig, + OptSampleList, SampleList) +from ..utils import resize -class BaseSegmentor(nn.Module): - """Base class for segmentors.""" - __metaclass__ = ABCMeta +class BaseSegmentor(BaseModel, metaclass=ABCMeta): + """Base class for segmentors. - def __init__(self): - super(BaseSegmentor, self).__init__() + Args: + data_preprocessor (dict, optional): Model preprocessing config + for processing the input data. it usually includes + ``to_rgb``, ``pad_size_divisor``, ``pad_val``, + ``mean`` and ``std``. Default to None. + init_cfg (dict, optional): the config to control the + initialization. Default to None. + """ + + def __init__(self, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) @property - def with_neck(self): + def with_neck(self) -> bool: """bool: whether the segmentor has neck""" return hasattr(self, 'neck') and self.neck is not None @property - def with_auxiliary_head(self): + def with_auxiliary_head(self) -> bool: """bool: whether the segmentor has auxiliary head""" return hasattr(self, 'auxiliary_head') and self.auxiliary_head is not None @property - def with_decode_head(self): + def with_decode_head(self) -> bool: """bool: whether the segmentor has decode head""" return hasattr(self, 'decode_head') and self.decode_head is not None @abstractmethod - def extract_feat(self, imgs): + def extract_feat(self, inputs: Tensor) -> bool: """Placeholder for extract features from images.""" pass @abstractmethod - def encode_decode(self, img, img_metas): + def encode_decode(self, inputs: Tensor, batch_data_samples: SampleList): """Placeholder for encode images with backbone and decode into a semantic segmentation map of the same size as input.""" pass - @abstractmethod - def forward_train(self, imgs, img_metas, **kwargs): - """Placeholder for Forward function for training.""" - pass + def forward(self, + inputs: Tensor, + data_samples: OptSampleList = None, + mode: str = 'tensor') -> ForwardResults: + """The unified entry for a forward process in both training and test. - @abstractmethod - def simple_test(self, img, img_meta, **kwargs): - """Placeholder for single image test.""" - pass + The method should accept three modes: "tensor", "predict" and "loss": - @abstractmethod - def aug_test(self, imgs, img_metas, **kwargs): - """Placeholder for augmentation test.""" - pass - - def init_weights(self, pretrained=None): - """Initialize the weights in segmentor. 
+ - "tensor": Forward the whole network and return tensor or tuple of + tensor without any post-processing, same as a common nn.Module. + - "predict": Forward and return the predictions, which are fully + processed to a list of :obj:`SegDataSample`. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - if pretrained is not None: - logger = logging.getLogger() - logger.info(f'load model from: {pretrained}') + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. - def forward_test(self, imgs, img_metas, **kwargs): - """ Args: - imgs (List[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains all images in the batch. - img_metas (List[List[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. - """ - for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: - if not isinstance(var, list): - raise TypeError(f'{name} must be a list, but got ' - f'{type(var)}') + inputs (torch.Tensor): The input tensor with shape (N, C, ...) in + general. + data_samples (list[:obj:`SegDataSample`]): The seg data samples. + It usually includes information such as `metainfo` and + `gt_sem_seg`. Default to None. + mode (str): Return what kind of value. Defaults to 'tensor'. - num_augs = len(imgs) - if num_augs != len(img_metas): - raise ValueError(f'num of augmentations ({len(imgs)}) != ' - f'num of image meta ({len(img_metas)})') - # all images in the same aug batch all of the same ori_shape and pad - # shape - for img_meta in img_metas: - ori_shapes = [_['ori_shape'] for _ in img_meta] - assert all(shape == ori_shapes[0] for shape in ori_shapes) - img_shapes = [_['img_shape'] for _ in img_meta] - assert all(shape == img_shapes[0] for shape in img_shapes) - pad_shapes = [_['pad_shape'] for _ in img_meta] - assert all(shape == pad_shapes[0] for shape in pad_shapes) - - if num_augs == 1: - return self.simple_test(imgs[0], img_metas[0], **kwargs) - else: - return self.aug_test(imgs, img_metas, **kwargs) - - def forward(self, img, img_metas, return_loss=True, **kwargs): - """Calls either :func:`forward_train` or :func:`forward_test` depending - on whether ``return_loss`` is ``True``. + Returns: + The return type depends on ``mode``. - Note this setting will change the expected inputs. When - ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor - and List[dict]), and when ``resturn_loss=False``, img and img_meta - should be double nested (i.e. List[Tensor], List[List[dict]]), with - the outer list indicating test time augmentations. + - If ``mode="tensor"``, return a tensor or a tuple of tensor. + - If ``mode="predict"``, return a list of :obj:`DetDataSample`. + - If ``mode="loss"``, return a dict of tensor. """ - if return_loss: - return self.forward_train(img, img_metas, **kwargs) + if mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + return self.predict(inputs, data_samples) + elif mode == 'tensor': + return self._forward(inputs, data_samples) else: - return self.forward_test(img, img_metas, **kwargs) - - def train_step(self, data_batch, optimizer, **kwargs): - """The iteration step during training. 
- - This method defines an iteration step during training, except for the - back propagation and optimizer updating, which are done in an optimizer - hook. Note that in some complicated cases or models, the whole process - including back propagation and optimizer updating is also defined in - this method, such as GAN. + raise RuntimeError(f'Invalid mode "{mode}". ' + 'Only supports loss, predict and tensor mode') - Args: - data (dict): The output of dataloader. - optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of - runner is passed to ``train_step()``. This argument is unused - and reserved. - - Returns: - dict: It should contain at least 3 keys: ``loss``, ``log_vars``, - ``num_samples``. - ``loss`` is a tensor for back propagation, which can be a - weighted sum of multiple losses. - ``log_vars`` contains all the variables to be sent to the - logger. - ``num_samples`` indicates the batch size (when the model is - DDP, it means the batch size on each GPU), which is used for - averaging the logs. - """ - losses = self.forward_train(**data_batch, **kwargs) - loss, log_vars = self._parse_losses(losses) - - outputs = dict( - loss=loss, - log_vars=log_vars, - num_samples=len(data_batch['img'].data)) + @abstractmethod + def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + pass - return outputs + @abstractmethod + def predict(self, + inputs: Tensor, + data_samples: OptSampleList = None) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing.""" + pass - def val_step(self, data_batch, **kwargs): - """The iteration step during validation. + @abstractmethod + def _forward(self, + inputs: Tensor, + data_samples: OptSampleList = None) -> Tuple[List[Tensor]]: + """Network forward process. - This method shares the same signature as :func:`train_step`, but used - during val epochs. Note that the evaluation after training epochs is - not implemented with this method, but an evaluation hook. + Usually includes backbone, neck and head forward without any post- + processing. """ - output = self.forward_test(**data_batch, **kwargs) - return output + pass - @staticmethod - def _parse_losses(losses): - """Parse the raw outputs (losses) of the network. + @abstractmethod + def aug_test(self, batch_inputs, batch_img_metas): + """Placeholder for augmentation test.""" + pass + def postprocess_result(self, + seg_logits: Tensor, + data_samples: OptSampleList = None) -> list: + """ Convert results list to `SegDataSample`. Args: - losses (dict): Raw output of the network, which usually contain - losses and other necessary information. - + seg_logits (Tensor): The segmentation results, seg_logits from + model of each input image. + data_samples (list[:obj:`SegDataSample`]): The seg data samples. + It usually includes information such as `metainfo` and + `gt_sem_seg`. Default to None. Returns: - tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor - which may be a weighted sum of all losses, log_vars contains - all the variables to be sent to the logger. 
- """ - log_vars = OrderedDict() - for loss_name, loss_value in losses.items(): - if isinstance(loss_value, torch.Tensor): - log_vars[loss_name] = loss_value.mean() - elif isinstance(loss_value, list): - log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) - else: - raise TypeError( - f'{loss_name} is not a tensor or list of tensors') - - loss = sum(_value for _key, _value in log_vars.items() - if 'loss' in _key) + list[:obj:`SegDataSample`]: Segmentation results of the + input images. Each SegDataSample usually contain: - log_vars['loss'] = loss - for loss_name, loss_value in log_vars.items(): - # reduce loss when distributed training - if dist.is_available() and dist.is_initialized(): - loss_value = loss_value.data.clone() - dist.all_reduce(loss_value.div_(dist.get_world_size())) - log_vars[loss_name] = loss_value.item() - - return loss, log_vars - - def show_result(self, - img, - result, - palette=None, - win_name='', - show=False, - wait_time=0, - out_file=None): - """Draw `result` over `img`. - - Args: - img (str or Tensor): The image to be displayed. - result (Tensor): The semantic segmentation results to draw over - `img`. - palette (list[list[int]]] | np.ndarray | None): The palette of - segmentation map. If None is given, random palette will be - generated. Default: None - win_name (str): The window name. - wait_time (int): Value of waitKey param. - Default: 0. - show (bool): Whether to show the image. - Default: False. - out_file (str or None): The filename to write the image. - Default: None. - - Returns: - img (Tensor): Only if not `show` or `out_file` + - ``pred_sem_seg``(PixelData): Prediction of semantic segmentation. + - ``seg_logits``(PixelData): Predicted logits of semantic + segmentation before normalization. """ - img = mmcv.imread(img) - img = img.copy() - seg = result[0] - if palette is None: - if self.PALETTE is None: - palette = np.random.randint( - 0, 255, size=(len(self.CLASSES), 3)) - else: - palette = self.PALETTE - else: - palette = np.array(palette) - assert palette.shape[0] == len(self.CLASSES) - assert palette.shape[1] == 3 - assert len(palette.shape) == 2 - color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) - for label, color in enumerate(palette): - color_seg[seg == label, :] = color - # convert to BGR - color_seg = color_seg[..., ::-1] - - img = img * 0.5 + color_seg * 0.5 - img = img.astype(np.uint8) - # if out_file specified, do not show image in window - if out_file is not None: - show = False + batch_size, C, H, W = seg_logits.shape - if show: - mmcv.imshow(img, win_name, wait_time) - if out_file is not None: - mmcv.imwrite(img, out_file) + if data_samples is None: + data_samples = [SegDataSample() for _ in range(batch_size)] + only_prediction = True + else: + only_prediction = False + + for i in range(batch_size): + if not only_prediction: + img_meta = data_samples[i].metainfo + # remove padding area + if 'img_padding_size' not in img_meta: + padding_size = img_meta.get('padding_size', [0] * 4) + else: + padding_size = img_meta['img_padding_size'] + padding_left, padding_right, padding_top, padding_bottom =\ + padding_size + # i_seg_logits shape is 1, C, H, W after remove padding + i_seg_logits = seg_logits[i:i + 1, :, + padding_top:H - padding_bottom, + padding_left:W - padding_right] + + # resize as original shape + i_seg_logits = resize( + i_seg_logits, + size=img_meta['ori_shape'], + mode='bilinear', + align_corners=self.align_corners, + warning=False).squeeze(0) + else: + i_seg_logits = seg_logits[i] - if not (show 
or out_file):
-            warnings.warn('show==False and out_file is not specified, only '
-                          'result image will be returned')
-            return img
+            if C > 1:
+                i_seg_pred = i_seg_logits.argmax(dim=0, keepdim=True)
+            else:
+                i_seg_pred = (i_seg_logits >
+                              self.decode_head.threshold).to(i_seg_logits)
+            data_samples[i].set_data({
+                'seg_logits':
+                PixelData(**{'data': i_seg_logits}),
+                'pred_sem_seg':
+                PixelData(**{'data': i_seg_pred})
+            })
+
+        return data_samples
diff --git a/mmseg/models/segmentors/cascade_encoder_decoder.py b/mmseg/models/segmentors/cascade_encoder_decoder.py
index 220ab2bb36..f76e66f931 100644
--- a/mmseg/models/segmentors/cascade_encoder_decoder.py
+++ b/mmseg/models/segmentors/cascade_encoder_decoder.py
@@ -1,98 +1,137 @@
-from torch import nn
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import List, Optional
 
-from mmseg.core import add_prefix
-from mmseg.ops import resize
-from .. import builder
-from ..builder import SEGMENTORS
+from torch import Tensor, nn
+
+from mmseg.registry import MODELS
+from mmseg.utils import (ConfigType, OptConfigType, OptMultiConfig,
+                         OptSampleList, SampleList, add_prefix)
 from .encoder_decoder import EncoderDecoder
 
 
-@SEGMENTORS.register_module()
+@MODELS.register_module()
 class CascadeEncoderDecoder(EncoderDecoder):
     """Cascade Encoder Decoder segmentors.
 
     CascadeEncoderDecoder is almost the same as EncoderDecoder, while the
     decoders of CascadeEncoderDecoder are cascaded. The output of the previous
     decode_head will be the input of the next decode_head.
+
+    Args:
+
+        num_stages (int): How many stages will be cascaded.
+        backbone (ConfigType): The config for the backbone of segmentor.
+        decode_head (ConfigType): The config for the decode head of segmentor.
+        neck (OptConfigType): The config for the neck of segmentor.
+            Defaults to None.
+        auxiliary_head (OptConfigType): The config for the auxiliary head of
+            segmentor. Defaults to None.
+        train_cfg (OptConfigType): The config for training. Defaults to None.
+        test_cfg (OptConfigType): The config for testing. Defaults to None.
+        data_preprocessor (dict, optional): The pre-process config of
+            :class:`BaseDataPreprocessor`.
+        pretrained (str, optional): The path for pretrained model.
+            Defaults to None.
+        init_cfg (dict, optional): The weight initialized config for
+            :class:`BaseModule`.
""" def __init__(self, - num_stages, - backbone, - decode_head, - neck=None, - auxiliary_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None): + num_stages: int, + backbone: ConfigType, + decode_head: ConfigType, + neck: OptConfigType = None, + auxiliary_head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + pretrained: Optional[str] = None, + init_cfg: OptMultiConfig = None): self.num_stages = num_stages - super(CascadeEncoderDecoder, self).__init__( + super().__init__( backbone=backbone, decode_head=decode_head, neck=neck, auxiliary_head=auxiliary_head, train_cfg=train_cfg, test_cfg=test_cfg, - pretrained=pretrained) + data_preprocessor=data_preprocessor, + pretrained=pretrained, + init_cfg=init_cfg) - def _init_decode_head(self, decode_head): + def _init_decode_head(self, decode_head: ConfigType) -> None: """Initialize ``decode_head``""" assert isinstance(decode_head, list) assert len(decode_head) == self.num_stages self.decode_head = nn.ModuleList() for i in range(self.num_stages): - self.decode_head.append(builder.build_head(decode_head[i])) + self.decode_head.append(MODELS.build(decode_head[i])) self.align_corners = self.decode_head[-1].align_corners self.num_classes = self.decode_head[-1].num_classes - def init_weights(self, pretrained=None): - """Initialize the weights in backbone and heads. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - self.backbone.init_weights(pretrained=pretrained) - for i in range(self.num_stages): - self.decode_head[i].init_weights() - if self.with_auxiliary_head: - if isinstance(self.auxiliary_head, nn.ModuleList): - for aux_head in self.auxiliary_head: - aux_head.init_weights() - else: - self.auxiliary_head.init_weights() - - def encode_decode(self, img, img_metas): + def encode_decode(self, inputs: Tensor, + batch_img_metas: List[dict]) -> List[Tensor]: """Encode images with backbone and decode into a semantic segmentation map of the same size as input.""" - x = self.extract_feat(img) - out = self.decode_head[0].forward_test(x, img_metas, self.test_cfg) - for i in range(1, self.num_stages): - out = self.decode_head[i].forward_test(x, out, img_metas, - self.test_cfg) - out = resize( - input=out, - size=img.shape[2:], - mode='bilinear', - align_corners=self.align_corners) - return out + x = self.extract_feat(inputs) + out = self.decode_head[0].forward(x) + for i in range(1, self.num_stages - 1): + out = self.decode_head[i].forward(x, out) + seg_logits_list = self.decode_head[-1].predict(x, out, batch_img_metas, + self.test_cfg) - def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg): + return seg_logits_list + + def _decode_head_forward_train(self, inputs: Tensor, + data_samples: SampleList) -> dict: """Run forward function and calculate loss for decode head in training.""" losses = dict() - loss_decode = self.decode_head[0].forward_train( - x, img_metas, gt_semantic_seg, self.train_cfg) + loss_decode = self.decode_head[0].loss(inputs, data_samples, + self.train_cfg) losses.update(add_prefix(loss_decode, 'decode_0')) + # get batch_img_metas + batch_size = len(data_samples) + batch_img_metas = [] + for batch_index in range(batch_size): + metainfo = data_samples[batch_index].metainfo + batch_img_metas.append(metainfo) for i in range(1, self.num_stages): # forward test again, maybe unnecessary for most methods. 
-            prev_outputs = self.decode_head[i - 1].forward_test(
-                x, img_metas, self.test_cfg)
-            loss_decode = self.decode_head[i].forward_train(
-                x, prev_outputs, img_metas, gt_semantic_seg, self.train_cfg)
+            if i == 1:
+                prev_outputs = self.decode_head[0].forward(inputs)
+            else:
+                prev_outputs = self.decode_head[i - 1].forward(
+                    inputs, prev_outputs)
+            loss_decode = self.decode_head[i].loss(inputs, prev_outputs,
+                                                   data_samples,
+                                                   self.train_cfg)
             losses.update(add_prefix(loss_decode, f'decode_{i}'))
 
         return losses
+
+    def _forward(self,
+                 inputs: Tensor,
+                 data_samples: OptSampleList = None) -> Tensor:
+        """Network forward process.
+
+        Args:
+            inputs (Tensor): Inputs with shape (N, C, H, W).
+            data_samples (List[:obj:`SegDataSample`]): The seg data samples.
+                It usually includes information such as `metainfo` and
+                `gt_semantic_seg`.
+
+        Returns:
+            Tensor: Forward output of model without any post-processes.
+        """
+        x = self.extract_feat(inputs)
+
+        out = self.decode_head[0].forward(x)
+        for i in range(1, self.num_stages):
+            # TODO support PointRend tensor mode
+            out = self.decode_head[i].forward(x, out)
+
+        return out
diff --git a/mmseg/models/segmentors/encoder_decoder.py b/mmseg/models/segmentors/encoder_decoder.py
index d3ce17adbb..c4f44ba005 100644
--- a/mmseg/models/segmentors/encoder_decoder.py
+++ b/mmseg/models/segmentors/encoder_decoder.py
@@ -1,181 +1,270 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import List, Optional
+
 import torch.nn as nn
 import torch.nn.functional as F
+from torch import Tensor
 
-from mmseg.core import add_prefix
-from mmseg.ops import resize
-from .. import builder
-from ..builder import SEGMENTORS
+from mmseg.registry import MODELS
+from mmseg.utils import (ConfigType, OptConfigType, OptMultiConfig,
+                         OptSampleList, SampleList, add_prefix)
 from .base import BaseSegmentor
 
 
-@SEGMENTORS.register_module()
+@MODELS.register_module()
 class EncoderDecoder(BaseSegmentor):
     """Encoder Decoder segmentors.
 
     EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
     Note that auxiliary_head is only used for deep supervision during training,
     which could be dumped during inference.
-    """
+
+    1. The ``loss`` method is used to calculate the loss of the model, which
+    includes two steps: (1) extract features to obtain the feature maps;
+    (2) call the decode head loss function to forward the decode head model
+    and calculate the losses.
+
+    .. code:: text
+
+        loss(): extract_feat() -> _decode_head_forward_train() -> _auxiliary_head_forward_train (optional)
+        _decode_head_forward_train(): decode_head.loss()
+        _auxiliary_head_forward_train(): auxiliary_head.loss (optional)
+
+    2. The ``predict`` method is used to predict segmentation results, which
+    includes two steps: (1) run the inference function to obtain the list of
+    seg_logits; (2) call the post-processing function to obtain the list of
+    ``SegDataSample``, each including ``pred_sem_seg`` and ``seg_logits``.
+
+    .. code:: text
+
+        predict(): inference() -> postprocess_result()
+        inference(): whole_inference()/slide_inference()
+        whole_inference()/slide_inference(): encode_decode()
+        encode_decode(): extract_feat() -> decode_head.predict()
+
+    3. The ``_forward`` method is used to output the tensor by running the
+    model, which includes two steps: (1) extract features to obtain the
+    feature maps; (2) call the decode head forward function to forward the
+    decode head model.
+
+    ..
code:: text + + _forward(): extract_feat() -> _decode_head.forward() + + Args: + + backbone (ConfigType): The config for the backnone of segmentor. + decode_head (ConfigType): The config for the decode head of segmentor. + neck (OptConfigType): The config for the neck of segmentor. + Defaults to None. + auxiliary_head (OptConfigType): The config for the auxiliary head of + segmentor. Defaults to None. + train_cfg (OptConfigType): The config for training. Defaults to None. + test_cfg (OptConfigType): The config for testing. Defaults to None. + data_preprocessor (dict, optional): The pre-process config of + :class:`BaseDataPreprocessor`. + pretrained (str, optional): The path for pretrained model. + Defaults to None. + init_cfg (dict, optional): The weight initialized config for + :class:`BaseModule`. + """ # noqa: E501 def __init__(self, - backbone, - decode_head, - neck=None, - auxiliary_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None): - super(EncoderDecoder, self).__init__() - self.backbone = builder.build_backbone(backbone) + backbone: ConfigType, + decode_head: ConfigType, + neck: OptConfigType = None, + auxiliary_head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + pretrained: Optional[str] = None, + init_cfg: OptMultiConfig = None): + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + if pretrained is not None: + assert backbone.get('pretrained') is None, \ + 'both backbone and segmentor set pretrained weight' + backbone.pretrained = pretrained + self.backbone = MODELS.build(backbone) if neck is not None: - self.neck = builder.build_neck(neck) + self.neck = MODELS.build(neck) self._init_decode_head(decode_head) self._init_auxiliary_head(auxiliary_head) self.train_cfg = train_cfg self.test_cfg = test_cfg - self.init_weights(pretrained=pretrained) - assert self.with_decode_head - def _init_decode_head(self, decode_head): + def _init_decode_head(self, decode_head: ConfigType) -> None: """Initialize ``decode_head``""" - self.decode_head = builder.build_head(decode_head) + self.decode_head = MODELS.build(decode_head) self.align_corners = self.decode_head.align_corners self.num_classes = self.decode_head.num_classes + self.out_channels = self.decode_head.out_channels - def _init_auxiliary_head(self, auxiliary_head): + def _init_auxiliary_head(self, auxiliary_head: ConfigType) -> None: """Initialize ``auxiliary_head``""" if auxiliary_head is not None: if isinstance(auxiliary_head, list): self.auxiliary_head = nn.ModuleList() for head_cfg in auxiliary_head: - self.auxiliary_head.append(builder.build_head(head_cfg)) - else: - self.auxiliary_head = builder.build_head(auxiliary_head) - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone and heads. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. 
- """ - - super(EncoderDecoder, self).init_weights(pretrained) - self.backbone.init_weights(pretrained=pretrained) - self.decode_head.init_weights() - if self.with_auxiliary_head: - if isinstance(self.auxiliary_head, nn.ModuleList): - for aux_head in self.auxiliary_head: - aux_head.init_weights() + self.auxiliary_head.append(MODELS.build(head_cfg)) else: - self.auxiliary_head.init_weights() + self.auxiliary_head = MODELS.build(auxiliary_head) - def extract_feat(self, img): + def extract_feat(self, inputs: Tensor) -> List[Tensor]: """Extract features from images.""" - x = self.backbone(img) + x = self.backbone(inputs) if self.with_neck: x = self.neck(x) return x - def encode_decode(self, img, img_metas): + def encode_decode(self, inputs: Tensor, + batch_img_metas: List[dict]) -> List[Tensor]: """Encode images with backbone and decode into a semantic segmentation map of the same size as input.""" - x = self.extract_feat(img) - out = self._decode_head_forward_test(x, img_metas) - out = resize( - input=out, - size=img.shape[2:], - mode='bilinear', - align_corners=self.align_corners) - return out - - def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg): + x = self.extract_feat(inputs) + seg_logits = self.decode_head.predict(x, batch_img_metas, + self.test_cfg) + + return seg_logits + + def _decode_head_forward_train(self, inputs: List[Tensor], + data_samples: SampleList) -> dict: """Run forward function and calculate loss for decode head in training.""" losses = dict() - loss_decode = self.decode_head.forward_train(x, img_metas, - gt_semantic_seg, - self.train_cfg) + loss_decode = self.decode_head.loss(inputs, data_samples, + self.train_cfg) losses.update(add_prefix(loss_decode, 'decode')) return losses - def _decode_head_forward_test(self, x, img_metas): - """Run forward function and calculate loss for decode head in - inference.""" - seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg) - return seg_logits - - def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg): + def _auxiliary_head_forward_train(self, inputs: List[Tensor], + data_samples: SampleList) -> dict: """Run forward function and calculate loss for auxiliary head in training.""" losses = dict() if isinstance(self.auxiliary_head, nn.ModuleList): for idx, aux_head in enumerate(self.auxiliary_head): - loss_aux = aux_head.forward_train(x, img_metas, - gt_semantic_seg, - self.train_cfg) + loss_aux = aux_head.loss(inputs, data_samples, self.train_cfg) losses.update(add_prefix(loss_aux, f'aux_{idx}')) else: - loss_aux = self.auxiliary_head.forward_train( - x, img_metas, gt_semantic_seg, self.train_cfg) + loss_aux = self.auxiliary_head.loss(inputs, data_samples, + self.train_cfg) losses.update(add_prefix(loss_aux, 'aux')) return losses - def forward_dummy(self, img): - """Dummy forward function.""" - seg_logit = self.encode_decode(img, None) - - return seg_logit - - def forward_train(self, img, img_metas, gt_semantic_seg): - """Forward function for training. + def loss(self, inputs: Tensor, data_samples: SampleList) -> dict: + """Calculate losses from a batch of inputs and data samples. Args: - img (Tensor): Input images. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. 
- gt_semantic_seg (Tensor): Semantic segmentation masks - used if the architecture supports semantic segmentation task. + inputs (Tensor): Input images. + data_samples (list[:obj:`SegDataSample`]): The seg data samples. + It usually includes information such as `metainfo` and + `gt_sem_seg`. Returns: dict[str, Tensor]: a dictionary of loss components """ - x = self.extract_feat(img) + x = self.extract_feat(inputs) losses = dict() - loss_decode = self._decode_head_forward_train(x, img_metas, - gt_semantic_seg) + loss_decode = self._decode_head_forward_train(x, data_samples) losses.update(loss_decode) if self.with_auxiliary_head: - loss_aux = self._auxiliary_head_forward_train( - x, img_metas, gt_semantic_seg) + loss_aux = self._auxiliary_head_forward_train(x, data_samples) losses.update(loss_aux) return losses - # TODO refactor - def slide_inference(self, img, img_meta, rescale): - """Inference by sliding-window with overlap.""" + def predict(self, + inputs: Tensor, + data_samples: OptSampleList = None) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W). + data_samples (List[:obj:`SegDataSample`], optional): The seg data + samples. It usually includes information such as `metainfo` + and `gt_sem_seg`. + + Returns: + list[:obj:`SegDataSample`]: Segmentation results of the + input images. Each SegDataSample usually contain: + + - ``pred_sem_seg``(PixelData): Prediction of semantic segmentation. + - ``seg_logits``(PixelData): Predicted logits of semantic + segmentation before normalization. + """ + if data_samples is not None: + batch_img_metas = [ + data_sample.metainfo for data_sample in data_samples + ] + else: + batch_img_metas = [ + dict( + ori_shape=inputs.shape[2:], + img_shape=inputs.shape[2:], + pad_shape=inputs.shape[2:], + padding_size=[0, 0, 0, 0]) + ] * inputs.shape[0] + + seg_logits = self.inference(inputs, batch_img_metas) + + return self.postprocess_result(seg_logits, data_samples) + + def _forward(self, + inputs: Tensor, + data_samples: OptSampleList = None) -> Tensor: + """Network forward process. + + Args: + inputs (Tensor): Inputs with shape (N, C, H, W). + data_samples (List[:obj:`SegDataSample`]): The seg + data samples. It usually includes information such + as `metainfo` and `gt_sem_seg`. + + Returns: + Tensor: Forward output of model without any post-processes. + """ + x = self.extract_feat(inputs) + return self.decode_head.forward(x) + + def slide_inference(self, inputs: Tensor, + batch_img_metas: List[dict]) -> Tensor: + """Inference by sliding-window with overlap. + + If h_crop > h_img or w_crop > w_img, the small patch will be used to + decode without padding. + + Args: + inputs (tensor): the tensor should have a shape NxCxHxW, + which contains all images in the batch. + batch_img_metas (List[dict]): List of image metainfo where each may + also contain: 'img_shape', 'scale_factor', 'flip', 'img_path', + 'ori_shape', and 'pad_shape'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:PackSegInputs`. + + Returns: + Tensor: The segmentation results, seg_logits from model of each + input image. 
+ """ h_stride, w_stride = self.test_cfg.stride h_crop, w_crop = self.test_cfg.crop_size - batch_size, _, h_img, w_img = img.size() + batch_size, _, h_img, w_img = inputs.size() num_classes = self.num_classes h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1 w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1 - preds = img.new_zeros((batch_size, num_classes, h_img, w_img)) - count_mat = img.new_zeros((batch_size, 1, h_img, w_img)) + preds = inputs.new_zeros((batch_size, num_classes, h_img, w_img)) + count_mat = inputs.new_zeros((batch_size, 1, h_img, w_img)) for h_idx in range(h_grids): for w_idx in range(w_grids): y1 = h_idx * h_stride @@ -184,85 +273,71 @@ def slide_inference(self, img, img_meta, rescale): x2 = min(x1 + w_crop, w_img) y1 = max(y2 - h_crop, 0) x1 = max(x2 - w_crop, 0) - crop_img = img[:, :, y1:y2, x1:x2] - pad_img = crop_img.new_zeros( - (crop_img.size(0), crop_img.size(1), h_crop, w_crop)) - pad_img[:, :, :y2 - y1, :x2 - x1] = crop_img - pad_seg_logit = self.encode_decode(pad_img, img_meta) - preds[:, :, y1:y2, - x1:x2] += pad_seg_logit[:, :, :y2 - y1, :x2 - x1] + crop_img = inputs[:, :, y1:y2, x1:x2] + # change the image shape to patch shape + batch_img_metas[0]['img_shape'] = crop_img.shape[2:] + # the output of encode_decode is seg logits tensor map + # with shape [N, C, H, W] + crop_seg_logit = self.encode_decode(crop_img, batch_img_metas) + preds += F.pad(crop_seg_logit, + (int(x1), int(preds.shape[3] - x2), int(y1), + int(preds.shape[2] - y2))) + count_mat[:, :, y1:y2, x1:x2] += 1 assert (count_mat == 0).sum() == 0 - preds = preds / count_mat - if rescale: - preds = resize( - preds, - size=img_meta[0]['ori_shape'][:2], - mode='bilinear', - align_corners=self.align_corners, - warning=False) - - return preds - - def whole_inference(self, img, img_meta, rescale): - """Inference with full image.""" - - seg_logit = self.encode_decode(img, img_meta) - if rescale: - seg_logit = resize( - seg_logit, - size=img_meta[0]['ori_shape'][:2], - mode='bilinear', - align_corners=self.align_corners, - warning=False) + seg_logits = preds / count_mat - return seg_logit + return seg_logits + + def whole_inference(self, inputs: Tensor, + batch_img_metas: List[dict]) -> Tensor: + """Inference with full image. + + Args: + inputs (Tensor): The tensor should have a shape NxCxHxW, which + contains all images in the batch. + batch_img_metas (List[dict]): List of image metainfo where each may + also contain: 'img_shape', 'scale_factor', 'flip', 'img_path', + 'ori_shape', and 'pad_shape'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:PackSegInputs`. + + Returns: + Tensor: The segmentation results, seg_logits from model of each + input image. + """ - def inference(self, img, img_meta, rescale): + seg_logits = self.encode_decode(inputs, batch_img_metas) + + return seg_logits + + def inference(self, inputs: Tensor, batch_img_metas: List[dict]) -> Tensor: """Inference with slide/whole style. Args: - img (Tensor): The input image of shape (N, 3, H, W). - img_meta (dict): Image info dict where each dict has: 'img_shape', - 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + inputs (Tensor): The input image of shape (N, 3, H, W). + batch_img_metas (List[dict]): List of image metainfo where each may + also contain: 'img_shape', 'scale_factor', 'flip', 'img_path', + 'ori_shape', 'pad_shape', and 'padding_size'. 
For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - rescale (bool): Whether rescale back to original shape. + `mmseg/datasets/pipelines/formatting.py:PackSegInputs`. Returns: - Tensor: The output segmentation map. + Tensor: The segmentation results, seg_logits from model of each + input image. """ assert self.test_cfg.mode in ['slide', 'whole'] - ori_shape = img_meta[0]['ori_shape'] - assert all(_['ori_shape'] == ori_shape for _ in img_meta) + ori_shape = batch_img_metas[0]['ori_shape'] + assert all(_['ori_shape'] == ori_shape for _ in batch_img_metas) if self.test_cfg.mode == 'slide': - seg_logit = self.slide_inference(img, img_meta, rescale) + seg_logit = self.slide_inference(inputs, batch_img_metas) else: - seg_logit = self.whole_inference(img, img_meta, rescale) - output = F.softmax(seg_logit, dim=1) - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - if flip: - assert flip_direction in ['horizontal', 'vertical'] - if flip_direction == 'horizontal': - output = output.flip(dims=(3, )) - elif flip_direction == 'vertical': - output = output.flip(dims=(2, )) - - return output - - def simple_test(self, img, img_meta, rescale=True): - """Simple test with single image.""" - seg_logit = self.inference(img, img_meta, rescale) - seg_pred = seg_logit.argmax(dim=1) - seg_pred = seg_pred.cpu().numpy() - # unravel batch dim - seg_pred = list(seg_pred) - return seg_pred + seg_logit = self.whole_inference(inputs, batch_img_metas) + + return seg_logit - def aug_test(self, imgs, img_metas, rescale=True): + def aug_test(self, inputs, batch_img_metas, rescale=True): """Test with augmentations. Only rescale=True is supported. @@ -270,13 +345,13 @@ def aug_test(self, imgs, img_metas, rescale=True): # aug_test rescale all imgs back to ori_shape for now assert rescale # to save memory, we get augmented seg logit inplace - seg_logit = self.inference(imgs[0], img_metas[0], rescale) - for i in range(1, len(imgs)): - cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale) + seg_logit = self.inference(inputs[0], batch_img_metas[0], rescale) + for i in range(1, len(inputs)): + cur_seg_logit = self.inference(inputs[i], batch_img_metas[i], + rescale) seg_logit += cur_seg_logit - seg_logit /= len(imgs) + seg_logit /= len(inputs) seg_pred = seg_logit.argmax(dim=1) - seg_pred = seg_pred.cpu().numpy() # unravel batch dim seg_pred = list(seg_pred) return seg_pred diff --git a/mmseg/models/utils/__init__.py b/mmseg/models/utils/__init__.py index 71d3f423ce..7aaa600c2d 100644 --- a/mmseg/models/utils/__init__.py +++ b/mmseg/models/utils/__init__.py @@ -1,4 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
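As a quick sanity check of the sliding-window grid used by ``slide_inference`` above (a hedged sketch; the image, crop and stride sizes are illustrative and not taken from any config):

    >>> h_img, w_img = 512, 1024
    >>> h_crop, w_crop = 512, 512
    >>> h_stride, w_stride = 341, 341
    >>> h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
    >>> w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
    >>> h_grids, w_grids   # overlapping windows; overlaps are averaged via `count_mat`
    (1, 3)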
+from .embed import PatchEmbed +from .encoding import Encoding +from .inverted_residual import InvertedResidual, InvertedResidualV3 +from .make_divisible import make_divisible from .res_layer import ResLayer +from .se_layer import SELayer from .self_attention_block import SelfAttentionBlock +from .shape_convert import (nchw2nlc2nchw, nchw_to_nlc, nlc2nchw2nlc, + nlc_to_nchw) +from .up_conv_block import UpConvBlock +from .wrappers import Upsample, resize -__all__ = ['ResLayer', 'SelfAttentionBlock'] +__all__ = [ + 'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual', + 'UpConvBlock', 'InvertedResidualV3', 'SELayer', 'PatchEmbed', + 'nchw_to_nlc', 'nlc_to_nchw', 'nchw2nlc2nchw', 'nlc2nchw2nlc', 'Encoding', + 'Upsample', 'resize' +] diff --git a/mmseg/models/utils/embed.py b/mmseg/models/utils/embed.py new file mode 100644 index 0000000000..aef0a40b0a --- /dev/null +++ b/mmseg/models/utils/embed.py @@ -0,0 +1,330 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Sequence + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule +from mmengine.utils import to_2tuple + + +class AdaptivePadding(nn.Module): + """Applies padding to input (if needed) so that input can get fully covered + by filter you specified. It support two modes "same" and "corner". The + "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around + input. The "corner" mode would pad zero to bottom right. + + Args: + kernel_size (int | tuple): Size of the kernel: + stride (int | tuple): Stride of the filter. Default: 1: + dilation (int | tuple): Spacing between kernel elements. + Default: 1. + padding (str): Support "same" and "corner", "corner" mode + would pad zero to bottom right, and "same" mode would + pad zero around input. Default: "corner". + Example: + >>> kernel_size = 16 + >>> stride = 16 + >>> dilation = 1 + >>> input = torch.rand(1, 1, 15, 17) + >>> adap_pad = AdaptivePadding( + >>> kernel_size=kernel_size, + >>> stride=stride, + >>> dilation=dilation, + >>> padding="corner") + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + >>> input = torch.rand(1, 1, 16, 17) + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + """ + + def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): + + super().__init__() + + assert padding in ('same', 'corner') + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + self.padding = padding + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + + def get_pad_shape(self, input_shape): + input_h, input_w = input_shape + kernel_h, kernel_w = self.kernel_size + stride_h, stride_w = self.stride + output_h = math.ceil(input_h / stride_h) + output_w = math.ceil(input_w / stride_w) + pad_h = max((output_h - 1) * stride_h + + (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) + pad_w = max((output_w - 1) * stride_w + + (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) + return pad_h, pad_w + + def forward(self, x): + pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) + if pad_h > 0 or pad_w > 0: + if self.padding == 'corner': + x = F.pad(x, [0, pad_w, 0, pad_h]) + elif self.padding == 'same': + x = F.pad(x, [ + pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2 + ]) + return x + + +class PatchEmbed(BaseModule): + """Image to Patch Embedding. 
+ + We use a conv layer to implement PatchEmbed. + + Args: + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_type (str): The config dict for embedding + conv layer type selection. Default: "Conv2d". + kernel_size (int): The kernel_size of embedding conv. Default: 16. + stride (int, optional): The slide stride of embedding conv. + Default: None (Would be set as `kernel_size`). + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int): The dilation rate of embedding conv. Default: 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + input_size (int | tuple | None): The size of input, which will be + used to calculate the out size. Only work when `dynamic_size` + is False. Default: None. + init_cfg (`mmengine.ConfigDict`, optional): The Config for + initialization. Default: None. + """ + + def __init__(self, + in_channels=3, + embed_dims=768, + conv_type='Conv2d', + kernel_size=16, + stride=None, + padding='corner', + dilation=1, + bias=True, + norm_cfg=None, + input_size=None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + if stride is None: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of conv + padding = 0 + else: + self.adap_padding = None + padding = to_2tuple(padding) + + self.projection = build_conv_layer( + dict(type=conv_type), + in_channels=in_channels, + out_channels=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = None + + if input_size: + input_size = to_2tuple(input_size) + # `init_out_size` would be used outside to + # calculate the num_patches + # when `use_abs_pos_embed` outside + self.init_input_size = input_size + if self.adap_padding: + pad_h, pad_w = self.adap_padding.get_pad_shape(input_size) + input_h, input_w = input_size + input_h = input_h + pad_h + input_w = input_w + pad_w + input_size = (input_h, input_w) + + # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html + h_out = (input_size[0] + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + w_out = (input_size[1] + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + self.init_out_size = (h_out, w_out) + else: + self.init_input_size = None + self.init_out_size = None + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, out_h * out_w, embed_dims) + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_h, out_w). + """ + + if self.adap_padding: + x = self.adap_padding(x) + + x = self.projection(x) + out_size = (x.shape[2], x.shape[3]) + x = x.flatten(2).transpose(1, 2) + if self.norm is not None: + x = self.norm(x) + return x, out_size + + +class PatchMerging(BaseModule): + """Merge patch feature map. 
+ + This layer groups feature map by kernel_size, and applies norm and linear + layers to the grouped feature map. Our implementation uses `nn.Unfold` to + merge patch, which is about 25% faster than original implementation. + Instead, we need to modify pretrained models for compatibility. + + Args: + in_channels (int): The num of input channels. + out_channels (int): The num of output channels. + kernel_size (int | tuple, optional): the kernel size in the unfold + layer. Defaults to 2. + stride (int | tuple, optional): the stride of the sliding blocks in the + unfold layer. Default: None. (Would be set as `kernel_size`) + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int | tuple, optional): dilation parameter in the unfold + layer. Default: 1. + bias (bool, optional): Whether to add bias in linear layer or not. + Defaults: False. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: dict(type='LN'). + init_cfg (dict, optional): The extra config for initialization. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=2, + stride=None, + padding='corner', + dilation=1, + bias=False, + norm_cfg=dict(type='LN'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + if stride: + stride = stride + else: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of unfold + padding = 0 + else: + self.adap_padding = None + + padding = to_2tuple(padding) + self.sampler = nn.Unfold( + kernel_size=kernel_size, + dilation=dilation, + padding=padding, + stride=stride) + + sample_dim = kernel_size[0] * kernel_size[1] * in_channels + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, sample_dim)[1] + else: + self.norm = None + + self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) + + def forward(self, x, input_size): + """ + Args: + x (Tensor): Has shape (B, H*W, C_in). + input_size (tuple[int]): The spatial shape of x, arrange as (H, W). + Default: None. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) + - out_size (tuple[int]): Spatial shape of x, arrange as + (Merged_H, Merged_W). + """ + B, L, C = x.shape + assert isinstance(input_size, Sequence), f'Expect ' \ + f'input_size is ' \ + f'`Sequence` ' \ + f'but get {input_size}' + + H, W = input_size + assert L == H * W, 'input feature has wrong size' + + x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W + # Use nn.Unfold to merge patch. 
About 25% faster than original method, + # but need to modify pretrained model for compatibility + + if self.adap_padding: + x = self.adap_padding(x) + H, W = x.shape[-2:] + + x = self.sampler(x) + # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) + + out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * + (self.sampler.kernel_size[0] - 1) - + 1) // self.sampler.stride[0] + 1 + out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * + (self.sampler.kernel_size[1] - 1) - + 1) // self.sampler.stride[1] + 1 + + output_size = (out_h, out_w) + x = x.transpose(1, 2) # B, H/2*W/2, 4*C + x = self.norm(x) if self.norm else x + x = self.reduction(x) + return x, output_size diff --git a/mmseg/ops/encoding.py b/mmseg/models/utils/encoding.py similarity index 88% rename from mmseg/ops/encoding.py rename to mmseg/models/utils/encoding.py index d939189657..ee4f0574fb 100644 --- a/mmseg/ops/encoding.py +++ b/mmseg/models/utils/encoding.py @@ -1,5 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch -from torch import nn as nn +from torch import nn from torch.nn import functional as F @@ -15,7 +16,7 @@ class Encoding(nn.Module): """ def __init__(self, channels, num_codes): - super(Encoding, self).__init__() + super().__init__() # init codewords and smoothing factor self.channels, self.num_codes = channels, num_codes std = 1. / ((num_codes * channels)**0.5) @@ -43,14 +44,14 @@ def scaled_l2(x, codewords, scale): return scaled_l2_norm @staticmethod - def aggregate(assigment_weights, x, codewords): + def aggregate(assignment_weights, x, codewords): num_codes, channels = codewords.size() reshaped_codewords = codewords.view((1, 1, num_codes, channels)) batch_size = x.size(0) expanded_x = x.unsqueeze(2).expand( (batch_size, x.size(1), num_codes, channels)) - encoded_feat = (assigment_weights.unsqueeze(3) * + encoded_feat = (assignment_weights.unsqueeze(3) * (expanded_x - reshaped_codewords)).sum(dim=1) return encoded_feat @@ -61,10 +62,10 @@ def forward(self, x): # [batch_size, height x width, channels] x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous() # assignment_weights: [batch_size, channels, num_codes] - assigment_weights = F.softmax( + assignment_weights = F.softmax( self.scaled_l2(x, self.codewords, self.scale), dim=2) # aggregate - encoded_feat = self.aggregate(assigment_weights, x, self.codewords) + encoded_feat = self.aggregate(assignment_weights, x, self.codewords) return encoded_feat def __repr__(self): diff --git a/mmseg/models/utils/inverted_residual.py b/mmseg/models/utils/inverted_residual.py new file mode 100644 index 0000000000..56190b3bfe --- /dev/null +++ b/mmseg/models/utils/inverted_residual.py @@ -0,0 +1,213 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import ConvModule +from torch import nn +from torch.utils import checkpoint as cp + +from .se_layer import SELayer + + +class InvertedResidual(nn.Module): + """InvertedResidual block for MobileNetV2. + + Args: + in_channels (int): The input channels of the InvertedResidual block. + out_channels (int): The output channels of the InvertedResidual block. + stride (int): Stride of the middle (first) 3x3 convolution. + expand_ratio (int): Adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + dilation (int): Dilation rate of depthwise conv. Default: 1 + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. 
+ Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor. + """ + + def __init__(self, + in_channels, + out_channels, + stride, + expand_ratio, + dilation=1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + with_cp=False, + **kwargs): + super().__init__() + self.stride = stride + assert stride in [1, 2], f'stride must in [1, 2]. ' \ + f'But received {stride}.' + self.with_cp = with_cp + self.use_res_connect = self.stride == 1 and in_channels == out_channels + hidden_dim = int(round(in_channels * expand_ratio)) + + layers = [] + if expand_ratio != 1: + layers.append( + ConvModule( + in_channels=in_channels, + out_channels=hidden_dim, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + **kwargs)) + layers.extend([ + ConvModule( + in_channels=hidden_dim, + out_channels=hidden_dim, + kernel_size=3, + stride=stride, + padding=dilation, + dilation=dilation, + groups=hidden_dim, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + **kwargs), + ConvModule( + in_channels=hidden_dim, + out_channels=out_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None, + **kwargs) + ]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + + def _inner_forward(x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class InvertedResidualV3(nn.Module): + """Inverted Residual Block for MobileNetV3. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + mid_channels (int): The input channels of the depthwise convolution. + kernel_size (int): The kernel size of the depthwise convolution. + Default: 3. + stride (int): The stride of the depthwise convolution. Default: 1. + se_cfg (dict): Config dict for se layer. Default: None, which means no + se layer. + with_expand_conv (bool): Use expand conv or not. If set False, + mid_channels must be the same with in_channels. Default: True. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor. 
+ """ + + def __init__(self, + in_channels, + out_channels, + mid_channels, + kernel_size=3, + stride=1, + se_cfg=None, + with_expand_conv=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False): + super().__init__() + self.with_res_shortcut = (stride == 1 and in_channels == out_channels) + assert stride in [1, 2] + self.with_cp = with_cp + self.with_se = se_cfg is not None + self.with_expand_conv = with_expand_conv + + if self.with_se: + assert isinstance(se_cfg, dict) + if not self.with_expand_conv: + assert mid_channels == in_channels + + if self.with_expand_conv: + self.expand_conv = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.depthwise_conv = ConvModule( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + padding=kernel_size // 2, + groups=mid_channels, + conv_cfg=dict( + type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + if self.with_se: + self.se = SELayer(**se_cfg) + + self.linear_conv = ConvModule( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x): + + def _inner_forward(x): + out = x + + if self.with_expand_conv: + out = self.expand_conv(out) + + out = self.depthwise_conv(out) + + if self.with_se: + out = self.se(out) + + out = self.linear_conv(out) + + if self.with_res_shortcut: + return x + out + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out diff --git a/mmseg/models/utils/make_divisible.py b/mmseg/models/utils/make_divisible.py new file mode 100644 index 0000000000..ed42c2eeea --- /dev/null +++ b/mmseg/models/utils/make_divisible.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +def make_divisible(value, divisor, min_value=None, min_ratio=0.9): + """Make divisible function. + + This function rounds the channel number to the nearest value that can be + divisible by the divisor. It is taken from the original tf repo. It ensures + that all layers have a channel number that is divisible by divisor. It can + be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa + + Args: + value (int): The original channel number. + divisor (int): The divisor to fully divide the channel number. + min_value (int): The minimum value of the output channel. + Default: None, means that the minimum value equal to the divisor. + min_ratio (float): The minimum ratio of the rounded channel number to + the original channel number. Default: 0.9. + + Returns: + int: The modified output channel number. + """ + + if min_value is None: + min_value = divisor + new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than (1-min_ratio). + if new_value < min_ratio * value: + new_value += divisor + return new_value diff --git a/mmseg/models/utils/res_layer.py b/mmseg/models/utils/res_layer.py index 9ef51b95b0..3dd7a6f75a 100644 --- a/mmseg/models/utils/res_layer.py +++ b/mmseg/models/utils/res_layer.py @@ -1,8 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
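# A quick, hedged check of the rounding behaviour of ``make_divisible`` shown above
# (plain Python, no framework required): multiples of the divisor pass through,
# other values are rounded to a nearby multiple unless that would fall below
# ``min_ratio`` of the original number.
from mmseg.models.utils import make_divisible

assert make_divisible(32, 8) == 32               # already divisible, unchanged
assert make_divisible(37, 8) == 40               # rounded to the nearest multiple of 8
assert make_divisible(8, 8, min_value=16) == 16  # clamped from below by min_value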
from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import Sequential from torch import nn as nn -class ResLayer(nn.Sequential): +class ResLayer(Sequential): """ResLayer to build ResNet style backbone. Args: @@ -42,8 +44,7 @@ def __init__(self, if stride != 1 or inplanes != planes * block.expansion: downsample = [] conv_stride = stride - # check dilation for dilated ResNet - if avg_down and (stride != 1 or dilation != 1): + if avg_down: conv_stride = 1 downsample.append( nn.AvgPool2d( @@ -92,4 +93,4 @@ def __init__(self, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) - super(ResLayer, self).__init__(*layers) + super().__init__(*layers) diff --git a/mmseg/models/utils/se_layer.py b/mmseg/models/utils/se_layer.py new file mode 100644 index 0000000000..0ff632cfea --- /dev/null +++ b/mmseg/models/utils/se_layer.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.utils import is_tuple_of + +from .make_divisible import make_divisible + + +class SELayer(nn.Module): + """Squeeze-and-Excitation Module. + + Args: + channels (int): The input (and output) channels of the SE layer. + ratio (int): Squeeze ratio in SELayer, the intermediate channel will be + ``int(channels/ratio)``. Default: 16. + conv_cfg (None or dict): Config dict for convolution layer. + Default: None, which means using conv2d. + act_cfg (dict or Sequence[dict]): Config dict for activation layer. + If act_cfg is a dict, two activation layers will be configured + by this dict. If act_cfg is a sequence of dicts, the first + activation layer will be configured by the first dict and the + second activation layer will be configured by the second dict. + Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, + divisor=6.0)). + """ + + def __init__(self, + channels, + ratio=16, + conv_cfg=None, + act_cfg=(dict(type='ReLU'), + dict(type='HSigmoid', bias=3.0, divisor=6.0))): + super().__init__() + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert is_tuple_of(act_cfg, dict) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = ConvModule( + in_channels=channels, + out_channels=make_divisible(channels // ratio, 8), + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=make_divisible(channels // ratio, 8), + out_channels=channels, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + out = self.global_avgpool(x) + out = self.conv1(out) + out = self.conv2(out) + return x * out diff --git a/mmseg/models/utils/self_attention_block.py b/mmseg/models/utils/self_attention_block.py index 372fad2e00..5bb6e8284e 100644 --- a/mmseg/models/utils/self_attention_block.py +++ b/mmseg/models/utils/self_attention_block.py @@ -1,5 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
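# A minimal usage sketch for the ``SELayer`` added above; it assumes torch and mmcv
# are installed, since ``ConvModule`` and the default ``HSigmoid`` activation come
# from mmcv. The shapes are arbitrary illustrative values.
import torch

from mmseg.models.utils import SELayer

se = SELayer(channels=64, ratio=16)
x = torch.rand(2, 64, 32, 32)
assert se(x).shape == x.shape  # channel-wise re-weighting keeps the spatial shape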
import torch -from mmcv.cnn import ConvModule, constant_init +from mmcv.cnn import ConvModule +from mmengine.model.weight_init import constant_init from torch import nn as nn from torch.nn import functional as F @@ -34,7 +36,7 @@ def __init__(self, key_in_channels, query_in_channels, channels, key_downsample, key_query_num_convs, value_out_num_convs, key_query_norm, value_out_norm, matmul_norm, with_out, conv_cfg, norm_cfg, act_cfg): - super(SelfAttentionBlock, self).__init__() + super().__init__() if share_key_query: assert key_in_channels == query_in_channels self.key_in_channels = key_in_channels diff --git a/mmseg/models/utils/shape_convert.py b/mmseg/models/utils/shape_convert.py new file mode 100644 index 0000000000..cce1e220b6 --- /dev/null +++ b/mmseg/models/utils/shape_convert.py @@ -0,0 +1,107 @@ +# Copyright (c) OpenMMLab. All rights reserved. +def nlc_to_nchw(x, hw_shape): + """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, L, C] before conversion. + hw_shape (Sequence[int]): The height and width of output feature map. + + Returns: + Tensor: The output tensor of shape [N, C, H, W] after conversion. + """ + H, W = hw_shape + assert len(x.shape) == 3 + B, L, C = x.shape + assert L == H * W, 'The seq_len doesn\'t match H, W' + return x.transpose(1, 2).reshape(B, C, H, W) + + +def nchw_to_nlc(x): + """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, C, H, W] before conversion. + + Returns: + Tensor: The output tensor of shape [N, L, C] after conversion. + """ + assert len(x.shape) == 4 + return x.flatten(2).transpose(1, 2).contiguous() + + +def nchw2nlc2nchw(module, x, contiguous=False, **kwargs): + """Flatten [N, C, H, W] shape tensor `x` to [N, L, C] shape tensor. Use the + reshaped tensor as the input of `module`, and the convert the output of + `module`, whose shape is. + + [N, L, C], to [N, C, H, W]. + + Args: + module (Callable): A callable object the takes a tensor + with shape [N, L, C] as input. + x (Tensor): The input tensor of shape [N, C, H, W]. + contiguous: + contiguous (Bool): Whether to make the tensor contiguous + after each shape transform. + + Returns: + Tensor: The output tensor of shape [N, C, H, W]. + + Example: + >>> import torch + >>> import torch.nn as nn + >>> norm = nn.LayerNorm(4) + >>> feature_map = torch.rand(4, 4, 5, 5) + >>> output = nchw2nlc2nchw(norm, feature_map) + """ + B, C, H, W = x.shape + if not contiguous: + x = x.flatten(2).transpose(1, 2) + x = module(x, **kwargs) + x = x.transpose(1, 2).reshape(B, C, H, W) + else: + x = x.flatten(2).transpose(1, 2).contiguous() + x = module(x, **kwargs) + x = x.transpose(1, 2).reshape(B, C, H, W).contiguous() + return x + + +def nlc2nchw2nlc(module, x, hw_shape, contiguous=False, **kwargs): + """Convert [N, L, C] shape tensor `x` to [N, C, H, W] shape tensor. Use the + reshaped tensor as the input of `module`, and convert the output of + `module`, whose shape is. + + [N, C, H, W], to [N, L, C]. + + Args: + module (Callable): A callable object the takes a tensor + with shape [N, C, H, W] as input. + x (Tensor): The input tensor of shape [N, L, C]. + hw_shape: (Sequence[int]): The height and width of the + feature map with shape [N, C, H, W]. + contiguous (Bool): Whether to make the tensor contiguous + after each shape transform. + + Returns: + Tensor: The output tensor of shape [N, L, C]. 
+ + Example: + >>> import torch + >>> import torch.nn as nn + >>> conv = nn.Conv2d(16, 16, 3, 1, 1) + >>> feature_map = torch.rand(4, 25, 16) + >>> output = nlc2nchw2nlc(conv, feature_map, (5, 5)) + """ + H, W = hw_shape + assert len(x.shape) == 3 + B, L, C = x.shape + assert L == H * W, 'The seq_len doesn\'t match H, W' + if not contiguous: + x = x.transpose(1, 2).reshape(B, C, H, W) + x = module(x, **kwargs) + x = x.flatten(2).transpose(1, 2) + else: + x = x.transpose(1, 2).reshape(B, C, H, W).contiguous() + x = module(x, **kwargs) + x = x.flatten(2).transpose(1, 2).contiguous() + return x diff --git a/mmseg/models/utils/up_conv_block.py b/mmseg/models/utils/up_conv_block.py new file mode 100644 index 0000000000..4fa3b598de --- /dev/null +++ b/mmseg/models/utils/up_conv_block.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, build_upsample_layer + + +class UpConvBlock(nn.Module): + """Upsample convolution block in decoder for UNet. + + This upsample convolution block consists of one upsample module + followed by one convolution block. The upsample module expands the + high-level low-resolution feature map and the convolution block fuses + the upsampled high-level low-resolution feature map and the low-level + high-resolution feature map from encoder. + + Args: + conv_block (nn.Sequential): Sequential of convolutional layers. + in_channels (int): Number of input channels of the high-level + skip_channels (int): Number of input channels of the low-level + high-resolution feature map from encoder. + out_channels (int): Number of output channels. + num_convs (int): Number of convolutional layers in the conv_block. + Default: 2. + stride (int): Stride of convolutional layer in conv_block. Default: 1. + dilation (int): Dilation rate of convolutional layer in conv_block. + Default: 1. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + upsample_cfg (dict): The upsample config of the upsample module in + decoder. Default: dict(type='InterpConv'). If the size of + high-level feature map is the same as that of skip feature map + (low-level feature map from encoder), it does not need upsample the + high-level feature map and the upsample_cfg is None. + dcn (bool): Use deformable convolution in convolutional layer or not. + Default: None. + plugins (dict): plugins for convolutional layers. Default: None. + """ + + def __init__(self, + conv_block, + in_channels, + skip_channels, + out_channels, + num_convs=2, + stride=1, + dilation=1, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + dcn=None, + plugins=None): + super().__init__() + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' 
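# A small, hedged round-trip sketch for the nchw/nlc helpers defined above
# (torch assumed available; shapes are arbitrary illustrative values):
#
#     import torch
#     from mmseg.models.utils import nchw_to_nlc, nlc_to_nchw
#
#     feat = torch.rand(2, 16, 8, 8)
#     tokens = nchw_to_nlc(feat)              # (2, 64, 16): one token per spatial position
#     restored = nlc_to_nchw(tokens, (8, 8))  # back to (2, 16, 8, 8)
#     assert torch.equal(feat, restored)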
+ + self.conv_block = conv_block( + in_channels=2 * skip_channels, + out_channels=out_channels, + num_convs=num_convs, + stride=stride, + dilation=dilation, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dcn=None, + plugins=None) + if upsample_cfg is not None: + self.upsample = build_upsample_layer( + cfg=upsample_cfg, + in_channels=in_channels, + out_channels=skip_channels, + with_cp=with_cp, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + self.upsample = ConvModule( + in_channels, + skip_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, skip, x): + """Forward function.""" + + x = self.upsample(x) + out = torch.cat([skip, x], dim=1) + out = self.conv_block(out) + + return out diff --git a/mmseg/models/utils/wrappers.py b/mmseg/models/utils/wrappers.py new file mode 100644 index 0000000000..abbd0c0296 --- /dev/null +++ b/mmseg/models/utils/wrappers.py @@ -0,0 +1,51 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch.nn as nn +import torch.nn.functional as F + + +def resize(input, + size=None, + scale_factor=None, + mode='nearest', + align_corners=None, + warning=True): + if warning: + if size is not None and align_corners: + input_h, input_w = tuple(int(x) for x in input.shape[2:]) + output_h, output_w = tuple(int(x) for x in size) + if output_h > input_h or output_w > output_h: + if ((output_h > 1 and output_w > 1 and input_h > 1 + and input_w > 1) and (output_h - 1) % (input_h - 1) + and (output_w - 1) % (input_w - 1)): + warnings.warn( + f'When align_corners={align_corners}, ' + 'the output would more aligned if ' + f'input size {(input_h, input_w)} is `x+1` and ' + f'out size {(output_h, output_w)} is `nx+1`') + return F.interpolate(input, size, scale_factor, mode, align_corners) + + +class Upsample(nn.Module): + + def __init__(self, + size=None, + scale_factor=None, + mode='nearest', + align_corners=None): + super().__init__() + self.size = size + if isinstance(scale_factor, tuple): + self.scale_factor = tuple(float(factor) for factor in scale_factor) + else: + self.scale_factor = float(scale_factor) if scale_factor else None + self.mode = mode + self.align_corners = align_corners + + def forward(self, x): + if not self.size: + size = [int(t * self.scale_factor) for t in x.shape[-2:]] + else: + size = self.size + return resize(x, size, None, self.mode, self.align_corners) diff --git a/mmseg/ops/__init__.py b/mmseg/ops/__init__.py deleted file mode 100644 index 54b0d0b79c..0000000000 --- a/mmseg/ops/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .encoding import Encoding -from .separable_conv_module import DepthwiseSeparableConvModule -from .wrappers import resize - -__all__ = ['resize', 'DepthwiseSeparableConvModule', 'Encoding'] diff --git a/mmseg/ops/separable_conv_module.py b/mmseg/ops/separable_conv_module.py deleted file mode 100644 index 4e5922cc4d..0000000000 --- a/mmseg/ops/separable_conv_module.py +++ /dev/null @@ -1,88 +0,0 @@ -import torch.nn as nn -from mmcv.cnn import ConvModule - - -class DepthwiseSeparableConvModule(nn.Module): - """Depthwise separable convolution module. - - See https://arxiv.org/pdf/1704.04861.pdf for details. - - This module can replace a ConvModule with the conv block replaced by two - conv block: depthwise conv block and pointwise conv block. The depthwise - conv block contains depthwise-conv/norm/activation layers. The pointwise - conv block contains pointwise-conv/norm/activation layers. 
It should be - noted that there will be norm/activation layer in the depthwise conv block - if `norm_cfg` and `act_cfg` are specified. - - Args: - in_channels (int): Same as nn.Conv2d. - out_channels (int): Same as nn.Conv2d. - kernel_size (int or tuple[int]): Same as nn.Conv2d. - stride (int or tuple[int]): Same as nn.Conv2d. Default: 1. - padding (int or tuple[int]): Same as nn.Conv2d. Default: 0. - dilation (int or tuple[int]): Same as nn.Conv2d. Default: 1. - norm_cfg (dict): Default norm config for both depthwise ConvModule and - pointwise ConvModule. Default: None. - act_cfg (dict): Default activation config for both depthwise ConvModule - and pointwise ConvModule. Default: dict(type='ReLU'). - dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is - 'default', it will be the same as `norm_cfg`. Default: 'default'. - dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is - 'default', it will be the same as `act_cfg`. Default: 'default'. - pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is - 'default', it will be the same as `norm_cfg`. Default: 'default'. - pw_act_cfg (dict): Activation config of pointwise ConvModule. If it is - 'default', it will be the same as `act_cfg`. Default: 'default'. - kwargs (optional): Other shared arguments for depthwise and pointwise - ConvModule. See ConvModule for ref. - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - norm_cfg=None, - act_cfg=dict(type='ReLU'), - dw_norm_cfg='default', - dw_act_cfg='default', - pw_norm_cfg='default', - pw_act_cfg='default', - **kwargs): - super(DepthwiseSeparableConvModule, self).__init__() - assert 'groups' not in kwargs, 'groups should not be specified' - - # if norm/activation config of depthwise/pointwise ConvModule is not - # specified, use default config. 
- dw_norm_cfg = dw_norm_cfg if dw_norm_cfg != 'default' else norm_cfg - dw_act_cfg = dw_act_cfg if dw_act_cfg != 'default' else act_cfg - pw_norm_cfg = pw_norm_cfg if pw_norm_cfg != 'default' else norm_cfg - pw_act_cfg = pw_act_cfg if pw_act_cfg != 'default' else act_cfg - - # depthwise convolution - self.depthwise_conv = ConvModule( - in_channels, - in_channels, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=in_channels, - norm_cfg=dw_norm_cfg, - act_cfg=dw_act_cfg, - **kwargs) - - self.pointwise_conv = ConvModule( - in_channels, - out_channels, - 1, - norm_cfg=pw_norm_cfg, - act_cfg=pw_act_cfg, - **kwargs) - - def forward(self, x): - x = self.depthwise_conv(x) - x = self.pointwise_conv(x) - return x diff --git a/mmseg/ops/wrappers.py b/mmseg/ops/wrappers.py deleted file mode 100644 index 0b319767f5..0000000000 --- a/mmseg/ops/wrappers.py +++ /dev/null @@ -1,25 +0,0 @@ -import warnings - -import torch.nn.functional as F - - -def resize(input, - size=None, - scale_factor=None, - mode='nearest', - align_corners=None, - warning=True): - if warning: - if size is not None and align_corners: - input_h, input_w = input.shape[2:] - output_h, output_w = size - if output_h > input_h or output_w > output_h: - if ((output_h > 1 and output_w > 1 and input_h > 1 - and input_w > 1) and (output_h - 1) % (input_h - 1) - and (output_w - 1) % (input_w - 1)): - warnings.warn( - f'When align_corners={align_corners}, ' - 'the output would more aligned if ' - f'input size {(input_h, input_w)} is `x+1` and ' - f'out size {(output_h, output_w)} is `nx+1`') - return F.interpolate(input, size, scale_factor, mode, align_corners) diff --git a/mmseg/registry/__init__.py b/mmseg/registry/__init__.py new file mode 100644 index 0000000000..c646b7e5ac --- /dev/null +++ b/mmseg/registry/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .registry import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS, + MODEL_WRAPPERS, MODELS, OPTIM_WRAPPER_CONSTRUCTORS, + OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, + RUNNERS, TASK_UTILS, TRANSFORMS, VISBACKENDS, + VISUALIZERS, WEIGHT_INITIALIZERS) + +__all__ = [ + 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS', 'DATA_SAMPLERS', + 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS', 'OPTIMIZERS', + 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS', 'METRICS', + 'MODEL_WRAPPERS', 'LOOPS', 'VISBACKENDS', 'VISUALIZERS' +] diff --git a/mmseg/registry/registry.py b/mmseg/registry/registry.py new file mode 100644 index 0000000000..5c9977ab8d --- /dev/null +++ b/mmseg/registry/registry.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""MMSegmentation provides 17 registry nodes to support using modules across +projects. Each node is a child of the root registry in MMEngine. + +More details can be found at +https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. 
+""" + +from mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS +from mmengine.registry import DATASETS as MMENGINE_DATASETS +from mmengine.registry import EVALUATOR as MMENGINE_EVALUATOR +from mmengine.registry import HOOKS as MMENGINE_HOOKS +from mmengine.registry import LOG_PROCESSORS as MMENGINE_LOG_PROCESSORS +from mmengine.registry import LOOPS as MMENGINE_LOOPS +from mmengine.registry import METRICS as MMENGINE_METRICS +from mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS +from mmengine.registry import MODELS as MMENGINE_MODELS +from mmengine.registry import \ + OPTIM_WRAPPER_CONSTRUCTORS as MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS +from mmengine.registry import OPTIM_WRAPPERS as MMENGINE_OPTIM_WRAPPERS +from mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS +from mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS +from mmengine.registry import \ + RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS +from mmengine.registry import RUNNERS as MMENGINE_RUNNERS +from mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS +from mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS +from mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS +from mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS +from mmengine.registry import \ + WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS +from mmengine.registry import Registry + +# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner` +RUNNERS = Registry('runner', parent=MMENGINE_RUNNERS) +# manage runner constructors that define how to initialize runners +RUNNER_CONSTRUCTORS = Registry( + 'runner constructor', parent=MMENGINE_RUNNER_CONSTRUCTORS) +# manage all kinds of loops like `EpochBasedTrainLoop` +LOOPS = Registry('loop', parent=MMENGINE_LOOPS) +# manage all kinds of hooks like `CheckpointHook` +HOOKS = Registry('hook', parent=MMENGINE_HOOKS) + +# manage data-related modules +DATASETS = Registry('dataset', parent=MMENGINE_DATASETS) +DATA_SAMPLERS = Registry('data sampler', parent=MMENGINE_DATA_SAMPLERS) +TRANSFORMS = Registry('transform', parent=MMENGINE_TRANSFORMS) + +# mangage all kinds of modules inheriting `nn.Module` +MODELS = Registry('model', parent=MMENGINE_MODELS) +# mangage all kinds of model wrappers like 'MMDistributedDataParallel' +MODEL_WRAPPERS = Registry('model_wrapper', parent=MMENGINE_MODEL_WRAPPERS) +# mangage all kinds of weight initialization modules like `Uniform` +WEIGHT_INITIALIZERS = Registry( + 'weight initializer', parent=MMENGINE_WEIGHT_INITIALIZERS) + +# mangage all kinds of optimizers like `SGD` and `Adam` +OPTIMIZERS = Registry('optimizer', parent=MMENGINE_OPTIMIZERS) +# manage optimizer wrapper +OPTIM_WRAPPERS = Registry('optim_wrapper', parent=MMENGINE_OPTIM_WRAPPERS) +# manage constructors that customize the optimization hyperparameters. 
+OPTIM_WRAPPER_CONSTRUCTORS = Registry( + 'optimizer wrapper constructor', + parent=MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS) +# mangage all kinds of parameter schedulers like `MultiStepLR` +PARAM_SCHEDULERS = Registry( + 'parameter scheduler', parent=MMENGINE_PARAM_SCHEDULERS) + +# manage all kinds of metrics +METRICS = Registry('metric', parent=MMENGINE_METRICS) +# manage evaluator +EVALUATOR = Registry('evaluator', parent=MMENGINE_EVALUATOR) + +# manage task-specific modules like ohem pixel sampler +TASK_UTILS = Registry('task util', parent=MMENGINE_TASK_UTILS) + +# manage visualizer +VISUALIZERS = Registry('visualizer', parent=MMENGINE_VISUALIZERS) +# manage visualizer backend +VISBACKENDS = Registry('vis_backend', parent=MMENGINE_VISBACKENDS) + +# manage logprocessor +LOG_PROCESSORS = Registry('log_processor', parent=MMENGINE_LOG_PROCESSORS) diff --git a/mmseg/structures/__init__.py b/mmseg/structures/__init__.py new file mode 100644 index 0000000000..63d118dca3 --- /dev/null +++ b/mmseg/structures/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .sampler import BasePixelSampler, OHEMPixelSampler, build_pixel_sampler +from .seg_data_sample import SegDataSample + +__all__ = [ + 'SegDataSample', 'BasePixelSampler', 'OHEMPixelSampler', + 'build_pixel_sampler' +] diff --git a/mmseg/structures/sampler/__init__.py b/mmseg/structures/sampler/__init__.py new file mode 100644 index 0000000000..91d762d1b4 --- /dev/null +++ b/mmseg/structures/sampler/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_pixel_sampler import BasePixelSampler +from .builder import build_pixel_sampler +from .ohem_pixel_sampler import OHEMPixelSampler + +__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler'] diff --git a/mmseg/core/seg/sampler/base_pixel_sampler.py b/mmseg/structures/sampler/base_pixel_sampler.py similarity index 85% rename from mmseg/core/seg/sampler/base_pixel_sampler.py rename to mmseg/structures/sampler/base_pixel_sampler.py index db322d199f..03672cd478 100644 --- a/mmseg/core/seg/sampler/base_pixel_sampler.py +++ b/mmseg/structures/sampler/base_pixel_sampler.py @@ -1,3 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod @@ -10,4 +11,3 @@ def __init__(self, **kwargs): @abstractmethod def sample(self, seg_logit, seg_label): """Placeholder for sample function.""" - pass diff --git a/mmseg/structures/sampler/builder.py b/mmseg/structures/sampler/builder.py new file mode 100644 index 0000000000..48e1479026 --- /dev/null +++ b/mmseg/structures/sampler/builder.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +from mmseg.registry import TASK_UTILS + +PIXEL_SAMPLERS = TASK_UTILS + + +def build_pixel_sampler(cfg, **default_args): + """Build pixel sampler for segmentation map.""" + warnings.warn( + '``build_pixel_sampler`` would be deprecated soon, please use ' + '``mmseg.registry.TASK_UTILS.build()`` ') + return TASK_UTILS.build(cfg, default_args=default_args) diff --git a/mmseg/structures/sampler/ohem_pixel_sampler.py b/mmseg/structures/sampler/ohem_pixel_sampler.py new file mode 100644 index 0000000000..a974273cab --- /dev/null +++ b/mmseg/structures/sampler/ohem_pixel_sampler.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
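# A hedged sketch of the child-registry pattern configured above: modules registered
# in mmseg's registries are built from config dicts, and unknown types fall back to
# MMEngine's root registries. ``ToyHead`` is purely illustrative and not part of mmseg.
import torch.nn as nn

from mmseg.registry import MODELS


@MODELS.register_module()
class ToyHead(nn.Module):

    def __init__(self, channels=8):
        super().__init__()
        self.conv = nn.Conv2d(channels, channels, 1)


head = MODELS.build(dict(type='ToyHead', channels=8))
assert isinstance(head, ToyHead)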
+import torch +import torch.nn as nn +import torch.nn.functional as F + +from .base_pixel_sampler import BasePixelSampler +from .builder import PIXEL_SAMPLERS + + +@PIXEL_SAMPLERS.register_module() +class OHEMPixelSampler(BasePixelSampler): + """Online Hard Example Mining Sampler for segmentation. + + Args: + context (nn.Module): The context of sampler, subclass of + :obj:`BaseDecodeHead`. + thresh (float, optional): The threshold for hard example selection. + Below which, are prediction with low confidence. If not + specified, the hard examples will be pixels of top ``min_kept`` + loss. Default: None. + min_kept (int, optional): The minimum number of predictions to keep. + Default: 100000. + """ + + def __init__(self, context, thresh=None, min_kept=100000): + super().__init__() + self.context = context + assert min_kept > 1 + self.thresh = thresh + self.min_kept = min_kept + + def sample(self, seg_logit, seg_label): + """Sample pixels that have high loss or with low prediction confidence. + + Args: + seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W) + seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W) + + Returns: + torch.Tensor: segmentation weight, shape (N, H, W) + """ + with torch.no_grad(): + assert seg_logit.shape[2:] == seg_label.shape[2:] + assert seg_label.shape[1] == 1 + seg_label = seg_label.squeeze(1).long() + batch_kept = self.min_kept * seg_label.size(0) + valid_mask = seg_label != self.context.ignore_index + seg_weight = seg_logit.new_zeros(size=seg_label.size()) + valid_seg_weight = seg_weight[valid_mask] + if self.thresh is not None: + seg_prob = F.softmax(seg_logit, dim=1) + + tmp_seg_label = seg_label.clone().unsqueeze(1) + tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0 + seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1) + sort_prob, sort_indices = seg_prob[valid_mask].sort() + + if sort_prob.numel() > 0: + min_threshold = sort_prob[min(batch_kept, + sort_prob.numel() - 1)] + else: + min_threshold = 0.0 + threshold = max(min_threshold, self.thresh) + valid_seg_weight[seg_prob[valid_mask] < threshold] = 1. + else: + if not isinstance(self.context.loss_decode, nn.ModuleList): + losses_decode = [self.context.loss_decode] + else: + losses_decode = self.context.loss_decode + losses = 0.0 + for loss_module in losses_decode: + losses += loss_module( + seg_logit, + seg_label, + weight=None, + ignore_index=self.context.ignore_index, + reduction_override='none') + + # faster than topk according to https://github.com/pytorch/pytorch/issues/22812 # noqa + _, sort_indices = losses[valid_mask].sort(descending=True) + valid_seg_weight[sort_indices[:batch_kept]] = 1. + + seg_weight[valid_mask] = valid_seg_weight + + return seg_weight diff --git a/mmseg/structures/seg_data_sample.py b/mmseg/structures/seg_data_sample.py new file mode 100644 index 0000000000..ce68b54743 --- /dev/null +++ b/mmseg/structures/seg_data_sample.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.structures import BaseDataElement, PixelData + + +class SegDataSample(BaseDataElement): + """A data structure interface of MMSegmentation. They are used as + interfaces between different components. + + The attributes in ``SegDataSample`` are divided into several parts: + + - ``gt_sem_seg``(PixelData): Ground truth of semantic segmentation. + - ``pred_sem_seg``(PixelData): Prediction of semantic segmentation. + - ``seg_logits``(PixelData): Predicted logits of semantic segmentation. 
+ + Examples: + >>> import torch + >>> import numpy as np + >>> from mmengine.structures import PixelData + >>> from mmseg.structures import SegDataSample + + >>> data_sample = SegDataSample() + >>> img_meta = dict(img_shape=(4, 4, 3), + ... pad_shape=(4, 4, 3)) + >>> gt_segmentations = PixelData(metainfo=img_meta) + >>> gt_segmentations.data = torch.randint(0, 2, (1, 4, 4)) + >>> data_sample.gt_sem_seg = gt_segmentations + >>> assert 'img_shape' in data_sample.gt_sem_seg.metainfo_keys() + >>> data_sample.gt_sem_seg.shape + (4, 4) + >>> print(data_sample) + + ) at 0x1c2aae44d60> + + >>> data_sample = SegDataSample() + >>> gt_sem_seg_data = dict(sem_seg=torch.rand(1, 4, 4)) + >>> gt_sem_seg = PixelData(**gt_sem_seg_data) + >>> data_sample.gt_sem_seg = gt_sem_seg + >>> assert 'gt_sem_seg' in data_sample + >>> assert 'sem_seg' in data_sample.gt_sem_seg + """ + + @property + def gt_sem_seg(self) -> PixelData: + return self._gt_sem_seg + + @gt_sem_seg.setter + def gt_sem_seg(self, value: PixelData) -> None: + self.set_field(value, '_gt_sem_seg', dtype=PixelData) + + @gt_sem_seg.deleter + def gt_sem_seg(self) -> None: + del self._gt_sem_seg + + @property + def pred_sem_seg(self) -> PixelData: + return self._pred_sem_seg + + @pred_sem_seg.setter + def pred_sem_seg(self, value: PixelData) -> None: + self.set_field(value, '_pred_sem_seg', dtype=PixelData) + + @pred_sem_seg.deleter + def pred_sem_seg(self) -> None: + del self._pred_sem_seg + + @property + def seg_logits(self) -> PixelData: + return self._seg_logits + + @seg_logits.setter + def seg_logits(self, value: PixelData) -> None: + self.set_field(value, '_seg_logits', dtype=PixelData) + + @seg_logits.deleter + def seg_logits(self) -> None: + del self._seg_logits diff --git a/mmseg/utils/__init__.py b/mmseg/utils/__init__.py index e7d28670e0..1048e8b50f 100644 --- a/mmseg/utils/__init__.py +++ b/mmseg/utils/__init__.py @@ -1,7 +1,31 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
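# A minimal, hedged sketch of the ``SegDataSample`` defined above: predictions and
# ground truth live in ``PixelData`` fields and can be queried like dictionary keys
# (assumes torch and mmengine are importable).
import torch
from mmengine.structures import PixelData

from mmseg.structures import SegDataSample

sample = SegDataSample()
sample.pred_sem_seg = PixelData(sem_seg=torch.zeros(1, 4, 4, dtype=torch.long))
assert 'pred_sem_seg' in sample
assert tuple(sample.pred_sem_seg.sem_seg.shape) == (1, 4, 4)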
+# yapf: disable +from .class_names import (ade_classes, ade_palette, cityscapes_classes, + cityscapes_palette, cocostuff_classes, + cocostuff_palette, dataset_aliases, get_classes, + get_palette, isaid_classes, isaid_palette, + loveda_classes, loveda_palette, potsdam_classes, + potsdam_palette, stare_classes, stare_palette, + synapse_classes, synapse_palette, vaihingen_classes, + vaihingen_palette, voc_classes, voc_palette) +# yapf: enable from .collect_env import collect_env -from .logger import get_root_logger +from .io import datafrombytes +from .misc import add_prefix, stack_batch +from .set_env import register_all_modules +from .typing import (ConfigType, ForwardResults, MultiConfig, OptConfigType, + OptMultiConfig, OptSampleList, SampleList, TensorDict, + TensorList) __all__ = [ - 'get_root_logger', - 'collect_env', + 'collect_env', 'register_all_modules', 'stack_batch', 'add_prefix', + 'ConfigType', 'OptConfigType', 'MultiConfig', 'OptMultiConfig', + 'SampleList', 'OptSampleList', 'TensorDict', 'TensorList', + 'ForwardResults', 'cityscapes_classes', 'ade_classes', 'voc_classes', + 'cocostuff_classes', 'loveda_classes', 'potsdam_classes', + 'vaihingen_classes', 'isaid_classes', 'stare_classes', 'synapse_classes', + 'cityscapes_palette', 'ade_palette', 'voc_palette', 'cocostuff_palette', + 'loveda_palette', 'potsdam_palette', 'vaihingen_palette', 'isaid_palette', + 'stare_palette', 'synapse_palette', 'dataset_aliases', 'get_classes', + 'get_palette', 'datafrombytes' ] diff --git a/mmseg/utils/class_names.py b/mmseg/utils/class_names.py new file mode 100644 index 0000000000..33fe0238f0 --- /dev/null +++ b/mmseg/utils/class_names.py @@ -0,0 +1,352 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.utils import is_str + + +def cityscapes_classes(): + """Cityscapes class names for external use.""" + return [ + 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', + 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', + 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', + 'bicycle' + ] + + +def ade_classes(): + """ADE20K class names for external use.""" + return [ + 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', + 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', + 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', + 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', + 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', + 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', + 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', + 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', + 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', + 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', + 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', + 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', + 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', + 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', + 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', + 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', + 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', + 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', + 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', + 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', + 'dishwasher', 
'screen', 'blanket', 'sculpture', 'hood', 'sconce', + 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', + 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', + 'clock', 'flag' + ] + + +def voc_classes(): + """Pascal VOC class names for external use.""" + return [ + 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', + 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor' + ] + + +def cocostuff_classes(): + """CocoStuff class names for external use.""" + return [ + 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', + 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', + 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', + 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', + 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', + 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', + 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', + 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', + 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', + 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', + 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', + 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner', + 'blanket', 'branch', 'bridge', 'building-other', 'bush', 'cabinet', + 'cage', 'cardboard', 'carpet', 'ceiling-other', 'ceiling-tile', + 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain', + 'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble', + 'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', 'flower', + 'fog', 'food-other', 'fruit', 'furniture-other', 'grass', 'gravel', + 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal', + 'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', + 'pavement', 'pillow', 'plant-other', 'plastic', 'platform', + 'playingfield', 'railing', 'railroad', 'river', 'road', 'rock', 'roof', + 'rug', 'salad', 'sand', 'sea', 'shelf', 'sky-other', 'skyscraper', + 'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other', + 'table', 'tent', 'textile-other', 'towel', 'tree', 'vegetable', + 'wall-brick', 'wall-concrete', 'wall-other', 'wall-panel', + 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops', + 'window-blind', 'window-other', 'wood' + ] + + +def loveda_classes(): + """LoveDA class names for external use.""" + return [ + 'background', 'building', 'road', 'water', 'barren', 'forest', + 'agricultural' + ] + + +def potsdam_classes(): + """Potsdam class names for external use.""" + return [ + 'impervious_surface', 'building', 'low_vegetation', 'tree', 'car', + 'clutter' + ] + + +def vaihingen_classes(): + """Vaihingen class names for external use.""" + return [ + 'impervious_surface', 'building', 'low_vegetation', 'tree', 'car', + 'clutter' + ] + + +def isaid_classes(): + """iSAID class names for external use.""" + return [ + 'background', 'ship', 'store_tank', 'baseball_diamond', 'tennis_court', + 'basketball_court', 'Ground_Track_Field', 'Bridge', 'Large_Vehicle', + 'Small_Vehicle', 'Helicopter', 'Swimming_pool', 'Roundabout', + 'Soccer_ball_field', 'plane', 'Harbor' + ] + + +def stare_classes(): + """stare class names for external use.""" + return ['background', 'vessel'] + + +def 
synapse_classes(): + """synapse class names for external use.""" + return [ + 'background', 'aorta', 'gallbladder', 'left_kidney', 'right_kidney', + 'liver', 'pancreas', 'spleen', 'stomach' + ] + + +def cityscapes_palette(): + """Cityscapes palette for external use.""" + return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], + [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], + [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], + [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], + [0, 0, 230], [119, 11, 32]] + + +def ade_palette(): + """ADE20K palette for external use.""" + return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], + [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], + [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], + [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], + [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], + [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], + [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], + [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], + [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], + [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], + [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], + [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], + [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], + [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], + [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], + [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], + [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], + [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], + [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], + [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], + [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], + [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], + [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], + [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], + [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], + [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], + [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], + [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], + [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], + [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], + [102, 255, 0], [92, 0, 255]] + + +def voc_palette(): + """Pascal VOC palette for external use.""" + return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], + [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], + [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], + [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], + [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] + + +def cocostuff_palette(): + """CocoStuff palette for external use.""" + return [[0, 192, 64], [0, 192, 64], [0, 64, 96], [128, 192, 192], + [0, 64, 64], [0, 192, 224], [0, 192, 192], [128, 192, 64], + [0, 192, 96], [128, 
192, 64], [128, 32, 192], [0, 0, 224], + [0, 0, 64], [0, 160, 192], [128, 0, 96], [128, 0, 192], + [0, 32, 192], [128, 128, 224], [0, 0, 192], [128, 160, 192], + [128, 128, 0], [128, 0, 32], [128, 32, 0], [128, 0, 128], + [64, 128, 32], [0, 160, 0], [0, 0, 0], [192, 128, 160], [0, 32, 0], + [0, 128, 128], [64, 128, 160], [128, 160, 0], [0, 128, 0], + [192, 128, 32], [128, 96, 128], [0, 0, 128], [64, 0, 32], + [0, 224, 128], [128, 0, 0], [192, 0, 160], [0, 96, 128], + [128, 128, 128], [64, 0, 160], [128, 224, 128], [128, 128, 64], + [192, 0, 32], [128, 96, 0], [128, 0, 192], [0, 128, 32], + [64, 224, 0], [0, 0, 64], [128, 128, 160], [64, 96, 0], + [0, 128, 192], [0, 128, 160], [192, 224, 0], [0, 128, 64], + [128, 128, 32], [192, 32, 128], [0, 64, 192], [0, 0, 32], + [64, 160, 128], [128, 64, 64], [128, 0, 160], [64, 32, 128], + [128, 192, 192], [0, 0, 160], [192, 160, 128], [128, 192, 0], + [128, 0, 96], [192, 32, 0], [128, 64, 128], [64, 128, 96], + [64, 160, 0], [0, 64, 0], [192, 128, 224], [64, 32, 0], + [0, 192, 128], [64, 128, 224], [192, 160, 0], [0, 192, 0], + [192, 128, 96], [192, 96, 128], [0, 64, 128], [64, 0, 96], + [64, 224, 128], [128, 64, 0], [192, 0, 224], [64, 96, 128], + [128, 192, 128], [64, 0, 224], [192, 224, 128], [128, 192, 64], + [192, 0, 96], [192, 96, 0], [128, 64, 192], [0, 128, 96], + [0, 224, 0], [64, 64, 64], [128, 128, 224], [0, 96, 0], + [64, 192, 192], [0, 128, 224], [128, 224, 0], [64, 192, 64], + [128, 128, 96], [128, 32, 128], [64, 0, 192], [0, 64, 96], + [0, 160, 128], [192, 0, 64], [128, 64, 224], [0, 32, 128], + [192, 128, 192], [0, 64, 224], [128, 160, 128], [192, 128, 0], + [128, 64, 32], [128, 32, 64], [192, 0, 128], [64, 192, 32], + [0, 160, 64], [64, 0, 0], [192, 192, 160], [0, 32, 64], + [64, 128, 128], [64, 192, 160], [128, 160, 64], [64, 128, 0], + [192, 192, 32], [128, 96, 192], [64, 0, 128], [64, 64, 32], + [0, 224, 192], [192, 0, 0], [192, 64, 160], [0, 96, 192], + [192, 128, 128], [64, 64, 160], [128, 224, 192], [192, 128, 64], + [192, 64, 32], [128, 96, 64], [192, 0, 192], [0, 192, 32], + [64, 224, 64], [64, 0, 64], [128, 192, 160], [64, 96, 64], + [64, 128, 192], [0, 192, 160], [192, 224, 64], [64, 128, 64], + [128, 192, 32], [192, 32, 192], [64, 64, 192], [0, 64, 32], + [64, 160, 192], [192, 64, 64], [128, 64, 160], [64, 32, 192], + [192, 192, 192], [0, 64, 160], [192, 160, 192], [192, 192, 0], + [128, 64, 96], [192, 32, 64], [192, 64, 128], [64, 192, 96], + [64, 160, 64], [64, 64, 0]] + + +def loveda_palette(): + """LoveDA palette for external use.""" + return [[255, 255, 255], [255, 0, 0], [255, 255, 0], [0, 0, 255], + [159, 129, 183], [0, 255, 0], [255, 195, 128]] + + +def potsdam_palette(): + """Potsdam palette for external use.""" + return [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], + [255, 255, 0], [255, 0, 0]] + + +def vaihingen_palette(): + """Vaihingen palette for external use.""" + return [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], + [255, 255, 0], [255, 0, 0]] + + +def isaid_palette(): + """iSAID palette for external use.""" + return [[0, 0, 0], [0, 0, 63], [0, 63, 63], [0, 63, 0], [0, 63, 127], + [0, 63, 191], [0, 63, 255], [0, 127, 63], [0, 127, + 127], [0, 0, 127], + [0, 0, 191], [0, 0, 255], [0, 191, 127], [0, 127, 191], + [0, 127, 255], [0, 100, 155]] + + +def stare_palette(): + """STARE palette for external use.""" + return [[120, 120, 120], [6, 230, 230]] + + +def lip_classes(): + """LIP class names for external use.""" + return [ + 'background', 'hat', 'hair', 'glove', 'sunglasses', 
'upperclothes', + 'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt', + 'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe', + 'rightShoe' + ] + + +def lip_palette(): + """LIP palette for external use.""" + return [ + 'Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', 'UpperClothes', + 'Dress', 'Coat', 'Socks', 'Pants', 'Jumpsuits', 'Scarf', 'Skirt', + 'Face', 'Left-arm', 'Right-arm', 'Left-leg', 'Right-leg', 'Left-shoe', + 'Right-shoe' + ] + + +def synapse_palette(): + """synapse palette for external use.""" + return [[0, 0, 0], [0, 0, 255], [0, 255, 0], [255, 0, 0], [0, 255, 255], + [255, 0, 255], [255, 255, 0], [60, 255, 255], [240, 240, 240]] + + +dataset_aliases = { + 'cityscapes': ['cityscapes'], + 'ade': ['ade', 'ade20k'], + 'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug'], + 'loveda': ['loveda'], + 'potsdam': ['potsdam'], + 'vaihingen': ['vaihingen'], + 'cocostuff': [ + 'cocostuff', 'cocostuff10k', 'cocostuff164k', 'coco-stuff', + 'coco-stuff10k', 'coco-stuff164k', 'coco_stuff', 'coco_stuff10k', + 'coco_stuff164k' + ], + 'isaid': ['isaid', 'iSAID'], + 'stare': ['stare', 'STARE'], + 'lip': ['LIP', 'lip'], + 'synapse': ['synapse'] +} + + +def get_classes(dataset): + """Get class names of a dataset.""" + alias2name = {} + for name, aliases in dataset_aliases.items(): + for alias in aliases: + alias2name[alias] = name + + if is_str(dataset): + if dataset in alias2name: + labels = eval(alias2name[dataset] + '_classes()') + else: + raise ValueError(f'Unrecognized dataset: {dataset}') + else: + raise TypeError(f'dataset must a str, but got {type(dataset)}') + return labels + + +def get_palette(dataset): + """Get class palette (RGB) of a dataset.""" + alias2name = {} + for name, aliases in dataset_aliases.items(): + for alias in aliases: + alias2name[alias] = name + + if is_str(dataset): + if dataset in alias2name: + labels = eval(alias2name[dataset] + '_palette()') + else: + raise ValueError(f'Unrecognized dataset: {dataset}') + else: + raise TypeError(f'dataset must a str, but got {type(dataset)}') + return labels diff --git a/mmseg/utils/collect_env.py b/mmseg/utils/collect_env.py index 8b82019668..d5d6ea2902 100644 --- a/mmseg/utils/collect_env.py +++ b/mmseg/utils/collect_env.py @@ -1,70 +1,18 @@ -import os.path as osp -import subprocess -import sys -from collections import defaultdict - -import cv2 -import mmcv -import torch -import torchvision -from mmcv.utils.parrots_wrapper import get_build_config +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmengine.utils import get_git_hash +from mmengine.utils.dl_utils import collect_env as collect_base_env import mmseg def collect_env(): """Collect the information of the running environments.""" - env_info = {} - env_info['sys.platform'] = sys.platform - env_info['Python'] = sys.version.replace('\n', '') - - cuda_available = torch.cuda.is_available() - env_info['CUDA available'] = cuda_available - - if cuda_available: - from mmcv.utils.parrots_wrapper import CUDA_HOME - env_info['CUDA_HOME'] = CUDA_HOME - - if CUDA_HOME is not None and osp.isdir(CUDA_HOME): - try: - nvcc = osp.join(CUDA_HOME, 'bin/nvcc') - nvcc = subprocess.check_output( - '"{}" -V | tail -n1'.format(nvcc), shell=True) - nvcc = nvcc.decode('utf-8').strip() - except subprocess.SubprocessError: - nvcc = 'Not Available' - env_info['NVCC'] = nvcc - - devices = defaultdict(list) - for k in range(torch.cuda.device_count()): - devices[torch.cuda.get_device_name(k)].append(str(k)) - for name, devids in devices.items(): - env_info['GPU ' + ','.join(devids)] = name - - gcc = subprocess.check_output('gcc --version | head -n1', shell=True) - gcc = gcc.decode('utf-8').strip() - env_info['GCC'] = gcc - - env_info['PyTorch'] = torch.__version__ - env_info['PyTorch compiling details'] = get_build_config() - - env_info['TorchVision'] = torchvision.__version__ - - env_info['OpenCV'] = cv2.__version__ - - env_info['MMCV'] = mmcv.__version__ - env_info['MMSegmentation'] = mmseg.__version__ - try: - from mmcv.ops import get_compiler_version, get_compiling_cuda_version - env_info['MMCV Compiler'] = get_compiler_version() - env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version() - except ImportError: - env_info['MMCV Compiler'] = 'n/a' - env_info['MMCV CUDA Compiler'] = 'n/a' + env_info = collect_base_env() + env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}' return env_info if __name__ == '__main__': for name, val in collect_env().items(): - print('{}: {}'.format(name, val)) + print(f'{name}: {val}') diff --git a/mmseg/utils/io.py b/mmseg/utils/io.py new file mode 100644 index 0000000000..d03517401c --- /dev/null +++ b/mmseg/utils/io.py @@ -0,0 +1,38 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import gzip +import io +import pickle + +import numpy as np + + +def datafrombytes(content: bytes, backend: str = 'numpy') -> np.ndarray: + """Data decoding from bytes. + + Args: + content (bytes): The data bytes got from files or other streams. + backend (str): The data decoding backend type. Options are 'numpy', + 'nifti' and 'pickle'. Defaults to 'numpy'. + + Returns: + numpy.ndarray: Loaded data array. + """ + if backend == 'pickle': + data = pickle.loads(content) + else: + with io.BytesIO(content) as f: + if backend == 'nifti': + f = gzip.open(f) + try: + from nibabel import FileHolder, Nifti1Image + except ImportError: + print('nifti files io depends on nibabel, please run' + '`pip install nibabel` to install it') + fh = FileHolder(fileobj=f) + data = Nifti1Image.from_file_map({'header': fh, 'image': fh}) + data = Nifti1Image.from_bytes(data.to_bytes()).get_fdata() + elif backend == 'numpy': + data = np.load(f) + else: + raise ValueError + return data diff --git a/mmseg/utils/logger.py b/mmseg/utils/logger.py deleted file mode 100644 index 05d2f13439..0000000000 --- a/mmseg/utils/logger.py +++ /dev/null @@ -1,27 +0,0 @@ -import logging - -from mmcv.utils import get_logger - - -def get_root_logger(log_file=None, log_level=logging.INFO): - """Get the root logger. 
- - The logger will be initialized if it has not been initialized. By default a - StreamHandler will be added. If `log_file` is specified, a FileHandler will - also be added. The name of the root logger is the top-level package name, - e.g., "mmseg". - - Args: - log_file (str | None): The log filename. If specified, a FileHandler - will be added to the root logger. - log_level (int): The root logger level. Note that only the process of - rank 0 is affected, while other processes will set the level to - "Error" and be silent most of the time. - - Returns: - logging.Logger: The root logger. - """ - - logger = get_logger(name='mmseg', log_file=log_file, log_level=log_level) - - return logger diff --git a/mmseg/utils/misc.py b/mmseg/utils/misc.py new file mode 100644 index 0000000000..aa30893609 --- /dev/null +++ b/mmseg/utils/misc.py @@ -0,0 +1,113 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import numpy as np +import torch +import torch.nn.functional as F + +from .typing import SampleList + + +def add_prefix(inputs, prefix): + """Add prefix for dict. + + Args: + inputs (dict): The input dict with str keys. + prefix (str): The prefix to add. + + Returns: + + dict: The dict with keys updated with ``prefix``. + """ + + outputs = dict() + for name, value in inputs.items(): + outputs[f'{prefix}.{name}'] = value + + return outputs + + +def stack_batch(inputs: List[torch.Tensor], + data_samples: Optional[SampleList] = None, + size: Optional[tuple] = None, + size_divisor: Optional[int] = None, + pad_val: Union[int, float] = 0, + seg_pad_val: Union[int, float] = 255) -> torch.Tensor: + """Stack multiple inputs to form a batch and pad the images and gt_sem_segs + to the max shape use the right bottom padding mode. + + Args: + inputs (List[Tensor]): The input multiple tensors. each is a + CHW 3D-tensor. + data_samples (list[:obj:`SegDataSample`]): The list of data samples. + It usually includes information such as `gt_sem_seg`. + size (tuple, optional): Fixed padding size. + size_divisor (int, optional): The divisor of padded size. + pad_val (int, float): The padding value. Defaults to 0 + seg_pad_val (int, float): The padding value. Defaults to 255 + + Returns: + Tensor: The 4D-tensor. + List[:obj:`SegDataSample`]: After the padding of the gt_seg_map. 
+ """ + assert isinstance(inputs, list), \ + f'Expected input type to be list, but got {type(inputs)}' + assert len({tensor.ndim for tensor in inputs}) == 1, \ + f'Expected the dimensions of all inputs must be the same, ' \ + f'but got {[tensor.ndim for tensor in inputs]}' + assert inputs[0].ndim == 3, f'Expected tensor dimension to be 3, ' \ + f'but got {inputs[0].ndim}' + assert len({tensor.shape[0] for tensor in inputs}) == 1, \ + f'Expected the channels of all inputs must be the same, ' \ + f'but got {[tensor.shape[0] for tensor in inputs]}' + + # only one of size and size_divisor should be valid + assert (size is not None) ^ (size_divisor is not None), \ + 'only one of size and size_divisor should be valid' + + padded_inputs = [] + padded_samples = [] + inputs_sizes = [(img.shape[-2], img.shape[-1]) for img in inputs] + max_size = np.stack(inputs_sizes).max(0) + if size_divisor is not None and size_divisor > 1: + # the last two dims are H,W, both subject to divisibility requirement + max_size = (max_size + + (size_divisor - 1)) // size_divisor * size_divisor + + for i in range(len(inputs)): + tensor = inputs[i] + if size is not None: + width = max(size[-1] - tensor.shape[-1], 0) + height = max(size[-2] - tensor.shape[-2], 0) + # (padding_left, padding_right, padding_top, padding_bottom) + padding_size = (0, width, 0, height) + elif size_divisor is not None: + width = max(max_size[-1] - tensor.shape[-1], 0) + height = max(max_size[-2] - tensor.shape[-2], 0) + padding_size = (0, width, 0, height) + else: + padding_size = [0, 0, 0, 0] + + # pad img + pad_img = F.pad(tensor, padding_size, value=pad_val) + padded_inputs.append(pad_img) + # pad gt_sem_seg + if data_samples is not None: + data_sample = data_samples[i] + gt_sem_seg = data_sample.gt_sem_seg.data + del data_sample.gt_sem_seg.data + data_sample.gt_sem_seg.data = F.pad( + gt_sem_seg, padding_size, value=seg_pad_val) + data_sample.set_metainfo({ + 'img_shape': tensor.shape[-2:], + 'pad_shape': data_sample.gt_sem_seg.shape, + 'padding_size': padding_size + }) + padded_samples.append(data_sample) + else: + padded_samples.append( + dict( + img_padding_size=padding_size, + pad_shape=pad_img.shape[-2:])) + + return torch.stack(padded_inputs, dim=0), padded_samples diff --git a/mmseg/utils/set_env.py b/mmseg/utils/set_env.py new file mode 100644 index 0000000000..c948950d62 --- /dev/null +++ b/mmseg/utils/set_env.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import datetime +import warnings + +from mmengine import DefaultScope + + +def register_all_modules(init_default_scope: bool = True) -> None: + """Register all modules in mmseg into the registries. + + Args: + init_default_scope (bool): Whether initialize the mmseg default scope. + When `init_default_scope=True`, the global default scope will be + set to `mmseg`, and all registries will build modules from mmseg's + registry node. To understand more about the registry, please refer + to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md + Defaults to True. 
+ """ # noqa + import mmseg.datasets # noqa: F401,F403 + import mmseg.engine # noqa: F401,F403 + import mmseg.evaluation # noqa: F401,F403 + import mmseg.models # noqa: F401,F403 + import mmseg.structures # noqa: F401,F403 + + if init_default_scope: + never_created = DefaultScope.get_current_instance() is None \ + or not DefaultScope.check_instance_created('mmseg') + if never_created: + DefaultScope.get_instance('mmseg', scope_name='mmseg') + return + current_scope = DefaultScope.get_current_instance() + if current_scope.scope_name != 'mmseg': + warnings.warn('The current default scope ' + f'"{current_scope.scope_name}" is not "mmseg", ' + '`register_all_modules` will force the current' + 'default scope to be "mmseg". If this is not ' + 'expected, please set `init_default_scope=False`.') + # avoid name conflict + new_instance_name = f'mmseg-{datetime.datetime.now()}' + DefaultScope.get_instance(new_instance_name, scope_name='mmseg') diff --git a/mmseg/utils/typing.py b/mmseg/utils/typing.py new file mode 100644 index 0000000000..fba7d3b92b --- /dev/null +++ b/mmseg/utils/typing.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Collecting some commonly used type hint in mmflow.""" +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import torch +from mmengine.config import ConfigDict + +from mmseg.structures import SegDataSample + +# Type hint of config data +ConfigType = Union[ConfigDict, dict] +OptConfigType = Optional[ConfigType] +# Type hint of one or more config data +MultiConfig = Union[ConfigType, Sequence[ConfigType]] +OptMultiConfig = Optional[MultiConfig] + +SampleList = Sequence[SegDataSample] +OptSampleList = Optional[SampleList] + +# Type hint of Tensor +TensorDict = Dict[str, torch.Tensor] +TensorList = Sequence[torch.Tensor] + +ForwardResults = Union[Dict[str, torch.Tensor], List[SegDataSample], + Tuple[torch.Tensor], torch.Tensor] diff --git a/mmseg/version.py b/mmseg/version.py new file mode 100644 index 0000000000..f6ccff6019 --- /dev/null +++ b/mmseg/version.py @@ -0,0 +1,18 @@ +# Copyright (c) Open-MMLab. All rights reserved. + +__version__ = '1.0.0rc2' + + +def parse_version_info(version_str): + version_info = [] + for x in version_str.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + return tuple(version_info) + + +version_info = parse_version_info(__version__) diff --git a/mmseg/visualization/__init__.py b/mmseg/visualization/__init__.py new file mode 100644 index 0000000000..8cbb211e52 --- /dev/null +++ b/mmseg/visualization/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .local_visualizer import SegLocalVisualizer + +__all__ = ['SegLocalVisualizer'] diff --git a/mmseg/visualization/local_visualizer.py b/mmseg/visualization/local_visualizer.py new file mode 100644 index 0000000000..070b06b73b --- /dev/null +++ b/mmseg/visualization/local_visualizer.py @@ -0,0 +1,181 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple + +import mmcv +import numpy as np +from mmengine.dist import master_only +from mmengine.structures import PixelData +from mmengine.visualization import Visualizer + +from mmseg.registry import VISUALIZERS +from mmseg.structures import SegDataSample + + +@VISUALIZERS.register_module() +class SegLocalVisualizer(Visualizer): + """Local Visualizer. 
+ + Args: + name (str): Name of the instance. Defaults to 'visualizer'. + image (np.ndarray, optional): the origin image to draw. The format + should be RGB. Defaults to None. + vis_backends (list, optional): Visual backend config list. + Defaults to None. + save_dir (str, optional): Save file dir for all storage backends. + If it is None, the backend storage will not save any data. + alpha (int, float): The transparency of segmentation mask. + Defaults to 0.8. + + Examples: + >>> import numpy as np + >>> import torch + >>> from mmengine.structures import PixelData + >>> from mmseg.data import SegDataSample + >>> from mmseg.engine.visualization import SegLocalVisualizer + + >>> seg_local_visualizer = SegLocalVisualizer() + >>> image = np.random.randint(0, 256, + ... size=(10, 12, 3)).astype('uint8') + >>> gt_sem_seg_data = dict(data=torch.randint(0, 2, (1, 10, 12))) + >>> gt_sem_seg = PixelData(**gt_sem_seg_data) + >>> gt_seg_data_sample = SegDataSample() + >>> gt_seg_data_sample.gt_sem_seg = gt_sem_seg + >>> seg_local_visualizer.dataset_meta = dict( + >>> classes=('background', 'foreground'), + >>> palette=[[120, 120, 120], [6, 230, 230]]) + >>> seg_local_visualizer.add_datasample('visualizer_example', + ... image, gt_seg_data_sample) + >>> seg_local_visualizer.add_datasample( + ... 'visualizer_example', image, + ... gt_seg_data_sample, show=True) + """ + + def __init__(self, + name: str = 'visualizer', + image: Optional[np.ndarray] = None, + vis_backends: Optional[Dict] = None, + save_dir: Optional[str] = None, + alpha: float = 0.8, + **kwargs): + super().__init__(name, image, vis_backends, save_dir, **kwargs) + self.alpha = alpha + # Set default value. When calling + # `SegLocalVisualizer().dataset_meta=xxx`, + # it will override the default value. + self.dataset_meta = {} + + def _draw_sem_seg(self, image: np.ndarray, sem_seg: PixelData, + classes: Optional[Tuple[str]], + palette: Optional[List[List[int]]]) -> np.ndarray: + """Draw semantic seg of GT or prediction. + + Args: + image (np.ndarray): The image to draw. + sem_seg (:obj:`PixelData`): Data structure for + pixel-level annotations or predictions. + classes (Tuple[str], optional): Category information. + palette (List[List[int]], optional): The palette of + segmentation map. + + Returns: + np.ndarray: the drawn image which channel is RGB. + """ + num_classes = len(classes) + + sem_seg = sem_seg.cpu().data + ids = np.unique(sem_seg)[::-1] + legal_indices = ids < num_classes + ids = ids[legal_indices] + labels = np.array(ids, dtype=np.int64) + + colors = [palette[label] for label in labels] + + self.set_image(image) + + # draw semantic masks + for label, color in zip(labels, colors): + self.draw_binary_masks( + sem_seg == label, colors=[color], alphas=self.alpha) + + return self.get_image() + + @master_only + def add_datasample( + self, + name: str, + image: np.ndarray, + data_sample: Optional[SegDataSample] = None, + draw_gt: bool = True, + draw_pred: bool = True, + show: bool = False, + wait_time: float = 0, + # TODO: Supported in mmengine's Viusalizer. + out_file: Optional[str] = None, + step: int = 0) -> None: + """Draw datasample and save to all backends. + + - If GT and prediction are plotted at the same time, they are + displayed in a stitched image where the left image is the + ground truth and the right image is the prediction. + - If ``show`` is True, all storage backends are ignored, and + the images will be displayed in a local window. + - If ``out_file`` is specified, the drawn image will be + saved to ``out_file``. 
it is usually used when the display + is not available. + + Args: + name (str): The image identifier. + image (np.ndarray): The image to draw. + gt_sample (:obj:`SegDataSample`, optional): GT SegDataSample. + Defaults to None. + pred_sample (:obj:`SegDataSample`, optional): Prediction + SegDataSample. Defaults to None. + draw_gt (bool): Whether to draw GT SegDataSample. Default to True. + draw_pred (bool): Whether to draw Prediction SegDataSample. + Defaults to True. + show (bool): Whether to display the drawn image. Default to False. + wait_time (float): The interval of show (s). Defaults to 0. + out_file (str): Path to output file. Defaults to None. + step (int): Global step value to record. Defaults to 0. + """ + classes = self.dataset_meta.get('classes', None) + palette = self.dataset_meta.get('palette', None) + + gt_img_data = None + pred_img_data = None + + if draw_gt and data_sample is not None and 'gt_sem_seg' in data_sample: + gt_img_data = image + assert classes is not None, 'class information is ' \ + 'not provided when ' \ + 'visualizing semantic ' \ + 'segmentation results.' + gt_img_data = self._draw_sem_seg(gt_img_data, + data_sample.gt_sem_seg, classes, + palette) + + if (draw_pred and data_sample is not None + and 'pred_sem_seg' in data_sample): + pred_img_data = image + assert classes is not None, 'class information is ' \ + 'not provided when ' \ + 'visualizing semantic ' \ + 'segmentation results.' + pred_img_data = self._draw_sem_seg(pred_img_data, + data_sample.pred_sem_seg, + classes, palette) + + if gt_img_data is not None and pred_img_data is not None: + drawn_img = np.concatenate((gt_img_data, pred_img_data), axis=1) + elif gt_img_data is not None: + drawn_img = gt_img_data + else: + drawn_img = pred_img_data + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + mmcv.imwrite(drawn_img, out_file) + else: + self.add_image(name, drawn_img, step) diff --git a/model-index.yml b/model-index.yml new file mode 100644 index 0000000000..ae96bd30f7 --- /dev/null +++ b/model-index.yml @@ -0,0 +1,48 @@ +Import: +- configs/ann/ann.yml +- configs/apcnet/apcnet.yml +- configs/beit/beit.yml +- configs/bisenetv1/bisenetv1.yml +- configs/bisenetv2/bisenetv2.yml +- configs/ccnet/ccnet.yml +- configs/cgnet/cgnet.yml +- configs/convnext/convnext.yml +- configs/danet/danet.yml +- configs/deeplabv3/deeplabv3.yml +- configs/deeplabv3plus/deeplabv3plus.yml +- configs/dmnet/dmnet.yml +- configs/dnlnet/dnlnet.yml +- configs/dpt/dpt.yml +- configs/emanet/emanet.yml +- configs/encnet/encnet.yml +- configs/erfnet/erfnet.yml +- configs/fastfcn/fastfcn.yml +- configs/fastscnn/fastscnn.yml +- configs/fcn/fcn.yml +- configs/gcnet/gcnet.yml +- configs/hrnet/hrnet.yml +- configs/icnet/icnet.yml +- configs/isanet/isanet.yml +- configs/knet/knet.yml +- configs/mae/mae.yml +- configs/mask2former/mask2former.yml +- configs/maskformer/maskformer.yml +- configs/mobilenet_v2/mobilenet_v2.yml +- configs/mobilenet_v3/mobilenet_v3.yml +- configs/nonlocal_net/nonlocal_net.yml +- configs/ocrnet/ocrnet.yml +- configs/point_rend/point_rend.yml +- configs/poolformer/poolformer.yml +- configs/psanet/psanet.yml +- configs/pspnet/pspnet.yml +- configs/resnest/resnest.yml +- configs/segformer/segformer.yml +- configs/segmenter/segmenter.yml +- configs/sem_fpn/sem_fpn.yml +- configs/setr/setr.yml +- configs/stdc/stdc.yml +- configs/swin/swin.yml +- configs/twins/twins.yml +- configs/unet/unet.yml +- configs/upernet/upernet.yml +- configs/vit/vit.yml diff --git 
a/projects/README.md b/projects/README.md new file mode 100644 index 0000000000..40d515eda3 --- /dev/null +++ b/projects/README.md @@ -0,0 +1,9 @@ +# Projects + +Implementing new models and features into OpenMMLab's algorithm libraries could be troublesome due to the rigorous requirements on code quality, which could hinder the fast iteration of SOTA models and might discourage our members from sharing their latest outcomes here. + +And that's why we have this `Projects/` folder now, where some experimental features, frameworks and models are placed, only needed to satisfy the minimum requirement on the code quality, and can be used as standalone libraries. Users are welcome to use them if they [use MMSegmentation from source](https://mmsegmentation.readthedocs.io/en/dev-1.x/get_started.html#best-practices). + +Everyone is welcome to post their implementation of any great ideas in this folder! If you wish to start your own project, please go through the [example project](example_project/) for the best practice. + +Note: The core maintainers of MMSegmentation only ensure the results are reproducible and the code quality meets its claim at the time each project was submitted, but they may not be responsible for future maintenance. The original authors take responsibility for maintaining their own projects. diff --git a/projects/example_project/README.md b/projects/example_project/README.md new file mode 100644 index 0000000000..27ca5d4e2a --- /dev/null +++ b/projects/example_project/README.md @@ -0,0 +1,129 @@ +# Dummy ResNet Wrapper + +This is an example README for community `projects/`. We have provided detailed explanations for each field in the form of html comments, which are visible when you read the source of this README file. If you wish to submit your project to our main repository, then all the fields in this README are mandatory for others to understand what you have achieved in this implementation. For more details, read our [contribution guide](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/.github/CONTRIBUTING.md) or approach us in [Discussions](https://github.com/open-mmlab/mmsegmentation/discussions). + +## Description + + + +This project implements a dummy ResNet wrapper, which literally does nothing new but prints "hello world" during initialization. + +## Usage + + + +### Prerequisites + +- Python 3.7 +- PyTorch 1.6 or higher +- [MIM](https://github.com/open-mmlab/mim) v0.33 or higher +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation) v1.0.0rc2 or higher + +All the commands below rely on the correct configuration of `PYTHONPATH`, which should point to the project's directory so that Python can locate the module files. In `example_project/` root directory, run the following line to add the current directory to `PYTHONPATH`: + +```shell +export PYTHONPATH=`pwd`:$PYTHONPATH +``` + +### Training commands + +```shell +mim train mmsegmentation configs/fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py --work-dir work_dirs/dummy_resnet +``` + +To train on multiple GPUs, e.g. 
8 GPUs, run the following command: + +```shell +mim train mmsegmentation configs/fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py --work-dir work_dirs/dummy_resnet --launcher pytorch --gpus 8 +``` + +### Testing commands + +```shell +mim test mmsegmentation configs/fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py --work-dir work_dirs/dummy_resnet --checkpoint ${CHECKPOINT_PATH} +``` + + + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | R-50-D8 | 512x1024 | 40000 | 5.7 | 4.17 | 72.25 | 73.36 | [config](configs/fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608.log.json) | + +## Citation + + + +```bibtex +@misc{mmseg2020, + title={{MMSegmentation}: OpenMMLab Semantic Segmentation Toolbox and Benchmark}, + author={MMSegmentation Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmsegmentation}}, + year={2020} +} +``` + +## Checklist + +Here is a checklist illustrating a usual development workflow of a successful project, and also serves as an overview of this project's progress. + + + +- [ ] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. + + - [ ] Finish the code + + + + - [ ] Basic docstrings & proper citation + + + + - [ ] Test-time correctness + + + + - [ ] A full README + + + +- [ ] Milestone 2: Indicates a successful model implementation. + + - [ ] Training-time correctness + + + +- [ ] Milestone 3: Good to be a part of our core package! + + - [ ] Type hints and docstrings + + + + - [ ] Unit tests + + + + - [ ] Code polishing + + + + - [ ] Metafile.yml + + + +- [ ] Move your modules into the core package following the codebase's file hierarchy structure. + + + +- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure. 
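
Aside (illustrative, not part of this diff): the example project's `DummyResNet` added in the files below is discovered purely through the registry, so nothing in the core package has to change. A minimal Python sketch of that wiring, assuming the repository root is on `PYTHONPATH` as described in the Usage section above; `Config.fromfile` imports the modules listed in `custom_imports` by default, which is what runs the `@MODELS.register_module()` decorator:

```python
# Minimal sketch (illustrative only); run from the repository root with
# PYTHONPATH set as in the README above.
from mmengine.config import Config

from mmseg.registry import MODELS
from mmseg.utils import register_all_modules

register_all_modules()  # set the default registry scope to 'mmseg'

# Loading the config triggers `custom_imports`, which imports
# `projects.example_project.dummy` and thereby registers `DummyResNet`.
cfg = Config.fromfile(
    'projects/example_project/configs/'
    'fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py')

backbone = MODELS.build(cfg.model.backbone)  # prints "Hello world!"
print(type(backbone).__name__)  # DummyResNet
```
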
diff --git a/projects/example_project/configs/fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py b/projects/example_project/configs/fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py new file mode 100644 index 0000000000..b0ec67b691 --- /dev/null +++ b/projects/example_project/configs/fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py @@ -0,0 +1,8 @@ +_base_ = ['../../../configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-512x1024.py'] + +custom_imports = dict(imports=['projects.example_project.dummy']) + +crop_size = (512, 1024) +data_preprocessor = dict(size=crop_size) +model = dict( + data_preprocessor=data_preprocessor, backbone=dict(type='DummyResNet')) diff --git a/projects/example_project/dummy/__init__.py b/projects/example_project/dummy/__init__.py new file mode 100644 index 0000000000..70df7896d6 --- /dev/null +++ b/projects/example_project/dummy/__init__.py @@ -0,0 +1,3 @@ +from .dummy_resnet import DummyResNet + +__all__ = ['DummyResNet'] diff --git a/projects/example_project/dummy/dummy_resnet.py b/projects/example_project/dummy/dummy_resnet.py new file mode 100644 index 0000000000..a510eafd52 --- /dev/null +++ b/projects/example_project/dummy/dummy_resnet.py @@ -0,0 +1,14 @@ +from mmseg.models.backbones import ResNetV1c +from mmseg.registry import MODELS + + +@MODELS.register_module() +class DummyResNet(ResNetV1c): + """Implements a dummy ResNet wrapper for demonstration purpose. + Args: + **kwargs: All the arguments are passed to the parent class. + """ + + def __init__(self, **kwargs) -> None: + print('Hello world!') + super().__init__(**kwargs) diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index 9796e871e7..0000000000 --- a/pytest.ini +++ /dev/null @@ -1,7 +0,0 @@ -[pytest] -addopts = --xdoctest --xdoctest-style=auto -norecursedirs = .git ignore build __pycache__ data docker docs .eggs - -filterwarnings= default - ignore:.*No cfgstr given in Cacher constructor or call.*:Warning - ignore:.*Define the __nice__ method for.*:Warning diff --git a/requirements.txt b/requirements.txt index 6981bd7233..6da5adea75 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ --r requirements/build.txt -r requirements/optional.txt -r requirements/runtime.txt -r requirements/tests.txt diff --git a/requirements/build.txt b/requirements/build.txt deleted file mode 100644 index 2f74f3d17f..0000000000 --- a/requirements/build.txt +++ /dev/null @@ -1,3 +0,0 @@ -# These must be installed before building mmsegmentation -numpy -# torch diff --git a/requirements/docs.txt b/requirements/docs.txt new file mode 100644 index 0000000000..8e98c16fc7 --- /dev/null +++ b/requirements/docs.txt @@ -0,0 +1,6 @@ +docutils==0.16.0 +myst-parser +-e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme +sphinx==4.0.2 +sphinx_copybutton +sphinx_markdown_tables diff --git a/requirements/mminstall.txt b/requirements/mminstall.txt new file mode 100644 index 0000000000..d27af8dd0f --- /dev/null +++ b/requirements/mminstall.txt @@ -0,0 +1,4 @@ +mmcls>=1.0.0rc0 +mmcv>=2.0.0rc3,<2.1.0 +mmdet>=3.0.0rc4 +mmengine>=0.1.0,<1.0.0 diff --git a/requirements/optional.txt b/requirements/optional.txt index 47fa593315..5eca649247 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -1 +1,2 @@ cityscapesscripts +nibabel diff --git a/requirements/readthedocs.txt b/requirements/readthedocs.txt new file mode 100644 index 0000000000..af6029b9ad --- /dev/null +++ b/requirements/readthedocs.txt @@ -0,0 +1,5 @@ +mmcv>=2.0.0rc0 +mmengine +prettytable +torch +torchvision diff 
--git a/requirements/runtime.txt b/requirements/runtime.txt index a03605f66a..3e242581e9 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -1,4 +1,5 @@ matplotlib numpy -# torch -# torchvision +packaging +prettytable +scipy diff --git a/requirements/tests.txt b/requirements/tests.txt index 400f79cd26..74fc76146d 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -1,8 +1,6 @@ -asynctest codecov flake8 interrogate -isort==4.3.21 pytest xdoctest>=0.10.0 yapf diff --git a/resources/3dogs.jpg b/resources/3dogs.jpg new file mode 100644 index 0000000000..02ef6fc849 Binary files /dev/null and b/resources/3dogs.jpg differ diff --git a/resources/3dogs_mask.png b/resources/3dogs_mask.png new file mode 100644 index 0000000000..339c2f5da5 Binary files /dev/null and b/resources/3dogs_mask.png differ diff --git a/resources/cascade_encoder_decoder_dataflow.png b/resources/cascade_encoder_decoder_dataflow.png new file mode 100644 index 0000000000..28e33d0527 Binary files /dev/null and b/resources/cascade_encoder_decoder_dataflow.png differ diff --git a/resources/encoder_decoder_dataflow.png b/resources/encoder_decoder_dataflow.png new file mode 100644 index 0000000000..33a8a49163 Binary files /dev/null and b/resources/encoder_decoder_dataflow.png differ diff --git a/resources/test_step.png b/resources/test_step.png new file mode 100644 index 0000000000..4d52351b85 Binary files /dev/null and b/resources/test_step.png differ diff --git a/resources/train_step.png b/resources/train_step.png new file mode 100644 index 0000000000..1e06105a06 Binary files /dev/null and b/resources/train_step.png differ diff --git a/setup.cfg b/setup.cfg index 2102a8ca60..dc5ea07111 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,8 +6,14 @@ split_before_expression_after_opening_paren = true [isort] line_length = 79 multi_line_output = 0 -known_standard_library = setuptools +extra_standard_library = setuptools known_first_party = mmseg -known_third_party = PIL,cityscapesscripts,cv2,matplotlib,mmcv,numpy,pytablewriter,pytest,scipy,torch,torchvision +known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,packaging,prettytable,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,torch,ts no_lines_before = STDLIB,LOCALFOLDER default_section = THIRDPARTY + +[codespell] +skip = *.po,*.ts,*.ipynb +count = +quiet-level = 3 +ignore-words-list = formating,sur,hist,dota,warmup diff --git a/setup.py b/setup.py index af05f95548..854dd18605 100755 --- a/setup.py +++ b/setup.py @@ -1,13 +1,12 @@ -#!/usr/bin/env python +# Copyright (c) OpenMMLab. All rights reserved. 
import os -import subprocess -import time +import os.path as osp +import platform +import shutil +import sys +import warnings from setuptools import find_packages, setup -import torch -from mmcv.utils.parrots_wrapper import (BuildExtension, CppExtension, - CUDAExtension) - def readme(): with open('README.md', encoding='utf-8') as f: @@ -18,99 +17,12 @@ def readme(): version_file = 'mmseg/version.py' -def get_git_hash(): - - def _minimal_ext_cmd(cmd): - # construct minimal environment - env = {} - for k in ['SYSTEMROOT', 'PATH', 'HOME']: - v = os.environ.get(k) - if v is not None: - env[k] = v - # LANGUAGE is used on win32 - env['LANGUAGE'] = 'C' - env['LANG'] = 'C' - env['LC_ALL'] = 'C' - out = subprocess.Popen( - cmd, stdout=subprocess.PIPE, env=env).communicate()[0] - return out - - try: - out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) - sha = out.strip().decode('ascii') - except OSError: - sha = 'unknown' - - return sha - - -def get_hash(): - if os.path.exists('.git'): - sha = get_git_hash()[:7] - elif os.path.exists(version_file): - try: - from mmseg.version import __version__ - sha = __version__.split('+')[-1] - except ImportError: - raise ImportError('Unable to get git version') - else: - sha = 'unknown' - - return sha - - -def write_version_py(): - content = """# GENERATED VERSION FILE -# TIME: {} - -__version__ = '{}' -short_version = '{}' -version_info = ({}) -""" - sha = get_hash() - with open('mmseg/VERSION', 'r') as f: - SHORT_VERSION = f.read().strip() - VERSION_INFO = ', '.join(SHORT_VERSION.split('.')) - VERSION = SHORT_VERSION + '+' + sha - - version_file_str = content.format(time.asctime(), VERSION, SHORT_VERSION, - VERSION_INFO) - with open(version_file, 'w') as f: - f.write(version_file_str) - - def get_version(): - with open(version_file, 'r') as f: + with open(version_file) as f: exec(compile(f.read(), version_file, 'exec')) return locals()['__version__'] -def make_cuda_ext(name, module, sources, sources_cuda=[]): - - define_macros = [] - extra_compile_args = {'cxx': []} - - if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1': - define_macros += [('WITH_CUDA', None)] - extension = CUDAExtension - extra_compile_args['nvcc'] = [ - '-D__CUDA_NO_HALF_OPERATORS__', - '-D__CUDA_NO_HALF_CONVERSIONS__', - '-D__CUDA_NO_HALF2_OPERATORS__', - ] - sources += sources_cuda - else: - print('Compiling {} without CUDA'.format(name)) - extension = CppExtension - # raise EnvironmentError('CUDA is required to compile MMSegmentation!') - - return extension( - name='{}.{}'.format(module, name), - sources=[os.path.join(*module.split('.'), p) for p in sources], - define_macros=define_macros, - extra_compile_args=extra_compile_args) - - def parse_requirements(fname='requirements.txt', with_version=True): """Parse the package dependencies listed in a requirements file but strips specific versioning information. 
@@ -125,9 +37,9 @@ def parse_requirements(fname='requirements.txt', with_version=True): CommandLine: python -c "import setup; print(setup.parse_requirements())" """ + import re import sys from os.path import exists - import re require_fpath = fname def parse_line(line): @@ -162,12 +74,11 @@ def parse_line(line): yield info def parse_require_file(fpath): - with open(fpath, 'r') as f: + with open(fpath) as f: for line in f.readlines(): line = line.strip() if line and not line.startswith('#'): - for info in parse_line(line): - yield info + yield from parse_line(line) def gen_packages_items(): if exists(require_fpath): @@ -187,19 +98,86 @@ def gen_packages_items(): return packages +def add_mim_extension(): + """Add extra files that are required to support MIM into the package. + + These files will be added by creating a symlink to the originals if the + package is installed in `editable` mode (e.g. pip install -e .), or by + copying from the originals otherwise. + """ + + # parse installment mode + if 'develop' in sys.argv: + # installed by `pip install -e .` + if platform.system() == 'Windows': + # set `copy` mode here since symlink fails on Windows. + mode = 'copy' + else: + mode = 'symlink' + elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv or \ + platform.system() == 'Windows': + # installed by `pip install .` + # or create source distribution by `python setup.py sdist` + # set `copy` mode here since symlink fails with WinError on Windows. + mode = 'copy' + else: + return + + filenames = ['tools', 'configs', 'model-index.yml'] + repo_path = osp.dirname(__file__) + mim_path = osp.join(repo_path, 'mmseg', '.mim') + os.makedirs(mim_path, exist_ok=True) + + for filename in filenames: + if osp.exists(filename): + src_path = osp.join(repo_path, filename) + tar_path = osp.join(mim_path, filename) + + if osp.isfile(tar_path) or osp.islink(tar_path): + os.remove(tar_path) + elif osp.isdir(tar_path): + shutil.rmtree(tar_path) + + if mode == 'symlink': + src_relpath = osp.relpath(src_path, osp.dirname(tar_path)) + try: + os.symlink(src_relpath, tar_path) + except OSError: + # Creating a symbolic link on windows may raise an + # `OSError: [WinError 1314]` due to privilege. 
If + # the error happens, the src file will be copied + mode = 'copy' + warnings.warn( + f'Failed to create a symbolic link for {src_relpath}, ' + f'and it will be copied to {tar_path}') + else: + continue + + if mode == 'copy': + if osp.isfile(src_path): + shutil.copyfile(src_path, tar_path) + elif osp.isdir(src_path): + shutil.copytree(src_path, tar_path) + else: + warnings.warn(f'Cannot copy file {src_path}.') + else: + raise ValueError(f'Invalid mode {mode}') + + if __name__ == '__main__': - write_version_py() + add_mim_extension() setup( - name='mmseg', + name='mmsegmentation', version=get_version(), description='Open MMLab Semantic Segmentation Toolbox and Benchmark', long_description=readme(), - author='MMSegmentation Authors', + long_description_content_type='text/markdown', + author='MMSegmentation Contributors', author_email='openmmlab@gmail.com', keywords='computer vision, semantic segmentation', url='http://github.com/open-mmlab/mmsegmentation', packages=find_packages(exclude=('configs', 'tools', 'demo')), - package_data={'mmseg.ops': ['*/*.so']}, + include_package_data=True, classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI Approved :: Apache Software License', @@ -207,17 +185,15 @@ def gen_packages_items(): 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', ], license='Apache License 2.0', - setup_requires=parse_requirements('requirements/build.txt'), - tests_require=parse_requirements('requirements/tests.txt'), install_requires=parse_requirements('requirements/runtime.txt'), extras_require={ 'all': parse_requirements('requirements.txt'), 'tests': parse_requirements('requirements/tests.txt'), - 'build': parse_requirements('requirements/build.txt'), 'optional': parse_requirements('requirements/optional.txt'), + 'mim': parse_requirements('requirements/mminstall.txt'), }, ext_modules=[], - cmdclass={'build_ext': BuildExtension}, zip_safe=False) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000000..ef101fec61 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
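
The `add_mim_extension` helper above is what lets the `mim train mmsegmentation ...` commands shown in the example project README work against an installed package: it mirrors `tools/`, `configs/` and `model-index.yml` into `mmseg/.mim` (symlinks for editable installs, copies otherwise). A quick sanity check, sketched under the assumption of an editable install (`pip install -e .`) run from the repository root:

```python
# Illustrative check only (assumes `pip install -e .` was run from the
# repository root, so add_mim_extension() has populated mmseg/.mim).
import os
import os.path as osp

import mmseg

mim_dir = osp.join(osp.dirname(mmseg.__file__), '.mim')
# Expected under the assumption above: ['configs', 'model-index.yml', 'tools']
print(sorted(os.listdir(mim_dir)))
```
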
diff --git a/tests/data/biomedical.nii.gz b/tests/data/biomedical.nii.gz new file mode 100755 index 0000000000..32f3276d9e Binary files /dev/null and b/tests/data/biomedical.nii.gz differ diff --git a/tests/data/biomedical.npy b/tests/data/biomedical.npy new file mode 100644 index 0000000000..481944493d Binary files /dev/null and b/tests/data/biomedical.npy differ diff --git a/tests/data/biomedical.pkl b/tests/data/biomedical.pkl new file mode 100644 index 0000000000..48c32a7cef Binary files /dev/null and b/tests/data/biomedical.pkl differ diff --git a/tests/data/biomedical_ann.nii.gz b/tests/data/biomedical_ann.nii.gz new file mode 100755 index 0000000000..5eae8a4a49 Binary files /dev/null and b/tests/data/biomedical_ann.nii.gz differ diff --git a/tests/data/dataset.json b/tests/data/dataset.json new file mode 100755 index 0000000000..09b01235ec --- /dev/null +++ b/tests/data/dataset.json @@ -0,0 +1,30 @@ +{ + "name": "BRATS", + "description": "Gliomas segmentation tumour and oedema in on brain images", + "tensorImageSize": "4D", + "modality": { + "0": "FLAIR", + "1": "T1w", + "2": "t1gd", + "3": "T2w" + }, + "labels": { + "0": "background", + "1": "edema", + "2": "non-enhancing tumor", + "3": "enhancing tumour" + }, + "numTraining": 484, + "numTest": 266, + "training": [ + { + "image": "./imagesTr/BRATS_457.nii.gz", + "label": "./labelsTr/BRATS_457.nii.gz" + } + ], + "test": [ + "./imagesTs/BRATS_568.nii.gz", + "./imagesTs/BRATS_515.nii.gz", + "./imagesTs/BRATS_576.nii.gz" + ] +} diff --git a/tests/data/pseudo_cityscapes_dataset/gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_instanceIds.png b/tests/data/pseudo_cityscapes_dataset/gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_instanceIds.png new file mode 100644 index 0000000000..dfe7aea9b5 Binary files /dev/null and b/tests/data/pseudo_cityscapes_dataset/gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_instanceIds.png differ diff --git a/tests/data/pseudo_cityscapes_dataset/gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_labelIds.png b/tests/data/pseudo_cityscapes_dataset/gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_labelIds.png new file mode 100644 index 0000000000..faab6f5541 Binary files /dev/null and b/tests/data/pseudo_cityscapes_dataset/gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_labelIds.png differ diff --git a/tests/data/pseudo_cityscapes_dataset/gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_labelTrainIds.png b/tests/data/pseudo_cityscapes_dataset/gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_labelTrainIds.png new file mode 100644 index 0000000000..659229b924 Binary files /dev/null and b/tests/data/pseudo_cityscapes_dataset/gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_labelTrainIds.png differ diff --git a/tests/data/pseudo_cityscapes_dataset/leftImg8bit/val/frankfurt/frankfurt_000000_000294_leftImg8bit.png b/tests/data/pseudo_cityscapes_dataset/leftImg8bit/val/frankfurt/frankfurt_000000_000294_leftImg8bit.png new file mode 100644 index 0000000000..2c83ee4f53 Binary files /dev/null and b/tests/data/pseudo_cityscapes_dataset/leftImg8bit/val/frankfurt/frankfurt_000000_000294_leftImg8bit.png differ diff --git a/tests/data/pseudo_isaid_dataset/ann_dir/P0000_0_896_1024_1920_instance_color_RGB.png b/tests/data/pseudo_isaid_dataset/ann_dir/P0000_0_896_1024_1920_instance_color_RGB.png new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/data/pseudo_isaid_dataset/ann_dir/P0000_0_896_1536_2432_instance_color_RGB.png 
b/tests/data/pseudo_isaid_dataset/ann_dir/P0000_0_896_1536_2432_instance_color_RGB.png new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/data/pseudo_isaid_dataset/img_dir/P0000_0_896_1024_1920.png b/tests/data/pseudo_isaid_dataset/img_dir/P0000_0_896_1024_1920.png new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/data/pseudo_isaid_dataset/img_dir/P0000_0_896_1536_2432.png b/tests/data/pseudo_isaid_dataset/img_dir/P0000_0_896_1536_2432.png new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/data/pseudo_isaid_dataset/splits/train.txt b/tests/data/pseudo_isaid_dataset/splits/train.txt new file mode 100644 index 0000000000..c310167fe1 --- /dev/null +++ b/tests/data/pseudo_isaid_dataset/splits/train.txt @@ -0,0 +1 @@ +P0000_0_896_1536_2432 diff --git a/tests/data/pseudo_isaid_dataset/splits/val.txt b/tests/data/pseudo_isaid_dataset/splits/val.txt new file mode 100644 index 0000000000..aeff0ee339 --- /dev/null +++ b/tests/data/pseudo_isaid_dataset/splits/val.txt @@ -0,0 +1 @@ +P0000_0_896_1024_1920 diff --git a/tests/data/pseudo_lip_dataset/train_images/684_2150041.jpg b/tests/data/pseudo_lip_dataset/train_images/684_2150041.jpg new file mode 100644 index 0000000000..d6ac13a992 Binary files /dev/null and b/tests/data/pseudo_lip_dataset/train_images/684_2150041.jpg differ diff --git a/tests/data/pseudo_lip_dataset/train_segmentations/684_2150041.png b/tests/data/pseudo_lip_dataset/train_segmentations/684_2150041.png new file mode 100644 index 0000000000..47271e2cab Binary files /dev/null and b/tests/data/pseudo_lip_dataset/train_segmentations/684_2150041.png differ diff --git a/tests/data/pseudo_lip_dataset/val_images/86_185913.jpg b/tests/data/pseudo_lip_dataset/val_images/86_185913.jpg new file mode 100644 index 0000000000..7f66845a7b Binary files /dev/null and b/tests/data/pseudo_lip_dataset/val_images/86_185913.jpg differ diff --git a/tests/data/pseudo_lip_dataset/val_segmentations/86_185913.png b/tests/data/pseudo_lip_dataset/val_segmentations/86_185913.png new file mode 100644 index 0000000000..0708e53902 Binary files /dev/null and b/tests/data/pseudo_lip_dataset/val_segmentations/86_185913.png differ diff --git a/tests/data/pseudo_loveda_dataset/ann_dir/0.png b/tests/data/pseudo_loveda_dataset/ann_dir/0.png new file mode 100644 index 0000000000..7823fd6717 Binary files /dev/null and b/tests/data/pseudo_loveda_dataset/ann_dir/0.png differ diff --git a/tests/data/pseudo_loveda_dataset/ann_dir/1.png b/tests/data/pseudo_loveda_dataset/ann_dir/1.png new file mode 100644 index 0000000000..bc50ac11ec Binary files /dev/null and b/tests/data/pseudo_loveda_dataset/ann_dir/1.png differ diff --git a/tests/data/pseudo_loveda_dataset/ann_dir/2.png b/tests/data/pseudo_loveda_dataset/ann_dir/2.png new file mode 100644 index 0000000000..c182838513 Binary files /dev/null and b/tests/data/pseudo_loveda_dataset/ann_dir/2.png differ diff --git a/tests/data/pseudo_loveda_dataset/img_dir/0.png b/tests/data/pseudo_loveda_dataset/img_dir/0.png new file mode 100644 index 0000000000..03a0652396 Binary files /dev/null and b/tests/data/pseudo_loveda_dataset/img_dir/0.png differ diff --git a/tests/data/pseudo_loveda_dataset/img_dir/1.png b/tests/data/pseudo_loveda_dataset/img_dir/1.png new file mode 100644 index 0000000000..2fe837f228 Binary files /dev/null and b/tests/data/pseudo_loveda_dataset/img_dir/1.png differ diff --git a/tests/data/pseudo_loveda_dataset/img_dir/2.png b/tests/data/pseudo_loveda_dataset/img_dir/2.png new file mode 100644 index 
0000000000..b824499402 Binary files /dev/null and b/tests/data/pseudo_loveda_dataset/img_dir/2.png differ diff --git a/tests/data/pseudo_potsdam_dataset/ann_dir/2_10_0_0_512_512.png b/tests/data/pseudo_potsdam_dataset/ann_dir/2_10_0_0_512_512.png new file mode 100644 index 0000000000..6f22278518 Binary files /dev/null and b/tests/data/pseudo_potsdam_dataset/ann_dir/2_10_0_0_512_512.png differ diff --git a/tests/data/pseudo_potsdam_dataset/img_dir/2_10_0_0_512_512.png b/tests/data/pseudo_potsdam_dataset/img_dir/2_10_0_0_512_512.png new file mode 100644 index 0000000000..7821a18621 Binary files /dev/null and b/tests/data/pseudo_potsdam_dataset/img_dir/2_10_0_0_512_512.png differ diff --git a/tests/data/pseudo_synapse_dataset/ann_dir/case0005_slice000.png b/tests/data/pseudo_synapse_dataset/ann_dir/case0005_slice000.png new file mode 100644 index 0000000000..a22059b58e Binary files /dev/null and b/tests/data/pseudo_synapse_dataset/ann_dir/case0005_slice000.png differ diff --git a/tests/data/pseudo_synapse_dataset/ann_dir/case0005_slice001.png b/tests/data/pseudo_synapse_dataset/ann_dir/case0005_slice001.png new file mode 100644 index 0000000000..a22059b58e Binary files /dev/null and b/tests/data/pseudo_synapse_dataset/ann_dir/case0005_slice001.png differ diff --git a/tests/data/pseudo_synapse_dataset/img_dir/case0005_slice000.jpg b/tests/data/pseudo_synapse_dataset/img_dir/case0005_slice000.jpg new file mode 100644 index 0000000000..51609926b4 Binary files /dev/null and b/tests/data/pseudo_synapse_dataset/img_dir/case0005_slice000.jpg differ diff --git a/tests/data/pseudo_synapse_dataset/img_dir/case0005_slice001.jpg b/tests/data/pseudo_synapse_dataset/img_dir/case0005_slice001.jpg new file mode 100644 index 0000000000..e285b8c7f0 Binary files /dev/null and b/tests/data/pseudo_synapse_dataset/img_dir/case0005_slice001.jpg differ diff --git a/tests/data/pseudo_vaihingen_dataset/ann_dir/area1_0_0_512_512.png b/tests/data/pseudo_vaihingen_dataset/ann_dir/area1_0_0_512_512.png new file mode 100644 index 0000000000..f58e18711a Binary files /dev/null and b/tests/data/pseudo_vaihingen_dataset/ann_dir/area1_0_0_512_512.png differ diff --git a/tests/data/pseudo_vaihingen_dataset/img_dir/area1_0_0_512_512.png b/tests/data/pseudo_vaihingen_dataset/img_dir/area1_0_0_512_512.png new file mode 100644 index 0000000000..648be0b65e Binary files /dev/null and b/tests/data/pseudo_vaihingen_dataset/img_dir/area1_0_0_512_512.png differ diff --git a/tests/test_config.py b/tests/test_config.py index 77a0035e55..bd664ed74f 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,11 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
import glob import os from os.path import dirname, exists, isdir, join, relpath -from mmcv import Config +import numpy as np +from mmengine import Config +from mmengine.dataset import Compose from torch import nn from mmseg.models import build_segmentor +from mmseg.utils import register_all_modules def _get_config_directory(): @@ -27,7 +31,7 @@ def test_config_build_segmentor(): """Test that all segmentation models defined in the configs can be initialized.""" config_dpath = _get_config_directory() - print('Found config_dpath = {!r}'.format(config_dpath)) + print(f'Found config_dpath = {config_dpath!r}') config_fpaths = [] # one config each sub folder @@ -38,26 +42,21 @@ def test_config_build_segmentor(): config_fpaths = [p for p in config_fpaths if p.find('_base_') == -1] config_names = [relpath(p, config_dpath) for p in config_fpaths] - print('Using {} config files'.format(len(config_names))) + print(f'Using {len(config_names)} config files') for config_fname in config_names: config_fpath = join(config_dpath, config_fname) config_mod = Config.fromfile(config_fpath) config_mod.model - config_mod.train_cfg - config_mod.test_cfg - print('Building segmentor, config_fpath = {!r}'.format(config_fpath)) + print(f'Building segmentor, config_fpath = {config_fpath!r}') # Remove pretrained keys to allow for testing in an offline environment if 'pretrained' in config_mod.model: config_mod.model['pretrained'] = None - print('building {}'.format(config_fname)) - segmentor = build_segmentor( - config_mod.model, - train_cfg=config_mod.train_cfg, - test_cfg=config_mod.test_cfg) + print(f'building {config_fname}') + segmentor = build_segmentor(config_mod.model) assert segmentor is not None head_config = config_mod.model['decode_head'] @@ -70,31 +69,30 @@ def test_config_data_pipeline(): CommandLine: xdoctest -m tests/test_config.py test_config_build_data_pipeline """ - from mmcv import Config - from mmseg.datasets.pipelines import Compose - import numpy as np + register_all_modules() config_dpath = _get_config_directory() - print('Found config_dpath = {!r}'.format(config_dpath)) + print(f'Found config_dpath = {config_dpath!r}') import glob config_fpaths = list(glob.glob(join(config_dpath, '**', '*.py'))) config_fpaths = [p for p in config_fpaths if p.find('_base_') == -1] config_names = [relpath(p, config_dpath) for p in config_fpaths] - print('Using {} config files'.format(len(config_names))) + print(f'Using {len(config_names)} config files') for config_fname in config_names: config_fpath = join(config_dpath, config_fname) - print( - 'Building data pipeline, config_fpath = {!r}'.format(config_fpath)) + print(f'Building data pipeline, config_fpath = {config_fpath!r}') config_mod = Config.fromfile(config_fpath) # remove loading pipeline load_img_pipeline = config_mod.train_pipeline.pop(0) to_float32 = load_img_pipeline.get('to_float32', False) - config_mod.train_pipeline.pop(0) - config_mod.test_pipeline.pop(0) + del config_mod.train_pipeline[0] + del config_mod.test_pipeline[0] + # remove loading annotation in test pipeline + del config_mod.test_pipeline[-2] train_pipeline = Compose(config_mod.train_pipeline) test_pipeline = Compose(config_mod.test_pipeline) @@ -110,10 +108,10 @@ def test_config_data_pipeline(): img=img, img_shape=img.shape, ori_shape=img.shape, - gt_semantic_seg=seg) - results['seg_fields'] = ['gt_semantic_seg'] + gt_seg_map=seg) + results['seg_fields'] = ['gt_seg_map'] - print('Test training data pipeline: \n{!r}'.format(train_pipeline)) + print(f'Test training data pipeline: 
\n{train_pipeline!r}') output_results = train_pipeline(results) assert output_results is not None @@ -122,9 +120,8 @@ def test_config_data_pipeline(): ori_filename='test_img.png', img=img, img_shape=img.shape, - ori_shape=img.shape, - ) - print('Test testing data pipeline: \n{!r}'.format(test_pipeline)) + ori_shape=img.shape) + print(f'Test testing data pipeline: \n{test_pipeline!r}') output_results = test_pipeline(results) assert output_results is not None diff --git a/tests/test_data/test_dataset.py b/tests/test_data/test_dataset.py deleted file mode 100644 index ee6d2c47a8..0000000000 --- a/tests/test_data/test_dataset.py +++ /dev/null @@ -1,173 +0,0 @@ -import os.path as osp -from unittest.mock import MagicMock, patch - -import numpy as np -import pytest - -from mmseg.core.evaluation import get_classes, get_palette -from mmseg.datasets import (ADE20KDataset, CityscapesDataset, ConcatDataset, - CustomDataset, PascalVOCDataset, RepeatDataset) - - -def test_classes(): - assert list(CityscapesDataset.CLASSES) == get_classes('cityscapes') - assert list(PascalVOCDataset.CLASSES) == get_classes('voc') == get_classes( - 'pascal_voc') - assert list( - ADE20KDataset.CLASSES) == get_classes('ade') == get_classes('ade20k') - - with pytest.raises(ValueError): - get_classes('unsupported') - - -def test_palette(): - assert CityscapesDataset.PALETTE == get_palette('cityscapes') - assert PascalVOCDataset.PALETTE == get_palette('voc') == get_palette( - 'pascal_voc') - assert ADE20KDataset.PALETTE == get_palette('ade') == get_palette('ade20k') - - with pytest.raises(ValueError): - get_palette('unsupported') - - -@patch('mmseg.datasets.CustomDataset.load_annotations', MagicMock) -@patch('mmseg.datasets.CustomDataset.__getitem__', - MagicMock(side_effect=lambda idx: idx)) -def test_dataset_wrapper(): - # CustomDataset.load_annotations = MagicMock() - # CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx) - dataset_a = CustomDataset(img_dir=MagicMock(), pipeline=[]) - len_a = 10 - dataset_a.img_infos = MagicMock() - dataset_a.img_infos.__len__.return_value = len_a - dataset_b = CustomDataset(img_dir=MagicMock(), pipeline=[]) - len_b = 20 - dataset_b.img_infos = MagicMock() - dataset_b.img_infos.__len__.return_value = len_b - - concat_dataset = ConcatDataset([dataset_a, dataset_b]) - assert concat_dataset[5] == 5 - assert concat_dataset[25] == 15 - assert len(concat_dataset) == len(dataset_a) + len(dataset_b) - - repeat_dataset = RepeatDataset(dataset_a, 10) - assert repeat_dataset[5] == 5 - assert repeat_dataset[15] == 5 - assert repeat_dataset[27] == 7 - assert len(repeat_dataset) == 10 * len(dataset_a) - - -def test_custom_dataset(): - img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True) - crop_size = (512, 1024) - train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(128, 256), ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), - ] - test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(128, 256), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - 
dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) - ] - - # with img_dir and ann_dir - train_dataset = CustomDataset( - train_pipeline, - data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'), - img_dir='imgs/', - ann_dir='gts/', - img_suffix='img.jpg', - seg_map_suffix='gt.png') - assert len(train_dataset) == 5 - - # with img_dir, ann_dir, split - train_dataset = CustomDataset( - train_pipeline, - data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'), - img_dir='imgs/', - ann_dir='gts/', - img_suffix='img.jpg', - seg_map_suffix='gt.png', - split='splits/train.txt') - assert len(train_dataset) == 4 - - # no data_root - train_dataset = CustomDataset( - train_pipeline, - img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'), - ann_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/gts'), - img_suffix='img.jpg', - seg_map_suffix='gt.png') - assert len(train_dataset) == 5 - - # with data_root but img_dir/ann_dir are abs path - train_dataset = CustomDataset( - train_pipeline, - data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'), - img_dir=osp.abspath( - osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs')), - ann_dir=osp.abspath( - osp.join(osp.dirname(__file__), '../data/pseudo_dataset/gts')), - img_suffix='img.jpg', - seg_map_suffix='gt.png') - assert len(train_dataset) == 5 - - # test_mode=True - test_dataset = CustomDataset( - test_pipeline, - img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'), - img_suffix='img.jpg', - test_mode=True) - assert len(test_dataset) == 5 - - # training data get - train_data = train_dataset[0] - assert isinstance(train_data, dict) - - # test data get - test_data = test_dataset[0] - assert isinstance(test_data, dict) - - # get gt seg map - gt_seg_maps = train_dataset.get_gt_seg_maps() - assert len(gt_seg_maps) == 5 - - # evaluation - pseudo_results = [] - for gt_seg_map in gt_seg_maps: - h, w = gt_seg_map.shape - pseudo_results.append(np.random.randint(low=0, high=7, size=(h, w))) - eval_results = train_dataset.evaluate(pseudo_results) - assert isinstance(eval_results, dict) - assert 'mIoU' in eval_results - assert 'mAcc' in eval_results - assert 'aAcc' in eval_results - - # evaluation with CLASSES - train_dataset.CLASSES = tuple(['a'] * 7) - eval_results = train_dataset.evaluate(pseudo_results) - assert isinstance(eval_results, dict) - assert 'mIoU' in eval_results - assert 'mAcc' in eval_results - assert 'aAcc' in eval_results diff --git a/tests/test_data/test_dataset_builder.py b/tests/test_data/test_dataset_builder.py deleted file mode 100644 index c6827e4d17..0000000000 --- a/tests/test_data/test_dataset_builder.py +++ /dev/null @@ -1,192 +0,0 @@ -import math -import os.path as osp - -import pytest -from torch.utils.data import (DistributedSampler, RandomSampler, - SequentialSampler) - -from mmseg.datasets import (DATASETS, ConcatDataset, build_dataloader, - build_dataset) - - -@DATASETS.register_module() -class ToyDataset(object): - - def __init__(self, cnt=0): - self.cnt = cnt - - def __item__(self, idx): - return idx - - def __len__(self): - return 100 - - -def test_build_dataset(): - cfg = dict(type='ToyDataset') - dataset = build_dataset(cfg) - assert isinstance(dataset, ToyDataset) - assert dataset.cnt == 0 - dataset = build_dataset(cfg, default_args=dict(cnt=1)) - assert isinstance(dataset, ToyDataset) - assert dataset.cnt == 1 - - data_root = 
osp.join(osp.dirname(__file__), '../data/pseudo_dataset') - img_dir = 'imgs/' - ann_dir = 'gts/' - - # We use same dir twice for simplicity - # with ann_dir - cfg = dict( - type='CustomDataset', - pipeline=[], - data_root=data_root, - img_dir=[img_dir, img_dir], - ann_dir=[ann_dir, ann_dir]) - dataset = build_dataset(cfg) - assert isinstance(dataset, ConcatDataset) - assert len(dataset) == 10 - - # with ann_dir, split - cfg = dict( - type='CustomDataset', - pipeline=[], - data_root=data_root, - img_dir=img_dir, - ann_dir=ann_dir, - split=['splits/train.txt', 'splits/val.txt']) - dataset = build_dataset(cfg) - assert isinstance(dataset, ConcatDataset) - assert len(dataset) == 5 - - # with ann_dir, split - cfg = dict( - type='CustomDataset', - pipeline=[], - data_root=data_root, - img_dir=img_dir, - ann_dir=[ann_dir, ann_dir], - split=['splits/train.txt', 'splits/val.txt']) - dataset = build_dataset(cfg) - assert isinstance(dataset, ConcatDataset) - assert len(dataset) == 5 - - # test mode - cfg = dict( - type='CustomDataset', - pipeline=[], - data_root=data_root, - img_dir=[img_dir, img_dir], - test_mode=True) - dataset = build_dataset(cfg) - assert isinstance(dataset, ConcatDataset) - assert len(dataset) == 10 - - # test mode with splits - cfg = dict( - type='CustomDataset', - pipeline=[], - data_root=data_root, - img_dir=[img_dir, img_dir], - split=['splits/val.txt', 'splits/val.txt'], - test_mode=True) - dataset = build_dataset(cfg) - assert isinstance(dataset, ConcatDataset) - assert len(dataset) == 2 - - # len(ann_dir) should be zero or len(img_dir) when len(img_dir) > 1 - with pytest.raises(AssertionError): - cfg = dict( - type='CustomDataset', - pipeline=[], - data_root=data_root, - img_dir=[img_dir, img_dir], - ann_dir=[ann_dir, ann_dir, ann_dir]) - build_dataset(cfg) - - # len(splits) should be zero or len(img_dir) when len(img_dir) > 1 - with pytest.raises(AssertionError): - cfg = dict( - type='CustomDataset', - pipeline=[], - data_root=data_root, - img_dir=[img_dir, img_dir], - split=['splits/val.txt', 'splits/val.txt', 'splits/val.txt']) - build_dataset(cfg) - - # len(splits) == len(ann_dir) when only len(img_dir) == 1 and len( - # ann_dir) > 1 - with pytest.raises(AssertionError): - cfg = dict( - type='CustomDataset', - pipeline=[], - data_root=data_root, - img_dir=img_dir, - ann_dir=[ann_dir, ann_dir], - split=['splits/val.txt', 'splits/val.txt', 'splits/val.txt']) - build_dataset(cfg) - - -def test_build_dataloader(): - dataset = ToyDataset() - samples_per_gpu = 3 - # dist=True, shuffle=True, 1GPU - dataloader = build_dataloader( - dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=2) - assert dataloader.batch_size == samples_per_gpu - assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu)) - assert isinstance(dataloader.sampler, DistributedSampler) - assert dataloader.sampler.shuffle - - # dist=True, shuffle=False, 1GPU - dataloader = build_dataloader( - dataset, - samples_per_gpu=samples_per_gpu, - workers_per_gpu=2, - shuffle=False) - assert dataloader.batch_size == samples_per_gpu - assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu)) - assert isinstance(dataloader.sampler, DistributedSampler) - assert not dataloader.sampler.shuffle - - # dist=True, shuffle=True, 8GPU - dataloader = build_dataloader( - dataset, - samples_per_gpu=samples_per_gpu, - workers_per_gpu=2, - num_gpus=8) - assert dataloader.batch_size == samples_per_gpu - assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu)) - assert 
dataloader.num_workers == 2 - - # dist=False, shuffle=True, 1GPU - dataloader = build_dataloader( - dataset, - samples_per_gpu=samples_per_gpu, - workers_per_gpu=2, - dist=False) - assert dataloader.batch_size == samples_per_gpu - assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu)) - assert isinstance(dataloader.sampler, RandomSampler) - assert dataloader.num_workers == 2 - - # dist=False, shuffle=False, 1GPU - dataloader = build_dataloader( - dataset, - samples_per_gpu=3, - workers_per_gpu=2, - shuffle=False, - dist=False) - assert dataloader.batch_size == samples_per_gpu - assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu)) - assert isinstance(dataloader.sampler, SequentialSampler) - assert dataloader.num_workers == 2 - - # dist=False, shuffle=True, 8GPU - dataloader = build_dataloader( - dataset, samples_per_gpu=3, workers_per_gpu=2, num_gpus=8, dist=False) - assert dataloader.batch_size == samples_per_gpu * 8 - assert len(dataloader) == int( - math.ceil(len(dataset) / samples_per_gpu / 8)) - assert isinstance(dataloader.sampler, RandomSampler) - assert dataloader.num_workers == 16 diff --git a/tests/test_data/test_loading.py b/tests/test_data/test_loading.py deleted file mode 100644 index 653b3daf4e..0000000000 --- a/tests/test_data/test_loading.py +++ /dev/null @@ -1,100 +0,0 @@ -import copy -import os.path as osp - -import numpy as np - -from mmseg.datasets.pipelines import LoadAnnotations, LoadImageFromFile - - -class TestLoading(object): - - @classmethod - def setup_class(cls): - cls.data_prefix = osp.join(osp.dirname(__file__), '../data') - - def test_load_img(self): - results = dict( - img_prefix=self.data_prefix, img_info=dict(filename='color.jpg')) - transform = LoadImageFromFile() - results = transform(copy.deepcopy(results)) - assert results['filename'] == osp.join(self.data_prefix, 'color.jpg') - assert results['ori_filename'] == 'color.jpg' - assert results['img'].shape == (288, 512, 3) - assert results['img'].dtype == np.uint8 - assert results['img_shape'] == (288, 512, 3) - assert results['ori_shape'] == (288, 512, 3) - assert results['pad_shape'] == (288, 512, 3) - assert results['scale_factor'] == 1.0 - np.testing.assert_equal(results['img_norm_cfg']['mean'], - np.zeros(3, dtype=np.float32)) - assert repr(transform) == transform.__class__.__name__ + \ - "(to_float32=False,color_type='color',imdecode_backend='cv2')" - - # no img_prefix - results = dict( - img_prefix=None, img_info=dict(filename='tests/data/color.jpg')) - transform = LoadImageFromFile() - results = transform(copy.deepcopy(results)) - assert results['filename'] == 'tests/data/color.jpg' - assert results['ori_filename'] == 'tests/data/color.jpg' - assert results['img'].shape == (288, 512, 3) - - # to_float32 - transform = LoadImageFromFile(to_float32=True) - results = transform(copy.deepcopy(results)) - assert results['img'].dtype == np.float32 - - # gray image - results = dict( - img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg')) - transform = LoadImageFromFile() - results = transform(copy.deepcopy(results)) - assert results['img'].shape == (288, 512, 3) - assert results['img'].dtype == np.uint8 - - transform = LoadImageFromFile(color_type='unchanged') - results = transform(copy.deepcopy(results)) - assert results['img'].shape == (288, 512) - assert results['img'].dtype == np.uint8 - np.testing.assert_equal(results['img_norm_cfg']['mean'], - np.zeros(1, dtype=np.float32)) - - def test_load_seg(self): - results = dict( - seg_prefix=self.data_prefix, - 
ann_info=dict(seg_map='seg.png'), - seg_fields=[]) - transform = LoadAnnotations() - results = transform(copy.deepcopy(results)) - assert results['seg_fields'] == ['gt_semantic_seg'] - assert results['gt_semantic_seg'].shape == (288, 512) - assert results['gt_semantic_seg'].dtype == np.uint8 - assert repr(transform) == transform.__class__.__name__ + \ - "(reduce_zero_label=False,imdecode_backend='pillow')" - - # no img_prefix - results = dict( - seg_prefix=None, - ann_info=dict(seg_map='tests/data/seg.png'), - seg_fields=[]) - transform = LoadAnnotations() - results = transform(copy.deepcopy(results)) - assert results['gt_semantic_seg'].shape == (288, 512) - assert results['gt_semantic_seg'].dtype == np.uint8 - - # reduce_zero_label - transform = LoadAnnotations(reduce_zero_label=True) - results = transform(copy.deepcopy(results)) - assert results['gt_semantic_seg'].shape == (288, 512) - assert results['gt_semantic_seg'].dtype == np.uint8 - - # mmcv backend - results = dict( - seg_prefix=self.data_prefix, - ann_info=dict(seg_map='seg.png'), - seg_fields=[]) - transform = LoadAnnotations(imdecode_backend='pillow') - results = transform(copy.deepcopy(results)) - # this image is saved by PIL - assert results['gt_semantic_seg'].shape == (288, 512) - assert results['gt_semantic_seg'].dtype == np.uint8 diff --git a/tests/test_data/test_transform.py b/tests/test_data/test_transform.py deleted file mode 100644 index 7a1ca0dde3..0000000000 --- a/tests/test_data/test_transform.py +++ /dev/null @@ -1,242 +0,0 @@ -import copy -import os.path as osp - -import mmcv -import numpy as np -import pytest -from mmcv.utils import build_from_cfg -from PIL import Image - -from mmseg.datasets.builder import PIPELINES - - -def test_resize(): - # test assertion if img_scale is a list - with pytest.raises(AssertionError): - transform = dict(type='Resize', img_scale=[1333, 800], keep_ratio=True) - build_from_cfg(transform, PIPELINES) - - # test assertion if len(img_scale) while ratio_range is not None - with pytest.raises(AssertionError): - transform = dict( - type='Resize', - img_scale=[(1333, 800), (1333, 600)], - ratio_range=(0.9, 1.1), - keep_ratio=True) - build_from_cfg(transform, PIPELINES) - - # test assertion for invalid multiscale_mode - with pytest.raises(AssertionError): - transform = dict( - type='Resize', - img_scale=[(1333, 800), (1333, 600)], - keep_ratio=True, - multiscale_mode='2333') - build_from_cfg(transform, PIPELINES) - - transform = dict(type='Resize', img_scale=(1333, 800), keep_ratio=True) - resize_module = build_from_cfg(transform, PIPELINES) - - results = dict() - img = mmcv.imread( - osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - # Set initial values for default meta_keys - results['pad_shape'] = img.shape - results['scale_factor'] = 1.0 - - resized_results = resize_module(results.copy()) - assert resized_results['img_shape'] == (750, 1333, 3) - - # test keep_ratio=False - transform = dict( - type='Resize', - img_scale=(1280, 800), - multiscale_mode='value', - keep_ratio=False) - resize_module = build_from_cfg(transform, PIPELINES) - resized_results = resize_module(results.copy()) - assert resized_results['img_shape'] == (800, 1280, 3) - - # test multiscale_mode='range' - transform = dict( - type='Resize', - img_scale=[(1333, 400), (1333, 1200)], - multiscale_mode='range', - keep_ratio=True) - resize_module = build_from_cfg(transform, PIPELINES) - resized_results = 
resize_module(results.copy()) - assert max(resized_results['img_shape'][:2]) <= 1333 - assert min(resized_results['img_shape'][:2]) >= 400 - assert min(resized_results['img_shape'][:2]) <= 1200 - - # test multiscale_mode='value' - transform = dict( - type='Resize', - img_scale=[(1333, 800), (1333, 400)], - multiscale_mode='value', - keep_ratio=True) - resize_module = build_from_cfg(transform, PIPELINES) - resized_results = resize_module(results.copy()) - assert resized_results['img_shape'] in [(750, 1333, 3), (400, 711, 3)] - - # test multiscale_mode='range' - transform = dict( - type='Resize', - img_scale=(1333, 800), - ratio_range=(0.9, 1.1), - keep_ratio=True) - resize_module = build_from_cfg(transform, PIPELINES) - resized_results = resize_module(results.copy()) - assert max(resized_results['img_shape'][:2]) <= 1333 * 1.1 - - -def test_flip(): - # test assertion for invalid flip_ratio - with pytest.raises(AssertionError): - transform = dict(type='RandomFlip', flip_ratio=1.5) - build_from_cfg(transform, PIPELINES) - - # test assertion for invalid direction - with pytest.raises(AssertionError): - transform = dict( - type='RandomFlip', flip_ratio=1, direction='horizonta') - build_from_cfg(transform, PIPELINES) - - transform = dict(type='RandomFlip', flip_ratio=1) - flip_module = build_from_cfg(transform, PIPELINES) - - results = dict() - img = mmcv.imread( - osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') - original_img = copy.deepcopy(img) - seg = np.array( - Image.open(osp.join(osp.dirname(__file__), '../data/seg.png'))) - original_seg = copy.deepcopy(seg) - results['img'] = img - results['gt_semantic_seg'] = seg - results['seg_fields'] = ['gt_semantic_seg'] - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - # Set initial values for default meta_keys - results['pad_shape'] = img.shape - results['scale_factor'] = 1.0 - - results = flip_module(results) - - flip_module = build_from_cfg(transform, PIPELINES) - results = flip_module(results) - assert np.equal(original_img, results['img']).all() - assert np.equal(original_seg, results['gt_semantic_seg']).all() - - -def test_random_crop(): - # test assertion for invalid random crop - with pytest.raises(AssertionError): - transform = dict(type='RandomCrop', crop_size=(-1, 0)) - build_from_cfg(transform, PIPELINES) - - results = dict() - img = mmcv.imread( - osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') - seg = np.array( - Image.open(osp.join(osp.dirname(__file__), '../data/seg.png'))) - results['img'] = img - results['gt_semantic_seg'] = seg - results['seg_fields'] = ['gt_semantic_seg'] - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - # Set initial values for default meta_keys - results['pad_shape'] = img.shape - results['scale_factor'] = 1.0 - - h, w, _ = img.shape - transform = dict(type='RandomCrop', crop_size=(h - 20, w - 20)) - crop_module = build_from_cfg(transform, PIPELINES) - results = crop_module(results) - assert results['img'].shape[:2] == (h - 20, w - 20) - assert results['img_shape'][:2] == (h - 20, w - 20) - assert results['gt_semantic_seg'].shape[:2] == (h - 20, w - 20) - - -def test_pad(): - # test assertion if both size_divisor and size is None - with pytest.raises(AssertionError): - transform = dict(type='Pad') - build_from_cfg(transform, PIPELINES) - - transform = dict(type='Pad', size_divisor=32) - transform = build_from_cfg(transform, PIPELINES) - results = dict() - img = mmcv.imread( - osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') 
- original_img = copy.deepcopy(img) - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - # Set initial values for default meta_keys - results['pad_shape'] = img.shape - results['scale_factor'] = 1.0 - - results = transform(results) - # original img already divisible by 32 - assert np.equal(results['img'], original_img).all() - img_shape = results['img'].shape - assert img_shape[0] % 32 == 0 - assert img_shape[1] % 32 == 0 - - resize_transform = dict( - type='Resize', img_scale=(1333, 800), keep_ratio=True) - resize_module = build_from_cfg(resize_transform, PIPELINES) - results = resize_module(results) - results = transform(results) - img_shape = results['img'].shape - assert img_shape[0] % 32 == 0 - assert img_shape[1] % 32 == 0 - - -def test_normalize(): - img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True) - transform = dict(type='Normalize', **img_norm_cfg) - transform = build_from_cfg(transform, PIPELINES) - results = dict() - img = mmcv.imread( - osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') - original_img = copy.deepcopy(img) - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - # Set initial values for default meta_keys - results['pad_shape'] = img.shape - results['scale_factor'] = 1.0 - - results = transform(results) - - mean = np.array(img_norm_cfg['mean']) - std = np.array(img_norm_cfg['std']) - converted_img = (original_img[..., ::-1] - mean) / std - assert np.allclose(results['img'], converted_img) - - -def test_seg_rescale(): - results = dict() - seg = np.array( - Image.open(osp.join(osp.dirname(__file__), '../data/seg.png'))) - results['gt_semantic_seg'] = seg - results['seg_fields'] = ['gt_semantic_seg'] - h, w = seg.shape - - transform = dict(type='SegRescale', scale_factor=1. / 2) - rescale_module = build_from_cfg(transform, PIPELINES) - rescale_results = rescale_module(results.copy()) - assert rescale_results['gt_semantic_seg'].shape == (h // 2, w // 2) - - transform = dict(type='SegRescale', scale_factor=1) - rescale_module = build_from_cfg(transform, PIPELINES) - rescale_results = rescale_module(results.copy()) - assert rescale_results['gt_semantic_seg'].shape == (h, w) diff --git a/tests/test_datasets/test_dataset.py b/tests/test_datasets/test_dataset.py new file mode 100644 index 0000000000..d973f0dcd4 --- /dev/null +++ b/tests/test_datasets/test_dataset.py @@ -0,0 +1,404 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
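The new dataset tests below exercise the 1.x-style constructor, where directories are passed through `data_prefix` and class names through `metainfo`. A minimal sketch of that usage, assuming a hypothetical `data_root` and class list (only the keyword arguments shown in the tests are used):

    # Sketch only: 'data/my_dataset' and the class names are hypothetical.
    from mmseg.datasets import BaseSegDataset

    dataset = BaseSegDataset(
        data_root='data/my_dataset',
        data_prefix=dict(img_path='imgs', seg_map_path='gts'),
        img_suffix='.jpg',
        seg_map_suffix='.png',
        metainfo=dict(classes=('background', 'foreground')),
        pipeline=[])
    print(len(dataset), dataset.metainfo['classes'])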
+import os +import os.path as osp +import tempfile +from unittest.mock import MagicMock + +import pytest + +from mmseg.datasets import (ADE20KDataset, BaseSegDataset, CityscapesDataset, + COCOStuffDataset, DecathlonDataset, ISPRSDataset, + LIPDataset, LoveDADataset, PascalVOCDataset, + PotsdamDataset, SynapseDataset, iSAIDDataset) +from mmseg.registry import DATASETS +from mmseg.utils import get_classes, get_palette + + +def test_classes(): + assert list( + CityscapesDataset.METAINFO['classes']) == get_classes('cityscapes') + assert list(PascalVOCDataset.METAINFO['classes']) == get_classes( + 'voc') == get_classes('pascal_voc') + assert list(ADE20KDataset.METAINFO['classes']) == get_classes( + 'ade') == get_classes('ade20k') + assert list( + COCOStuffDataset.METAINFO['classes']) == get_classes('cocostuff') + assert list(LoveDADataset.METAINFO['classes']) == get_classes('loveda') + assert list(PotsdamDataset.METAINFO['classes']) == get_classes('potsdam') + assert list(ISPRSDataset.METAINFO['classes']) == get_classes('vaihingen') + assert list(iSAIDDataset.METAINFO['classes']) == get_classes('isaid') + assert list(SynapseDataset.METAINFO['classes']) == get_classes('synapse') + + with pytest.raises(ValueError): + get_classes('unsupported') + + +def test_classes_file_path(): + tmp_file = tempfile.NamedTemporaryFile() + classes_path = f'{tmp_file.name}.txt' + train_pipeline = [] + kwargs = dict( + pipeline=train_pipeline, + data_prefix=dict(img_path='./', seg_map_path='./'), + metainfo=dict(classes=classes_path)) + + # classes.txt with full categories + categories = get_classes('cityscapes') + with open(classes_path, 'w') as f: + f.write('\n'.join(categories)) + dataset = CityscapesDataset(**kwargs) + assert list(dataset.metainfo['classes']) == categories + assert dataset.label_map is None + + # classes.txt with sub categories + categories = ['road', 'sidewalk', 'building'] + with open(classes_path, 'w') as f: + f.write('\n'.join(categories)) + dataset = CityscapesDataset(**kwargs) + assert list(dataset.metainfo['classes']) == categories + assert dataset.label_map is not None + + # classes.txt with unknown categories + categories = ['road', 'sidewalk', 'unknown'] + with open(classes_path, 'w') as f: + f.write('\n'.join(categories)) + + with pytest.raises(ValueError): + CityscapesDataset(**kwargs) + + tmp_file.close() + os.remove(classes_path) + assert not osp.exists(classes_path) + + +def test_palette(): + assert CityscapesDataset.METAINFO['palette'] == get_palette('cityscapes') + assert PascalVOCDataset.METAINFO['palette'] == get_palette( + 'voc') == get_palette('pascal_voc') + assert ADE20KDataset.METAINFO['palette'] == get_palette( + 'ade') == get_palette('ade20k') + assert LoveDADataset.METAINFO['palette'] == get_palette('loveda') + assert PotsdamDataset.METAINFO['palette'] == get_palette('potsdam') + assert COCOStuffDataset.METAINFO['palette'] == get_palette('cocostuff') + assert iSAIDDataset.METAINFO['palette'] == get_palette('isaid') + assert SynapseDataset.METAINFO['palette'] == get_palette('synapse') + + with pytest.raises(ValueError): + get_palette('unsupported') + + +def test_custom_dataset(): + + # with 'img_path' and 'seg_map_path' in data_prefix + train_dataset = BaseSegDataset( + data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'), + data_prefix=dict( + img_path='imgs/', + seg_map_path='gts/', + ), + img_suffix='img.jpg', + seg_map_suffix='gt.png') + assert len(train_dataset) == 5 + + # with 'img_path' and 'seg_map_path' in data_prefix and ann_file + train_dataset 
= BaseSegDataset( + data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'), + data_prefix=dict( + img_path='imgs/', + seg_map_path='gts/', + ), + img_suffix='img.jpg', + seg_map_suffix='gt.png', + ann_file='splits/train.txt') + assert len(train_dataset) == 4 + + # no data_root + train_dataset = BaseSegDataset( + data_prefix=dict( + img_path=osp.join( + osp.dirname(__file__), '../data/pseudo_dataset/imgs'), + seg_map_path=osp.join( + osp.dirname(__file__), '../data/pseudo_dataset/gts')), + img_suffix='img.jpg', + seg_map_suffix='gt.png') + assert len(train_dataset) == 5 + + # with data_root but 'img_path' and 'seg_map_path' in data_prefix are + # abs path + train_dataset = BaseSegDataset( + data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'), + data_prefix=dict( + img_path=osp.join( + osp.dirname(__file__), '../data/pseudo_dataset/imgs'), + seg_map_path=osp.join( + osp.dirname(__file__), '../data/pseudo_dataset/gts')), + img_suffix='img.jpg', + seg_map_suffix='gt.png') + assert len(train_dataset) == 5 + + # test_mode=True + test_dataset = BaseSegDataset( + data_prefix=dict( + img_path=osp.join( + osp.dirname(__file__), '../data/pseudo_dataset/imgs')), + img_suffix='img.jpg', + test_mode=True, + metainfo=dict(classes=('pseudo_class', ))) + assert len(test_dataset) == 5 + + # training data get + train_data = train_dataset[0] + assert isinstance(train_data, dict) + assert 'img_path' in train_data and osp.isfile(train_data['img_path']) + assert 'seg_map_path' in train_data and osp.isfile( + train_data['seg_map_path']) + + # test data get + test_data = test_dataset[0] + assert isinstance(test_data, dict) + assert 'img_path' in train_data and osp.isfile(train_data['img_path']) + assert 'seg_map_path' in train_data and osp.isfile( + train_data['seg_map_path']) + + +def test_ade(): + test_dataset = ADE20KDataset( + pipeline=[], + data_prefix=dict( + img_path=osp.join( + osp.dirname(__file__), '../data/pseudo_dataset/imgs'))) + assert len(test_dataset) == 5 + + +def test_cityscapes(): + test_dataset = CityscapesDataset( + pipeline=[], + data_prefix=dict( + img_path=osp.join( + osp.dirname(__file__), + '../data/pseudo_cityscapes_dataset/leftImg8bit/val'), + seg_map_path=osp.join( + osp.dirname(__file__), + '../data/pseudo_cityscapes_dataset/gtFine/val'))) + assert len(test_dataset) == 1 + + +def test_loveda(): + test_dataset = LoveDADataset( + pipeline=[], + data_prefix=dict( + img_path=osp.join( + osp.dirname(__file__), + '../data/pseudo_loveda_dataset/img_dir'), + seg_map_path=osp.join( + osp.dirname(__file__), + '../data/pseudo_loveda_dataset/ann_dir'))) + assert len(test_dataset) == 3 + + +def test_potsdam(): + test_dataset = PotsdamDataset( + pipeline=[], + data_prefix=dict( + img_path=osp.join( + osp.dirname(__file__), + '../data/pseudo_potsdam_dataset/img_dir'), + seg_map_path=osp.join( + osp.dirname(__file__), + '../data/pseudo_potsdam_dataset/ann_dir'))) + assert len(test_dataset) == 1 + + +def test_vaihingen(): + test_dataset = ISPRSDataset( + pipeline=[], + data_prefix=dict( + img_path=osp.join( + osp.dirname(__file__), + '../data/pseudo_vaihingen_dataset/img_dir'), + seg_map_path=osp.join( + osp.dirname(__file__), + '../data/pseudo_vaihingen_dataset/ann_dir'))) + assert len(test_dataset) == 1 + + +def test_synapse(): + test_dataset = SynapseDataset( + pipeline=[], + data_prefix=dict( + img_path=osp.join( + osp.dirname(__file__), + '../data/pseudo_synapse_dataset/img_dir'), + seg_map_path=osp.join( + osp.dirname(__file__), + 
'../data/pseudo_synapse_dataset/ann_dir'))) + assert len(test_dataset) == 2 + + +def test_isaid(): + test_dataset = iSAIDDataset( + pipeline=[], + data_prefix=dict( + img_path=osp.join( + osp.dirname(__file__), '../data/pseudo_isaid_dataset/img_dir'), + seg_map_path=osp.join( + osp.dirname(__file__), + '../data/pseudo_isaid_dataset/ann_dir'))) + assert len(test_dataset) == 2 + test_dataset = iSAIDDataset( + data_prefix=dict( + img_path=osp.join( + osp.dirname(__file__), '../data/pseudo_isaid_dataset/img_dir'), + seg_map_path=osp.join( + osp.dirname(__file__), + '../data/pseudo_isaid_dataset/ann_dir')), + ann_file=osp.join( + osp.dirname(__file__), + '../data/pseudo_isaid_dataset/splits/train.txt')) + assert len(test_dataset) == 1 + + +def test_decathlon(): + data_root = osp.join(osp.dirname(__file__), '../data') + # test load training dataset + test_dataset = DecathlonDataset( + pipeline=[], data_root=data_root, ann_file='dataset.json') + assert len(test_dataset) == 1 + + # test load test dataset + test_dataset = DecathlonDataset( + pipeline=[], + data_root=data_root, + ann_file='dataset.json', + test_mode=True) + assert len(test_dataset) == 3 + + +def test_lip(): + data_root = osp.join(osp.dirname(__file__), '../data/pseudo_lip_dataset') + # train load training dataset + train_dataset = LIPDataset( + pipeline=[], + data_root=data_root, + data_prefix=dict( + img_path='train_images', seg_map_path='train_segmentations')) + assert len(train_dataset) == 1 + + # test load training dataset + test_dataset = LIPDataset( + pipeline=[], + data_root=data_root, + data_prefix=dict( + img_path='val_images', seg_map_path='val_segmentations')) + assert len(test_dataset) == 1 + + +@pytest.mark.parametrize('dataset, classes', [ + ('ADE20KDataset', ('wall', 'building')), + ('CityscapesDataset', ('road', 'sidewalk')), + ('BaseSegDataset', ('bus', 'car')), + ('PascalVOCDataset', ('aeroplane', 'bicycle')), +]) +def test_custom_classes_override_default(dataset, classes): + + dataset_class = DATASETS.get(dataset) + if isinstance(dataset_class, PascalVOCDataset): + tmp_file = tempfile.NamedTemporaryFile() + ann_file = f'{tmp_file.name}.txt' + else: + ann_file = MagicMock() + + original_classes = dataset_class.METAINFO.get('classes', None) + + # Test setting classes as a tuple + custom_dataset = dataset_class( + data_prefix=dict(img_path=MagicMock()), + ann_file=ann_file, + metainfo=dict(classes=classes), + test_mode=True, + lazy_init=True) + + assert custom_dataset.metainfo['classes'] != original_classes + assert custom_dataset.metainfo['classes'] == classes + if not isinstance(custom_dataset, BaseSegDataset): + assert isinstance(custom_dataset.label_map, dict) + + # Test setting classes as a list + custom_dataset = dataset_class( + data_prefix=dict(img_path=MagicMock()), + ann_file=ann_file, + metainfo=dict(classes=list(classes)), + test_mode=True, + lazy_init=True) + + assert custom_dataset.metainfo['classes'] != original_classes + assert custom_dataset.metainfo['classes'] == list(classes) + if not isinstance(custom_dataset, BaseSegDataset): + assert isinstance(custom_dataset.label_map, dict) + + # Test overriding not a subset + custom_dataset = dataset_class( + ann_file=ann_file, + data_prefix=dict(img_path=MagicMock()), + metainfo=dict(classes=[classes[0]]), + test_mode=True, + lazy_init=True) + + assert custom_dataset.metainfo['classes'] != original_classes + assert custom_dataset.metainfo['classes'] == [classes[0]] + if not isinstance(custom_dataset, BaseSegDataset): + assert 
isinstance(custom_dataset.label_map, dict) + + # Test default behavior + if dataset_class is BaseSegDataset: + with pytest.raises(AssertionError): + custom_dataset = dataset_class( + ann_file=ann_file, + data_prefix=dict(img_path=MagicMock()), + metainfo=None, + test_mode=True, + lazy_init=True) + else: + custom_dataset = dataset_class( + data_prefix=dict(img_path=MagicMock()), + ann_file=ann_file, + metainfo=None, + test_mode=True, + lazy_init=True) + + assert custom_dataset.METAINFO['classes'] == original_classes + assert custom_dataset.label_map is None + + +def test_custom_dataset_random_palette_is_generated(): + dataset = BaseSegDataset( + pipeline=[], + data_prefix=dict(img_path=MagicMock()), + ann_file=MagicMock(), + metainfo=dict(classes=('bus', 'car')), + lazy_init=True, + test_mode=True) + assert len(dataset.metainfo['palette']) == 2 + for class_color in dataset.metainfo['palette']: + assert len(class_color) == 3 + assert all(x >= 0 and x <= 255 for x in class_color) + + +def test_custom_dataset_custom_palette(): + dataset = BaseSegDataset( + data_prefix=dict(img_path=MagicMock()), + ann_file=MagicMock(), + metainfo=dict( + classes=('bus', 'car'), palette=[[100, 100, 100], [200, 200, + 200]]), + lazy_init=True, + test_mode=True) + assert tuple(dataset.metainfo['palette']) == tuple([[100, 100, 100], + [200, 200, 200]]) + # test custom class and palette don't match + with pytest.raises(ValueError): + dataset = BaseSegDataset( + data_prefix=dict(img_path=MagicMock()), + ann_file=MagicMock(), + metainfo=dict(classes=('bus', 'car'), palette=[[200, 200, 200]]), + lazy_init=True) diff --git a/tests/test_datasets/test_dataset_builder.py b/tests/test_datasets/test_dataset_builder.py new file mode 100644 index 0000000000..099c5b1df0 --- /dev/null +++ b/tests/test_datasets/test_dataset_builder.py @@ -0,0 +1,152 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
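The builder test below replaces the removed `build_dataset`/`build_dataloader` helpers with the `DATASETS` registry plus the mmengine dataset wrappers. A minimal sketch of that flow, assuming a hypothetical `data_root`:

    # Sketch only: 'data/my_dataset' is a hypothetical path.
    from mmengine.dataset import ConcatDataset, RepeatDataset

    from mmseg.registry import DATASETS
    from mmseg.utils import register_all_modules

    register_all_modules()

    cfg = dict(
        type='BaseSegDataset',
        pipeline=[],
        data_root='data/my_dataset',
        data_prefix=dict(img_path='imgs', seg_map_path='gts'))
    dataset = DATASETS.build(cfg)
    assert len(RepeatDataset(dataset=dataset, times=5)) == 5 * len(dataset)
    assert len(ConcatDataset(datasets=[dataset, dataset])) == 2 * len(dataset)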
+import os.path as osp + +from mmengine.dataset import ConcatDataset, RepeatDataset + +from mmseg.datasets import MultiImageMixDataset +from mmseg.registry import DATASETS +from mmseg.utils import register_all_modules + +register_all_modules() + + +@DATASETS.register_module() +class ToyDataset: + + def __init__(self, cnt=0): + self.cnt = cnt + + def __item__(self, idx): + return idx + + def __len__(self): + return 100 + + +def test_build_dataset(): + cfg = dict(type='ToyDataset') + dataset = DATASETS.build(cfg) + assert isinstance(dataset, ToyDataset) + assert dataset.cnt == 0 + dataset = DATASETS.build(cfg, default_args=dict(cnt=1)) + assert isinstance(dataset, ToyDataset) + assert dataset.cnt == 1 + + data_root = osp.join(osp.dirname(__file__), '../data/pseudo_dataset') + data_prefix = dict(img_path='imgs/', seg_map_path='gts/') + + # test RepeatDataset + cfg = dict( + type='BaseSegDataset', + pipeline=[], + data_root=data_root, + data_prefix=data_prefix, + serialize_data=False) + dataset = DATASETS.build(cfg) + dataset_repeat = RepeatDataset(dataset=dataset, times=5) + assert isinstance(dataset_repeat, RepeatDataset) + assert len(dataset_repeat) == 25 + + # test ConcatDataset + # We use same dir twice for simplicity + # with data_prefix.seg_map_path + cfg1 = dict( + type='BaseSegDataset', + pipeline=[], + data_root=data_root, + data_prefix=data_prefix, + serialize_data=False) + cfg2 = dict( + type='BaseSegDataset', + pipeline=[], + data_root=data_root, + data_prefix=data_prefix, + serialize_data=False) + dataset1 = DATASETS.build(cfg1) + dataset2 = DATASETS.build(cfg2) + dataset_concat = ConcatDataset(datasets=[dataset1, dataset2]) + assert isinstance(dataset_concat, ConcatDataset) + assert len(dataset_concat) == 10 + + # test MultiImageMixDataset + dataset = MultiImageMixDataset(dataset=dataset_concat, pipeline=[]) + assert isinstance(dataset, MultiImageMixDataset) + assert len(dataset) == 10 + + cfg = dict(type='ConcatDataset', datasets=[cfg1, cfg2]) + + dataset = MultiImageMixDataset(dataset=cfg, pipeline=[]) + assert isinstance(dataset, MultiImageMixDataset) + assert len(dataset) == 10 + + # with data_prefix.seg_map_path, ann_file + cfg1 = dict( + type='BaseSegDataset', + pipeline=[], + data_root=data_root, + data_prefix=data_prefix, + ann_file='splits/train.txt', + serialize_data=False) + cfg2 = dict( + type='BaseSegDataset', + pipeline=[], + data_root=data_root, + data_prefix=data_prefix, + ann_file='splits/val.txt', + serialize_data=False) + + dataset1 = DATASETS.build(cfg1) + dataset2 = DATASETS.build(cfg2) + dataset_concat = ConcatDataset(datasets=[dataset1, dataset2]) + assert isinstance(dataset_concat, ConcatDataset) + assert len(dataset_concat) == 5 + + # test mode + cfg1 = dict( + type='BaseSegDataset', + pipeline=[], + data_root=data_root, + data_prefix=dict(img_path='imgs/'), + test_mode=True, + metainfo=dict(classes=('pseudo_class', )), + serialize_data=False) + cfg2 = dict( + type='BaseSegDataset', + pipeline=[], + data_root=data_root, + data_prefix=dict(img_path='imgs/'), + test_mode=True, + metainfo=dict(classes=('pseudo_class', )), + serialize_data=False) + + dataset1 = DATASETS.build(cfg1) + dataset2 = DATASETS.build(cfg2) + dataset_concat = ConcatDataset(datasets=[dataset1, dataset2]) + assert isinstance(dataset_concat, ConcatDataset) + assert len(dataset_concat) == 10 + + # test mode with ann_files + cfg1 = dict( + type='BaseSegDataset', + pipeline=[], + data_root=data_root, + data_prefix=dict(img_path='imgs/'), + ann_file='splits/val.txt', + test_mode=True, + 
metainfo=dict(classes=('pseudo_class', )), + serialize_data=False) + cfg2 = dict( + type='BaseSegDataset', + pipeline=[], + data_root=data_root, + data_prefix=dict(img_path='imgs/'), + ann_file='splits/val.txt', + test_mode=True, + metainfo=dict(classes=('pseudo_class', )), + serialize_data=False) + + dataset1 = DATASETS.build(cfg1) + dataset2 = DATASETS.build(cfg2) + dataset_concat = ConcatDataset(datasets=[dataset1, dataset2]) + assert isinstance(dataset_concat, ConcatDataset) + assert len(dataset_concat) == 2 diff --git a/tests/test_datasets/test_formatting.py b/tests/test_datasets/test_formatting.py new file mode 100644 index 0000000000..4babaad269 --- /dev/null +++ b/tests/test_datasets/test_formatting.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +import unittest + +import numpy as np +from mmengine.structures import BaseDataElement + +from mmseg.datasets.transforms import PackSegInputs +from mmseg.structures import SegDataSample + + +class TestPackSegInputs(unittest.TestCase): + + def setUp(self): + """Setup the model and optimizer which are used in every test method. + + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + data_prefix = osp.join(osp.dirname(__file__), '../../data') + img_path = osp.join(data_prefix, 'color.jpg') + rng = np.random.RandomState(0) + self.results = { + 'img_path': img_path, + 'ori_shape': (300, 400), + 'pad_shape': (600, 800), + 'img_shape': (600, 800), + 'scale_factor': 2.0, + 'flip': False, + 'flip_direction': 'horizontal', + 'img_norm_cfg': None, + 'img': rng.rand(300, 400), + 'gt_seg_map': rng.rand(300, 400), + } + self.meta_keys = ('img_path', 'ori_shape', 'img_shape', 'pad_shape', + 'scale_factor', 'flip', 'flip_direction') + + def test_transform(self): + transform = PackSegInputs(meta_keys=self.meta_keys) + results = transform(copy.deepcopy(self.results)) + self.assertIn('data_samples', results) + self.assertIsInstance(results['data_samples'], SegDataSample) + self.assertIsInstance(results['data_samples'].gt_sem_seg, + BaseDataElement) + self.assertEqual(results['data_samples'].ori_shape, + results['data_samples'].gt_sem_seg.shape) + + def test_repr(self): + transform = PackSegInputs(meta_keys=self.meta_keys) + self.assertEqual( + repr(transform), f'PackSegInputs(meta_keys={self.meta_keys})') diff --git a/tests/test_datasets/test_loading.py b/tests/test_datasets/test_loading.py new file mode 100644 index 0000000000..29a594b4a2 --- /dev/null +++ b/tests/test_datasets/test_loading.py @@ -0,0 +1,241 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
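The loading tests below use the mmcv 2.x `LoadImageFromFile` together with mmseg's `LoadAnnotations`, which read `img_path`/`seg_map_path` and write `gt_seg_map` instead of the old `img_info`/`ann_info`/`gt_semantic_seg` keys. A minimal sketch against the repository's small test images:

    # Sketch only; the shapes match tests/data/color.jpg and tests/data/seg.png.
    import copy

    from mmcv.transforms import LoadImageFromFile

    from mmseg.datasets.transforms import LoadAnnotations

    results = dict(
        img_path='tests/data/color.jpg',
        seg_map_path='tests/data/seg.png',
        reduce_zero_label=False,
        seg_fields=[])
    results = LoadImageFromFile()(copy.deepcopy(results))
    results = LoadAnnotations()(results)
    assert results['img'].shape == (288, 512, 3)
    assert results['seg_fields'] == ['gt_seg_map']
    assert results['gt_seg_map'].shape == (288, 512)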
+import copy +import os.path as osp +import tempfile + +import mmcv +import numpy as np +from mmcv.transforms import LoadImageFromFile + +from mmseg.datasets.transforms import (LoadAnnotations, + LoadBiomedicalAnnotation, + LoadBiomedicalData, + LoadBiomedicalImageFromFile, + LoadImageFromNDArray) + + +class TestLoading: + + @classmethod + def setup_class(cls): + cls.data_prefix = osp.join(osp.dirname(__file__), '../data') + + def test_load_img(self): + results = dict(img_path=osp.join(self.data_prefix, 'color.jpg')) + transform = LoadImageFromFile() + results = transform(copy.deepcopy(results)) + assert results['img_path'] == osp.join(self.data_prefix, 'color.jpg') + assert results['img'].shape == (288, 512, 3) + assert results['img'].dtype == np.uint8 + assert results['ori_shape'] == results['img'].shape[:2] + assert repr(transform) == transform.__class__.__name__ + \ + "(ignore_empty=False, to_float32=False, color_type='color'," + \ + " imdecode_backend='cv2', file_client_args={'backend': 'disk'})" + + # to_float32 + transform = LoadImageFromFile(to_float32=True) + results = transform(copy.deepcopy(results)) + assert results['img'].dtype == np.float32 + + # gray image + results = dict(img_path=osp.join(self.data_prefix, 'gray.jpg')) + transform = LoadImageFromFile() + results = transform(copy.deepcopy(results)) + assert results['img'].shape == (288, 512, 3) + assert results['img'].dtype == np.uint8 + + transform = LoadImageFromFile(color_type='unchanged') + results = transform(copy.deepcopy(results)) + assert results['img'].shape == (288, 512) + assert results['img'].dtype == np.uint8 + + def test_load_seg(self): + seg_path = osp.join(self.data_prefix, 'seg.png') + results = dict( + seg_map_path=seg_path, reduce_zero_label=True, seg_fields=[]) + transform = LoadAnnotations() + results = transform(copy.deepcopy(results)) + assert results['gt_seg_map'].shape == (288, 512) + assert results['gt_seg_map'].dtype == np.uint8 + assert repr(transform) == transform.__class__.__name__ + \ + "(reduce_zero_label=True,imdecode_backend='pillow')" + \ + "file_client_args={'backend': 'disk'})" + + # reduce_zero_label + transform = LoadAnnotations(reduce_zero_label=True) + results = transform(copy.deepcopy(results)) + assert results['gt_seg_map'].shape == (288, 512) + assert results['gt_seg_map'].dtype == np.uint8 + + def test_load_seg_custom_classes(self): + + test_img = np.random.rand(10, 10) + test_gt = np.zeros_like(test_img) + test_gt[2:4, 2:4] = 1 + test_gt[2:4, 6:8] = 2 + test_gt[6:8, 2:4] = 3 + test_gt[6:8, 6:8] = 4 + + tmp_dir = tempfile.TemporaryDirectory() + img_path = osp.join(tmp_dir.name, 'img.jpg') + gt_path = osp.join(tmp_dir.name, 'gt.png') + + mmcv.imwrite(test_img, img_path) + mmcv.imwrite(test_gt, gt_path) + + # test only train with label with id 3 + results = dict( + img_path=img_path, + seg_map_path=gt_path, + label_map={ + 0: 0, + 1: 0, + 2: 0, + 3: 1, + 4: 0 + }, + reduce_zero_label=False, + seg_fields=[]) + + load_imgs = LoadImageFromFile() + results = load_imgs(copy.deepcopy(results)) + + load_anns = LoadAnnotations() + results = load_anns(copy.deepcopy(results)) + + gt_array = results['gt_seg_map'] + + true_mask = np.zeros_like(gt_array) + true_mask[6:8, 2:4] = 1 + + assert results['seg_fields'] == ['gt_seg_map'] + assert gt_array.shape == (10, 10) + assert gt_array.dtype == np.uint8 + np.testing.assert_array_equal(gt_array, true_mask) + + # test only train with label with id 4 and 3 + results = dict( + img_path=osp.join(self.data_prefix, 'color.jpg'), + seg_map_path=gt_path, + 
label_map={ + 0: 0, + 1: 0, + 2: 0, + 3: 2, + 4: 1 + }, + reduce_zero_label=False, + seg_fields=[]) + + load_imgs = LoadImageFromFile() + results = load_imgs(copy.deepcopy(results)) + + load_anns = LoadAnnotations() + results = load_anns(copy.deepcopy(results)) + + gt_array = results['gt_seg_map'] + + true_mask = np.zeros_like(gt_array) + true_mask[6:8, 2:4] = 2 + true_mask[6:8, 6:8] = 1 + + assert results['seg_fields'] == ['gt_seg_map'] + assert gt_array.shape == (10, 10) + assert gt_array.dtype == np.uint8 + np.testing.assert_array_equal(gt_array, true_mask) + + # test no custom classes + results = dict( + img_path=img_path, + seg_map_path=gt_path, + reduce_zero_label=False, + seg_fields=[]) + + load_imgs = LoadImageFromFile() + results = load_imgs(copy.deepcopy(results)) + + load_anns = LoadAnnotations() + results = load_anns(copy.deepcopy(results)) + + gt_array = results['gt_seg_map'] + + assert results['seg_fields'] == ['gt_seg_map'] + assert gt_array.shape == (10, 10) + assert gt_array.dtype == np.uint8 + np.testing.assert_array_equal(gt_array, test_gt) + + tmp_dir.cleanup() + + def test_load_image_from_ndarray(self): + results = {'img': np.zeros((256, 256, 3), dtype=np.uint8)} + transform = LoadImageFromNDArray() + results = transform(results) + + assert results['img'].shape == (256, 256, 3) + assert results['img'].dtype == np.uint8 + assert results['img_shape'] == (256, 256) + assert results['ori_shape'] == (256, 256) + + # to_float32 + transform = LoadImageFromNDArray(to_float32=True) + results = transform(copy.deepcopy(results)) + assert results['img'].dtype == np.float32 + + # test repr + transform = LoadImageFromNDArray() + assert repr(transform) == ('LoadImageFromNDArray(' + 'ignore_empty=False, ' + 'to_float32=False, ' + "color_type='color', " + "imdecode_backend='cv2', " + "file_client_args={'backend': 'disk'})") + + def test_load_biomedical_img(self): + results = dict( + img_path=osp.join(self.data_prefix, 'biomedical.nii.gz')) + transform = LoadBiomedicalImageFromFile() + results = transform(copy.deepcopy(results)) + assert results['img_path'] == osp.join(self.data_prefix, + 'biomedical.nii.gz') + assert len(results['img'].shape) == 4 + assert results['img'].dtype == np.float32 + assert results['ori_shape'] == results['img'].shape[1:] + assert repr(transform) == ('LoadBiomedicalImageFromFile(' + "decode_backend='nifti', " + 'to_xyz=False, ' + 'to_float32=True, ' + "file_client_args={'backend': 'disk'})") + + def test_load_biomedical_annotation(self): + results = dict( + seg_map_path=osp.join(self.data_prefix, 'biomedical_ann.nii.gz')) + transform = LoadBiomedicalAnnotation() + results = transform(copy.deepcopy(results)) + assert len(results['gt_seg_map'].shape) == 3 + assert results['gt_seg_map'].dtype == np.float32 + + def test_load_biomedical_data(self): + input_results = dict( + img_path=osp.join(self.data_prefix, 'biomedical.npy')) + transform = LoadBiomedicalData(with_seg=True) + results = transform(copy.deepcopy(input_results)) + assert results['img_path'] == osp.join(self.data_prefix, + 'biomedical.npy') + assert results['img'][0].shape == results['gt_seg_map'].shape + assert results['img'].dtype == np.float32 + assert results['ori_shape'] == results['img'].shape[1:] + assert repr(transform) == ('LoadBiomedicalData(' + 'with_seg=True, ' + "decode_backend='numpy', " + 'to_xyz=False, ' + "file_client_args={'backend': 'disk'})") + + transform = LoadBiomedicalData(with_seg=False) + results = transform(copy.deepcopy(input_results)) + assert len(results['img'].shape) == 4 
+ assert results.get('gt_seg_map') is None + assert repr(transform) == ('LoadBiomedicalData(' + 'with_seg=False, ' + "decode_backend='numpy', " + 'to_xyz=False, ' + "file_client_args={'backend': 'disk'})") diff --git a/tests/test_datasets/test_transform.py b/tests/test_datasets/test_transform.py new file mode 100644 index 0000000000..e09b43a642 --- /dev/null +++ b/tests/test_datasets/test_transform.py @@ -0,0 +1,800 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp + +import mmcv +import numpy as np +import pytest +from PIL import Image + +from mmseg.datasets.transforms import * # noqa +from mmseg.datasets.transforms import PhotoMetricDistortion, RandomCrop +from mmseg.registry import TRANSFORMS +from mmseg.utils import register_all_modules + +register_all_modules() + + +def test_resize(): + # Test `Resize`, `RandomResize` and `RandomChoiceResize` from + # MMCV transform. Noted: `RandomResize` has args `scales` but + # `Resize` and `RandomResize` has args `scale`. + transform = dict(type='Resize', scale=(1333, 800), keep_ratio=True) + resize_module = TRANSFORMS.build(transform) + + results = dict() + # (288, 512, 3) + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + resized_results = resize_module(results.copy()) + # img_shape = results['img'].shape[:2] in ``MMCV resize`` function + # so right now it is (750, 1333) rather than (750, 1333, 3) + assert resized_results['img_shape'] == (750, 1333) + + # test keep_ratio=False + transform = dict( + type='RandomResize', + scale=(1280, 800), + ratio_range=(1.0, 1.0), + resize_type='Resize', + keep_ratio=False) + resize_module = TRANSFORMS.build(transform) + resized_results = resize_module(results.copy()) + assert resized_results['img_shape'] == (800, 1280) + + # test `RandomChoiceResize`, which in older mmsegmentation + # `Resize` is multiscale_mode='range' + transform = dict(type='RandomResize', scale=[(1333, 400), (1333, 1200)]) + resize_module = TRANSFORMS.build(transform) + resized_results = resize_module(results.copy()) + assert max(resized_results['img_shape'][:2]) <= 1333 + assert min(resized_results['img_shape'][:2]) >= 400 + assert min(resized_results['img_shape'][:2]) <= 1200 + + # test RandomChoiceResize, which in older mmsegmentation + # `Resize` is multiscale_mode='value' + transform = dict( + type='RandomChoiceResize', + scales=[(1333, 800), (1333, 400)], + resize_type='Resize', + keep_ratio=False) + resize_module = TRANSFORMS.build(transform) + resized_results = resize_module(results.copy()) + assert resized_results['img_shape'] in [(800, 1333), (400, 1333)] + + transform = dict(type='Resize', scale_factor=(0.9, 1.1), keep_ratio=True) + resize_module = TRANSFORMS.build(transform) + resized_results = resize_module(results.copy()) + assert max(resized_results['img_shape'][:2]) <= 1333 * 1.1 + + # test RandomChoiceResize, which `resize_type` is `ResizeShortestEdge` + transform = dict( + type='RandomChoiceResize', + scales=[128, 256, 512], + resize_type='ResizeShortestEdge', + max_size=1333) + resize_module = TRANSFORMS.build(transform) + resized_results = resize_module(results.copy()) + assert resized_results['img_shape'][0] in [128, 256, 512] + + transform = dict( + type='RandomChoiceResize', + scales=[512], + resize_type='ResizeShortestEdge', + 
max_size=512) + resize_module = TRANSFORMS.build(transform) + resized_results = resize_module(results.copy()) + assert resized_results['img_shape'][1] == 512 + + transform = dict( + type='RandomChoiceResize', + scales=[(128, 256), (256, 512), (512, 1024)], + resize_type='ResizeShortestEdge', + max_size=1333) + resize_module = TRANSFORMS.build(transform) + resized_results = resize_module(results.copy()) + assert resized_results['img_shape'][0] in [128, 256, 512] + + # test scale=None and scale_factor is tuple. + # img shape: (288, 512, 3) + transform = dict( + type='Resize', scale=None, scale_factor=(0.5, 2.0), keep_ratio=True) + resize_module = TRANSFORMS.build(transform) + resized_results = resize_module(results.copy()) + assert int(288 * 0.5) <= resized_results['img_shape'][0] <= 288 * 2.0 + assert int(512 * 0.5) <= resized_results['img_shape'][1] <= 512 * 2.0 + + # test minimum resized image shape is 640 + transform = dict(type='Resize', scale=(2560, 640), keep_ratio=True) + resize_module = TRANSFORMS.build(transform) + resized_results = resize_module(results.copy()) + assert resized_results['img_shape'] == (640, 1138) + + # test minimum resized image shape is 640 when img_scale=(512, 640) + # where should define `scale_factor` in MMCV new ``Resize`` function. + min_size_ratio = max(640 / img.shape[0], 640 / img.shape[1]) + transform = dict( + type='Resize', scale_factor=min_size_ratio, keep_ratio=True) + resize_module = TRANSFORMS.build(transform) + resized_results = resize_module(results.copy()) + assert resized_results['img_shape'] == (640, 1138) + + # test h > w + img = np.random.randn(512, 288, 3) + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + min_size_ratio = max(640 / img.shape[0], 640 / img.shape[1]) + transform = dict( + type='Resize', + scale=(2560, 640), + scale_factor=min_size_ratio, + keep_ratio=True) + resize_module = TRANSFORMS.build(transform) + resized_results = resize_module(results.copy()) + assert resized_results['img_shape'] == (1138, 640) + + +def test_flip(): + # test assertion for invalid prob + with pytest.raises(AssertionError): + transform = dict(type='RandomFlip', prob=1.5) + TRANSFORMS.build(transform) + + # test assertion for invalid direction + with pytest.raises(AssertionError): + transform = dict(type='RandomFlip', prob=1.0, direction='horizonta') + TRANSFORMS.build(transform) + + transform = dict(type='RandomFlip', prob=1.0) + flip_module = TRANSFORMS.build(transform) + + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + seg = np.array( + Image.open(osp.join(osp.dirname(__file__), '../data/seg.png'))) + original_seg = copy.deepcopy(seg) + results['img'] = img + results['gt_semantic_seg'] = seg + results['seg_fields'] = ['gt_semantic_seg'] + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + results = flip_module(results) + + flip_module = TRANSFORMS.build(transform) + results = flip_module(results) + assert np.equal(original_img, results['img']).all() + assert np.equal(original_seg, results['gt_semantic_seg']).all() + + +def test_random_rotate_flip(): + with pytest.raises(AssertionError): + transform = dict(type='RandomRotFlip', flip_prob=1.5) + TRANSFORMS.build(transform) + + 
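For context on the transform tests in this file: pipeline steps are now built from the `TRANSFORMS` registry, and the `flip_ratio` argument used in the removed tests is spelled `prob` in the new `RandomFlip` configs. A minimal sketch, assuming the registry has already been populated with `register_all_modules()`:

    # Sketch only: builds two of the transforms exercised in this file.
    from mmseg.registry import TRANSFORMS
    from mmseg.utils import register_all_modules

    register_all_modules()

    flip = TRANSFORMS.build(dict(type='RandomFlip', prob=1.0))
    pad = TRANSFORMS.build(dict(type='Pad', size_divisor=32))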
with pytest.raises(AssertionError): + transform = dict(type='RandomRotFlip', rotate_prob=1.5) + TRANSFORMS.build(transform) + + with pytest.raises(AssertionError): + transform = dict(type='RandomRotFlip', degree=[20, 20, 20]) + TRANSFORMS.build(transform) + + with pytest.raises(AssertionError): + transform = dict(type='RandomRotFlip', degree=-20) + TRANSFORMS.build(transform) + + transform = dict( + type='RandomRotFlip', flip_prob=1.0, rotate_prob=0, degree=20) + rot_flip_module = TRANSFORMS.build(transform) + + results = dict() + img = mmcv.imread( + osp.join( + osp.dirname(__file__), + '../data/pseudo_synapse_dataset/img_dir/case0005_slice000.jpg'), + 'color') + original_img = copy.deepcopy(img) + seg = np.array( + Image.open( + osp.join( + osp.dirname(__file__), + '../data/pseudo_synapse_dataset/ann_dir/case0005_slice000.png') + )) + original_seg = copy.deepcopy(seg) + results['img'] = img + results['gt_semantic_seg'] = seg + results['seg_fields'] = ['gt_semantic_seg'] + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + result_flip = rot_flip_module(results) + assert original_img.shape == result_flip['img'].shape + assert original_seg.shape == result_flip['gt_semantic_seg'].shape + + transform = dict( + type='RandomRotFlip', flip_prob=0, rotate_prob=1.0, degree=20) + rot_flip_module = TRANSFORMS.build(transform) + + result_rotate = rot_flip_module(results) + assert original_img.shape == result_rotate['img'].shape + assert original_seg.shape == result_rotate['gt_semantic_seg'].shape + + assert str(transform) == "{'type': 'RandomRotFlip'," \ + " 'flip_prob': 0," \ + " 'rotate_prob': 1.0," \ + " 'degree': 20}" + + +def test_pad(): + # test assertion if both size_divisor and size is None + with pytest.raises(AssertionError): + transform = dict(type='Pad') + TRANSFORMS.build(transform) + + transform = dict(type='Pad', size_divisor=32) + transform = TRANSFORMS.build(transform) + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + results = transform(results) + # original img already divisible by 32 + assert np.equal(results['img'], original_img).all() + img_shape = results['img'].shape + assert img_shape[0] % 32 == 0 + assert img_shape[1] % 32 == 0 + + +def test_normalize(): + img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + transform = dict(type='Normalize', **img_norm_cfg) + transform = TRANSFORMS.build(transform) + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + results = transform(results) + + mean = np.array(img_norm_cfg['mean']) + std = np.array(img_norm_cfg['std']) + converted_img = (original_img[..., ::-1] - mean) / std + assert np.allclose(results['img'], converted_img) + + +def test_random_crop(): + # test assertion for invalid random crop + with pytest.raises(AssertionError): + RandomCrop(crop_size=(-1, 0)) + + 
results = dict() + img = mmcv.imread(osp.join('tests/data/color.jpg'), 'color') + seg = np.array(Image.open(osp.join('tests/data/seg.png'))) + results['img'] = img + results['gt_semantic_seg'] = seg + results['seg_fields'] = ['gt_semantic_seg'] + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + h, w, _ = img.shape + pipeline = RandomCrop(crop_size=(h - 20, w - 20)) + + results = pipeline(results) + assert results['img'].shape[:2] == (h - 20, w - 20) + assert results['img_shape'][:2] == (h - 20, w - 20) + assert results['gt_semantic_seg'].shape[:2] == (h - 20, w - 20) + + +def test_rgb2gray(): + # test assertion out_channels should be greater than 0 + with pytest.raises(AssertionError): + transform = dict(type='RGB2Gray', out_channels=-1) + TRANSFORMS.build(transform) + # test assertion weights should be tuple[float] + with pytest.raises(AssertionError): + transform = dict(type='RGB2Gray', out_channels=1, weights=1.1) + TRANSFORMS.build(transform) + + # test out_channels is None + transform = dict(type='RGB2Gray') + transform = TRANSFORMS.build(transform) + + assert str(transform) == f'RGB2Gray(' \ + f'out_channels={None}, ' \ + f'weights={(0.299, 0.587, 0.114)})' + + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') + h, w, c = img.shape + seg = np.array( + Image.open(osp.join(osp.dirname(__file__), '../data/seg.png'))) + results['img'] = img + results['gt_semantic_seg'] = seg + results['seg_fields'] = ['gt_semantic_seg'] + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + results = transform(results) + assert results['img'].shape == (h, w, c) + assert results['img_shape'] == (h, w, c) + assert results['ori_shape'] == (h, w, c) + + # test out_channels = 2 + transform = dict(type='RGB2Gray', out_channels=2) + transform = TRANSFORMS.build(transform) + + assert str(transform) == f'RGB2Gray(' \ + f'out_channels={2}, ' \ + f'weights={(0.299, 0.587, 0.114)})' + + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') + h, w, c = img.shape + seg = np.array( + Image.open(osp.join(osp.dirname(__file__), '../data/seg.png'))) + results['img'] = img + results['gt_semantic_seg'] = seg + results['seg_fields'] = ['gt_semantic_seg'] + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + results = transform(results) + assert results['img'].shape == (h, w, 2) + assert results['img_shape'] == (h, w, 2) + + +def test_photo_metric_distortion(): + results = dict() + img = mmcv.imread(osp.join('tests/data/color.jpg'), 'color') + seg = np.array(Image.open(osp.join('tests/data/seg.png'))) + results['img'] = img + results['gt_semantic_seg'] = seg + results['seg_fields'] = ['gt_semantic_seg'] + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + pipeline = PhotoMetricDistortion(saturation_range=(1., 1.)) + results = pipeline(results) + + assert (results['gt_semantic_seg'] == seg).all() + assert results['img_shape'] == img.shape + + +def test_rerange(): + # test assertion if min_value or 
max_value is illegal + with pytest.raises(AssertionError): + transform = dict(type='Rerange', min_value=[0], max_value=[255]) + TRANSFORMS.build(transform) + + # test assertion if min_value >= max_value + with pytest.raises(AssertionError): + transform = dict(type='Rerange', min_value=1, max_value=1) + TRANSFORMS.build(transform) + + # test assertion if img_min_value == img_max_value + with pytest.raises(AssertionError): + transform = dict(type='Rerange', min_value=0, max_value=1) + transform = TRANSFORMS.build(transform) + results = dict() + results['img'] = np.array([[1, 1], [1, 1]]) + transform(results) + + img_rerange_cfg = dict() + transform = dict(type='Rerange', **img_rerange_cfg) + transform = TRANSFORMS.build(transform) + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + results = transform(results) + + min_value = np.min(original_img) + max_value = np.max(original_img) + converted_img = (original_img - min_value) / (max_value - min_value) * 255 + + assert np.allclose(results['img'], converted_img) + assert str(transform) == f'Rerange(min_value={0}, max_value={255})' + + +def test_CLAHE(): + # test assertion if clip_limit is None + with pytest.raises(AssertionError): + transform = dict(type='CLAHE', clip_limit=None) + TRANSFORMS.build(transform) + + # test assertion if tile_grid_size is illegal + with pytest.raises(AssertionError): + transform = dict(type='CLAHE', tile_grid_size=(8.0, 8.0)) + TRANSFORMS.build(transform) + + # test assertion if tile_grid_size is illegal + with pytest.raises(AssertionError): + transform = dict(type='CLAHE', tile_grid_size=(9, 9, 9)) + TRANSFORMS.build(transform) + + transform = dict(type='CLAHE', clip_limit=2) + transform = TRANSFORMS.build(transform) + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + results = transform(results) + + converted_img = np.empty(original_img.shape) + for i in range(original_img.shape[2]): + converted_img[:, :, i] = mmcv.clahe( + np.array(original_img[:, :, i], dtype=np.uint8), 2, (8, 8)) + + assert np.allclose(results['img'], converted_img) + assert str(transform) == f'CLAHE(clip_limit={2}, tile_grid_size={(8, 8)})' + + +def test_adjust_gamma(): + # test assertion if gamma <= 0 + with pytest.raises(AssertionError): + transform = dict(type='AdjustGamma', gamma=0) + TRANSFORMS.build(transform) + + # test assertion if gamma is list + with pytest.raises(AssertionError): + transform = dict(type='AdjustGamma', gamma=[1.2]) + TRANSFORMS.build(transform) + + # test with gamma = 1.2 + transform = dict(type='AdjustGamma', gamma=1.2) + transform = TRANSFORMS.build(transform) + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + results = 
transform(results) + + inv_gamma = 1.0 / 1.2 + table = np.array([((i / 255.0)**inv_gamma) * 255 + for i in np.arange(0, 256)]).astype('uint8') + converted_img = mmcv.lut_transform( + np.array(original_img, dtype=np.uint8), table) + assert np.allclose(results['img'], converted_img) + assert str(transform) == f'AdjustGamma(gamma={1.2})' + + +def test_rotate(): + # test assertion degree should be tuple[float] or float + with pytest.raises(AssertionError): + transform = dict(type='RandomRotate', prob=0.5, degree=-10) + TRANSFORMS.build(transform) + # test assertion degree should be tuple[float] or float + with pytest.raises(AssertionError): + transform = dict(type='RandomRotate', prob=0.5, degree=(10., 20., 30.)) + TRANSFORMS.build(transform) + + transform = dict(type='RandomRotate', degree=10., prob=1.) + transform = TRANSFORMS.build(transform) + + assert str(transform) == f'RandomRotate(' \ + f'prob={1.}, ' \ + f'degree=({-10.}, {10.}), ' \ + f'pad_val={0}, ' \ + f'seg_pad_val={255}, ' \ + f'center={None}, ' \ + f'auto_bound={False})' + + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') + h, w, _ = img.shape + seg = np.array( + Image.open(osp.join(osp.dirname(__file__), '../data/seg.png'))) + results['img'] = img + results['gt_semantic_seg'] = seg + results['seg_fields'] = ['gt_semantic_seg'] + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + + results = transform(results) + assert results['img'].shape[:2] == (h, w) + + +def test_seg_rescale(): + results = dict() + seg = np.array( + Image.open(osp.join(osp.dirname(__file__), '../data/seg.png'))) + results['gt_semantic_seg'] = seg + results['seg_fields'] = ['gt_semantic_seg'] + h, w = seg.shape + + transform = dict(type='SegRescale', scale_factor=1. 
/ 2) + rescale_module = TRANSFORMS.build(transform) + rescale_results = rescale_module(results.copy()) + assert rescale_results['gt_semantic_seg'].shape == (h // 2, w // 2) + + transform = dict(type='SegRescale', scale_factor=1) + rescale_module = TRANSFORMS.build(transform) + rescale_results = rescale_module(results.copy()) + assert rescale_results['gt_semantic_seg'].shape == (h, w) + + +def test_mosaic(): + # test prob + with pytest.raises(AssertionError): + transform = dict(type='RandomMosaic', prob=1.5) + TRANSFORMS.build(transform) + # test assertion for invalid img_scale + with pytest.raises(AssertionError): + transform = dict(type='RandomMosaic', prob=1, img_scale=640) + TRANSFORMS.build(transform) + + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') + seg = np.array( + Image.open(osp.join(osp.dirname(__file__), '../data/seg.png'))) + + results['img'] = img + results['gt_semantic_seg'] = seg + results['seg_fields'] = ['gt_semantic_seg'] + + transform = dict(type='RandomMosaic', prob=1, img_scale=(10, 12)) + mosaic_module = TRANSFORMS.build(transform) + assert 'Mosaic' in repr(mosaic_module) + + # test assertion for invalid mix_results + with pytest.raises(AssertionError): + mosaic_module(results) + + results['mix_results'] = [copy.deepcopy(results)] * 3 + results = mosaic_module(results) + assert results['img'].shape[:2] == (20, 24) + + results = dict() + results['img'] = img[:, :, 0] + results['gt_semantic_seg'] = seg + results['seg_fields'] = ['gt_semantic_seg'] + + transform = dict(type='RandomMosaic', prob=0, img_scale=(10, 12)) + mosaic_module = TRANSFORMS.build(transform) + results['mix_results'] = [copy.deepcopy(results)] * 3 + results = mosaic_module(results) + assert results['img'].shape[:2] == img.shape[:2] + + transform = dict(type='RandomMosaic', prob=1, img_scale=(10, 12)) + mosaic_module = TRANSFORMS.build(transform) + results = mosaic_module(results) + assert results['img'].shape[:2] == (20, 24) + + +def test_cutout(): + # test prob + with pytest.raises(AssertionError): + transform = dict(type='RandomCutOut', prob=1.5, n_holes=1) + TRANSFORMS.build(transform) + # test n_holes + with pytest.raises(AssertionError): + transform = dict( + type='RandomCutOut', prob=0.5, n_holes=(5, 3), cutout_shape=(8, 8)) + TRANSFORMS.build(transform) + with pytest.raises(AssertionError): + transform = dict( + type='RandomCutOut', + prob=0.5, + n_holes=(3, 4, 5), + cutout_shape=(8, 8)) + TRANSFORMS.build(transform) + # test cutout_shape and cutout_ratio + with pytest.raises(AssertionError): + transform = dict( + type='RandomCutOut', prob=0.5, n_holes=1, cutout_shape=8) + TRANSFORMS.build(transform) + with pytest.raises(AssertionError): + transform = dict( + type='RandomCutOut', prob=0.5, n_holes=1, cutout_ratio=0.2) + TRANSFORMS.build(transform) + # either of cutout_shape and cutout_ratio should be given + with pytest.raises(AssertionError): + transform = dict(type='RandomCutOut', prob=0.5, n_holes=1) + TRANSFORMS.build(transform) + with pytest.raises(AssertionError): + transform = dict( + type='RandomCutOut', + prob=0.5, + n_holes=1, + cutout_shape=(2, 2), + cutout_ratio=(0.4, 0.4)) + TRANSFORMS.build(transform) + # test seg_fill_in + with pytest.raises(AssertionError): + transform = dict( + type='RandomCutOut', + prob=0.5, + n_holes=1, + cutout_shape=(8, 8), + seg_fill_in='a') + TRANSFORMS.build(transform) + with pytest.raises(AssertionError): + transform = dict( + type='RandomCutOut', + prob=0.5, + n_holes=1, + cutout_shape=(8, 8), 
+ seg_fill_in=256) + TRANSFORMS.build(transform) + + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') + + seg = np.array( + Image.open(osp.join(osp.dirname(__file__), '../data/seg.png'))) + + results['img'] = img + results['gt_semantic_seg'] = seg + results['seg_fields'] = ['gt_semantic_seg'] + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['pad_shape'] = img.shape + results['img_fields'] = ['img'] + + transform = dict( + type='RandomCutOut', prob=1, n_holes=1, cutout_shape=(10, 10)) + cutout_module = TRANSFORMS.build(transform) + assert 'cutout_shape' in repr(cutout_module) + cutout_result = cutout_module(copy.deepcopy(results)) + assert cutout_result['img'].sum() < img.sum() + + transform = dict( + type='RandomCutOut', prob=1, n_holes=1, cutout_ratio=(0.8, 0.8)) + cutout_module = TRANSFORMS.build(transform) + assert 'cutout_ratio' in repr(cutout_module) + cutout_result = cutout_module(copy.deepcopy(results)) + assert cutout_result['img'].sum() < img.sum() + + transform = dict( + type='RandomCutOut', prob=0, n_holes=1, cutout_ratio=(0.8, 0.8)) + cutout_module = TRANSFORMS.build(transform) + cutout_result = cutout_module(copy.deepcopy(results)) + assert cutout_result['img'].sum() == img.sum() + assert cutout_result['gt_semantic_seg'].sum() == seg.sum() + + transform = dict( + type='RandomCutOut', + prob=1, + n_holes=(2, 4), + cutout_shape=[(10, 10), (15, 15)], + fill_in=(255, 255, 255), + seg_fill_in=None) + cutout_module = TRANSFORMS.build(transform) + cutout_result = cutout_module(copy.deepcopy(results)) + assert cutout_result['img'].sum() > img.sum() + assert cutout_result['gt_semantic_seg'].sum() == seg.sum() + + transform = dict( + type='RandomCutOut', + prob=1, + n_holes=1, + cutout_ratio=(0.8, 0.8), + fill_in=(255, 255, 255), + seg_fill_in=255) + cutout_module = TRANSFORMS.build(transform) + cutout_result = cutout_module(copy.deepcopy(results)) + assert cutout_result['img'].sum() > img.sum() + assert cutout_result['gt_semantic_seg'].sum() > seg.sum() + + +def test_resize_to_multiple(): + transform = dict(type='ResizeToMultiple', size_divisor=32) + transform = TRANSFORMS.build(transform) + + img = np.random.randn(213, 232, 3) + seg = np.random.randint(0, 19, (213, 232)) + results = dict() + results['img'] = img + results['gt_semantic_seg'] = seg + results['seg_fields'] = ['gt_semantic_seg'] + results['img_shape'] = img.shape + results['pad_shape'] = img.shape + + results = transform(results) + assert results['img'].shape == (224, 256, 3) + assert results['gt_semantic_seg'].shape == (224, 256) + assert results['img_shape'] == (224, 256) + + +def test_generate_edge(): + transform = dict(type='GenerateEdge', edge_width=1) + transform = TRANSFORMS.build(transform) + + seg_map = np.array([ + [1, 1, 1, 1, 1], + [1, 1, 1, 1, 2], + [1, 1, 1, 2, 2], + [1, 1, 2, 2, 2], + [1, 2, 2, 2, 2], + [2, 2, 2, 2, 2], + ]) + results = dict() + results['gt_seg_map'] = seg_map + results['img_shape'] = seg_map.shape + + results = transform(results) + assert np.all(results['gt_edge'] == np.array([ + [0, 0, 0, 1, 0], + [0, 0, 1, 1, 1], + [0, 1, 1, 1, 0], + [1, 1, 1, 0, 0], + [1, 1, 0, 0, 0], + [1, 0, 0, 0, 0], + ])) diff --git a/tests/test_datasets/test_tta.py b/tests/test_datasets/test_tta.py new file mode 100644 index 0000000000..6a433647a8 --- /dev/null +++ b/tests/test_datasets/test_tta.py @@ -0,0 +1,151 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
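+# NOTE: the TODO test below sketches the expected MultiScaleFlipAug contract:
+# it should emit one packed data_sample per (scale, flip) combination, so
+# three scales with allow_flip=True are expected to yield six samples.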
+# import os.path as osp + +# import mmcv +# import pytest + +# from mmseg.datasets.transforms import * # noqa +# from mmseg.registry import TRANSFORMS + +# TODO +# def test_multi_scale_flip_aug(): +# # test assertion if scales=None, scale_factor=1 (not float). +# with pytest.raises(AssertionError): +# tta_transform = dict( +# type='MultiScaleFlipAug', +# scales=None, +# scale_factor=1, +# transforms=[dict(type='Resize', keep_ratio=False)], +# ) +# TRANSFORMS.build(tta_transform) + +# # test assertion if scales=None, scale_factor=None. +# with pytest.raises(AssertionError): +# tta_transform = dict( +# type='MultiScaleFlipAug', +# scales=None, +# scale_factor=None, +# transforms=[dict(type='Resize', keep_ratio=False)], +# ) +# TRANSFORMS.build(tta_transform) + +# # test assertion if scales=(512, 512), scale_factor=1 (not float). +# with pytest.raises(AssertionError): +# tta_transform = dict( +# type='MultiScaleFlipAug', +# scales=(512, 512), +# scale_factor=1, +# transforms=[dict(type='Resize', keep_ratio=False)], +# ) +# TRANSFORMS.build(tta_transform) +# meta_keys = ('img', 'ori_shape', 'ori_height', 'ori_width', 'pad_shape', +# 'scale_factor', 'scale', 'flip') +# tta_transform = dict( +# type='MultiScaleFlipAug', +# scales=[(256, 256), (512, 512), (1024, 1024)], +# allow_flip=False, +# resize_cfg=dict(type='Resize', keep_ratio=False), +# transforms=[dict(type='mmseg.PackSegInputs', meta_keys=meta_keys)], +# ) +# tta_module = TRANSFORMS.build(tta_transform) + +# results = dict() +# # (288, 512, 3) +# img = mmcv.imread( +# osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color') +# results['img'] = img +# results['ori_shape'] = img.shape +# results['ori_height'] = img.shape[0] +# results['ori_width'] = img.shape[1] +# # Set initial values for default meta_keys +# results['pad_shape'] = img.shape +# results['scale_factor'] = 1.0 + +# tta_results = tta_module(results.copy()) +# assert [data_sample.scale +# for data_sample in tta_results['data_sample']] == [(256, 256), +# (512, 512), +# (1024, 1024)] +# assert [data_sample.flip for data_sample in tta_results['data_sample'] +# ] == [False, False, False] + +# tta_transform = dict( +# type='MultiScaleFlipAug', +# scales=[(256, 256), (512, 512), (1024, 1024)], +# allow_flip=True, +# resize_cfg=dict(type='Resize', keep_ratio=False), +# transforms=[dict(type='mmseg.PackSegInputs', meta_keys=meta_keys)], +# ) +# tta_module = TRANSFORMS.build(tta_transform) +# tta_results = tta_module(results.copy()) +# assert [data_sample.scale +# for data_sample in tta_results['data_sample']] == [(256, 256), +# (256, 256), +# (512, 512), +# (512, 512), +# (1024, 1024), +# (1024, 1024)] +# assert [data_sample.flip for data_sample in tta_results['data_sample'] +# ] == [False, True, False, True, False, True] + +# tta_transform = dict( +# type='MultiScaleFlipAug', +# scales=[(512, 512)], +# allow_flip=False, +# resize_cfg=dict(type='Resize', keep_ratio=False), +# transforms=[dict(type='mmseg.PackSegInputs', meta_keys=meta_keys)], +# ) +# tta_module = TRANSFORMS.build(tta_transform) +# tta_results = tta_module(results.copy()) +# assert [tta_results['data_sample'][0].scale] == [(512, 512)] +# assert [tta_results['data_sample'][0].flip] == [False] + +# tta_transform = dict( +# type='MultiScaleFlipAug', +# scales=[(512, 512)], +# allow_flip=True, +# resize_cfg=dict(type='Resize', keep_ratio=False), +# transforms=[dict(type='mmseg.PackSegInputs', meta_keys=meta_keys)], +# ) +# tta_module = TRANSFORMS.build(tta_transform) +# tta_results = tta_module(results.copy()) +# 
assert [data_sample.scale +# for data_sample in tta_results['data_sample']] == [(512, 512), +# (512, 512)] +# assert [data_sample.flip +# for data_sample in tta_results['data_sample']] == [False, True] + +# tta_transform = dict( +# type='MultiScaleFlipAug', +# scale_factor=[0.5, 1.0, 2.0], +# allow_flip=False, +# resize_cfg=dict(type='Resize', keep_ratio=False), +# transforms=[dict(type='mmseg.PackSegInputs', meta_keys=meta_keys)], +# ) +# tta_module = TRANSFORMS.build(tta_transform) +# tta_results = tta_module(results.copy()) +# assert [data_sample.scale +# for data_sample in tta_results['data_sample']] == [(256, 144), +# (512, 288), +# (1024, 576)] +# assert [data_sample.flip for data_sample in tta_results['data_sample'] +# ] == [False, False, False] + +# tta_transform = dict( +# type='MultiScaleFlipAug', +# scale_factor=[0.5, 1.0, 2.0], +# allow_flip=True, +# resize_cfg=dict(type='Resize', keep_ratio=False), +# transforms=[dict(type='mmseg.PackSegInputs', meta_keys=meta_keys)], +# ) +# tta_module = TRANSFORMS.build(tta_transform) +# tta_results = tta_module(results.copy()) +# assert [data_sample.scale +# for data_sample in tta_results['data_sample']] == [(256, 144), +# (256, 144), +# (512, 288), +# (512, 288), +# (1024, 576), +# (1024, 576)] +# assert [data_sample.flip for data_sample in tta_results['data_sample'] +# ] == [False, True, False, True, False, True] diff --git a/tests/test_digit_version.py b/tests/test_digit_version.py new file mode 100644 index 0000000000..45daf09ca1 --- /dev/null +++ b/tests/test_digit_version.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg import digit_version + + +def test_digit_version(): + assert digit_version('0.2.16') == (0, 2, 16, 0, 0, 0) + assert digit_version('1.2.3') == (1, 2, 3, 0, 0, 0) + assert digit_version('1.2.3rc0') == (1, 2, 3, 0, -1, 0) + assert digit_version('1.2.3rc1') == (1, 2, 3, 0, -1, 1) + assert digit_version('1.0rc0') == (1, 0, 0, 0, -1, 0) + assert digit_version('1.0') == digit_version('1.0.0') + assert digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5') + assert digit_version('1.0.0dev') < digit_version('1.0.0a') + assert digit_version('1.0.0a') < digit_version('1.0.0a1') + assert digit_version('1.0.0a') < digit_version('1.0.0b') + assert digit_version('1.0.0b') < digit_version('1.0.0rc') + assert digit_version('1.0.0rc1') < digit_version('1.0.0') + assert digit_version('1.0.0') < digit_version('1.0.0post') + assert digit_version('1.0.0post') < digit_version('1.0.0post1') + assert digit_version('v1') == (1, 0, 0, 0, 0, 0) + assert digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0) diff --git a/tests/test_engine/test_layer_decay_optimizer_constructor.py b/tests/test_engine/test_layer_decay_optimizer_constructor.py new file mode 100644 index 0000000000..72dc6c5123 --- /dev/null +++ b/tests/test_engine/test_layer_decay_optimizer_constructor.py @@ -0,0 +1,300 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
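+# The expected_* lists below appear to follow the usual layer-decay rule: a
+# parameter group at depth `layer_id` gets
+# `lr_scale = decay_rate ** (total - 1 - layer_id)` with `total = num_layers + 2`.
+# A minimal sketch (illustrative names only, not the constructor's API):
+#
+#     total = num_layers + 2          # e.g. 6 + 2 = 8 for the ConvNeXt cases
+#     scales = [decay_rate ** (total - 1 - i) for i in range(total)]
+#     # decay_rate=2, num_layers=6 -> [128, 64, 32, 16, 8, 4, 2, 1]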
+# from copyreg import constructor +import pytest +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.optim.optimizer import build_optim_wrapper + +from mmseg.engine.optimizers.layer_decay_optimizer_constructor import \ + LearningRateDecayOptimizerConstructor +from mmseg.utils import register_all_modules + +register_all_modules() + +base_lr = 1 +decay_rate = 2 +base_wd = 0.05 +weight_decay = 0.05 + +expected_stage_wise_lr_wd_convnext = [{ + 'weight_decay': 0.0, + 'lr_scale': 128 +}, { + 'weight_decay': 0.0, + 'lr_scale': 1 +}, { + 'weight_decay': 0.05, + 'lr_scale': 64 +}, { + 'weight_decay': 0.0, + 'lr_scale': 64 +}, { + 'weight_decay': 0.05, + 'lr_scale': 32 +}, { + 'weight_decay': 0.0, + 'lr_scale': 32 +}, { + 'weight_decay': 0.05, + 'lr_scale': 16 +}, { + 'weight_decay': 0.0, + 'lr_scale': 16 +}, { + 'weight_decay': 0.05, + 'lr_scale': 8 +}, { + 'weight_decay': 0.0, + 'lr_scale': 8 +}, { + 'weight_decay': 0.05, + 'lr_scale': 128 +}, { + 'weight_decay': 0.05, + 'lr_scale': 1 +}] + +expected_layer_wise_lr_wd_convnext = [{ + 'weight_decay': 0.0, + 'lr_scale': 128 +}, { + 'weight_decay': 0.0, + 'lr_scale': 1 +}, { + 'weight_decay': 0.05, + 'lr_scale': 64 +}, { + 'weight_decay': 0.0, + 'lr_scale': 64 +}, { + 'weight_decay': 0.05, + 'lr_scale': 32 +}, { + 'weight_decay': 0.0, + 'lr_scale': 32 +}, { + 'weight_decay': 0.05, + 'lr_scale': 16 +}, { + 'weight_decay': 0.0, + 'lr_scale': 16 +}, { + 'weight_decay': 0.05, + 'lr_scale': 2 +}, { + 'weight_decay': 0.0, + 'lr_scale': 2 +}, { + 'weight_decay': 0.05, + 'lr_scale': 128 +}, { + 'weight_decay': 0.05, + 'lr_scale': 1 +}] + +expected_layer_wise_wd_lr_beit = [{ + 'weight_decay': 0.0, + 'lr_scale': 16 +}, { + 'weight_decay': 0.05, + 'lr_scale': 8 +}, { + 'weight_decay': 0.0, + 'lr_scale': 8 +}, { + 'weight_decay': 0.05, + 'lr_scale': 4 +}, { + 'weight_decay': 0.0, + 'lr_scale': 4 +}, { + 'weight_decay': 0.05, + 'lr_scale': 2 +}, { + 'weight_decay': 0.0, + 'lr_scale': 2 +}, { + 'weight_decay': 0.05, + 'lr_scale': 1 +}, { + 'weight_decay': 0.0, + 'lr_scale': 1 +}] + + +class ToyConvNeXt(nn.Module): + + def __init__(self): + super().__init__() + self.stages = nn.ModuleList() + for i in range(4): + stage = nn.Sequential(ConvModule(3, 4, kernel_size=1, bias=True)) + self.stages.append(stage) + self.norm0 = nn.BatchNorm2d(2) + + # add some variables to meet unit test coverate rate + self.cls_token = nn.Parameter(torch.ones(1)) + self.mask_token = nn.Parameter(torch.ones(1)) + self.pos_embed = nn.Parameter(torch.ones(1)) + self.stem_norm = nn.Parameter(torch.ones(1)) + self.downsample_norm0 = nn.BatchNorm2d(2) + self.downsample_norm1 = nn.BatchNorm2d(2) + self.downsample_norm2 = nn.BatchNorm2d(2) + self.lin = nn.Parameter(torch.ones(1)) + self.lin.requires_grad = False + self.downsample_layers = nn.ModuleList() + for _ in range(4): + stage = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=True)) + self.downsample_layers.append(stage) + + +class ToyBEiT(nn.Module): + + def __init__(self): + super().__init__() + # add some variables to meet unit test coverate rate + self.cls_token = nn.Parameter(torch.ones(1)) + self.patch_embed = nn.Parameter(torch.ones(1)) + self.layers = nn.ModuleList() + for _ in range(3): + layer = nn.Conv2d(3, 3, 1) + self.layers.append(layer) + + +class ToyMAE(nn.Module): + + def __init__(self): + super().__init__() + # add some variables to meet unit test coverate rate + self.cls_token = nn.Parameter(torch.ones(1)) + self.patch_embed = nn.Parameter(torch.ones(1)) + self.layers = nn.ModuleList() + 
for _ in range(3): + layer = nn.Conv2d(3, 3, 1) + self.layers.append(layer) + + +class ToySegmentor(nn.Module): + + def __init__(self, backbone): + super().__init__() + self.backbone = backbone + self.decode_head = nn.Conv2d(2, 2, kernel_size=1, groups=2) + + +class PseudoDataParallel(nn.Module): + + def __init__(self, model): + super().__init__() + self.module = model + + +class ToyViT(nn.Module): + + def __init__(self): + super().__init__() + + +def check_optimizer_lr_wd(optimizer, gt_lr_wd): + assert isinstance(optimizer, torch.optim.AdamW) + assert optimizer.defaults['lr'] == base_lr + assert optimizer.defaults['weight_decay'] == base_wd + param_groups = optimizer.param_groups + print(param_groups) + assert len(param_groups) == len(gt_lr_wd) + for i, param_dict in enumerate(param_groups): + assert param_dict['weight_decay'] == gt_lr_wd[i]['weight_decay'] + assert param_dict['lr_scale'] == gt_lr_wd[i]['lr_scale'] + assert param_dict['lr_scale'] == param_dict['lr'] + + +def test_learning_rate_decay_optimizer_constructor(): + + # Test lr wd for ConvNeXT + backbone = ToyConvNeXt() + model = PseudoDataParallel(ToySegmentor(backbone)) + # stagewise decay + stagewise_paramwise_cfg = dict( + decay_rate=decay_rate, decay_type='stage_wise', num_layers=6) + optimizer_cfg = dict( + type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05) + optim_wrapper_cfg = dict( + type='OptimWrapper', + optimizer=optimizer_cfg, + paramwise_cfg=stagewise_paramwise_cfg, + constructor='LearningRateDecayOptimizerConstructor') + optim_wrapper = build_optim_wrapper(model, optim_wrapper_cfg) + check_optimizer_lr_wd(optim_wrapper.optimizer, + expected_stage_wise_lr_wd_convnext) + # layerwise decay + layerwise_paramwise_cfg = dict( + decay_rate=decay_rate, decay_type='layer_wise', num_layers=6) + optim_wrapper_cfg = dict( + type='OptimWrapper', + optimizer=optimizer_cfg, + paramwise_cfg=layerwise_paramwise_cfg, + constructor='LearningRateDecayOptimizerConstructor') + optim_wrapper = build_optim_wrapper(model, optim_wrapper_cfg) + check_optimizer_lr_wd(optim_wrapper.optimizer, + expected_layer_wise_lr_wd_convnext) + + # Test lr wd for BEiT + backbone = ToyBEiT() + model = PseudoDataParallel(ToySegmentor(backbone)) + + layerwise_paramwise_cfg = dict( + decay_rate=decay_rate, decay_type='layer_wise', num_layers=3) + optim_wrapper_cfg = dict( + type='OptimWrapper', + optimizer=optimizer_cfg, + paramwise_cfg=layerwise_paramwise_cfg, + constructor='LearningRateDecayOptimizerConstructor') + optim_wrapper = build_optim_wrapper(model, optim_wrapper_cfg) + check_optimizer_lr_wd(optim_wrapper.optimizer, + expected_layer_wise_wd_lr_beit) + + # Test invalidation of lr wd for Vit + backbone = ToyViT() + model = PseudoDataParallel(ToySegmentor(backbone)) + with pytest.raises(NotImplementedError): + optim_constructor = LearningRateDecayOptimizerConstructor( + optim_wrapper_cfg, layerwise_paramwise_cfg) + optim_constructor(model) + with pytest.raises(NotImplementedError): + optim_constructor = LearningRateDecayOptimizerConstructor( + optim_wrapper_cfg, stagewise_paramwise_cfg) + optim_constructor(model) + + # Test lr wd for MAE + backbone = ToyMAE() + model = PseudoDataParallel(ToySegmentor(backbone)) + + layerwise_paramwise_cfg = dict( + decay_rate=decay_rate, decay_type='layer_wise', num_layers=3) + optim_wrapper_cfg = dict( + type='OptimWrapper', + optimizer=optimizer_cfg, + paramwise_cfg=layerwise_paramwise_cfg, + constructor='LearningRateDecayOptimizerConstructor') + optim_wrapper = build_optim_wrapper(model, 
optim_wrapper_cfg) + check_optimizer_lr_wd(optim_wrapper.optimizer, + expected_layer_wise_wd_lr_beit) + + +def test_beit_layer_decay_optimizer_constructor(): + + # paramwise_cfg with BEiTExampleModel + backbone = ToyBEiT() + model = PseudoDataParallel(ToySegmentor(backbone)) + paramwise_cfg = dict(layer_decay_rate=2, num_layers=3) + optim_wrapper_cfg = dict( + type='OptimWrapper', + constructor='LayerDecayOptimizerConstructor', + paramwise_cfg=paramwise_cfg, + optimizer=dict( + type='AdamW', lr=1, betas=(0.9, 0.999), weight_decay=0.05)) + optim_wrapper = build_optim_wrapper(model, optim_wrapper_cfg) + # optimizer = optim_wrapper_builder(model) + check_optimizer_lr_wd(optim_wrapper.optimizer, + expected_layer_wise_wd_lr_beit) diff --git a/tests/test_engine/test_optimizer.py b/tests/test_engine/test_optimizer.py new file mode 100644 index 0000000000..af69f5fcbc --- /dev/null +++ b/tests/test_engine/test_optimizer.py @@ -0,0 +1,33 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmengine.optim import build_optim_wrapper + + +class ExampleModel(nn.Module): + + def __init__(self): + super().__init__() + self.param1 = nn.Parameter(torch.ones(1)) + self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False) + self.conv2 = nn.Conv2d(4, 2, kernel_size=1) + self.bn = nn.BatchNorm2d(2) + + def forward(self, x): + return x + + +base_lr = 0.01 +base_wd = 0.0001 +momentum = 0.9 + + +def test_build_optimizer(): + model = ExampleModel() + optim_wrapper_cfg = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)) + optim_wrapper = build_optim_wrapper(model, optim_wrapper_cfg) + # test whether optimizer is successfully built from parent. + assert isinstance(optim_wrapper.optimizer, torch.optim.SGD) diff --git a/tests/test_engine/test_visualization_hook.py b/tests/test_engine/test_visualization_hook.py new file mode 100644 index 0000000000..274b0e547f --- /dev/null +++ b/tests/test_engine/test_visualization_hook.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
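+# SegVisualizationHook is exercised below for train/val/test iterations; it
+# draws prediction overlays every `interval` iterations when `draw=True`.
+# A hypothetical config snippet (assumed field names, not taken from this
+# diff) showing how the hook is usually registered:
+#
+#     default_hooks = dict(
+#         visualization=dict(type='SegVisualizationHook', draw=True, interval=1))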
+from unittest import TestCase +from unittest.mock import Mock + +import torch +from mmengine.structures import PixelData + +from mmseg.engine.hooks import SegVisualizationHook +from mmseg.structures import SegDataSample +from mmseg.visualization import SegLocalVisualizer + + +class TestVisualizationHook(TestCase): + + def setUp(self) -> None: + + h = 288 + w = 512 + num_class = 2 + + SegLocalVisualizer.get_instance('visualizer') + SegLocalVisualizer.dataset_meta = dict( + classes=('background', 'foreground'), + palette=[[120, 120, 120], [6, 230, 230]]) + + data_sample = SegDataSample() + data_sample.set_metainfo({'img_path': 'tests/data/color.jpg'}) + self.data_batch = [{'data_sample': data_sample}] * 2 + + pred_sem_seg_data = dict(data=torch.randint(0, num_class, (1, h, w))) + pred_sem_seg = PixelData(**pred_sem_seg_data) + pred_seg_data_sample = SegDataSample() + pred_seg_data_sample.set_metainfo({'img_path': 'tests/data/color.jpg'}) + pred_seg_data_sample.pred_sem_seg = pred_sem_seg + self.outputs = [pred_seg_data_sample] * 2 + + def test_after_iter(self): + runner = Mock() + runner.iter = 1 + hook = SegVisualizationHook(draw=True, interval=1) + hook._after_iter( + runner, 1, self.data_batch, self.outputs, mode='train') + hook._after_iter(runner, 1, self.data_batch, self.outputs, mode='val') + hook._after_iter(runner, 1, self.data_batch, self.outputs, mode='test') + + def test_after_val_iter(self): + runner = Mock() + runner.iter = 2 + hook = SegVisualizationHook(interval=1) + hook.after_val_iter(runner, 1, self.data_batch, self.outputs) + + hook = SegVisualizationHook(draw=True, interval=1) + hook.after_val_iter(runner, 1, self.data_batch, self.outputs) + + hook = SegVisualizationHook( + draw=True, interval=1, show=True, wait_time=1) + hook.after_val_iter(runner, 1, self.data_batch, self.outputs) + + def test_after_test_iter(self): + runner = Mock() + runner.iter = 3 + hook = SegVisualizationHook(draw=True, interval=1) + hook.after_test_iter(runner, 1, self.data_batch, self.outputs) diff --git a/tests/test_eval_hook.py b/tests/test_eval_hook.py deleted file mode 100644 index 84542ecfe3..0000000000 --- a/tests/test_eval_hook.py +++ /dev/null @@ -1,118 +0,0 @@ -import logging -import tempfile -from unittest.mock import MagicMock, patch - -import mmcv.runner -import pytest -import torch -import torch.nn as nn -from mmcv.runner import obj_from_dict -from torch.utils.data import DataLoader, Dataset - -from mmseg.apis import single_gpu_test -from mmseg.core import DistEvalHook, EvalHook - - -class ExampleDataset(Dataset): - - def __getitem__(self, idx): - results = dict(img=torch.tensor([1]), img_metas=dict()) - return results - - def __len__(self): - return 1 - - -class ExampleModel(nn.Module): - - def __init__(self): - super(ExampleModel, self).__init__() - self.test_cfg = None - self.conv = nn.Conv2d(3, 3, 3) - - def forward(self, img, img_metas, test_mode=False, **kwargs): - return img - - def train_step(self, data_batch, optimizer): - loss = self.forward(**data_batch) - return dict(loss=loss) - - -def test_eval_hook(): - with pytest.raises(TypeError): - test_dataset = ExampleModel() - data_loader = [ - DataLoader( - test_dataset, - batch_size=1, - sampler=None, - num_worker=0, - shuffle=False) - ] - EvalHook(data_loader) - - test_dataset = ExampleDataset() - test_dataset.evaluate = MagicMock(return_value=dict(test='success')) - loader = DataLoader(test_dataset, batch_size=1) - model = ExampleModel() - data_loader = DataLoader( - test_dataset, batch_size=1, sampler=None, num_workers=0, 
shuffle=False) - optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) - optimizer = obj_from_dict(optim_cfg, torch.optim, - dict(params=model.parameters())) - - # test EvalHook - with tempfile.TemporaryDirectory() as tmpdir: - eval_hook = EvalHook(data_loader) - runner = mmcv.runner.IterBasedRunner( - model=model, - optimizer=optimizer, - work_dir=tmpdir, - logger=logging.getLogger()) - runner.register_hook(eval_hook) - runner.run([loader], [('train', 1)], 1) - test_dataset.evaluate.assert_called_with([torch.tensor([1])], - logger=runner.logger) - - -def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): - results = single_gpu_test(model, data_loader) - return results - - -@patch('mmseg.apis.multi_gpu_test', multi_gpu_test) -def test_dist_eval_hook(): - with pytest.raises(TypeError): - test_dataset = ExampleModel() - data_loader = [ - DataLoader( - test_dataset, - batch_size=1, - sampler=None, - num_worker=0, - shuffle=False) - ] - DistEvalHook(data_loader) - - test_dataset = ExampleDataset() - test_dataset.evaluate = MagicMock(return_value=dict(test='success')) - loader = DataLoader(test_dataset, batch_size=1) - model = ExampleModel() - data_loader = DataLoader( - test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) - optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) - optimizer = obj_from_dict(optim_cfg, torch.optim, - dict(params=model.parameters())) - - # test DistEvalHook - with tempfile.TemporaryDirectory() as tmpdir: - eval_hook = DistEvalHook(data_loader) - runner = mmcv.runner.IterBasedRunner( - model=model, - optimizer=optimizer, - work_dir=tmpdir, - logger=logging.getLogger()) - runner.register_hook(eval_hook) - runner.run([loader], [('train', 1)], 1) - test_dataset.evaluate.assert_called_with([torch.tensor([1])], - logger=runner.logger) diff --git a/tests/test_evaluation/test_metrics/test_citys_metric.py b/tests/test_evaluation/test_metrics/test_citys_metric.py new file mode 100644 index 0000000000..a6d6db5caa --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_citys_metric.py @@ -0,0 +1,118 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np +import torch +from mmengine.structures import BaseDataElement, PixelData + +from mmseg.evaluation import CitysMetric +from mmseg.structures import SegDataSample + + +class TestCitysMetric(TestCase): + + def _demo_mm_inputs(self, + batch_size=1, + image_shapes=(3, 128, 256), + num_classes=5): + """Create a superset of inputs needed to run test or train batches. + + Args: + batch_size (int): batch size. Default to 2. + image_shapes (List[tuple], Optional): image shape. + Default to (3, 64, 64) + num_classes (int): number of different classes. + Default to 5. 
+ """ + if isinstance(image_shapes, list): + assert len(image_shapes) == batch_size + else: + image_shapes = [image_shapes] * batch_size + + packed_inputs = [] + for idx in range(batch_size): + image_shape = image_shapes[idx] + _, h, w = image_shape + + mm_inputs = dict() + data_sample = SegDataSample() + gt_semantic_seg = np.random.randint( + 0, num_classes, (1, h, w), dtype=np.uint8) + gt_semantic_seg = torch.LongTensor(gt_semantic_seg) + gt_sem_seg_data = dict(data=gt_semantic_seg) + data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data) + mm_inputs['data_sample'] = data_sample.to_dict() + mm_inputs['data_sample']['seg_map_path'] = \ + 'tests/data/pseudo_cityscapes_dataset/gtFine/val/\ + frankfurt/frankfurt_000000_000294_gtFine_labelTrainIds.png' + + mm_inputs['seg_map_path'] = mm_inputs['data_sample'][ + 'seg_map_path'] + packed_inputs.append(mm_inputs) + + return packed_inputs + + def _demo_mm_model_output(self, + batch_size=1, + image_shapes=(3, 128, 256), + num_classes=5): + """Create a superset of inputs needed to run test or train batches. + + Args: + batch_size (int): batch size. Default to 2. + image_shapes (List[tuple], Optional): image shape. + Default to (3, 64, 64) + num_classes (int): number of different classes. + Default to 5. + """ + results_dict = dict() + _, h, w = image_shapes + seg_logit = torch.randn(batch_size, num_classes, h, w) + results_dict['seg_logits'] = seg_logit + seg_pred = np.random.randint( + 0, num_classes, (batch_size, h, w), dtype=np.uint8) + seg_pred = torch.LongTensor(seg_pred) + results_dict['pred_sem_seg'] = seg_pred + + batch_datasampes = [ + SegDataSample() + for _ in range(results_dict['pred_sem_seg'].shape[0]) + ] + for key, value in results_dict.items(): + for i in range(value.shape[0]): + setattr(batch_datasampes[i], key, PixelData(data=value[i])) + + _predictions = [] + for pred in batch_datasampes: + if isinstance(pred, BaseDataElement): + test_data = pred.to_dict() + test_data['img_path'] = \ + 'tests/data/pseudo_cityscapes_dataset/leftImg8bit/val/\ + frankfurt/frankfurt_000000_000294_leftImg8bit.png' + + _predictions.append(test_data) + else: + _predictions.append(pred) + return _predictions + + def test_evaluate(self): + """Test using the metric in the same way as Evalutor.""" + + data_batch = self._demo_mm_inputs(2) + predictions = self._demo_mm_model_output(2) + data_samples = [ + dict(**data, **result) + for data, result in zip(data_batch, predictions) + ] + iou_metric = CitysMetric(citys_metrics=['cityscapes']) + iou_metric.process(data_batch, data_samples) + res = iou_metric.evaluate(6) + self.assertIsInstance(res, dict) + # test to_label_id = True + iou_metric = CitysMetric( + citys_metrics=['cityscapes'], to_label_id=True) + iou_metric.process(data_batch, data_samples) + res = iou_metric.evaluate(6) + self.assertIsInstance(res, dict) + import shutil + shutil.rmtree('.format_cityscapes') diff --git a/tests/test_evaluation/test_metrics/test_iou_metric.py b/tests/test_evaluation/test_metrics/test_iou_metric.py new file mode 100644 index 0000000000..a0bc922c31 --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_iou_metric.py @@ -0,0 +1,76 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np +import torch +from mmengine.structures import PixelData + +from mmseg.evaluation import IoUMetric +from mmseg.structures import SegDataSample + + +class TestIoUMetric(TestCase): + + def _demo_mm_inputs(self, + batch_size=2, + image_shapes=(3, 64, 64), + num_classes=5): + """Create a superset of inputs needed to run test or train batches. + + Args: + batch_size (int): batch size. Default to 2. + image_shapes (List[tuple], Optional): image shape. + Default to (3, 64, 64) + num_classes (int): number of different classes. + Default to 5. + """ + if isinstance(image_shapes, list): + assert len(image_shapes) == batch_size + else: + image_shapes = [image_shapes] * batch_size + + data_samples = [] + for idx in range(batch_size): + image_shape = image_shapes[idx] + _, h, w = image_shape + + data_sample = SegDataSample() + gt_semantic_seg = np.random.randint( + 0, num_classes, (1, h, w), dtype=np.uint8) + gt_semantic_seg = torch.LongTensor(gt_semantic_seg) + gt_sem_seg_data = dict(data=gt_semantic_seg) + data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data) + + data_samples.append(data_sample.to_dict()) + + return data_samples + + def _demo_mm_model_output(self, + data_samples, + batch_size=2, + image_shapes=(3, 64, 64), + num_classes=5): + + _, h, w = image_shapes + + for data_sample in data_samples: + data_sample['seg_logits'] = dict( + data=torch.randn(num_classes, h, w)) + data_sample['pred_sem_seg'] = dict( + data=torch.randint(0, num_classes, (1, h, w))) + return data_samples + + def test_evaluate(self): + """Test using the metric in the same way as Evalutor.""" + + data_samples = self._demo_mm_inputs() + data_samples = self._demo_mm_model_output(data_samples) + + iou_metric = IoUMetric(iou_metrics=['mIoU']) + iou_metric.dataset_meta = dict( + classes=['wall', 'building', 'sky', 'floor', 'tree'], + label_map=dict(), + reduce_zero_label=False) + iou_metric.process([0] * len(data_samples), data_samples) + res = iou_metric.evaluate(6) + self.assertIsInstance(res, dict) diff --git a/tests/test_mean_iou.py b/tests/test_mean_iou.py deleted file mode 100644 index 48a3df8e4c..0000000000 --- a/tests/test_mean_iou.py +++ /dev/null @@ -1,56 +0,0 @@ -import numpy as np - -from mmseg.core.evaluation import mean_iou - - -def get_confusion_matrix(pred_label, label, num_classes, ignore_index): - """Intersection over Union - Args: - pred_label (np.ndarray): 2D predict map - label (np.ndarray): label 2D label map - num_classes (int): number of categories - ignore_index (int): index ignore in evaluation - """ - - mask = (label != ignore_index) - pred_label = pred_label[mask] - label = label[mask] - - n = num_classes - inds = n * label + pred_label - - mat = np.bincount(inds, minlength=n**2).reshape(n, n) - - return mat - - -# This func is deprecated since it's not memory efficient -def legacy_mean_iou(results, gt_seg_maps, num_classes, ignore_index): - num_imgs = len(results) - assert len(gt_seg_maps) == num_imgs - total_mat = np.zeros((num_classes, num_classes), dtype=np.float) - for i in range(num_imgs): - mat = get_confusion_matrix( - results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index) - total_mat += mat - all_acc = np.diag(total_mat).sum() / total_mat.sum() - acc = np.diag(total_mat) / total_mat.sum(axis=1) - iou = np.diag(total_mat) / ( - total_mat.sum(axis=1) + total_mat.sum(axis=0) - np.diag(total_mat)) - - return all_acc, acc, iou - - -def test_mean_iou(): - pred_size = (10, 30, 30) - num_classes = 19 - ignore_index = 255 - results = 
np.random.randint(0, num_classes, size=pred_size) - label = np.random.randint(0, num_classes, size=pred_size) - label[:, 2, 5:10] = ignore_index - all_acc, acc, iou = mean_iou(results, label, num_classes, ignore_index) - all_acc_l, acc_l, iou_l = legacy_mean_iou(results, label, num_classes, - ignore_index) - assert all_acc == all_acc_l - assert np.allclose(acc, acc_l) - assert np.allclose(iou, iou_l) diff --git a/tests/test_models/__init__.py b/tests/test_models/__init__.py new file mode 100644 index 0000000000..ef101fec61 --- /dev/null +++ b/tests/test_models/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/tests/test_models/test_backbone.py b/tests/test_models/test_backbone.py deleted file mode 100644 index 00ae43d009..0000000000 --- a/tests/test_models/test_backbone.py +++ /dev/null @@ -1,666 +0,0 @@ -import pytest -import torch -from mmcv.ops import DeformConv2dPack -from mmcv.utils.parrots_wrapper import _BatchNorm -from torch.nn.modules import AvgPool2d, GroupNorm - -from mmseg.models.backbones import ResNet, ResNetV1d, ResNeXt -from mmseg.models.backbones.resnet import BasicBlock, Bottleneck -from mmseg.models.backbones.resnext import Bottleneck as BottleneckX -from mmseg.models.utils import ResLayer - - -def is_block(modules): - """Check if is ResNet building block.""" - if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX)): - return True - return False - - -def is_norm(modules): - """Check if is one of the norms.""" - if isinstance(modules, (GroupNorm, _BatchNorm)): - return True - return False - - -def all_zeros(modules): - """Check if the weight(and bias) is all zero.""" - weight_zero = torch.allclose(modules.weight.data, - torch.zeros_like(modules.weight.data)) - if hasattr(modules, 'bias'): - bias_zero = torch.allclose(modules.bias.data, - torch.zeros_like(modules.bias.data)) - else: - bias_zero = True - - return weight_zero and bias_zero - - -def check_norm_state(modules, train_state): - """Check if norm layer is in correct train state.""" - for mod in modules: - if isinstance(mod, _BatchNorm): - if mod.training != train_state: - return False - return True - - -def test_resnet_basic_block(): - - with pytest.raises(AssertionError): - # Not implemented yet. - dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) - BasicBlock(64, 64, dcn=dcn) - - with pytest.raises(AssertionError): - # Not implemented yet. - plugins = [ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 16), - position='after_conv3') - ] - BasicBlock(64, 64, plugins=plugins) - - with pytest.raises(AssertionError): - # Not implemented yet - plugins = [ - dict( - cfg=dict( - type='GeneralizedAttention', - spatial_range=-1, - num_heads=8, - attention_type='0010', - kv_stride=2), - position='after_conv2') - ] - BasicBlock(64, 64, plugins=plugins) - - # Test BasicBlock with checkpoint forward - block = BasicBlock(16, 16, with_cp=True) - assert block.with_cp - x = torch.randn(1, 16, 56, 56) - x_out = block(x) - assert x_out.shape == torch.Size([1, 16, 56, 56]) - - # test BasicBlock structure and forward - block = BasicBlock(64, 64) - assert block.conv1.in_channels == 64 - assert block.conv1.out_channels == 64 - assert block.conv1.kernel_size == (3, 3) - assert block.conv2.in_channels == 64 - assert block.conv2.out_channels == 64 - assert block.conv2.kernel_size == (3, 3) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - assert x_out.shape == torch.Size([1, 64, 56, 56]) - - -def test_resnet_bottleneck(): - - with pytest.raises(AssertionError): - # Style must be in ['pytorch', 'caffe'] - Bottleneck(64, 64, style='tensorflow') - - with pytest.raises(AssertionError): - # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3' - plugins = [ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16), - position='after_conv4') - ] - Bottleneck(64, 16, plugins=plugins) - - with pytest.raises(AssertionError): - # Need to specify different postfix to avoid duplicate plugin name - plugins = [ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16), - position='after_conv3'), - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16), - position='after_conv3') - ] - Bottleneck(64, 16, plugins=plugins) - - with pytest.raises(KeyError): - # Plugin type is not supported - plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')] - Bottleneck(64, 16, plugins=plugins) - - # Test Bottleneck with checkpoint forward - block = Bottleneck(64, 16, with_cp=True) - assert block.with_cp - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - assert x_out.shape == torch.Size([1, 64, 56, 56]) - - # Test Bottleneck style - block = Bottleneck(64, 64, stride=2, style='pytorch') - assert block.conv1.stride == (1, 1) - assert block.conv2.stride == (2, 2) - block = Bottleneck(64, 64, stride=2, style='caffe') - assert block.conv1.stride == (2, 2) - assert block.conv2.stride == (1, 1) - - # Test Bottleneck DCN - dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) - with pytest.raises(AssertionError): - Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv')) - block = Bottleneck(64, 64, dcn=dcn) - assert isinstance(block.conv2, DeformConv2dPack) - - # Test Bottleneck forward - block = Bottleneck(64, 16) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - assert x_out.shape == torch.Size([1, 64, 56, 56]) - - # Test Bottleneck with 1 ContextBlock after conv3 - plugins = [ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 16), - position='after_conv3') - ] - block = Bottleneck(64, 16, plugins=plugins) - assert block.context_block.in_channels == 64 - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - assert x_out.shape == torch.Size([1, 64, 56, 56]) - - # Test Bottleneck with 1 GeneralizedAttention after conv2 - plugins = [ - dict( - cfg=dict( - type='GeneralizedAttention', - spatial_range=-1, - num_heads=8, - attention_type='0010', - kv_stride=2), - position='after_conv2') - ] - block = Bottleneck(64, 16, plugins=plugins) - assert block.gen_attention_block.in_channels == 16 - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - assert x_out.shape == torch.Size([1, 64, 56, 56]) - - # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2d - # after conv2, 1 ContextBlock after conv3 - plugins = [ - dict( - cfg=dict( - type='GeneralizedAttention', - spatial_range=-1, - num_heads=8, - attention_type='0010', - kv_stride=2), - position='after_conv2'), - dict(cfg=dict(type='NonLocal2d'), position='after_conv2'), - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16), - position='after_conv3') - ] - block = Bottleneck(64, 16, plugins=plugins) - assert block.gen_attention_block.in_channels == 16 - assert block.nonlocal_block.in_channels == 16 - assert block.context_block.in_channels == 64 - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - assert x_out.shape == torch.Size([1, 64, 56, 56]) - - # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after - # conv3 - plugins = [ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1), - position='after_conv2'), - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2), - position='after_conv3'), - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3), - position='after_conv3') - ] - block = Bottleneck(64, 16, plugins=plugins) - assert block.context_block1.in_channels == 16 - assert block.context_block2.in_channels == 64 - assert block.context_block3.in_channels == 64 - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - assert x_out.shape == torch.Size([1, 64, 56, 56]) - - -def test_resnet_res_layer(): - # Test ResLayer of 3 Bottleneck w\o downsample - layer = ResLayer(Bottleneck, 64, 16, 3) - assert len(layer) == 3 - assert layer[0].conv1.in_channels == 64 - assert layer[0].conv1.out_channels == 16 - for i in range(1, len(layer)): - assert layer[i].conv1.in_channels == 64 - assert layer[i].conv1.out_channels == 16 - for i in range(len(layer)): - assert layer[i].downsample is None - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - assert x_out.shape == torch.Size([1, 64, 56, 56]) - - # Test ResLayer of 3 Bottleneck with downsample - layer = ResLayer(Bottleneck, 64, 64, 3) - assert layer[0].downsample[0].out_channels == 256 - for i in range(1, len(layer)): - assert layer[i].downsample is None - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - assert x_out.shape == torch.Size([1, 256, 56, 56]) - - # Test ResLayer of 3 Bottleneck with stride=2 - layer = ResLayer(Bottleneck, 64, 64, 3, stride=2) - assert layer[0].downsample[0].out_channels == 256 - assert layer[0].downsample[0].stride == (2, 2) - for i in range(1, len(layer)): - assert layer[i].downsample is None - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - assert x_out.shape == torch.Size([1, 256, 28, 28]) - - # Test ResLayer of 3 Bottleneck with stride=2 and average downsample - layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True) - assert isinstance(layer[0].downsample[0], AvgPool2d) - assert layer[0].downsample[1].out_channels == 256 - 
assert layer[0].downsample[1].stride == (1, 1) - for i in range(1, len(layer)): - assert layer[i].downsample is None - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - assert x_out.shape == torch.Size([1, 256, 28, 28]) - - # Test ResLayer of 3 Bottleneck with dilation=2 - layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2) - for i in range(len(layer)): - assert layer[i].conv2.dilation == (2, 2) - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - assert x_out.shape == torch.Size([1, 64, 56, 56]) - - # Test ResLayer of 3 Bottleneck with dilation=2, contract_dilation=True - layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2, contract_dilation=True) - assert layer[0].conv2.dilation == (1, 1) - for i in range(1, len(layer)): - assert layer[i].conv2.dilation == (2, 2) - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - assert x_out.shape == torch.Size([1, 64, 56, 56]) - - # Test ResLayer of 3 Bottleneck with dilation=2, multi_grid - layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2, multi_grid=(1, 2, 4)) - assert layer[0].conv2.dilation == (1, 1) - assert layer[1].conv2.dilation == (2, 2) - assert layer[2].conv2.dilation == (4, 4) - x = torch.randn(1, 64, 56, 56) - x_out = layer(x) - assert x_out.shape == torch.Size([1, 64, 56, 56]) - - -def test_resnet_backbone(): - """Test resnet backbone.""" - with pytest.raises(KeyError): - # ResNet depth should be in [18, 34, 50, 101, 152] - ResNet(20) - - with pytest.raises(AssertionError): - # In ResNet: 1 <= num_stages <= 4 - ResNet(50, num_stages=0) - - with pytest.raises(AssertionError): - # len(stage_with_dcn) == num_stages - dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) - ResNet(50, dcn=dcn, stage_with_dcn=(True, )) - - with pytest.raises(AssertionError): - # len(stage_with_plugin) == num_stages - plugins = [ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 16), - stages=(False, True, True), - position='after_conv3') - ] - ResNet(50, plugins=plugins) - - with pytest.raises(AssertionError): - # In ResNet: 1 <= num_stages <= 4 - ResNet(50, num_stages=5) - - with pytest.raises(AssertionError): - # len(strides) == len(dilations) == num_stages - ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) - - with pytest.raises(TypeError): - # pretrained must be a string path - model = ResNet(50) - model.init_weights(pretrained=0) - - with pytest.raises(AssertionError): - # Style must be in ['pytorch', 'caffe'] - ResNet(50, style='tensorflow') - - # Test ResNet50 norm_eval=True - model = ResNet(50, norm_eval=True) - model.init_weights() - model.train() - assert check_norm_state(model.modules(), False) - - # Test ResNet50 with torchvision pretrained weight - model = ResNet(depth=50, norm_eval=True) - model.init_weights('torchvision://resnet50') - model.train() - assert check_norm_state(model.modules(), False) - - # Test ResNet50 with first stage frozen - frozen_stages = 1 - model = ResNet(50, frozen_stages=frozen_stages) - model.init_weights() - model.train() - assert model.norm1.training is False - for layer in [model.conv1, model.norm1]: - for param in layer.parameters(): - assert param.requires_grad is False - for i in range(1, frozen_stages + 1): - layer = getattr(model, 'layer{}'.format(i)) - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - assert mod.training is False - for param in layer.parameters(): - assert param.requires_grad is False - - # Test ResNet50V1d with first stage frozen - model = ResNetV1d(depth=50, frozen_stages=frozen_stages) - assert len(model.stem) == 9 - model.init_weights() - model.train() - check_norm_state(model.stem, False) - for param in model.stem.parameters(): - assert param.requires_grad is False - for i in range(1, frozen_stages + 1): - layer = getattr(model, 'layer{}'.format(i)) - for mod in layer.modules(): - if isinstance(mod, _BatchNorm): - assert mod.training is False - for param in layer.parameters(): - assert param.requires_grad is False - - # Test ResNet18 forward - model = ResNet(18) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - assert len(feat) == 4 - assert feat[0].shape == torch.Size([1, 64, 56, 56]) - assert feat[1].shape == torch.Size([1, 128, 28, 28]) - assert feat[2].shape == torch.Size([1, 256, 14, 14]) - assert feat[3].shape == torch.Size([1, 512, 7, 7]) - - # Test ResNet50 with BatchNorm forward - model = ResNet(50) - for m in model.modules(): - if is_norm(m): - assert isinstance(m, _BatchNorm) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - assert len(feat) == 4 - assert feat[0].shape == torch.Size([1, 256, 56, 56]) - assert feat[1].shape == torch.Size([1, 512, 28, 28]) - assert feat[2].shape == torch.Size([1, 1024, 14, 14]) - assert feat[3].shape == torch.Size([1, 2048, 7, 7]) - - # Test ResNet50 with layers 1, 2, 3 out forward - model = ResNet(50, out_indices=(0, 1, 2)) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - assert len(feat) == 3 - assert feat[0].shape == torch.Size([1, 256, 56, 56]) - assert feat[1].shape == torch.Size([1, 512, 28, 28]) - assert feat[2].shape == torch.Size([1, 1024, 14, 14]) - - # Test ResNet18 with checkpoint forward - model = ResNet(18, with_cp=True) - for m in model.modules(): - if is_block(m): - assert m.with_cp - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) 
- feat = model(imgs) - assert len(feat) == 4 - assert feat[0].shape == torch.Size([1, 64, 56, 56]) - assert feat[1].shape == torch.Size([1, 128, 28, 28]) - assert feat[2].shape == torch.Size([1, 256, 14, 14]) - assert feat[3].shape == torch.Size([1, 512, 7, 7]) - - # Test ResNet50 with checkpoint forward - model = ResNet(50, with_cp=True) - for m in model.modules(): - if is_block(m): - assert m.with_cp - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - assert len(feat) == 4 - assert feat[0].shape == torch.Size([1, 256, 56, 56]) - assert feat[1].shape == torch.Size([1, 512, 28, 28]) - assert feat[2].shape == torch.Size([1, 1024, 14, 14]) - assert feat[3].shape == torch.Size([1, 2048, 7, 7]) - - # Test ResNet50 with GroupNorm forward - model = ResNet( - 50, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)) - for m in model.modules(): - if is_norm(m): - assert isinstance(m, GroupNorm) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - assert len(feat) == 4 - assert feat[0].shape == torch.Size([1, 256, 56, 56]) - assert feat[1].shape == torch.Size([1, 512, 28, 28]) - assert feat[2].shape == torch.Size([1, 1024, 14, 14]) - assert feat[3].shape == torch.Size([1, 2048, 7, 7]) - - # Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2d - # after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4 - plugins = [ - dict( - cfg=dict( - type='GeneralizedAttention', - spatial_range=-1, - num_heads=8, - attention_type='0010', - kv_stride=2), - stages=(False, True, True, True), - position='after_conv2'), - dict(cfg=dict(type='NonLocal2d'), position='after_conv2'), - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16), - stages=(False, True, True, False), - position='after_conv3') - ] - model = ResNet(50, plugins=plugins) - for m in model.layer1.modules(): - if is_block(m): - assert not hasattr(m, 'context_block') - assert not hasattr(m, 'gen_attention_block') - assert m.nonlocal_block.in_channels == 64 - for m in model.layer2.modules(): - if is_block(m): - assert m.nonlocal_block.in_channels == 128 - assert m.gen_attention_block.in_channels == 128 - assert m.context_block.in_channels == 512 - - for m in model.layer3.modules(): - if is_block(m): - assert m.nonlocal_block.in_channels == 256 - assert m.gen_attention_block.in_channels == 256 - assert m.context_block.in_channels == 1024 - - for m in model.layer4.modules(): - if is_block(m): - assert m.nonlocal_block.in_channels == 512 - assert m.gen_attention_block.in_channels == 512 - assert not hasattr(m, 'context_block') - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - assert len(feat) == 4 - assert feat[0].shape == torch.Size([1, 256, 56, 56]) - assert feat[1].shape == torch.Size([1, 512, 28, 28]) - assert feat[2].shape == torch.Size([1, 1024, 14, 14]) - assert feat[3].shape == torch.Size([1, 2048, 7, 7]) - - # Test ResNet50 with 1 ContextBlock after conv2, 1 ContextBlock after - # conv3 in layers 2, 3, 4 - plugins = [ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1), - stages=(False, True, True, False), - position='after_conv3'), - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 16, postfix=2), - stages=(False, True, True, False), - position='after_conv3') - ] - - model = ResNet(50, plugins=plugins) - for m in model.layer1.modules(): - if is_block(m): - assert not hasattr(m, 'context_block') - assert not hasattr(m, 'context_block1') - assert not hasattr(m, 'context_block2') - for m in model.layer2.modules(): - if is_block(m): - assert not hasattr(m, 'context_block') - assert m.context_block1.in_channels == 512 - assert m.context_block2.in_channels == 512 - - for m in model.layer3.modules(): - if is_block(m): - assert not hasattr(m, 'context_block') - assert m.context_block1.in_channels == 1024 - assert m.context_block2.in_channels == 1024 - - for m in model.layer4.modules(): - if is_block(m): - assert not hasattr(m, 'context_block') - assert not hasattr(m, 'context_block1') - assert not hasattr(m, 'context_block2') - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - assert len(feat) == 4 - assert feat[0].shape == torch.Size([1, 256, 56, 56]) - assert feat[1].shape == torch.Size([1, 512, 28, 28]) - assert feat[2].shape == torch.Size([1, 1024, 14, 14]) - assert feat[3].shape == torch.Size([1, 2048, 7, 7]) - - # Test ResNet50 zero initialization of residual - model = ResNet(50, zero_init_residual=True) - model.init_weights() - for m in model.modules(): - if isinstance(m, Bottleneck): - assert all_zeros(m.norm3) - elif isinstance(m, BasicBlock): - assert all_zeros(m.norm2) - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - assert len(feat) == 4 - assert feat[0].shape == torch.Size([1, 256, 56, 56]) - assert feat[1].shape == torch.Size([1, 512, 28, 28]) - assert feat[2].shape == torch.Size([1, 1024, 14, 14]) - assert feat[3].shape == torch.Size([1, 2048, 7, 7]) - - # Test ResNetV1d forward - model = ResNetV1d(depth=50) - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - assert len(feat) == 4 - assert feat[0].shape == torch.Size([1, 256, 56, 56]) - assert feat[1].shape == torch.Size([1, 512, 28, 28]) - assert feat[2].shape == torch.Size([1, 1024, 14, 14]) - assert feat[3].shape == torch.Size([1, 2048, 7, 7]) - - -def test_renext_bottleneck(): - with pytest.raises(AssertionError): - # Style must be in ['pytorch', 'caffe'] - BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow') - - # Test ResNeXt Bottleneck structure - block = BottleneckX( - 64, 64, groups=32, base_width=4, stride=2, style='pytorch') - assert block.conv2.stride == (2, 2) - assert block.conv2.groups == 32 - assert block.conv2.out_channels == 128 - - # Test ResNeXt Bottleneck with DCN - dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) - with pytest.raises(AssertionError): - # conv_cfg must be None if dcn is not None - BottleneckX( - 64, - 64, - groups=32, - base_width=4, - dcn=dcn, - conv_cfg=dict(type='Conv')) - BottleneckX(64, 64, dcn=dcn) - - # Test ResNeXt Bottleneck forward - block = BottleneckX(64, 16, groups=32, base_width=4) - x = torch.randn(1, 64, 56, 56) - x_out = block(x) - assert x_out.shape == torch.Size([1, 64, 56, 56]) - - -def test_resnext_backbone(): - with pytest.raises(KeyError): - # ResNeXt depth should be in [50, 101, 152] - ResNeXt(depth=18) - - # Test ResNeXt with group 32, base_width 4 - model = ResNeXt(depth=50, groups=32, base_width=4) - print(model) - for m in model.modules(): - if is_block(m): - assert m.conv2.groups == 32 - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 224, 224) - feat = model(imgs) - 
assert len(feat) == 4 - assert feat[0].shape == torch.Size([1, 256, 56, 56]) - assert feat[1].shape == torch.Size([1, 512, 28, 28]) - assert feat[2].shape == torch.Size([1, 1024, 14, 14]) - assert feat[3].shape == torch.Size([1, 2048, 7, 7]) diff --git a/tests/test_models/test_backbones/__init__.py b/tests/test_models/test_backbones/__init__.py new file mode 100644 index 0000000000..ef101fec61 --- /dev/null +++ b/tests/test_models/test_backbones/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/tests/test_models/test_backbones/test_beit.py b/tests/test_models/test_backbones/test_beit.py new file mode 100644 index 0000000000..59a12c5d09 --- /dev/null +++ b/tests/test_models/test_backbones/test_beit.py @@ -0,0 +1,185 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.backbones.beit import BEiT +from .utils import check_norm_state + + +def test_beit_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = BEiT() + model.init_weights(pretrained=0) + + with pytest.raises(TypeError): + # img_size must be int or tuple + model = BEiT(img_size=512.0) + + with pytest.raises(TypeError): + # out_indices must be int ,list or tuple + model = BEiT(out_indices=1.) + + with pytest.raises(AssertionError): + # The length of img_size tuple must be lower than 3. + BEiT(img_size=(224, 224, 224)) + + with pytest.raises(TypeError): + # Pretrained must be None or Str. + BEiT(pretrained=123) + + # Test img_size isinstance tuple + imgs = torch.randn(1, 3, 224, 224) + model = BEiT(img_size=(224, )) + model.init_weights() + model(imgs) + + # Test img_size isinstance tuple + imgs = torch.randn(1, 3, 224, 224) + model = BEiT(img_size=(224, 224)) + model(imgs) + + # Test norm_eval = True + model = BEiT(norm_eval=True) + model.train() + + # Test BEiT backbone with input size of 224 and patch size of 16 + model = BEiT() + model.init_weights() + model.train() + + # Test qv_bias + model = BEiT(qv_bias=False) + model.train() + + # Test out_indices = list + model = BEiT(out_indices=[2, 4, 8, 12]) + model.train() + + assert check_norm_state(model.modules(), True) + + # Test image size = (224, 224) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + # Test BEiT backbone with input size of 256 and patch size of 16 + model = BEiT(img_size=(256, 256)) + model.init_weights() + model.train() + imgs = torch.randn(1, 3, 256, 256) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 16, 16) + + # Test BEiT backbone with input size of 32 and patch size of 16 + model = BEiT(img_size=(32, 32)) + model.init_weights() + model.train() + imgs = torch.randn(1, 3, 32, 32) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 2, 2) + + # Test unbalanced size input image + model = BEiT(img_size=(112, 224)) + model.init_weights() + model.train() + imgs = torch.randn(1, 3, 112, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 7, 14) + + # Test irregular input image + model = BEiT(img_size=(234, 345)) + model.init_weights() + model.train() + imgs = torch.randn(1, 3, 234, 345) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 21) + + # Test init_values=0 + model = BEiT(init_values=0) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + # Test final norm + model = BEiT(final_norm=True) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + # 
Test patch norm + model = BEiT(patch_norm=True) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + +def test_beit_init(): + path = 'PATH_THAT_DO_NOT_EXIST' + # Test all combinations of pretrained and init_cfg + # pretrained=None, init_cfg=None + model = BEiT(pretrained=None, init_cfg=None) + assert model.init_cfg is None + model.init_weights() + + # pretrained=None + # init_cfg loads pretrain from an non-existent file + model = BEiT( + pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path)) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # test resize_rel_pos_embed + value = torch.randn(732, 16) + ckpt = { + 'state_dict': { + 'layers.0.attn.relative_position_index': 0, + 'layers.0.attn.relative_position_bias_table': value + } + } + model = BEiT(img_size=(512, 512)) + # If scipy is installed, this AttributeError would not be raised. + from mmengine.utils import is_installed + if not is_installed('scipy'): + with pytest.raises(AttributeError): + model.resize_rel_pos_embed(ckpt) + + # pretrained=None + # init_cfg=123, whose type is unsupported + model = BEiT(pretrained=None, init_cfg=123) + with pytest.raises(TypeError): + model.init_weights() + + # pretrained loads pretrain from an non-existent file + # init_cfg=None + model = BEiT(pretrained=path, init_cfg=None) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # pretrained loads pretrain from an non-existent file + # init_cfg loads pretrain from an non-existent file + with pytest.raises(AssertionError): + model = BEiT( + pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path)) + with pytest.raises(AssertionError): + model = BEiT(pretrained=path, init_cfg=123) + + # pretrain=123, whose type is unsupported + # init_cfg=None + with pytest.raises(TypeError): + model = BEiT(pretrained=123, init_cfg=None) + + # pretrain=123, whose type is unsupported + # init_cfg loads pretrain from an non-existent file + with pytest.raises(AssertionError): + model = BEiT( + pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path)) + + # pretrain=123, whose type is unsupported + # init_cfg=123, whose type is unsupported + with pytest.raises(AssertionError): + model = BEiT(pretrained=123, init_cfg=123) diff --git a/tests/test_models/test_backbones/test_bisenetv1.py b/tests/test_models/test_backbones/test_bisenetv1.py new file mode 100644 index 0000000000..c0677493d6 --- /dev/null +++ b/tests/test_models/test_backbones/test_bisenetv1.py @@ -0,0 +1,109 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
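As a quick orientation for the new test_bisenetv1.py below: it builds BiSeNetV1 on top of a ResNet-18 backbone_cfg and asserts three output feature maps. A minimal usage sketch along the same lines, with the constructor arguments and shapes taken from the test's own assertions (not a prescription beyond what the test checks):

    import torch
    from mmseg.models.backbones import BiSeNetV1

    backbone_cfg = dict(
        type='ResNet', in_channels=3, depth=18, num_stages=4,
        out_indices=(0, 1, 2, 3), dilations=(1, 1, 1, 1),
        strides=(1, 2, 2, 2), norm_eval=False, style='pytorch',
        contract_dilation=True)
    model = BiSeNetV1(in_channels=3, backbone_cfg=backbone_cfg)
    # one feature for the decode head plus two auxiliary-head features,
    # at 1/8, 1/8 and 1/16 of the input resolution respectively
    feats = model(torch.randn(2, 3, 64, 128))
    assert [f.shape[1] for f in feats] == [256, 128, 128]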
+import pytest +import torch + +from mmseg.models.backbones import BiSeNetV1 +from mmseg.models.backbones.bisenetv1 import (AttentionRefinementModule, + ContextPath, FeatureFusionModule, + SpatialPath) + + +def test_bisenetv1_backbone(): + # Test BiSeNetV1 Standard Forward + backbone_cfg = dict( + type='ResNet', + in_channels=3, + depth=18, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_eval=False, + style='pytorch', + contract_dilation=True) + model = BiSeNetV1(in_channels=3, backbone_cfg=backbone_cfg) + model.init_weights() + model.train() + batch_size = 2 + imgs = torch.randn(batch_size, 3, 64, 128) + feat = model(imgs) + + assert len(feat) == 3 + # output for segment Head + assert feat[0].shape == torch.Size([batch_size, 256, 8, 16]) + # for auxiliary head 1 + assert feat[1].shape == torch.Size([batch_size, 128, 8, 16]) + # for auxiliary head 2 + assert feat[2].shape == torch.Size([batch_size, 128, 4, 8]) + + # Test input with rare shape + batch_size = 2 + imgs = torch.randn(batch_size, 3, 95, 27) + feat = model(imgs) + assert len(feat) == 3 + + with pytest.raises(AssertionError): + # BiSeNetV1 spatial path channel constraints. + BiSeNetV1( + backbone_cfg=backbone_cfg, + in_channels=3, + spatial_channels=(16, 16, 16)) + + with pytest.raises(AssertionError): + # BiSeNetV1 context path constraints. + BiSeNetV1( + backbone_cfg=backbone_cfg, + in_channels=3, + context_channels=(16, 32, 64, 128)) + + +def test_bisenetv1_spatial_path(): + with pytest.raises(AssertionError): + # BiSeNetV1 spatial path channel constraints. + SpatialPath(num_channels=(16, 16, 16), in_channels=3) + + +def test_bisenetv1_context_path(): + backbone_cfg = dict( + type='ResNet', + in_channels=3, + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_eval=False, + style='pytorch', + contract_dilation=True) + + with pytest.raises(AssertionError): + # BiSeNetV1 context path constraints. + ContextPath( + backbone_cfg=backbone_cfg, context_channels=(16, 32, 64, 128)) + + +def test_bisenetv1_attention_refinement_module(): + x_arm = AttentionRefinementModule(32, 8) + assert x_arm.conv_layer.in_channels == 32 + assert x_arm.conv_layer.out_channels == 8 + assert x_arm.conv_layer.kernel_size == (3, 3) + x = torch.randn(2, 32, 8, 16) + x_out = x_arm(x) + assert x_out.shape == torch.Size([2, 8, 8, 16]) + + +def test_bisenetv1_feature_fusion_module(): + ffm = FeatureFusionModule(16, 32) + assert ffm.conv1.in_channels == 16 + assert ffm.conv1.out_channels == 32 + assert ffm.conv1.kernel_size == (1, 1) + assert ffm.gap.output_size == (1, 1) + assert ffm.conv_atten[0].in_channels == 32 + assert ffm.conv_atten[0].out_channels == 32 + assert ffm.conv_atten[0].kernel_size == (1, 1) + + ffm = FeatureFusionModule(16, 16) + x1 = torch.randn(2, 8, 8, 16) + x2 = torch.randn(2, 8, 8, 16) + x_out = ffm(x1, x2) + assert x_out.shape == torch.Size([2, 16, 8, 16]) diff --git a/tests/test_models/test_backbones/test_bisenetv2.py b/tests/test_models/test_backbones/test_bisenetv2.py new file mode 100644 index 0000000000..cf2dfb3253 --- /dev/null +++ b/tests/test_models/test_backbones/test_bisenetv2.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
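For the BiSeNetV2 tests that follow: the backbone is constructed with its default branch widths and returns five feature maps, one for the decode head and four for auxiliary heads. A minimal sketch matching the shapes asserted in the test below:

    import torch
    from mmseg.models.backbones import BiSeNetV2

    model = BiSeNetV2()  # default detail/semantic branch channels
    feats = model(torch.randn(2, 3, 128, 256))
    # feats[0] feeds the decode head (128 channels at 1/8 resolution);
    # feats[1:] feed the auxiliary heads at 1/4, 1/8, 1/16 and 1/32
    assert len(feats) == 5
    assert feats[0].shape == (2, 128, 16, 32)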
+import torch +from mmcv.cnn import ConvModule + +from mmseg.models.backbones import BiSeNetV2 +from mmseg.models.backbones.bisenetv2 import (BGALayer, DetailBranch, + SemanticBranch) + + +def test_bisenetv2_backbone(): + # Test BiSeNetV2 Standard Forward + model = BiSeNetV2() + model.init_weights() + model.train() + batch_size = 2 + imgs = torch.randn(batch_size, 3, 128, 256) + feat = model(imgs) + + assert len(feat) == 5 + # output for segment Head + assert feat[0].shape == torch.Size([batch_size, 128, 16, 32]) + # for auxiliary head 1 + assert feat[1].shape == torch.Size([batch_size, 16, 32, 64]) + # for auxiliary head 2 + assert feat[2].shape == torch.Size([batch_size, 32, 16, 32]) + # for auxiliary head 3 + assert feat[3].shape == torch.Size([batch_size, 64, 8, 16]) + # for auxiliary head 4 + assert feat[4].shape == torch.Size([batch_size, 128, 4, 8]) + + # Test input with rare shape + batch_size = 2 + imgs = torch.randn(batch_size, 3, 95, 27) + feat = model(imgs) + assert len(feat) == 5 + + +def test_bisenetv2_DetailBranch(): + x = torch.randn(1, 3, 32, 64) + detail_branch = DetailBranch(detail_channels=(64, 16, 32)) + assert isinstance(detail_branch.detail_branch[0][0], ConvModule) + x_out = detail_branch(x) + assert x_out.shape == torch.Size([1, 32, 4, 8]) + + +def test_bisenetv2_SemanticBranch(): + semantic_branch = SemanticBranch(semantic_channels=(16, 32, 64, 128)) + assert semantic_branch.stage1.pool.stride == 2 + + +def test_bisenetv2_BGALayer(): + x_a = torch.randn(1, 8, 8, 16) + x_b = torch.randn(1, 8, 2, 4) + bga = BGALayer(out_channels=8) + assert isinstance(bga.conv, ConvModule) + x_out = bga(x_a, x_b) + assert x_out.shape == torch.Size([1, 8, 8, 16]) diff --git a/tests/test_models/test_backbones/test_blocks.py b/tests/test_models/test_backbones/test_blocks.py new file mode 100644 index 0000000000..7a65d272cf --- /dev/null +++ b/tests/test_models/test_backbones/test_blocks.py @@ -0,0 +1,187 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import pytest +import torch +from mmengine.utils import digit_version +from mmengine.utils.dl_utils import TORCH_VERSION + +from mmseg.models.utils import (InvertedResidual, InvertedResidualV3, SELayer, + make_divisible) + + +def test_make_divisible(): + # test with min_value = None + assert make_divisible(10, 4) == 12 + assert make_divisible(9, 4) == 12 + assert make_divisible(1, 4) == 4 + + # test with min_value = 8 + assert make_divisible(10, 4, 8) == 12 + assert make_divisible(9, 4, 8) == 12 + assert make_divisible(1, 4, 8) == 8 + + +def test_inv_residual(): + with pytest.raises(AssertionError): + # test stride assertion. + InvertedResidual(32, 32, 3, 4) + + # test default config with res connection. + # set expand_ratio = 4, stride = 1 and inp=oup. + inv_module = InvertedResidual(32, 32, 1, 4) + assert inv_module.use_res_connect + assert inv_module.conv[0].kernel_size == (1, 1) + assert inv_module.conv[0].padding == 0 + assert inv_module.conv[1].kernel_size == (3, 3) + assert inv_module.conv[1].padding == 1 + assert inv_module.conv[0].with_norm + assert inv_module.conv[1].with_norm + x = torch.rand(1, 32, 64, 64) + output = inv_module(x) + assert output.shape == (1, 32, 64, 64) + + # test inv_residual module without res connection. + # set expand_ratio = 4, stride = 2. 
+ inv_module = InvertedResidual(32, 32, 2, 4) + assert not inv_module.use_res_connect + assert inv_module.conv[0].kernel_size == (1, 1) + x = torch.rand(1, 32, 64, 64) + output = inv_module(x) + assert output.shape == (1, 32, 32, 32) + + # test expand_ratio == 1 + inv_module = InvertedResidual(32, 32, 1, 1) + assert inv_module.conv[0].kernel_size == (3, 3) + x = torch.rand(1, 32, 64, 64) + output = inv_module(x) + assert output.shape == (1, 32, 64, 64) + + # test with checkpoint forward + inv_module = InvertedResidual(32, 32, 1, 1, with_cp=True) + assert inv_module.with_cp + x = torch.rand(1, 32, 64, 64, requires_grad=True) + output = inv_module(x) + assert output.shape == (1, 32, 64, 64) + + +def test_inv_residualv3(): + with pytest.raises(AssertionError): + # test stride assertion. + InvertedResidualV3(32, 32, 16, stride=3) + + with pytest.raises(AssertionError): + # test assertion. + InvertedResidualV3(32, 32, 16, with_expand_conv=False) + + # test with se_cfg=None, with_expand_conv=False + inv_module = InvertedResidualV3(32, 32, 32, with_expand_conv=False) + + assert inv_module.with_res_shortcut is True + assert inv_module.with_se is False + assert inv_module.with_expand_conv is False + assert not hasattr(inv_module, 'expand_conv') + assert isinstance(inv_module.depthwise_conv.conv, torch.nn.Conv2d) + assert inv_module.depthwise_conv.conv.kernel_size == (3, 3) + assert inv_module.depthwise_conv.conv.stride == (1, 1) + assert inv_module.depthwise_conv.conv.padding == (1, 1) + assert isinstance(inv_module.depthwise_conv.bn, torch.nn.BatchNorm2d) + assert isinstance(inv_module.depthwise_conv.activate, torch.nn.ReLU) + assert inv_module.linear_conv.conv.kernel_size == (1, 1) + assert inv_module.linear_conv.conv.stride == (1, 1) + assert inv_module.linear_conv.conv.padding == (0, 0) + assert isinstance(inv_module.linear_conv.bn, torch.nn.BatchNorm2d) + + x = torch.rand(1, 32, 64, 64) + output = inv_module(x) + assert output.shape == (1, 32, 64, 64) + + # test with se_cfg and with_expand_conv + se_cfg = dict( + channels=16, + ratio=4, + act_cfg=(dict(type='ReLU'), + dict(type='HSigmoid', bias=3.0, divisor=6.0))) + act_cfg = dict(type='HSwish') + inv_module = InvertedResidualV3( + 32, 40, 16, 3, 2, se_cfg=se_cfg, act_cfg=act_cfg) + assert inv_module.with_res_shortcut is False + assert inv_module.with_se is True + assert inv_module.with_expand_conv is True + assert inv_module.expand_conv.conv.kernel_size == (1, 1) + assert inv_module.expand_conv.conv.stride == (1, 1) + assert inv_module.expand_conv.conv.padding == (0, 0) + + assert isinstance(inv_module.depthwise_conv.conv, + mmcv.cnn.bricks.Conv2dAdaptivePadding) + assert inv_module.depthwise_conv.conv.kernel_size == (3, 3) + assert inv_module.depthwise_conv.conv.stride == (2, 2) + assert inv_module.depthwise_conv.conv.padding == (0, 0) + assert isinstance(inv_module.depthwise_conv.bn, torch.nn.BatchNorm2d) + + assert inv_module.linear_conv.conv.kernel_size == (1, 1) + assert inv_module.linear_conv.conv.stride == (1, 1) + assert inv_module.linear_conv.conv.padding == (0, 0) + assert isinstance(inv_module.linear_conv.bn, torch.nn.BatchNorm2d) + + if (TORCH_VERSION == 'parrots' + or digit_version(TORCH_VERSION) < digit_version('1.7')): + # Note: Use PyTorch official HSwish + # when torch>=1.7 after MMCV >= 1.4.5. + # Hardswish is not supported when PyTorch version < 1.6. + # And Hardswish in PyTorch 1.6 does not support inplace. 
+ # More details could be found from: + # https://github.com/open-mmlab/mmcv/pull/1709 + assert isinstance(inv_module.expand_conv.activate, mmcv.cnn.HSwish) + assert isinstance(inv_module.depthwise_conv.activate, mmcv.cnn.HSwish) + else: + assert isinstance(inv_module.expand_conv.activate, torch.nn.Hardswish) + assert isinstance(inv_module.depthwise_conv.activate, + torch.nn.Hardswish) + + x = torch.rand(1, 32, 64, 64) + output = inv_module(x) + assert output.shape == (1, 40, 32, 32) + + # test with checkpoint forward + inv_module = InvertedResidualV3( + 32, 40, 16, 3, 2, se_cfg=se_cfg, act_cfg=act_cfg, with_cp=True) + assert inv_module.with_cp + x = torch.randn(2, 32, 64, 64, requires_grad=True) + output = inv_module(x) + assert output.shape == (2, 40, 32, 32) + + +def test_se_layer(): + with pytest.raises(AssertionError): + # test act_cfg assertion. + SELayer(32, act_cfg=(dict(type='ReLU'), )) + + # test config with channels = 16. + se_layer = SELayer(16) + assert se_layer.conv1.conv.kernel_size == (1, 1) + assert se_layer.conv1.conv.stride == (1, 1) + assert se_layer.conv1.conv.padding == (0, 0) + assert isinstance(se_layer.conv1.activate, torch.nn.ReLU) + assert se_layer.conv2.conv.kernel_size == (1, 1) + assert se_layer.conv2.conv.stride == (1, 1) + assert se_layer.conv2.conv.padding == (0, 0) + assert isinstance(se_layer.conv2.activate, mmcv.cnn.HSigmoid) + + x = torch.rand(1, 16, 64, 64) + output = se_layer(x) + assert output.shape == (1, 16, 64, 64) + + # test config with channels = 16, act_cfg = dict(type='ReLU'). + se_layer = SELayer(16, act_cfg=dict(type='ReLU')) + assert se_layer.conv1.conv.kernel_size == (1, 1) + assert se_layer.conv1.conv.stride == (1, 1) + assert se_layer.conv1.conv.padding == (0, 0) + assert isinstance(se_layer.conv1.activate, torch.nn.ReLU) + assert se_layer.conv2.conv.kernel_size == (1, 1) + assert se_layer.conv2.conv.stride == (1, 1) + assert se_layer.conv2.conv.padding == (0, 0) + assert isinstance(se_layer.conv2.activate, torch.nn.ReLU) + + x = torch.rand(1, 16, 64, 64) + output = se_layer(x) + assert output.shape == (1, 16, 64, 64) diff --git a/tests/test_models/test_backbones/test_cgnet.py b/tests/test_models/test_backbones/test_cgnet.py new file mode 100644 index 0000000000..f938525d0a --- /dev/null +++ b/tests/test_models/test_backbones/test_cgnet.py @@ -0,0 +1,151 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.backbones import CGNet +from mmseg.models.backbones.cgnet import (ContextGuidedBlock, + GlobalContextExtractor) + + +def test_cgnet_GlobalContextExtractor(): + block = GlobalContextExtractor(16, 16, with_cp=True) + x = torch.randn(2, 16, 64, 64, requires_grad=True) + x_out = block(x) + assert x_out.shape == torch.Size([2, 16, 64, 64]) + + +def test_cgnet_context_guided_block(): + with pytest.raises(AssertionError): + # cgnet ContextGuidedBlock GlobalContextExtractor channel and reduction + # constraints. 
+ ContextGuidedBlock(8, 8) + + # test cgnet ContextGuidedBlock with checkpoint forward + block = ContextGuidedBlock( + 16, 16, act_cfg=dict(type='PReLU'), with_cp=True) + assert block.with_cp + x = torch.randn(2, 16, 64, 64, requires_grad=True) + x_out = block(x) + assert x_out.shape == torch.Size([2, 16, 64, 64]) + + # test cgnet ContextGuidedBlock without checkpoint forward + block = ContextGuidedBlock(32, 32) + assert not block.with_cp + x = torch.randn(3, 32, 32, 32) + x_out = block(x) + assert x_out.shape == torch.Size([3, 32, 32, 32]) + + # test cgnet ContextGuidedBlock with down sampling + block = ContextGuidedBlock(32, 32, downsample=True) + assert block.conv1x1.conv.in_channels == 32 + assert block.conv1x1.conv.out_channels == 32 + assert block.conv1x1.conv.kernel_size == (3, 3) + assert block.conv1x1.conv.stride == (2, 2) + assert block.conv1x1.conv.padding == (1, 1) + + assert block.f_loc.in_channels == 32 + assert block.f_loc.out_channels == 32 + assert block.f_loc.kernel_size == (3, 3) + assert block.f_loc.stride == (1, 1) + assert block.f_loc.padding == (1, 1) + assert block.f_loc.groups == 32 + assert block.f_loc.dilation == (1, 1) + assert block.f_loc.bias is None + + assert block.f_sur.in_channels == 32 + assert block.f_sur.out_channels == 32 + assert block.f_sur.kernel_size == (3, 3) + assert block.f_sur.stride == (1, 1) + assert block.f_sur.padding == (2, 2) + assert block.f_sur.groups == 32 + assert block.f_sur.dilation == (2, 2) + assert block.f_sur.bias is None + + assert block.bottleneck.in_channels == 64 + assert block.bottleneck.out_channels == 32 + assert block.bottleneck.kernel_size == (1, 1) + assert block.bottleneck.stride == (1, 1) + assert block.bottleneck.bias is None + + x = torch.randn(1, 32, 32, 32) + x_out = block(x) + assert x_out.shape == torch.Size([1, 32, 16, 16]) + + # test cgnet ContextGuidedBlock without down sampling + block = ContextGuidedBlock(32, 32, downsample=False) + assert block.conv1x1.conv.in_channels == 32 + assert block.conv1x1.conv.out_channels == 16 + assert block.conv1x1.conv.kernel_size == (1, 1) + assert block.conv1x1.conv.stride == (1, 1) + assert block.conv1x1.conv.padding == (0, 0) + + assert block.f_loc.in_channels == 16 + assert block.f_loc.out_channels == 16 + assert block.f_loc.kernel_size == (3, 3) + assert block.f_loc.stride == (1, 1) + assert block.f_loc.padding == (1, 1) + assert block.f_loc.groups == 16 + assert block.f_loc.dilation == (1, 1) + assert block.f_loc.bias is None + + assert block.f_sur.in_channels == 16 + assert block.f_sur.out_channels == 16 + assert block.f_sur.kernel_size == (3, 3) + assert block.f_sur.stride == (1, 1) + assert block.f_sur.padding == (2, 2) + assert block.f_sur.groups == 16 + assert block.f_sur.dilation == (2, 2) + assert block.f_sur.bias is None + + x = torch.randn(1, 32, 32, 32) + x_out = block(x) + assert x_out.shape == torch.Size([1, 32, 32, 32]) + + +def test_cgnet_backbone(): + with pytest.raises(AssertionError): + # check invalid num_channels + CGNet(num_channels=(32, 64, 128, 256)) + + with pytest.raises(AssertionError): + # check invalid num_blocks + CGNet(num_blocks=(3, 21, 3)) + + with pytest.raises(AssertionError): + # check invalid dilation + CGNet(num_blocks=2) + + with pytest.raises(AssertionError): + # check invalid reduction + CGNet(reductions=16) + + with pytest.raises(AssertionError): + # check invalid num_channels and reduction + CGNet(num_channels=(32, 64, 128), reductions=(64, 129)) + + # Test CGNet with default settings + model = CGNet() + model.init_weights() + 
model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size([2, 35, 112, 112]) + assert feat[1].shape == torch.Size([2, 131, 56, 56]) + assert feat[2].shape == torch.Size([2, 256, 28, 28]) + + # Test CGNet with norm_eval True and with_cp True + model = CGNet(norm_eval=True, with_cp=True) + with pytest.raises(TypeError): + # check invalid pretrained + model.init_weights(pretrained=8) + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size([2, 35, 112, 112]) + assert feat[1].shape == torch.Size([2, 131, 56, 56]) + assert feat[2].shape == torch.Size([2, 256, 28, 28]) diff --git a/tests/test_models/test_backbones/test_erfnet.py b/tests/test_models/test_backbones/test_erfnet.py new file mode 100644 index 0000000000..6ae7345e15 --- /dev/null +++ b/tests/test_models/test_backbones/test_erfnet.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.backbones import ERFNet +from mmseg.models.backbones.erfnet import (DownsamplerBlock, NonBottleneck1d, + UpsamplerBlock) + + +def test_erfnet_backbone(): + # Test ERFNet Standard Forward. + model = ERFNet( + in_channels=3, + enc_downsample_channels=(16, 64, 128), + enc_stage_non_bottlenecks=(5, 8), + enc_non_bottleneck_dilations=(2, 4, 8, 16), + enc_non_bottleneck_channels=(64, 128), + dec_upsample_channels=(64, 16), + dec_stages_non_bottleneck=(2, 2), + dec_non_bottleneck_channels=(64, 16), + dropout_ratio=0.1, + ) + model.init_weights() + model.train() + batch_size = 2 + imgs = torch.randn(batch_size, 3, 256, 512) + output = model(imgs) + + # output for segment Head + assert output[0].shape == torch.Size([batch_size, 16, 128, 256]) + + # Test input with rare shape + batch_size = 2 + imgs = torch.randn(batch_size, 3, 527, 279) + output = model(imgs) + assert len(output[0]) == batch_size + + with pytest.raises(AssertionError): + # Number of encoder downsample block and decoder upsample block. + ERFNet( + in_channels=3, + enc_downsample_channels=(16, 64, 128), + enc_stage_non_bottlenecks=(5, 8), + enc_non_bottleneck_dilations=(2, 4, 8, 16), + enc_non_bottleneck_channels=(64, 128), + dec_upsample_channels=(128, 64, 16), + dec_stages_non_bottleneck=(2, 2), + dec_non_bottleneck_channels=(64, 16), + dropout_ratio=0.1, + ) + with pytest.raises(AssertionError): + # Number of encoder downsample block and encoder Non-bottleneck block. + ERFNet( + in_channels=3, + enc_downsample_channels=(16, 64, 128), + enc_stage_non_bottlenecks=(5, 8, 10), + enc_non_bottleneck_dilations=(2, 4, 8, 16), + enc_non_bottleneck_channels=(64, 128), + dec_upsample_channels=(64, 16), + dec_stages_non_bottleneck=(2, 2), + dec_non_bottleneck_channels=(64, 16), + dropout_ratio=0.1, + ) + with pytest.raises(AssertionError): + # Number of encoder downsample block and + # channels of encoder Non-bottleneck block. + ERFNet( + in_channels=3, + enc_downsample_channels=(16, 64, 128), + enc_stage_non_bottlenecks=(5, 8), + enc_non_bottleneck_dilations=(2, 4, 8, 16), + enc_non_bottleneck_channels=(64, 128, 256), + dec_upsample_channels=(64, 16), + dec_stages_non_bottleneck=(2, 2), + dec_non_bottleneck_channels=(64, 16), + dropout_ratio=0.1, + ) + + with pytest.raises(AssertionError): + # Number of encoder Non-bottleneck block and number of its channels. 
+ ERFNet( + in_channels=3, + enc_downsample_channels=(16, 64, 128), + enc_stage_non_bottlenecks=(5, 8, 3), + enc_non_bottleneck_dilations=(2, 4, 8, 16), + enc_non_bottleneck_channels=(64, 128), + dec_upsample_channels=(64, 16), + dec_stages_non_bottleneck=(2, 2), + dec_non_bottleneck_channels=(64, 16), + dropout_ratio=0.1, + ) + with pytest.raises(AssertionError): + # Number of decoder upsample block and decoder Non-bottleneck block. + ERFNet( + in_channels=3, + enc_downsample_channels=(16, 64, 128), + enc_stage_non_bottlenecks=(5, 8), + enc_non_bottleneck_dilations=(2, 4, 8, 16), + enc_non_bottleneck_channels=(64, 128), + dec_upsample_channels=(64, 16), + dec_stages_non_bottleneck=(2, 2, 3), + dec_non_bottleneck_channels=(64, 16), + dropout_ratio=0.1, + ) + with pytest.raises(AssertionError): + # Number of decoder Non-bottleneck block and number of its channels. + ERFNet( + in_channels=3, + enc_downsample_channels=(16, 64, 128), + enc_stage_non_bottlenecks=(5, 8), + enc_non_bottleneck_dilations=(2, 4, 8, 16), + enc_non_bottleneck_channels=(64, 128), + dec_upsample_channels=(64, 16), + dec_stages_non_bottleneck=(2, 2), + dec_non_bottleneck_channels=(64, 16, 8), + dropout_ratio=0.1, + ) + + +def test_erfnet_downsampler_block(): + x_db = DownsamplerBlock(16, 64) + assert x_db.conv.in_channels == 16 + assert x_db.conv.out_channels == 48 + assert len(x_db.bn.weight) == 64 + assert x_db.pool.kernel_size == 2 + assert x_db.pool.stride == 2 + + +def test_erfnet_non_bottleneck_1d(): + x_nb1d = NonBottleneck1d(16, 0, 1) + assert x_nb1d.convs_layers[0].in_channels == 16 + assert x_nb1d.convs_layers[0].out_channels == 16 + assert x_nb1d.convs_layers[2].in_channels == 16 + assert x_nb1d.convs_layers[2].out_channels == 16 + assert x_nb1d.convs_layers[5].in_channels == 16 + assert x_nb1d.convs_layers[5].out_channels == 16 + assert x_nb1d.convs_layers[7].in_channels == 16 + assert x_nb1d.convs_layers[7].out_channels == 16 + assert x_nb1d.convs_layers[9].p == 0 + + +def test_erfnet_upsampler_block(): + x_ub = UpsamplerBlock(64, 16) + assert x_ub.conv.in_channels == 64 + assert x_ub.conv.out_channels == 16 + assert len(x_ub.bn.weight) == 16 diff --git a/tests/test_models/test_backbones/test_fast_scnn.py b/tests/test_models/test_backbones/test_fast_scnn.py new file mode 100644 index 0000000000..7ee638b510 --- /dev/null +++ b/tests/test_models/test_backbones/test_fast_scnn.py @@ -0,0 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.backbones import FastSCNN + + +def test_fastscnn_backbone(): + with pytest.raises(AssertionError): + # Fast-SCNN channel constraints. 
+ FastSCNN( + 3, (32, 48), + 64, (64, 96, 128), (2, 2, 1), + global_out_channels=127, + higher_in_channels=64, + lower_in_channels=128) + + # Test FastSCNN Standard Forward + model = FastSCNN( + in_channels=3, + downsample_dw_channels=(4, 6), + global_in_channels=8, + global_block_channels=(8, 12, 16), + global_block_strides=(2, 2, 1), + global_out_channels=16, + higher_in_channels=8, + lower_in_channels=16, + fusion_out_channels=16, + ) + model.init_weights() + model.train() + batch_size = 4 + imgs = torch.randn(batch_size, 3, 64, 128) + feat = model(imgs) + + assert len(feat) == 3 + # higher-res + assert feat[0].shape == torch.Size([batch_size, 8, 8, 16]) + # lower-res + assert feat[1].shape == torch.Size([batch_size, 16, 2, 4]) + # FFM output + assert feat[2].shape == torch.Size([batch_size, 16, 8, 16]) diff --git a/tests/test_models/test_backbones/test_hrnet.py b/tests/test_models/test_backbones/test_hrnet.py new file mode 100644 index 0000000000..3e35515390 --- /dev/null +++ b/tests/test_models/test_backbones/test_hrnet.py @@ -0,0 +1,144 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmseg.models.backbones.hrnet import HRModule, HRNet +from mmseg.models.backbones.resnet import BasicBlock, Bottleneck + + +@pytest.mark.parametrize('block', [BasicBlock, Bottleneck]) +def test_hrmodule(block): + # Test multiscale forward + num_channles = (32, 64) + in_channels = [c * block.expansion for c in num_channles] + hrmodule = HRModule( + num_branches=2, + blocks=block, + in_channels=in_channels, + num_blocks=(4, 4), + num_channels=num_channles, + ) + + feats = [ + torch.randn(1, in_channels[0], 64, 64), + torch.randn(1, in_channels[1], 32, 32) + ] + feats = hrmodule(feats) + + assert len(feats) == 2 + assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64]) + assert feats[1].shape == torch.Size([1, in_channels[1], 32, 32]) + + # Test single scale forward + num_channles = (32, 64) + in_channels = [c * block.expansion for c in num_channles] + hrmodule = HRModule( + num_branches=2, + blocks=block, + in_channels=in_channels, + num_blocks=(4, 4), + num_channels=num_channles, + multiscale_output=False, + ) + + feats = [ + torch.randn(1, in_channels[0], 64, 64), + torch.randn(1, in_channels[1], 32, 32) + ] + feats = hrmodule(feats) + + assert len(feats) == 1 + assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64]) + + +def test_hrnet_backbone(): + # only have 3 stages + extra = dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128))) + + with pytest.raises(AssertionError): + # HRNet now only support 4 stages + HRNet(extra=extra) + extra['stage4'] = dict( + num_modules=3, + num_branches=3, # should be 4 + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256)) + + with pytest.raises(AssertionError): + # len(num_blocks) should equal num_branches + HRNet(extra=extra) + + extra['stage4']['num_branches'] = 4 + + # Test hrnetv2p_w32 + model = HRNet(extra=extra) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feats = model(imgs) + assert len(feats) == 4 + assert feats[0].shape == torch.Size([1, 32, 16, 16]) + assert feats[3].shape == 
torch.Size([1, 256, 2, 2]) + + # Test single scale output + model = HRNet(extra=extra, multiscale_output=False) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feats = model(imgs) + assert len(feats) == 1 + assert feats[0].shape == torch.Size([1, 32, 16, 16]) + + # Test HRNET with two stage frozen + frozen_stages = 2 + model = HRNet(extra, frozen_stages=frozen_stages) + model.init_weights() + model.train() + assert model.norm1.training is False + + for layer in [model.conv1, model.norm1]: + for param in layer.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + if i == 1: + layer = getattr(model, f'layer{i}') + transition = getattr(model, f'transition{i}') + elif i == 4: + layer = getattr(model, f'stage{i}') + else: + layer = getattr(model, f'stage{i}') + transition = getattr(model, f'transition{i}') + + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + for mod in transition.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in transition.parameters(): + assert param.requires_grad is False diff --git a/tests/test_models/test_backbones/test_icnet.py b/tests/test_models/test_backbones/test_icnet.py new file mode 100644 index 0000000000..a96d8d86fb --- /dev/null +++ b/tests/test_models/test_backbones/test_icnet.py @@ -0,0 +1,50 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.backbones import ICNet + + +def test_icnet_backbone(): + with pytest.raises(TypeError): + # Must give backbone dict in config file. + ICNet( + in_channels=3, + layer_channels=(128, 512), + light_branch_middle_channels=8, + psp_out_channels=128, + out_channels=(16, 128, 128), + backbone_cfg=None) + + # Test ICNet Standard Forward + model = ICNet( + layer_channels=(128, 512), + backbone_cfg=dict( + type='ResNetV1c', + in_channels=3, + depth=18, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + style='pytorch', + contract_dilation=True), + ) + assert hasattr(model.backbone, + 'maxpool') and model.backbone.maxpool.ceil_mode is True + model.init_weights() + model.train() + batch_size = 2 + imgs = torch.randn(batch_size, 3, 32, 64) + feat = model(imgs) + + assert model.psp_modules[0][0].output_size == 1 + assert model.psp_modules[1][0].output_size == 2 + assert model.psp_modules[2][0].output_size == 3 + assert model.psp_bottleneck.padding == 1 + assert model.conv_sub1[0].padding == 1 + + assert len(feat) == 3 + assert feat[0].shape == torch.Size([batch_size, 64, 4, 8]) diff --git a/tests/test_models/test_backbones/test_mae.py b/tests/test_models/test_backbones/test_mae.py new file mode 100644 index 0000000000..16f52b54b4 --- /dev/null +++ b/tests/test_models/test_backbones/test_mae.py @@ -0,0 +1,186 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.backbones.mae import MAE +from .utils import check_norm_state + + +def test_mae_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = MAE() + model.init_weights(pretrained=0) + + with pytest.raises(TypeError): + # img_size must be int or tuple + model = MAE(img_size=512.0) + + with pytest.raises(TypeError): + # out_indices must be int ,list or tuple + model = MAE(out_indices=1.) 
+ + with pytest.raises(AssertionError): + # The length of img_size tuple must be lower than 3. + MAE(img_size=(224, 224, 224)) + + with pytest.raises(TypeError): + # Pretrained must be None or Str. + MAE(pretrained=123) + + # Test img_size isinstance tuple + imgs = torch.randn(1, 3, 224, 224) + model = MAE(img_size=(224, )) + model.init_weights() + model(imgs) + + # Test img_size isinstance tuple + imgs = torch.randn(1, 3, 224, 224) + model = MAE(img_size=(224, 224)) + model(imgs) + + # Test norm_eval = True + model = MAE(norm_eval=True) + model.train() + + # Test BEiT backbone with input size of 224 and patch size of 16 + model = MAE() + model.init_weights() + model.train() + + # Test out_indices = list + model = MAE(out_indices=[2, 4, 8, 12]) + model.train() + + assert check_norm_state(model.modules(), True) + + # Test image size = (224, 224) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + # Test MAE backbone with input size of 256 and patch size of 16 + model = MAE(img_size=(256, 256)) + model.init_weights() + model.train() + imgs = torch.randn(1, 3, 256, 256) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 16, 16) + + # Test MAE backbone with input size of 32 and patch size of 16 + model = MAE(img_size=(32, 32)) + model.init_weights() + model.train() + imgs = torch.randn(1, 3, 32, 32) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 2, 2) + + # Test unbalanced size input image + model = MAE(img_size=(112, 224)) + model.init_weights() + model.train() + imgs = torch.randn(1, 3, 112, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 7, 14) + + # Test irregular input image + model = MAE(img_size=(234, 345)) + model.init_weights() + model.train() + imgs = torch.randn(1, 3, 234, 345) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 21) + + # Test init_values=0 + model = MAE(init_values=0) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + # Test final norm + model = MAE(final_norm=True) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + # Test patch norm + model = MAE(patch_norm=True) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + +def test_mae_init(): + path = 'PATH_THAT_DO_NOT_EXIST' + # Test all combinations of pretrained and init_cfg + # pretrained=None, init_cfg=None + model = MAE(pretrained=None, init_cfg=None) + assert model.init_cfg is None + model.init_weights() + + # pretrained=None + # init_cfg loads pretrain from an non-existent file + model = MAE( + pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path)) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # test resize_rel_pos_embed + value = torch.randn(732, 16) + abs_pos_embed_value = torch.rand(1, 17, 768) + ckpt = { + 'state_dict': { + 'layers.0.attn.relative_position_index': 0, + 'layers.0.attn.relative_position_bias_table': value, + 'pos_embed': abs_pos_embed_value + } + } + model = MAE(img_size=(512, 512)) + # If scipy is installed, this AttributeError would not be raised. 
+ from mmengine.utils import is_installed + if not is_installed('scipy'): + with pytest.raises(AttributeError): + model.resize_rel_pos_embed(ckpt) + + # test resize abs pos embed + ckpt = model.resize_abs_pos_embed(ckpt['state_dict']) + + # pretrained=None + # init_cfg=123, whose type is unsupported + model = MAE(pretrained=None, init_cfg=123) + with pytest.raises(TypeError): + model.init_weights() + + # pretrained loads pretrain from an non-existent file + # init_cfg=None + model = MAE(pretrained=path, init_cfg=None) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # pretrained loads pretrain from an non-existent file + # init_cfg loads pretrain from an non-existent file + with pytest.raises(AssertionError): + model = MAE( + pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path)) + with pytest.raises(AssertionError): + model = MAE(pretrained=path, init_cfg=123) + + # pretrain=123, whose type is unsupported + # init_cfg=None + with pytest.raises(TypeError): + model = MAE(pretrained=123, init_cfg=None) + + # pretrain=123, whose type is unsupported + # init_cfg loads pretrain from an non-existent file + with pytest.raises(AssertionError): + model = MAE( + pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path)) + + # pretrain=123, whose type is unsupported + # init_cfg=123, whose type is unsupported + with pytest.raises(AssertionError): + model = MAE(pretrained=123, init_cfg=123) diff --git a/tests/test_models/test_backbones/test_mit.py b/tests/test_models/test_backbones/test_mit.py new file mode 100644 index 0000000000..72f74fe209 --- /dev/null +++ b/tests/test_models/test_backbones/test_mit.py @@ -0,0 +1,122 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.backbones import MixVisionTransformer +from mmseg.models.backbones.mit import (EfficientMultiheadAttention, MixFFN, + TransformerEncoderLayer) + + +def test_mit(): + with pytest.raises(TypeError): + # Pretrained represents pretrain url and must be str or None. 
+ MixVisionTransformer(pretrained=123) + + # Test normal input + H, W = (224, 224) + temp = torch.randn((1, 3, H, W)) + model = MixVisionTransformer( + embed_dims=32, num_heads=[1, 2, 5, 8], out_indices=(0, 1, 2, 3)) + model.init_weights() + outs = model(temp) + assert outs[0].shape == (1, 32, H // 4, W // 4) + assert outs[1].shape == (1, 64, H // 8, W // 8) + assert outs[2].shape == (1, 160, H // 16, W // 16) + assert outs[3].shape == (1, 256, H // 32, W // 32) + + # Test non-squared input + H, W = (224, 256) + temp = torch.randn((1, 3, H, W)) + outs = model(temp) + assert outs[0].shape == (1, 32, H // 4, W // 4) + assert outs[1].shape == (1, 64, H // 8, W // 8) + assert outs[2].shape == (1, 160, H // 16, W // 16) + assert outs[3].shape == (1, 256, H // 32, W // 32) + + # Test MixFFN + FFN = MixFFN(64, 128) + hw_shape = (32, 32) + token_len = 32 * 32 + temp = torch.randn((1, token_len, 64)) + # Self identity + out = FFN(temp, hw_shape) + assert out.shape == (1, token_len, 64) + # Out identity + outs = FFN(temp, hw_shape, temp) + assert out.shape == (1, token_len, 64) + + # Test EfficientMHA + MHA = EfficientMultiheadAttention(64, 2) + hw_shape = (32, 32) + token_len = 32 * 32 + temp = torch.randn((1, token_len, 64)) + # Self identity + out = MHA(temp, hw_shape) + assert out.shape == (1, token_len, 64) + # Out identity + outs = MHA(temp, hw_shape, temp) + assert out.shape == (1, token_len, 64) + + # Test TransformerEncoderLayer with checkpoint forward + block = TransformerEncoderLayer( + embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True) + assert block.with_cp + x = torch.randn(1, 56 * 56, 64) + x_out = block(x, (56, 56)) + assert x_out.shape == torch.Size([1, 56 * 56, 64]) + + +def test_mit_init(): + path = 'PATH_THAT_DO_NOT_EXIST' + # Test all combinations of pretrained and init_cfg + # pretrained=None, init_cfg=None + model = MixVisionTransformer(pretrained=None, init_cfg=None) + assert model.init_cfg is None + model.init_weights() + + # pretrained=None + # init_cfg loads pretrain from an non-existent file + model = MixVisionTransformer( + pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path)) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # pretrained=None + # init_cfg=123, whose type is unsupported + model = MixVisionTransformer(pretrained=None, init_cfg=123) + with pytest.raises(TypeError): + model.init_weights() + + # pretrained loads pretrain from an non-existent file + # init_cfg=None + model = MixVisionTransformer(pretrained=path, init_cfg=None) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # pretrained loads pretrain from an non-existent file + # init_cfg loads pretrain from an non-existent file + with pytest.raises(AssertionError): + MixVisionTransformer( + pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path)) + with pytest.raises(AssertionError): + MixVisionTransformer(pretrained=path, init_cfg=123) + + # pretrain=123, whose type is unsupported + # init_cfg=None + with pytest.raises(TypeError): + MixVisionTransformer(pretrained=123, init_cfg=None) + + # pretrain=123, whose type is unsupported + # init_cfg loads pretrain from an non-existent file + with pytest.raises(AssertionError): + MixVisionTransformer( + pretrained=123, init_cfg=dict(type='Pretrained', 
checkpoint=path)) + + # pretrain=123, whose type is unsupported + # init_cfg=123, whose type is unsupported + with pytest.raises(AssertionError): + MixVisionTransformer(pretrained=123, init_cfg=123) diff --git a/tests/test_models/test_backbones/test_mobilenet_v3.py b/tests/test_models/test_backbones/test_mobilenet_v3.py new file mode 100644 index 0000000000..769ee14bc2 --- /dev/null +++ b/tests/test_models/test_backbones/test_mobilenet_v3.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.backbones import MobileNetV3 + + +def test_mobilenet_v3(): + with pytest.raises(AssertionError): + # check invalid arch + MobileNetV3('big') + + with pytest.raises(AssertionError): + # check invalid reduction_factor + MobileNetV3(reduction_factor=0) + + with pytest.raises(ValueError): + # check invalid out_indices + MobileNetV3(out_indices=(0, 1, 15)) + + with pytest.raises(ValueError): + # check invalid frozen_stages + MobileNetV3(frozen_stages=15) + + with pytest.raises(TypeError): + # check invalid pretrained + model = MobileNetV3() + model.init_weights(pretrained=8) + + # Test MobileNetV3 with default settings + model = MobileNetV3() + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 56, 56) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == (2, 16, 28, 28) + assert feat[1].shape == (2, 16, 14, 14) + assert feat[2].shape == (2, 576, 7, 7) + + # Test MobileNetV3 with arch = 'large' + model = MobileNetV3(arch='large', out_indices=(1, 3, 16)) + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 56, 56) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == (2, 16, 28, 28) + assert feat[1].shape == (2, 24, 14, 14) + assert feat[2].shape == (2, 960, 7, 7) + + # Test MobileNetV3 with norm_eval True, with_cp True and frozen_stages=5 + model = MobileNetV3(norm_eval=True, with_cp=True, frozen_stages=5) + with pytest.raises(TypeError): + # check invalid pretrained + model.init_weights(pretrained=8) + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 56, 56) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == (2, 16, 28, 28) + assert feat[1].shape == (2, 16, 14, 14) + assert feat[2].shape == (2, 576, 7, 7) diff --git a/tests/test_models/test_backbones/test_resnest.py b/tests/test_models/test_backbones/test_resnest.py new file mode 100644 index 0000000000..3013f34fcc --- /dev/null +++ b/tests/test_models/test_backbones/test_resnest.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
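For the ResNeSt tests that follow: the backbone takes the split-attention arguments radix and reduction_factor on top of the usual ResNet options, and with depth=50 it yields the same feature pyramid shapes as ResNet-50. A minimal sketch consistent with the assertions below:

    import torch
    from mmseg.models.backbones import ResNeSt

    model = ResNeSt(
        depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3))
    feats = model(torch.randn(2, 3, 224, 224))
    # 256/512/1024/2048 channels at strides 4, 8, 16 and 32
    assert [f.shape[1] for f in feats] == [256, 512, 1024, 2048]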
+import pytest +import torch + +from mmseg.models.backbones import ResNeSt +from mmseg.models.backbones.resnest import Bottleneck as BottleneckS + + +def test_resnest_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow') + + # Test ResNeSt Bottleneck structure + block = BottleneckS( + 64, 256, radix=2, reduction_factor=4, stride=2, style='pytorch') + assert block.avd_layer.stride == 2 + assert block.conv2.channels == 256 + + # Test ResNeSt Bottleneck forward + block = BottleneckS(64, 16, radix=2, reduction_factor=4) + x = torch.randn(2, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([2, 64, 56, 56]) + + +def test_resnest_backbone(): + with pytest.raises(KeyError): + # ResNeSt depth should be in [50, 101, 152, 200] + ResNeSt(depth=18) + + # Test ResNeSt with radix 2, reduction_factor 4 + model = ResNeSt( + depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([2, 256, 56, 56]) + assert feat[1].shape == torch.Size([2, 512, 28, 28]) + assert feat[2].shape == torch.Size([2, 1024, 14, 14]) + assert feat[3].shape == torch.Size([2, 2048, 7, 7]) diff --git a/tests/test_models/test_backbones/test_resnet.py b/tests/test_models/test_backbones/test_resnet.py new file mode 100644 index 0000000000..f2f24ba568 --- /dev/null +++ b/tests/test_models/test_backbones/test_resnet.py @@ -0,0 +1,575 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from mmcv.ops import DeformConv2dPack +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm +from torch.nn.modules import AvgPool2d, GroupNorm + +from mmseg.models.backbones import ResNet, ResNetV1d +from mmseg.models.backbones.resnet import BasicBlock, Bottleneck +from mmseg.models.utils import ResLayer +from .utils import all_zeros, check_norm_state, is_block, is_norm + + +def test_resnet_basic_block(): + with pytest.raises(AssertionError): + # Not implemented yet. + dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) + BasicBlock(64, 64, dcn=dcn) + + with pytest.raises(AssertionError): + # Not implemented yet. + plugins = [ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 16), + position='after_conv3') + ] + BasicBlock(64, 64, plugins=plugins) + + with pytest.raises(AssertionError): + # Not implemented yet + plugins = [ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + kv_stride=2), + position='after_conv2') + ] + BasicBlock(64, 64, plugins=plugins) + + # Test BasicBlock with checkpoint forward + block = BasicBlock(16, 16, with_cp=True) + assert block.with_cp + x = torch.randn(1, 16, 28, 28) + x_out = block(x) + assert x_out.shape == torch.Size([1, 16, 28, 28]) + + # test BasicBlock structure and forward + block = BasicBlock(32, 32) + assert block.conv1.in_channels == 32 + assert block.conv1.out_channels == 32 + assert block.conv1.kernel_size == (3, 3) + assert block.conv2.in_channels == 32 + assert block.conv2.out_channels == 32 + assert block.conv2.kernel_size == (3, 3) + x = torch.randn(1, 32, 28, 28) + x_out = block(x) + assert x_out.shape == torch.Size([1, 32, 28, 28]) + + +def test_resnet_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + Bottleneck(64, 64, style='tensorflow') + + with pytest.raises(AssertionError): + # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3' + plugins = [ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + position='after_conv4') + ] + Bottleneck(64, 16, plugins=plugins) + + with pytest.raises(AssertionError): + # Need to specify different postfix to avoid duplicate plugin name + plugins = [ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + position='after_conv3'), + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + position='after_conv3') + ] + Bottleneck(64, 16, plugins=plugins) + + with pytest.raises(KeyError): + # Plugin type is not supported + plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')] + Bottleneck(64, 16, plugins=plugins) + + # Test Bottleneck with checkpoint forward + block = Bottleneck(64, 16, with_cp=True) + assert block.with_cp + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test Bottleneck style + block = Bottleneck(64, 64, stride=2, style='pytorch') + assert block.conv1.stride == (1, 1) + assert block.conv2.stride == (2, 2) + block = Bottleneck(64, 64, stride=2, style='caffe') + assert block.conv1.stride == (2, 2) + assert block.conv2.stride == (1, 1) + + # Test Bottleneck DCN + dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) + with pytest.raises(AssertionError): + Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv')) + block = Bottleneck(64, 64, dcn=dcn) + assert isinstance(block.conv2, DeformConv2dPack) + + # Test Bottleneck forward + block = Bottleneck(64, 16) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test Bottleneck with 1 ContextBlock after conv3 + plugins = [ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 16), + position='after_conv3') + ] + block = Bottleneck(64, 16, plugins=plugins) + assert block.context_block.in_channels == 64 + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test Bottleneck with 1 GeneralizedAttention after conv2 + plugins = [ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + kv_stride=2), + position='after_conv2') + ] + block = Bottleneck(64, 16, plugins=plugins) + assert block.gen_attention_block.in_channels == 16 + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2d + # after conv2, 1 ContextBlock after conv3 + plugins = [ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + kv_stride=2), + position='after_conv2'), + dict(cfg=dict(type='NonLocal2d'), position='after_conv2'), + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + position='after_conv3') + ] + block = Bottleneck(64, 16, plugins=plugins) + assert block.gen_attention_block.in_channels == 16 + assert block.nonlocal_block.in_channels == 16 + assert block.context_block.in_channels == 64 + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after + # conv3 + plugins = [ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1), + position='after_conv2'), + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2), + position='after_conv3'), + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3), + position='after_conv3') + ] + block = Bottleneck(64, 16, plugins=plugins) + assert block.context_block1.in_channels == 16 + assert block.context_block2.in_channels == 64 + assert block.context_block3.in_channels == 64 + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_resnet_res_layer(): + # Test ResLayer of 3 Bottleneck w\o downsample + layer = ResLayer(Bottleneck, 64, 16, 3) + assert len(layer) == 3 + assert layer[0].conv1.in_channels == 64 + assert layer[0].conv1.out_channels == 16 + for i in range(1, len(layer)): + assert layer[i].conv1.in_channels == 64 + assert layer[i].conv1.out_channels == 16 + for i in range(len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test ResLayer of 3 Bottleneck with downsample + layer = ResLayer(Bottleneck, 64, 64, 3) + assert layer[0].downsample[0].out_channels == 256 + for i in range(1, len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 256, 56, 56]) + + # Test ResLayer of 3 Bottleneck with stride=2 + layer = ResLayer(Bottleneck, 64, 64, 3, stride=2) + assert layer[0].downsample[0].out_channels == 256 + assert layer[0].downsample[0].stride == (2, 2) + for i in range(1, len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 256, 28, 28]) + + # Test ResLayer of 3 Bottleneck with stride=2 and average downsample + layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True) + assert isinstance(layer[0].downsample[0], AvgPool2d) + assert layer[0].downsample[1].out_channels == 256 + 
assert layer[0].downsample[1].stride == (1, 1) + for i in range(1, len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 256, 28, 28]) + + # Test ResLayer of 3 Bottleneck with dilation=2 + layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2) + for i in range(len(layer)): + assert layer[i].conv2.dilation == (2, 2) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test ResLayer of 3 Bottleneck with dilation=2, contract_dilation=True + layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2, contract_dilation=True) + assert layer[0].conv2.dilation == (1, 1) + for i in range(1, len(layer)): + assert layer[i].conv2.dilation == (2, 2) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test ResLayer of 3 Bottleneck with dilation=2, multi_grid + layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2, multi_grid=(1, 2, 4)) + assert layer[0].conv2.dilation == (1, 1) + assert layer[1].conv2.dilation == (2, 2) + assert layer[2].conv2.dilation == (4, 4) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_resnet_backbone(): + """Test resnet backbone.""" + with pytest.raises(KeyError): + # ResNet depth should be in [18, 34, 50, 101, 152] + ResNet(20) + + with pytest.raises(AssertionError): + # In ResNet: 1 <= num_stages <= 4 + ResNet(50, num_stages=0) + + with pytest.raises(AssertionError): + # len(stage_with_dcn) == num_stages + dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) + ResNet(50, dcn=dcn, stage_with_dcn=(True, )) + + with pytest.raises(AssertionError): + # len(stage_with_plugin) == num_stages + plugins = [ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 16), + stages=(False, True, True), + position='after_conv3') + ] + ResNet(50, plugins=plugins) + + with pytest.raises(AssertionError): + # In ResNet: 1 <= num_stages <= 4 + ResNet(18, num_stages=5) + + with pytest.raises(AssertionError): + # len(strides) == len(dilations) == num_stages + ResNet(18, strides=(1, ), dilations=(1, 1), num_stages=3) + + with pytest.raises(TypeError): + # pretrained must be a string path + model = ResNet(18, pretrained=0) + model.init_weights() + + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + ResNet(50, style='tensorflow') + + # Test ResNet18 norm_eval=True + model = ResNet(18, norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test ResNet18 with torchvision pretrained weight + model = ResNet( + depth=18, norm_eval=True, pretrained='torchvision://resnet18') + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test ResNet18 with first stage frozen + frozen_stages = 1 + model = ResNet(18, frozen_stages=frozen_stages) + model.init_weights() + model.train() + assert model.norm1.training is False + for layer in [model.conv1, model.norm1]: + for param in layer.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test ResNet18V1d with first stage frozen + model = ResNetV1d(depth=18, frozen_stages=frozen_stages) + assert len(model.stem) == 9 + model.init_weights() + model.train() + check_norm_state(model.stem, False) + for param in model.stem.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test ResNet18 forward + model = ResNet(18) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 64, 56, 56]) + assert feat[1].shape == torch.Size([1, 128, 28, 28]) + assert feat[2].shape == torch.Size([1, 256, 14, 14]) + assert feat[3].shape == torch.Size([1, 512, 7, 7]) + + # Test ResNet18 with BatchNorm forward + model = ResNet(18) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 64, 56, 56]) + assert feat[1].shape == torch.Size([1, 128, 28, 28]) + assert feat[2].shape == torch.Size([1, 256, 14, 14]) + assert feat[3].shape == torch.Size([1, 512, 7, 7]) + + # Test ResNet18 with layers 1, 2, 3 out forward + model = ResNet(18, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 112, 112) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size([1, 64, 28, 28]) + assert feat[1].shape == torch.Size([1, 128, 14, 14]) + assert feat[2].shape == torch.Size([1, 256, 7, 7]) + + # Test ResNet18 with checkpoint forward + model = ResNet(18, with_cp=True) + for m in model.modules(): + if is_block(m): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + 
feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 64, 56, 56]) + assert feat[1].shape == torch.Size([1, 128, 28, 28]) + assert feat[2].shape == torch.Size([1, 256, 14, 14]) + assert feat[3].shape == torch.Size([1, 512, 7, 7]) + + # Test ResNet18 with checkpoint forward + model = ResNet(18, with_cp=True) + for m in model.modules(): + if is_block(m): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 64, 56, 56]) + assert feat[1].shape == torch.Size([1, 128, 28, 28]) + assert feat[2].shape == torch.Size([1, 256, 14, 14]) + assert feat[3].shape == torch.Size([1, 512, 7, 7]) + + # Test ResNet18 with GroupNorm forward + model = ResNet( + 18, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 64, 56, 56]) + assert feat[1].shape == torch.Size([1, 128, 28, 28]) + assert feat[2].shape == torch.Size([1, 256, 14, 14]) + assert feat[3].shape == torch.Size([1, 512, 7, 7]) + + # Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2d + # after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4 + plugins = [ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + kv_stride=2), + stages=(False, True, True, True), + position='after_conv2'), + dict(cfg=dict(type='NonLocal2d'), position='after_conv2'), + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, False), + position='after_conv3') + ] + model = ResNet(50, plugins=plugins) + for m in model.layer1.modules(): + if is_block(m): + assert not hasattr(m, 'context_block') + assert not hasattr(m, 'gen_attention_block') + assert m.nonlocal_block.in_channels == 64 + for m in model.layer2.modules(): + if is_block(m): + assert m.nonlocal_block.in_channels == 128 + assert m.gen_attention_block.in_channels == 128 + assert m.context_block.in_channels == 512 + + for m in model.layer3.modules(): + if is_block(m): + assert m.nonlocal_block.in_channels == 256 + assert m.gen_attention_block.in_channels == 256 + assert m.context_block.in_channels == 1024 + + for m in model.layer4.modules(): + if is_block(m): + assert m.nonlocal_block.in_channels == 512 + assert m.gen_attention_block.in_channels == 512 + assert not hasattr(m, 'context_block') + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test ResNet50 with 1 ContextBlock after conv2, 1 ContextBlock after + # conv3 in layers 2, 3, 4 + plugins = [ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1), + stages=(False, True, True, False), + position='after_conv3'), + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 16, postfix=2), + stages=(False, True, True, False), + position='after_conv3') + ] + + model = ResNet(50, plugins=plugins) + for m in model.layer1.modules(): + if is_block(m): + assert not hasattr(m, 'context_block') + assert not hasattr(m, 'context_block1') + assert not hasattr(m, 'context_block2') + for m in model.layer2.modules(): + if is_block(m): + assert not hasattr(m, 'context_block') + assert m.context_block1.in_channels == 512 + assert m.context_block2.in_channels == 512 + + for m in model.layer3.modules(): + if is_block(m): + assert not hasattr(m, 'context_block') + assert m.context_block1.in_channels == 1024 + assert m.context_block2.in_channels == 1024 + + for m in model.layer4.modules(): + if is_block(m): + assert not hasattr(m, 'context_block') + assert not hasattr(m, 'context_block1') + assert not hasattr(m, 'context_block2') + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test ResNet18 zero initialization of residual + model = ResNet(18, zero_init_residual=True) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + assert all_zeros(m.norm3) + elif isinstance(m, BasicBlock): + assert all_zeros(m.norm2) + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 64, 56, 56]) + assert feat[1].shape == torch.Size([1, 128, 28, 28]) + assert feat[2].shape == torch.Size([1, 256, 14, 14]) + assert feat[3].shape == torch.Size([1, 512, 7, 7]) + + # Test ResNetV1d forward + model = ResNetV1d(depth=18) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 64, 56, 56]) + assert feat[1].shape == torch.Size([1, 128, 28, 28]) + assert feat[2].shape == torch.Size([1, 256, 14, 14]) + assert feat[3].shape == torch.Size([1, 512, 7, 7]) diff --git a/tests/test_models/test_backbones/test_resnext.py b/tests/test_models/test_backbones/test_resnext.py new file mode 100644 index 0000000000..2aecaf0d3d --- /dev/null +++ b/tests/test_models/test_backbones/test_resnext.py @@ -0,0 +1,62 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
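+# Note: assuming the mmcv-style ResNeXt bottleneck, the grouped conv2 width is +# floor(planes * base_width / 64) * groups; with planes=64, base_width=4, +# groups=32 this gives 4 * 32 = 128 channels, matching the structure test below.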
+import pytest +import torch + +from mmseg.models.backbones import ResNeXt +from mmseg.models.backbones.resnext import Bottleneck as BottleneckX +from .utils import is_block + + +def test_resnext_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow') + + # Test ResNeXt Bottleneck structure + block = BottleneckX( + 64, 64, groups=32, base_width=4, stride=2, style='pytorch') + assert block.conv2.stride == (2, 2) + assert block.conv2.groups == 32 + assert block.conv2.out_channels == 128 + + # Test ResNeXt Bottleneck with DCN + dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) + with pytest.raises(AssertionError): + # conv_cfg must be None if dcn is not None + BottleneckX( + 64, + 64, + groups=32, + base_width=4, + dcn=dcn, + conv_cfg=dict(type='Conv')) + BottleneckX(64, 64, dcn=dcn) + + # Test ResNeXt Bottleneck forward + block = BottleneckX(64, 16, groups=32, base_width=4) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_resnext_backbone(): + with pytest.raises(KeyError): + # ResNeXt depth should be in [50, 101, 152] + ResNeXt(depth=18) + + # Test ResNeXt with groups=32, base_width=4 + model = ResNeXt(depth=50, groups=32, base_width=4) + for m in model.modules(): + if is_block(m): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) diff --git a/tests/test_models/test_backbones/test_stdc.py b/tests/test_models/test_backbones/test_stdc.py new file mode 100644 index 0000000000..1e3862b0b3 --- /dev/null +++ b/tests/test_models/test_backbones/test_stdc.py @@ -0,0 +1,131 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
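+# The forward tests below check that, for an H x W input, STDCContextPathNet +# returns four feature maps at 1/8, 1/16, 1/8 and 1/8 of the input resolution +# (the segment-head output followed by three auxiliary-head outputs).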
+import pytest +import torch + +from mmseg.models.backbones import STDCContextPathNet +from mmseg.models.backbones.stdc import (AttentionRefinementModule, + FeatureFusionModule, STDCModule, + STDCNet) + + +def test_stdc_context_path_net(): + # Test STDCContextPathNet Standard Forward + model = STDCContextPathNet( + backbone_cfg=dict( + type='STDCNet', + stdc_type='STDCNet1', + in_channels=3, + channels=(32, 64, 256, 512, 1024), + bottleneck_type='cat', + num_convs=4, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU'), + with_final_conv=True), + last_in_channels=(1024, 512), + out_channels=128, + ffm_cfg=dict(in_channels=384, out_channels=256, scale_factor=4)) + model.init_weights() + model.train() + batch_size = 2 + imgs = torch.randn(batch_size, 3, 256, 512) + feat = model(imgs) + + assert len(feat) == 4 + # output for segment Head + assert feat[0].shape == torch.Size([batch_size, 256, 32, 64]) + # for auxiliary head 1 + assert feat[1].shape == torch.Size([batch_size, 128, 16, 32]) + # for auxiliary head 2 + assert feat[2].shape == torch.Size([batch_size, 128, 32, 64]) + # for auxiliary head 3 + assert feat[3].shape == torch.Size([batch_size, 256, 32, 64]) + + # Test input with rare shape + batch_size = 2 + imgs = torch.randn(batch_size, 3, 527, 279) + model = STDCContextPathNet( + backbone_cfg=dict( + type='STDCNet', + stdc_type='STDCNet1', + in_channels=3, + channels=(32, 64, 256, 512, 1024), + bottleneck_type='add', + num_convs=4, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU'), + with_final_conv=False), + last_in_channels=(1024, 512), + out_channels=128, + ffm_cfg=dict(in_channels=384, out_channels=256, scale_factor=4)) + model.init_weights() + model.train() + feat = model(imgs) + assert len(feat) == 4 + + +def test_stdcnet(): + with pytest.raises(AssertionError): + # STDC backbone constraints. + STDCNet( + stdc_type='STDCNet3', + in_channels=3, + channels=(32, 64, 256, 512, 1024), + bottleneck_type='cat', + num_convs=4, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU'), + with_final_conv=False) + + with pytest.raises(AssertionError): + # STDC bottleneck type constraints. + STDCNet( + stdc_type='STDCNet1', + in_channels=3, + channels=(32, 64, 256, 512, 1024), + bottleneck_type='dog', + num_convs=4, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU'), + with_final_conv=False) + + with pytest.raises(AssertionError): + # STDC channels length constraints. 
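+ # (The channels tuple below has 6 entries; STDCNet presumably accepts exactly 5, so this configuration is expected to raise.)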
+ STDCNet( + stdc_type='STDCNet1', + in_channels=3, + channels=(16, 32, 64, 256, 512, 1024), + bottleneck_type='cat', + num_convs=4, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU'), + with_final_conv=False) + + +def test_feature_fusion_module(): + x_ffm = FeatureFusionModule(in_channels=64, out_channels=32) + assert x_ffm.conv0.in_channels == 64 + assert x_ffm.attention[1].in_channels == 32 + assert x_ffm.attention[2].in_channels == 8 + assert x_ffm.attention[2].out_channels == 32 + x1 = torch.randn(2, 32, 32, 64) + x2 = torch.randn(2, 32, 32, 64) + x_out = x_ffm(x1, x2) + assert x_out.shape == torch.Size([2, 32, 32, 64]) + + +def test_attention_refinement_module(): + x_arm = AttentionRefinementModule(128, 32) + assert x_arm.conv_layer.in_channels == 128 + assert x_arm.atten_conv_layer[1].conv.out_channels == 32 + x = torch.randn(2, 128, 32, 64) + x_out = x_arm(x) + assert x_out.shape == torch.Size([2, 32, 32, 64]) + + +def test_stdc_module(): + x_stdc = STDCModule(in_channels=32, out_channels=32, stride=4) + assert x_stdc.layers[0].conv.in_channels == 32 + assert x_stdc.layers[3].conv.out_channels == 4 + x = torch.randn(2, 32, 32, 64) + x_out = x_stdc(x) + assert x_out.shape == torch.Size([2, 32, 32, 64]) diff --git a/tests/test_models/test_backbones/test_swin.py b/tests/test_models/test_backbones/test_swin.py new file mode 100644 index 0000000000..8d14d47ab5 --- /dev/null +++ b/tests/test_models/test_backbones/test_swin.py @@ -0,0 +1,100 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.backbones.swin import SwinBlock, SwinTransformer + + +def test_swin_block(): + # test SwinBlock structure and forward + block = SwinBlock(embed_dims=32, num_heads=4, feedforward_channels=128) + assert block.ffn.embed_dims == 32 + assert block.attn.w_msa.num_heads == 4 + assert block.ffn.feedforward_channels == 128 + x = torch.randn(1, 56 * 56, 32) + x_out = block(x, (56, 56)) + assert x_out.shape == torch.Size([1, 56 * 56, 32]) + + # Test SwinBlock with checkpoint forward + block = SwinBlock( + embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True) + assert block.with_cp + x = torch.randn(1, 56 * 56, 64) + x_out = block(x, (56, 56)) + assert x_out.shape == torch.Size([1, 56 * 56, 64]) + + +def test_swin_transformer(): + """Test Swin Transformer backbone.""" + + with pytest.raises(TypeError): + # Pretrained arg must be str or None. + SwinTransformer(pretrained=123) + + with pytest.raises(AssertionError): + # Swin uses non-overlapping patch embedding, so the stride of the patch + # embedding must be equal to the patch size. 
+ SwinTransformer(strides=(2, 2, 2, 2), patch_size=4) + + # test pretrained image size + with pytest.raises(AssertionError): + SwinTransformer(pretrain_img_size=(112, 112, 112)) + + # Test absolute position embedding + temp = torch.randn((1, 3, 112, 112)) + model = SwinTransformer(pretrain_img_size=112, use_abs_pos_embed=True) + model.init_weights() + model(temp) + + # Test patch norm + model = SwinTransformer(patch_norm=False) + model(temp) + + # Test normal inference + temp = torch.randn((1, 3, 256, 256)) + model = SwinTransformer() + outs = model(temp) + assert outs[0].shape == (1, 96, 64, 64) + assert outs[1].shape == (1, 192, 32, 32) + assert outs[2].shape == (1, 384, 16, 16) + assert outs[3].shape == (1, 768, 8, 8) + + # Test abnormal inference size + temp = torch.randn((1, 3, 255, 255)) + model = SwinTransformer() + outs = model(temp) + assert outs[0].shape == (1, 96, 64, 64) + assert outs[1].shape == (1, 192, 32, 32) + assert outs[2].shape == (1, 384, 16, 16) + assert outs[3].shape == (1, 768, 8, 8) + + # Test abnormal inference size + temp = torch.randn((1, 3, 112, 137)) + model = SwinTransformer() + outs = model(temp) + assert outs[0].shape == (1, 96, 28, 35) + assert outs[1].shape == (1, 192, 14, 18) + assert outs[2].shape == (1, 384, 7, 9) + assert outs[3].shape == (1, 768, 4, 5) + + # Test frozen + model = SwinTransformer(frozen_stages=4) + model.train() + for p in model.parameters(): + assert not p.requires_grad + + # Test absolute position embedding frozen + model = SwinTransformer(frozen_stages=4, use_abs_pos_embed=True) + model.train() + for p in model.parameters(): + assert not p.requires_grad + + # Test Swin with checkpoint forward + temp = torch.randn((1, 3, 56, 56)) + model = SwinTransformer(with_cp=True) + for m in model.modules(): + if isinstance(m, SwinBlock): + assert m.with_cp + model.init_weights() + model.train() + model(temp) diff --git a/tests/test_models/test_backbones/test_timm_backbone.py b/tests/test_models/test_backbones/test_timm_backbone.py new file mode 100644 index 0000000000..85ef9aa56f --- /dev/null +++ b/tests/test_models/test_backbones/test_timm_backbone.py @@ -0,0 +1,133 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
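+# The output_stride cases below use timm's features_only mode: with a +# 224 x 224 input the deepest feature is 7 x 7 at output_stride=32, and stays +# at 14 x 14 or 28 x 28 once the stride is capped at 16 or 8.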
+import pytest +import torch + +from mmseg.models.backbones import TIMMBackbone +from .utils import check_norm_state + + +def test_timm_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = TIMMBackbone() + model.init_weights(pretrained=0) + + # Test different norm_layer, can be: 'SyncBN', 'BN2d', 'GN', 'LN', 'IN' + # Test resnet18 from timm, norm_layer='BN2d' + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=32, + norm_layer='BN2d') + + # Test resnet18 from timm, norm_layer='SyncBN' + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=32, + norm_layer='SyncBN') + + # Test resnet18 from timm, features_only=True, output_stride=32 + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=32) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(1, 3, 224, 224) + feats = model(imgs) + feats = [feat.shape for feat in feats] + assert len(feats) == 5 + assert feats[0] == torch.Size((1, 64, 112, 112)) + assert feats[1] == torch.Size((1, 64, 56, 56)) + assert feats[2] == torch.Size((1, 128, 28, 28)) + assert feats[3] == torch.Size((1, 256, 14, 14)) + assert feats[4] == torch.Size((1, 512, 7, 7)) + + # Test resnet18 from timm, features_only=True, output_stride=16 + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=16) + imgs = torch.randn(1, 3, 224, 224) + feats = model(imgs) + feats = [feat.shape for feat in feats] + assert len(feats) == 5 + assert feats[0] == torch.Size((1, 64, 112, 112)) + assert feats[1] == torch.Size((1, 64, 56, 56)) + assert feats[2] == torch.Size((1, 128, 28, 28)) + assert feats[3] == torch.Size((1, 256, 14, 14)) + assert feats[4] == torch.Size((1, 512, 14, 14)) + + # Test resnet18 from timm, features_only=True, output_stride=8 + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=8) + imgs = torch.randn(1, 3, 224, 224) + feats = model(imgs) + feats = [feat.shape for feat in feats] + assert len(feats) == 5 + assert feats[0] == torch.Size((1, 64, 112, 112)) + assert feats[1] == torch.Size((1, 64, 56, 56)) + assert feats[2] == torch.Size((1, 128, 28, 28)) + assert feats[3] == torch.Size((1, 256, 28, 28)) + assert feats[4] == torch.Size((1, 512, 28, 28)) + + # Test efficientnet_b1 with pretrained weights + model = TIMMBackbone(model_name='efficientnet_b1', pretrained=True) + + # Test resnetv2_50x1_bitm from timm, features_only=True, output_stride=8 + model = TIMMBackbone( + model_name='resnetv2_50x1_bitm', + features_only=True, + pretrained=False, + output_stride=8) + imgs = torch.randn(1, 3, 8, 8) + feats = model(imgs) + feats = [feat.shape for feat in feats] + assert len(feats) == 5 + assert feats[0] == torch.Size((1, 64, 4, 4)) + assert feats[1] == torch.Size((1, 256, 2, 2)) + assert feats[2] == torch.Size((1, 512, 1, 1)) + assert feats[3] == torch.Size((1, 1024, 1, 1)) + assert feats[4] == torch.Size((1, 2048, 1, 1)) + + # Test resnetv2_50x3_bitm from timm, features_only=True, output_stride=8 + model = TIMMBackbone( + model_name='resnetv2_50x3_bitm', + features_only=True, + pretrained=False, + output_stride=8) + imgs = torch.randn(1, 3, 8, 8) + feats = model(imgs) + feats = [feat.shape for feat in feats] + assert len(feats) == 5 + assert feats[0] == torch.Size((1, 192, 4, 4)) + assert feats[1] == torch.Size((1, 768, 
2, 2)) + assert feats[2] == torch.Size((1, 1536, 1, 1)) + assert feats[3] == torch.Size((1, 3072, 1, 1)) + assert feats[4] == torch.Size((1, 6144, 1, 1)) + + # Test resnetv2_101x1_bitm from timm, features_only=True, output_stride=8 + model = TIMMBackbone( + model_name='resnetv2_101x1_bitm', + features_only=True, + pretrained=False, + output_stride=8) + imgs = torch.randn(1, 3, 8, 8) + feats = model(imgs) + feats = [feat.shape for feat in feats] + assert len(feats) == 5 + assert feats[0] == torch.Size((1, 64, 4, 4)) + assert feats[1] == torch.Size((1, 256, 2, 2)) + assert feats[2] == torch.Size((1, 512, 1, 1)) + assert feats[3] == torch.Size((1, 1024, 1, 1)) + assert feats[4] == torch.Size((1, 2048, 1, 1)) diff --git a/tests/test_models/test_backbones/test_twins.py b/tests/test_models/test_backbones/test_twins.py new file mode 100644 index 0000000000..aa3eaf9f45 --- /dev/null +++ b/tests/test_models/test_backbones/test_twins.py @@ -0,0 +1,171 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.backbones.twins import (PCPVT, SVT, + ConditionalPositionEncoding, + LocallyGroupedSelfAttention) + + +def test_pcpvt(): + # Test normal input + H, W = (224, 224) + temp = torch.randn((1, 3, H, W)) + model = PCPVT( + embed_dims=[32, 64, 160, 256], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + norm_after_stage=False) + model.init_weights() + outs = model(temp) + assert outs[0].shape == (1, 32, H // 4, W // 4) + assert outs[1].shape == (1, 64, H // 8, W // 8) + assert outs[2].shape == (1, 160, H // 16, W // 16) + assert outs[3].shape == (1, 256, H // 32, W // 32) + + +def test_svt(): + # Test normal input + H, W = (224, 224) + temp = torch.randn((1, 3, H, W)) + model = SVT( + embed_dims=[32, 64, 128], + num_heads=[1, 2, 4], + mlp_ratios=[4, 4, 4], + qkv_bias=False, + depths=[4, 4, 4], + windiow_sizes=[7, 7, 7], + norm_after_stage=True) + + model.init_weights() + outs = model(temp) + assert outs[0].shape == (1, 32, H // 4, W // 4) + assert outs[1].shape == (1, 64, H // 8, W // 8) + assert outs[2].shape == (1, 128, H // 16, W // 16) + + +def test_svt_init(): + path = 'PATH_THAT_DO_NOT_EXIST' + # Test all combinations of pretrained and init_cfg + # pretrained=None, init_cfg=None + model = SVT(pretrained=None, init_cfg=None) + assert model.init_cfg is None + model.init_weights() + + # pretrained=None + # init_cfg loads pretrain from an non-existent file + model = SVT( + pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path)) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # pretrained=None + # init_cfg=123, whose type is unsupported + model = SVT(pretrained=None, init_cfg=123) + with pytest.raises(TypeError): + model.init_weights() + + # pretrained loads pretrain from an non-existent file + # init_cfg=None + model = SVT(pretrained=path, init_cfg=None) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # pretrained loads pretrain from an non-existent file + # init_cfg loads pretrain from an non-existent file + with pytest.raises(AssertionError): + model = SVT( + pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path)) + with pytest.raises(AssertionError): + model = SVT(pretrained=path, init_cfg=123) + + 
# pretrain=123, whose type is unsupported + # init_cfg=None + with pytest.raises(TypeError): + model = SVT(pretrained=123, init_cfg=None) + + # pretrain=123, whose type is unsupported + # init_cfg loads pretrain from an non-existent file + with pytest.raises(AssertionError): + model = SVT( + pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path)) + + # pretrain=123, whose type is unsupported + # init_cfg=123, whose type is unsupported + with pytest.raises(AssertionError): + model = SVT(pretrained=123, init_cfg=123) + + +def test_pcpvt_init(): + path = 'PATH_THAT_DO_NOT_EXIST' + # Test all combinations of pretrained and init_cfg + # pretrained=None, init_cfg=None + model = PCPVT(pretrained=None, init_cfg=None) + assert model.init_cfg is None + model.init_weights() + + # pretrained=None + # init_cfg loads pretrain from an non-existent file + model = PCPVT( + pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path)) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # pretrained=None + # init_cfg=123, whose type is unsupported + model = PCPVT(pretrained=None, init_cfg=123) + with pytest.raises(TypeError): + model.init_weights() + + # pretrained loads pretrain from an non-existent file + # init_cfg=None + model = PCPVT(pretrained=path, init_cfg=None) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # pretrained loads pretrain from an non-existent file + # init_cfg loads pretrain from an non-existent file + with pytest.raises(AssertionError): + model = PCPVT( + pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path)) + with pytest.raises(AssertionError): + model = PCPVT(pretrained=path, init_cfg=123) + + # pretrain=123, whose type is unsupported + # init_cfg=None + with pytest.raises(TypeError): + model = PCPVT(pretrained=123, init_cfg=None) + + # pretrain=123, whose type is unsupported + # init_cfg loads pretrain from an non-existent file + with pytest.raises(AssertionError): + model = PCPVT( + pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path)) + + # pretrain=123, whose type is unsupported + # init_cfg=123, whose type is unsupported + with pytest.raises(AssertionError): + model = PCPVT(pretrained=123, init_cfg=123) + + +def test_locallygrouped_self_attention_module(): + LSA = LocallyGroupedSelfAttention(embed_dims=32, window_size=3) + outs = LSA(torch.randn(1, 3136, 32), (56, 56)) + assert outs.shape == torch.Size([1, 3136, 32]) + + +def test_conditional_position_encoding_module(): + CPE = ConditionalPositionEncoding(in_channels=32, embed_dims=32, stride=2) + outs = CPE(torch.randn(1, 3136, 32), (56, 56)) + assert outs.shape == torch.Size([1, 784, 32]) diff --git a/tests/test_models/test_backbones/test_unet.py b/tests/test_models/test_backbones/test_unet.py new file mode 100644 index 0000000000..d0eaccd393 --- /dev/null +++ b/tests/test_models/test_backbones/test_unet.py @@ -0,0 +1,825 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
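+# Several cases below exercise the encoder's whole downsample rate, i.e. the +# product of the per-stage downsampling implied by strides and downsamples; +# inputs whose spatial size is not divisible by it should raise an AssertionError.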
+import pytest +import torch +from mmcv.cnn import ConvModule + +from mmseg.models.backbones.unet import (BasicConvBlock, DeconvModule, + InterpConv, UNet, UpConvBlock) +from mmseg.models.utils import Upsample +from mmseg.utils import register_all_modules +from .utils import check_norm_state + +register_all_modules() + + +def test_unet_basic_conv_block(): + with pytest.raises(AssertionError): + # Not implemented yet. + dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) + BasicConvBlock(64, 64, dcn=dcn) + + with pytest.raises(AssertionError): + # Not implemented yet. + plugins = [ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + position='after_conv3') + ] + BasicConvBlock(64, 64, plugins=plugins) + + with pytest.raises(AssertionError): + # Not implemented yet + plugins = [ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + kv_stride=2), + position='after_conv2') + ] + BasicConvBlock(64, 64, plugins=plugins) + + # test BasicConvBlock with checkpoint forward + block = BasicConvBlock(16, 16, with_cp=True) + assert block.with_cp + x = torch.randn(1, 16, 64, 64, requires_grad=True) + x_out = block(x) + assert x_out.shape == torch.Size([1, 16, 64, 64]) + + block = BasicConvBlock(16, 16, with_cp=False) + assert not block.with_cp + x = torch.randn(1, 16, 64, 64) + x_out = block(x) + assert x_out.shape == torch.Size([1, 16, 64, 64]) + + # test BasicConvBlock with stride convolution to downsample + block = BasicConvBlock(16, 16, stride=2) + x = torch.randn(1, 16, 64, 64) + x_out = block(x) + assert x_out.shape == torch.Size([1, 16, 32, 32]) + + # test BasicConvBlock structure and forward + block = BasicConvBlock(16, 64, num_convs=3, dilation=3) + assert block.convs[0].conv.in_channels == 16 + assert block.convs[0].conv.out_channels == 64 + assert block.convs[0].conv.kernel_size == (3, 3) + assert block.convs[0].conv.dilation == (1, 1) + assert block.convs[0].conv.padding == (1, 1) + + assert block.convs[1].conv.in_channels == 64 + assert block.convs[1].conv.out_channels == 64 + assert block.convs[1].conv.kernel_size == (3, 3) + assert block.convs[1].conv.dilation == (3, 3) + assert block.convs[1].conv.padding == (3, 3) + + assert block.convs[2].conv.in_channels == 64 + assert block.convs[2].conv.out_channels == 64 + assert block.convs[2].conv.kernel_size == (3, 3) + assert block.convs[2].conv.dilation == (3, 3) + assert block.convs[2].conv.padding == (3, 3) + + +def test_deconv_module(): + with pytest.raises(AssertionError): + # kernel_size should be greater than or equal to scale_factor and + # (kernel_size - scale_factor) should be even numbers + DeconvModule(64, 32, kernel_size=1, scale_factor=2) + + with pytest.raises(AssertionError): + # kernel_size should be greater than or equal to scale_factor and + # (kernel_size - scale_factor) should be even numbers + DeconvModule(64, 32, kernel_size=3, scale_factor=2) + + with pytest.raises(AssertionError): + # kernel_size should be greater than or equal to scale_factor and + # (kernel_size - scale_factor) should be even numbers + DeconvModule(64, 32, kernel_size=5, scale_factor=4) + + # test DeconvModule with checkpoint forward and upsample 2X. 
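+ # (With the default deconv settings, assumed to be kernel_size=4 and scale_factor=2, a 128 x 128 input is upsampled to 256 x 256.)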
+ block = DeconvModule(64, 32, with_cp=True) + assert block.with_cp + x = torch.randn(1, 64, 128, 128, requires_grad=True) + x_out = block(x) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + block = DeconvModule(64, 32, with_cp=False) + assert not block.with_cp + x = torch.randn(1, 64, 128, 128) + x_out = block(x) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + # test DeconvModule with different kernel size for upsample 2X. + x = torch.randn(1, 64, 64, 64) + block = DeconvModule(64, 32, kernel_size=2, scale_factor=2) + x_out = block(x) + assert x_out.shape == torch.Size([1, 32, 128, 128]) + + block = DeconvModule(64, 32, kernel_size=6, scale_factor=2) + x_out = block(x) + assert x_out.shape == torch.Size([1, 32, 128, 128]) + + # test DeconvModule with different kernel size for upsample 4X. + x = torch.randn(1, 64, 64, 64) + block = DeconvModule(64, 32, kernel_size=4, scale_factor=4) + x_out = block(x) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + block = DeconvModule(64, 32, kernel_size=6, scale_factor=4) + x_out = block(x) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + +def test_interp_conv(): + # test InterpConv with checkpoint forward and upsample 2X. + block = InterpConv(64, 32, with_cp=True) + assert block.with_cp + x = torch.randn(1, 64, 128, 128, requires_grad=True) + x_out = block(x) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + block = InterpConv(64, 32, with_cp=False) + assert not block.with_cp + x = torch.randn(1, 64, 128, 128) + x_out = block(x) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + # test InterpConv with conv_first=False for upsample 2X. + block = InterpConv(64, 32, conv_first=False) + x = torch.randn(1, 64, 128, 128) + x_out = block(x) + assert isinstance(block.interp_upsample[0], Upsample) + assert isinstance(block.interp_upsample[1], ConvModule) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + # test InterpConv with conv_first=True for upsample 2X. + block = InterpConv(64, 32, conv_first=True) + x = torch.randn(1, 64, 128, 128) + x_out = block(x) + assert isinstance(block.interp_upsample[0], ConvModule) + assert isinstance(block.interp_upsample[1], Upsample) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + # test InterpConv with bilinear upsample for upsample 2X. + block = InterpConv( + 64, + 32, + conv_first=False, + upsample_cfg=dict( + scale_factor=2, mode='bilinear', align_corners=False)) + x = torch.randn(1, 64, 128, 128) + x_out = block(x) + assert isinstance(block.interp_upsample[0], Upsample) + assert isinstance(block.interp_upsample[1], ConvModule) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + assert block.interp_upsample[0].mode == 'bilinear' + + # test InterpConv with nearest upsample for upsample 2X. + block = InterpConv( + 64, + 32, + conv_first=False, + upsample_cfg=dict(scale_factor=2, mode='nearest')) + x = torch.randn(1, 64, 128, 128) + x_out = block(x) + assert isinstance(block.interp_upsample[0], Upsample) + assert isinstance(block.interp_upsample[1], ConvModule) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + assert block.interp_upsample[0].mode == 'nearest' + + +def test_up_conv_block(): + with pytest.raises(AssertionError): + # Not implemented yet. + dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) + UpConvBlock(BasicConvBlock, 64, 32, 32, dcn=dcn) + + with pytest.raises(AssertionError): + # Not implemented yet. + plugins = [ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 16), + position='after_conv3') + ] + UpConvBlock(BasicConvBlock, 64, 32, 32, plugins=plugins) + + with pytest.raises(AssertionError): + # Not implemented yet + plugins = [ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + kv_stride=2), + position='after_conv2') + ] + UpConvBlock(BasicConvBlock, 64, 32, 32, plugins=plugins) + + # test UpConvBlock with checkpoint forward and upsample 2X. + block = UpConvBlock(BasicConvBlock, 64, 32, 32, with_cp=True) + skip_x = torch.randn(1, 32, 256, 256, requires_grad=True) + x = torch.randn(1, 64, 128, 128, requires_grad=True) + x_out = block(skip_x, x) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + # test UpConvBlock with upsample=True for upsample 2X. The spatial size of + # skip_x is 2X larger than x. + block = UpConvBlock( + BasicConvBlock, 64, 32, 32, upsample_cfg=dict(type='InterpConv')) + skip_x = torch.randn(1, 32, 256, 256) + x = torch.randn(1, 64, 128, 128) + x_out = block(skip_x, x) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + # test UpConvBlock with upsample=False for upsample 2X. The spatial size of + # skip_x is the same as that of x. + block = UpConvBlock(BasicConvBlock, 64, 32, 32, upsample_cfg=None) + skip_x = torch.randn(1, 32, 256, 256) + x = torch.randn(1, 64, 256, 256) + x_out = block(skip_x, x) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + # test UpConvBlock with different upsample method for upsample 2X. + # The upsample method is interpolation upsample (bilinear or nearest). + block = UpConvBlock( + BasicConvBlock, + 64, + 32, + 32, + upsample_cfg=dict( + type='InterpConv', + upsample_cfg=dict( + scale_factor=2, mode='bilinear', align_corners=False))) + skip_x = torch.randn(1, 32, 256, 256) + x = torch.randn(1, 64, 128, 128) + x_out = block(skip_x, x) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + # test UpConvBlock with different upsample method for upsample 2X. + # The upsample method is deconvolution upsample. 
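+ # (kernel_size=4 with scale_factor=2 satisfies the DeconvModule constraint that kernel_size >= scale_factor and that their difference is even.)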
+ block = UpConvBlock( + BasicConvBlock, + 64, + 32, + 32, + upsample_cfg=dict(type='DeconvModule', kernel_size=4, scale_factor=2)) + skip_x = torch.randn(1, 32, 256, 256) + x = torch.randn(1, 64, 128, 128) + x_out = block(skip_x, x) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + # test BasicConvBlock structure and forward + block = UpConvBlock( + conv_block=BasicConvBlock, + in_channels=64, + skip_channels=32, + out_channels=32, + num_convs=3, + dilation=3, + upsample_cfg=dict( + type='InterpConv', + upsample_cfg=dict( + scale_factor=2, mode='bilinear', align_corners=False))) + skip_x = torch.randn(1, 32, 256, 256) + x = torch.randn(1, 64, 128, 128) + x_out = block(skip_x, x) + assert x_out.shape == torch.Size([1, 32, 256, 256]) + + assert block.conv_block.convs[0].conv.in_channels == 64 + assert block.conv_block.convs[0].conv.out_channels == 32 + assert block.conv_block.convs[0].conv.kernel_size == (3, 3) + assert block.conv_block.convs[0].conv.dilation == (1, 1) + assert block.conv_block.convs[0].conv.padding == (1, 1) + + assert block.conv_block.convs[1].conv.in_channels == 32 + assert block.conv_block.convs[1].conv.out_channels == 32 + assert block.conv_block.convs[1].conv.kernel_size == (3, 3) + assert block.conv_block.convs[1].conv.dilation == (3, 3) + assert block.conv_block.convs[1].conv.padding == (3, 3) + + assert block.conv_block.convs[2].conv.in_channels == 32 + assert block.conv_block.convs[2].conv.out_channels == 32 + assert block.conv_block.convs[2].conv.kernel_size == (3, 3) + assert block.conv_block.convs[2].conv.dilation == (3, 3) + assert block.conv_block.convs[2].conv.padding == (3, 3) + + assert block.upsample.interp_upsample[1].conv.in_channels == 64 + assert block.upsample.interp_upsample[1].conv.out_channels == 32 + assert block.upsample.interp_upsample[1].conv.kernel_size == (1, 1) + assert block.upsample.interp_upsample[1].conv.dilation == (1, 1) + assert block.upsample.interp_upsample[1].conv.padding == (0, 0) + + +def test_unet(): + with pytest.raises(AssertionError): + # Not implemented yet. + dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) + UNet(3, 64, 5, dcn=dcn) + + with pytest.raises(AssertionError): + # Not implemented yet. + plugins = [ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + position='after_conv3') + ] + UNet(3, 64, 5, plugins=plugins) + + with pytest.raises(AssertionError): + # Not implemented yet + plugins = [ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + kv_stride=2), + position='after_conv2') + ] + UNet(3, 64, 5, plugins=plugins) + + with pytest.raises(AssertionError): + # Check whether the input image size can be divisible by the whole + # downsample rate of the encoder. The whole downsample rate of this + # case is 8. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=4, + strides=(1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2), + dec_num_convs=(2, 2, 2), + downsamples=(True, True, True), + enc_dilations=(1, 1, 1, 1), + dec_dilations=(1, 1, 1)) + x = torch.randn(2, 3, 65, 65) + unet(x) + + with pytest.raises(AssertionError): + # Check whether the input image size can be divisible by the whole + # downsample rate of the encoder. The whole downsample rate of this + # case is 16. 
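+ # (All strides are 1, so the rate comes from the four enabled downsamples: 2**4 = 16, and 65 is not divisible by 16.)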
+ unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + x = torch.randn(2, 3, 65, 65) + unet(x) + + with pytest.raises(AssertionError): + # Check whether the input image size can be divisible by the whole + # downsample rate of the encoder. The whole downsample rate of this + # case is 8. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + x = torch.randn(2, 3, 65, 65) + unet(x) + + with pytest.raises(AssertionError): + # Check whether the input image size can be divisible by the whole + # downsample rate of the encoder. The whole downsample rate of this + # case is 8. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 2, 2, 2, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + x = torch.randn(2, 3, 65, 65) + unet(x) + + with pytest.raises(AssertionError): + # Check whether the input image size can be divisible by the whole + # downsample rate of the encoder. The whole downsample rate of this + # case is 32. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=6, + strides=(1, 1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2, 2), + downsamples=(True, True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1, 1)) + x = torch.randn(2, 3, 65, 65) + unet(x) + + with pytest.raises(AssertionError): + # Check if num_stages matches strides, len(strides)=num_stages + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + x = torch.randn(2, 3, 64, 64) + unet(x) + + with pytest.raises(AssertionError): + # Check if num_stages matches strides, len(enc_num_convs)=num_stages + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + x = torch.randn(2, 3, 64, 64) + unet(x) + + with pytest.raises(AssertionError): + # Check if num_stages matches strides, len(dec_num_convs)=num_stages-1 + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + x = torch.randn(2, 3, 64, 64) + unet(x) + + with pytest.raises(AssertionError): + # Check if num_stages matches strides, len(downsamples)=num_stages-1 + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + x = torch.randn(2, 3, 64, 64) + unet(x) + + with pytest.raises(AssertionError): + # Check if num_stages matches strides, 
len(enc_dilations)=num_stages + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + x = torch.randn(2, 3, 64, 64) + unet(x) + + with pytest.raises(AssertionError): + # Check if num_stages matches strides, len(dec_dilations)=num_stages-1 + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1, 1)) + x = torch.randn(2, 3, 64, 64) + unet(x) + + # test UNet norm_eval=True + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + norm_eval=True) + unet.train() + assert check_norm_state(unet.modules(), False) + + # test UNet norm_eval=False + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + norm_eval=False) + unet.train() + assert check_norm_state(unet.modules(), True) + + # test UNet forward and outputs. The whole downsample rate is 16. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 8, 8]) + assert x_outs[1].shape == torch.Size([2, 32, 16, 16]) + assert x_outs[2].shape == torch.Size([2, 16, 32, 32]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet forward and outputs. The whole downsample rate is 8. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 16, 16]) + assert x_outs[1].shape == torch.Size([2, 32, 16, 16]) + assert x_outs[2].shape == torch.Size([2, 16, 32, 32]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet forward and outputs. The whole downsample rate is 8. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 2, 2, 2, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 16, 16]) + assert x_outs[1].shape == torch.Size([2, 32, 16, 16]) + assert x_outs[2].shape == torch.Size([2, 16, 32, 32]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet forward and outputs. 
The whole downsample rate is 4. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, False, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 32, 32]) + assert x_outs[1].shape == torch.Size([2, 32, 32, 32]) + assert x_outs[2].shape == torch.Size([2, 16, 32, 32]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet forward and outputs. The whole downsample rate is 4. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 2, 2, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, False, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 32, 32]) + assert x_outs[1].shape == torch.Size([2, 32, 32, 32]) + assert x_outs[2].shape == torch.Size([2, 16, 32, 32]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet forward and outputs. The whole downsample rate is 8. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 16, 16]) + assert x_outs[1].shape == torch.Size([2, 32, 16, 16]) + assert x_outs[2].shape == torch.Size([2, 16, 32, 32]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet forward and outputs. The whole downsample rate is 4. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, False, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 32, 32]) + assert x_outs[1].shape == torch.Size([2, 32, 32, 32]) + assert x_outs[2].shape == torch.Size([2, 16, 32, 32]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet forward and outputs. The whole downsample rate is 2. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, False, False, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 64, 64]) + assert x_outs[1].shape == torch.Size([2, 32, 64, 64]) + assert x_outs[2].shape == torch.Size([2, 16, 64, 64]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet forward and outputs. The whole downsample rate is 1. 
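+ # (No stage downsamples here, so all five outputs keep the 128 x 128 input resolution and only the channel width changes, from 64 down to 4.)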
+ unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(False, False, False, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 128, 128]) + assert x_outs[1].shape == torch.Size([2, 32, 128, 128]) + assert x_outs[2].shape == torch.Size([2, 16, 128, 128]) + assert x_outs[3].shape == torch.Size([2, 8, 128, 128]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet forward and outputs. The whole downsample rate is 16. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 2, 2, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 8, 8]) + assert x_outs[1].shape == torch.Size([2, 32, 16, 16]) + assert x_outs[2].shape == torch.Size([2, 16, 32, 32]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet forward and outputs. The whole downsample rate is 8. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 2, 2, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 16, 16]) + assert x_outs[1].shape == torch.Size([2, 32, 16, 16]) + assert x_outs[2].shape == torch.Size([2, 16, 32, 32]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet forward and outputs. The whole downsample rate is 8. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 2, 2, 2, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 16, 16]) + assert x_outs[1].shape == torch.Size([2, 32, 16, 16]) + assert x_outs[2].shape == torch.Size([2, 16, 32, 32]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet forward and outputs. The whole downsample rate is 4. + unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 2, 2, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, False, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1)) + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 32, 32]) + assert x_outs[1].shape == torch.Size([2, 32, 32, 32]) + assert x_outs[2].shape == torch.Size([2, 16, 32, 32]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) + + # test UNet init_weights method. 
+ unet = UNet( + in_channels=3, + base_channels=4, + num_stages=5, + strides=(1, 2, 2, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, False, False), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + pretrained=None) + unet.init_weights() + x = torch.randn(2, 3, 128, 128) + x_outs = unet(x) + assert x_outs[0].shape == torch.Size([2, 64, 32, 32]) + assert x_outs[1].shape == torch.Size([2, 32, 32, 32]) + assert x_outs[2].shape == torch.Size([2, 16, 32, 32]) + assert x_outs[3].shape == torch.Size([2, 8, 64, 64]) + assert x_outs[4].shape == torch.Size([2, 4, 128, 128]) diff --git a/tests/test_models/test_backbones/test_vit.py b/tests/test_models/test_backbones/test_vit.py new file mode 100644 index 0000000000..0d1ba70009 --- /dev/null +++ b/tests/test_models/test_backbones/test_vit.py @@ -0,0 +1,185 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.backbones.vit import (TransformerEncoderLayer, + VisionTransformer) +from .utils import check_norm_state + + +def test_vit_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = VisionTransformer() + model.init_weights(pretrained=0) + + with pytest.raises(TypeError): + # img_size must be int or tuple + model = VisionTransformer(img_size=512.0) + + with pytest.raises(TypeError): + # out_indices must be int, list or tuple + model = VisionTransformer(out_indices=1.) + + with pytest.raises(TypeError): + # test resize_pos_embed function + x = torch.randn(1, 196) + VisionTransformer.resize_pos_embed(x, 512, 512, 224, 224, 'bilinear') + + with pytest.raises(AssertionError): + # The length of the img_size tuple must be at most 2. + VisionTransformer(img_size=(224, 224, 224)) + + with pytest.raises(TypeError): + # Pretrained must be None or a str. 
+ VisionTransformer(pretrained=123) + + with pytest.raises(AssertionError): + # with_cls_token must be True when output_cls_token == True + VisionTransformer(with_cls_token=False, output_cls_token=True) + + # Test img_size isinstance tuple + imgs = torch.randn(1, 3, 224, 224) + model = VisionTransformer(img_size=(224, )) + model.init_weights() + model(imgs) + + # Test img_size isinstance tuple + imgs = torch.randn(1, 3, 224, 224) + model = VisionTransformer(img_size=(224, 224)) + model(imgs) + + # Test norm_eval = True + model = VisionTransformer(norm_eval=True) + model.train() + + # Test ViT backbone with input size of 224 and patch size of 16 + model = VisionTransformer() + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + # Test normal size input image + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + # Test large size input image + imgs = torch.randn(1, 3, 256, 256) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 16, 16) + + # Test small size input image + imgs = torch.randn(1, 3, 32, 32) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 2, 2) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + # Test unbalanced size input image + imgs = torch.randn(1, 3, 112, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 7, 14) + + # Test irregular input image + imgs = torch.randn(1, 3, 234, 345) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 15, 22) + + # Test with_cp=True + model = VisionTransformer(with_cp=True) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + # Test with_cls_token=False + model = VisionTransformer(with_cls_token=False) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + # Test final norm + model = VisionTransformer(final_norm=True) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + # Test patch norm + model = VisionTransformer(patch_norm=True) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[-1].shape == (1, 768, 14, 14) + + # Test output_cls_token + model = VisionTransformer(with_cls_token=True, output_cls_token=True) + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[0][0].shape == (1, 768, 14, 14) + assert feat[0][1].shape == (1, 768) + + # Test TransformerEncoderLayer with checkpoint forward + block = TransformerEncoderLayer( + embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True) + assert block.with_cp + x = torch.randn(1, 56 * 56, 64) + x_out = block(x) + assert x_out.shape == torch.Size([1, 56 * 56, 64]) + + +def test_vit_init(): + path = 'PATH_THAT_DO_NOT_EXIST' + # Test all combinations of pretrained and init_cfg + # pretrained=None, init_cfg=None + model = VisionTransformer(pretrained=None, init_cfg=None) + assert model.init_cfg is None + model.init_weights() + + # pretrained=None + # init_cfg loads pretrain from an non-existent file + model = VisionTransformer( + pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path)) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # pretrained=None + # init_cfg=123, whose type is unsupported + model = VisionTransformer(pretrained=None, init_cfg=123) + with 
pytest.raises(TypeError): + model.init_weights() + + # pretrained loads pretrain from an non-existent file + # init_cfg=None + model = VisionTransformer(pretrained=path, init_cfg=None) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # pretrained loads pretrain from an non-existent file + # init_cfg loads pretrain from an non-existent file + with pytest.raises(AssertionError): + model = VisionTransformer( + pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path)) + with pytest.raises(AssertionError): + model = VisionTransformer(pretrained=path, init_cfg=123) + + # pretrain=123, whose type is unsupported + # init_cfg=None + with pytest.raises(TypeError): + model = VisionTransformer(pretrained=123, init_cfg=None) + + # pretrain=123, whose type is unsupported + # init_cfg loads pretrain from an non-existent file + with pytest.raises(AssertionError): + model = VisionTransformer( + pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path)) + + # pretrain=123, whose type is unsupported + # init_cfg=123, whose type is unsupported + with pytest.raises(AssertionError): + model = VisionTransformer(pretrained=123, init_cfg=123) diff --git a/tests/test_models/test_backbones/utils.py b/tests/test_models/test_backbones/utils.py new file mode 100644 index 0000000000..54b6404c60 --- /dev/null +++ b/tests/test_models/test_backbones/utils.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmseg.models.backbones.resnet import BasicBlock, Bottleneck +from mmseg.models.backbones.resnext import Bottleneck as BottleneckX + + +def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX)): + return True + return False + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def all_zeros(modules): + """Check if the weight(and bias) is all zero.""" + weight_zero = torch.allclose(modules.weight.data, + torch.zeros_like(modules.weight.data)) + if hasattr(modules, 'bias'): + bias_zero = torch.allclose(modules.bias.data, + torch.zeros_like(modules.bias.data)) + else: + bias_zero = True + + return weight_zero and bias_zero + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True diff --git a/tests/test_models/test_data_preprocessor.py b/tests/test_models/test_data_preprocessor.py new file mode 100644 index 0000000000..d05eef1c7d --- /dev/null +++ b/tests/test_models/test_data_preprocessor.py @@ -0,0 +1,64 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch +from mmengine.structures import PixelData + +from mmseg.models import SegDataPreProcessor +from mmseg.structures import SegDataSample + + +class TestSegDataPreProcessor(TestCase): + + def test_init(self): + # test mean is None + processor = SegDataPreProcessor() + self.assertTrue(not hasattr(processor, 'mean')) + self.assertTrue(processor._enable_normalize is False) + + # test mean is not None + processor = SegDataPreProcessor(mean=[0, 0, 0], std=[1, 1, 1]) + self.assertTrue(hasattr(processor, 'mean')) + self.assertTrue(hasattr(processor, 'std')) + self.assertTrue(processor._enable_normalize) + + # please specify both mean and std + with self.assertRaises(AssertionError): + SegDataPreProcessor(mean=[0, 0, 0]) + + # bgr2rgb and rgb2bgr cannot be set to True at the same time + with self.assertRaises(AssertionError): + SegDataPreProcessor(bgr_to_rgb=True, rgb_to_bgr=True) + + def test_forward(self): + data_sample = SegDataSample() + data_sample.gt_sem_seg = PixelData( + **{'data': torch.randint(0, 10, (1, 11, 10))}) + processor = SegDataPreProcessor( + mean=[0, 0, 0], std=[1, 1, 1], size=(20, 20)) + data = { + 'inputs': [ + torch.randint(0, 256, (3, 11, 10)), + torch.randint(0, 256, (3, 11, 10)) + ], + 'data_samples': [data_sample, data_sample] + } + out = processor(data, training=True) + self.assertEqual(out['inputs'].shape, (2, 3, 20, 20)) + self.assertEqual(len(out['data_samples']), 2) + + # test predict with padding + processor = SegDataPreProcessor( + mean=[0, 0, 0], + std=[1, 1, 1], + size=(20, 20), + test_cfg=dict(size_divisor=15)) + data = { + 'inputs': [ + torch.randint(0, 256, (3, 11, 10)), + ], + 'data_samples': [data_sample] + } + out = processor(data, training=False) + self.assertEqual(out['inputs'].shape[2] % 15, 0) + self.assertEqual(out['inputs'].shape[3] % 15, 0) diff --git a/tests/test_models/test_forward.py b/tests/test_models/test_forward.py index 620b82e64d..ab88e4393a 100644 --- a/tests/test_models/test_forward.py +++ b/tests/test_models/test_forward.py @@ -1,3 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. """pytest tests/test_forward.py.""" import copy from os.path import dirname, exists, join @@ -7,43 +8,63 @@ import pytest import torch import torch.nn as nn -from mmcv.utils.parrots_wrapper import SyncBatchNorm, _BatchNorm +from mmengine.model.utils import revert_sync_batchnorm +from mmengine.structures import PixelData +from mmengine.utils import is_list_of, is_tuple_of +from torch import Tensor +from mmseg.structures import SegDataSample +from mmseg.utils import register_all_modules -def _demo_mm_inputs(input_shape=(2, 3, 8, 16), num_classes=10): +register_all_modules() + + +def _demo_mm_inputs(batch_size=2, image_shapes=(3, 32, 32), num_classes=5): """Create a superset of inputs needed to run test or train batches. Args: - input_shape (tuple): - input batch dimensions - - num_classes (int): - number of semantic classes + batch_size (int): batch size. Defaults to 2. + image_shapes (List[tuple], Optional): image shape. + Defaults to (3, 32, 32). + num_classes (int): number of semantic classes. + Defaults to 5.
""" - (N, C, H, W) = input_shape - - rng = np.random.RandomState(0) - - imgs = rng.rand(*input_shape) - segs = rng.randint( - low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8) - - img_metas = [{ - 'img_shape': (H, W, C), - 'ori_shape': (H, W, C), - 'pad_shape': (H, W, C), - 'filename': '.png', - 'scale_factor': 1.0, - 'flip': False, - 'flip_direction': 'horizontal' - } for _ in range(N)] - - mm_inputs = { - 'imgs': torch.FloatTensor(imgs), - 'img_metas': img_metas, - 'gt_semantic_seg': torch.LongTensor(segs) - } - return mm_inputs + if isinstance(image_shapes, list): + assert len(image_shapes) == batch_size + else: + image_shapes = [image_shapes] * batch_size + + inputs = [] + data_samples = [] + for idx in range(batch_size): + image_shape = image_shapes[idx] + c, h, w = image_shape + image = np.random.randint(0, 255, size=image_shape, dtype=np.uint8) + + mm_input = torch.from_numpy(image) + + img_meta = { + 'img_id': idx, + 'img_shape': image_shape[1:], + 'ori_shape': image_shape[1:], + 'pad_shape': image_shape[1:], + 'filename': '.png', + 'scale_factor': 1.0, + 'flip': False, + 'flip_direction': None, + } + + data_sample = SegDataSample() + data_sample.set_metainfo(img_meta) + + gt_semantic_seg = np.random.randint( + 0, num_classes, (1, h, w), dtype=np.uint8) + gt_semantic_seg = torch.LongTensor(gt_semantic_seg) + gt_sem_seg_data = dict(data=gt_semantic_seg) + data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data) + inputs.append(mm_input) + data_samples.append(data_sample) + return dict(inputs=inputs, data_samples=data_samples) def _get_config_directory(): @@ -63,7 +84,7 @@ def _get_config_directory(): def _get_config_module(fname): """Load a configuration as a python module.""" - from mmcv import Config + from mmengine import Config config_dpath = _get_config_directory() config_fpath = join(config_dpath, fname) config_mod = Config.fromfile(config_fpath) @@ -76,81 +97,101 @@ def _get_segmentor_cfg(fname): These are deep copied to allow for safe modification of parameters without influencing other tests. 
""" - import mmcv config = _get_config_module(fname) model = copy.deepcopy(config.model) - train_cfg = mmcv.Config(copy.deepcopy(config.train_cfg)) - test_cfg = mmcv.Config(copy.deepcopy(config.test_cfg)) - return model, train_cfg, test_cfg + return model def test_pspnet_forward(): _test_encoder_decoder_forward( - 'pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py') + 'pspnet/pspnet_r18-d8_4xb2-80k_cityscapes-512x1024.py') def test_fcn_forward(): - _test_encoder_decoder_forward('fcn/fcn_r50-d8_512x1024_40k_cityscapes.py') + _test_encoder_decoder_forward( + 'fcn/fcn_r18-d8_4xb2-80k_cityscapes-512x1024.py') def test_deeplabv3_forward(): _test_encoder_decoder_forward( - 'deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py') + 'deeplabv3/deeplabv3_r18-d8_4xb2-80k_cityscapes-512x1024.py') def test_deeplabv3plus_forward(): _test_encoder_decoder_forward( - 'deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py') + 'deeplabv3plus/deeplabv3plus_r18-d8_4xb2-80k_cityscapes-512x1024.py') def test_gcnet_forward(): _test_encoder_decoder_forward( - 'gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py') + 'gcnet/gcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py') def test_ann_forward(): - _test_encoder_decoder_forward('ann/ann_r50-d8_512x1024_40k_cityscapes.py') + _test_encoder_decoder_forward( + 'ann/ann_r50-d8_4xb2-40k_cityscapes-512x1024.py') def test_ccnet_forward(): if not torch.cuda.is_available(): pytest.skip('CCNet requires CUDA') _test_encoder_decoder_forward( - 'ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py') + 'ccnet/ccnet_r50-d8_4xb2-40k_cityscapes-512x1024.py') def test_danet_forward(): _test_encoder_decoder_forward( - 'danet/danet_r50-d8_512x1024_40k_cityscapes.py') + 'danet/danet_r50-d8_4xb2-40k_cityscapes-512x1024.py') def test_nonlocal_net_forward(): _test_encoder_decoder_forward( - 'nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py') + 'nonlocal_net/nonlocal_r50-d8_4xb2-40k_cityscapes-512x1024.py') def test_upernet_forward(): _test_encoder_decoder_forward( - 'upernet/upernet_r50_512x1024_40k_cityscapes.py') + 'upernet/upernet_r50_4xb2-40k_cityscapes-512x1024.py') def test_hrnet_forward(): - _test_encoder_decoder_forward('hrnet/fcn_hr18s_512x1024_40k_cityscapes.py') + _test_encoder_decoder_forward( + 'hrnet/fcn_hr18s_4xb2-40k_cityscapes-512x1024.py') def test_ocrnet_forward(): _test_encoder_decoder_forward( - 'ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py') + 'ocrnet/ocrnet_hr18s_4xb2-40k_cityscapes-512x1024.py') def test_psanet_forward(): _test_encoder_decoder_forward( - 'psanet/psanet_r50-d8_512x1024_40k_cityscapes.py') + 'psanet/psanet_r50-d8_4xb2-40k_cityscapes-512x1024.py') + + +def test_sem_fpn_forward(): + _test_encoder_decoder_forward( + 'sem_fpn/fpn_r50_4xb2-80k_cityscapes-512x1024.py') -def test_encnet_forward(): +def test_mobilenet_v2_forward(): _test_encoder_decoder_forward( - 'encnet/encnet_r50-d8_512x1024_40k_cityscapes.py') + 'mobilenet_v2/mobilenet-v2-d8_pspnet_4xb2-80k_cityscapes-512x1024.py') + + +def test_dnlnet_forward(): + _test_encoder_decoder_forward( + 'dnlnet/dnl_r50-d8_4xb2-40k_cityscapes-512x1024.py') + + +def test_emanet_forward(): + _test_encoder_decoder_forward( + 'emanet/emanet_r50-d8_4xb2-80k_cityscapes-512x1024.py') + + +def test_isanet_forward(): + _test_encoder_decoder_forward( + 'isanet/isanet_r50-d8_4xb2-40k_cityscapes-512x1024.py') def get_world_size(process_group): @@ -162,68 +203,62 @@ def _check_input_dim(self, inputs): pass -def _convert_batchnorm(module): - module_output = module - if isinstance(module, SyncBatchNorm): - # to be consistent 
with SyncBN, we hack dim check function in BN - module_output = _BatchNorm(module.num_features, module.eps, - module.momentum, module.affine, - module.track_running_stats) - if module.affine: - module_output.weight.data = module.weight.data.clone().detach() - module_output.bias.data = module.bias.data.clone().detach() - # keep requires_grad unchanged - module_output.weight.requires_grad = module.weight.requires_grad - module_output.bias.requires_grad = module.bias.requires_grad - module_output.running_mean = module.running_mean - module_output.running_var = module.running_var - module_output.num_batches_tracked = module.num_batches_tracked - for name, child in module.named_children(): - module_output.add_module(name, _convert_batchnorm(child)) - del module - return module_output - - @patch('torch.nn.modules.batchnorm._BatchNorm._check_input_dim', _check_input_dim) @patch('torch.distributed.get_world_size', get_world_size) def _test_encoder_decoder_forward(cfg_file): - model, train_cfg, test_cfg = _get_segmentor_cfg(cfg_file) + model = _get_segmentor_cfg(cfg_file) model['pretrained'] = None - test_cfg['mode'] = 'whole' + model['test_cfg']['mode'] = 'whole' from mmseg.models import build_segmentor - segmentor = build_segmentor(model, train_cfg=train_cfg, test_cfg=test_cfg) + segmentor = build_segmentor(model) + segmentor.init_weights() if isinstance(segmentor.decode_head, nn.ModuleList): num_classes = segmentor.decode_head[-1].num_classes else: num_classes = segmentor.decode_head.num_classes # batch_size=2 for BatchNorm - input_shape = (2, 3, 32, 32) - mm_inputs = _demo_mm_inputs(input_shape, num_classes=num_classes) - - imgs = mm_inputs.pop('imgs') - img_metas = mm_inputs.pop('img_metas') - gt_semantic_seg = mm_inputs['gt_semantic_seg'] - + packed_inputs = _demo_mm_inputs( + batch_size=2, image_shapes=(3, 32, 32), num_classes=num_classes) # convert to cuda Tensor if applicable if torch.cuda.is_available(): segmentor = segmentor.cuda() - imgs = imgs.cuda() - gt_semantic_seg = gt_semantic_seg.cuda() else: - segmentor = _convert_batchnorm(segmentor) + segmentor = revert_sync_batchnorm(segmentor) # Test forward train - losses = segmentor.forward( - imgs, img_metas, gt_semantic_seg=gt_semantic_seg, return_loss=True) + data = segmentor.data_preprocessor(packed_inputs, True) + losses = segmentor.forward(**data, mode='loss') assert isinstance(losses, dict) - # Test forward test + packed_inputs = _demo_mm_inputs( + batch_size=1, image_shapes=(3, 32, 32), num_classes=num_classes) + data = segmentor.data_preprocessor(packed_inputs, False) with torch.no_grad(): segmentor.eval() - # pack into lists - img_list = [img[None, :] for img in imgs] - img_meta_list = [[img_meta] for img_meta in img_metas] - segmentor.forward(img_list, img_meta_list, return_loss=False) + # Test forward predict + batch_results = segmentor.forward(**data, mode='predict') + assert len(batch_results) == 1 + assert is_list_of(batch_results, SegDataSample) + assert batch_results[0].pred_sem_seg.shape == (32, 32) + assert batch_results[0].seg_logits.data.shape == (num_classes, 32, 32) + assert batch_results[0].gt_sem_seg.shape == (32, 32) + + # Test forward tensor + batch_results = segmentor.forward(**data, mode='tensor') + assert isinstance(batch_results, Tensor) or is_tuple_of( + batch_results, Tensor) + + # Test forward predict without ground truth + data.pop('data_samples') + batch_results = segmentor.forward(**data, mode='predict') + assert len(batch_results) == 1 + assert is_list_of(batch_results, SegDataSample) + assert 
batch_results[0].pred_sem_seg.shape == (32, 32) + + # Test forward tensor without ground truth + batch_results = segmentor.forward(**data, mode='tensor') + assert isinstance(batch_results, Tensor) or is_tuple_of( + batch_results, Tensor) diff --git a/tests/test_models/test_heads.py b/tests/test_models/test_heads.py deleted file mode 100644 index 935239438f..0000000000 --- a/tests/test_models/test_heads.py +++ /dev/null @@ -1,541 +0,0 @@ -from unittest.mock import patch - -import pytest -import torch -from mmcv.cnn import ConvModule -from mmcv.utils.parrots_wrapper import SyncBatchNorm - -from mmseg.models.decode_heads import (ANNHead, ASPPHead, CCHead, DAHead, - DepthwiseSeparableASPPHead, EncHead, - FCNHead, GCHead, NLHead, OCRHead, - PSAHead, PSPHead, UPerHead) -from mmseg.models.decode_heads.decode_head import BaseDecodeHead - - -def _conv_has_norm(module, sync_bn): - for m in module.modules(): - if isinstance(m, ConvModule): - if not m.with_norm: - return False - if sync_bn: - if not isinstance(m.bn, SyncBatchNorm): - return False - return True - - -def to_cuda(module, data): - module = module.cuda() - if isinstance(data, list): - for i in range(len(data)): - data[i] = data[i].cuda() - return module, data - - -@patch.multiple(BaseDecodeHead, __abstractmethods__=set()) -def test_decode_head(): - - with pytest.raises(AssertionError): - # default input_transform doesn't accept multiple inputs - BaseDecodeHead([32, 16], 16, num_classes=19) - - with pytest.raises(AssertionError): - # default input_transform doesn't accept multiple inputs - BaseDecodeHead(32, 16, num_classes=19, in_index=[-1, -2]) - - with pytest.raises(AssertionError): - # supported mode is resize_concat only - BaseDecodeHead(32, 16, num_classes=19, input_transform='concat') - - with pytest.raises(AssertionError): - # in_channels should be list|tuple - BaseDecodeHead(32, 16, num_classes=19, input_transform='resize_concat') - - with pytest.raises(AssertionError): - # in_index should be list|tuple - BaseDecodeHead([32], - 16, - in_index=-1, - num_classes=19, - input_transform='resize_concat') - - with pytest.raises(AssertionError): - # len(in_index) should equal len(in_channels) - BaseDecodeHead([32, 16], - 16, - num_classes=19, - in_index=[-1], - input_transform='resize_concat') - - # test default dropout - head = BaseDecodeHead(32, 16, num_classes=19) - assert hasattr(head, 'dropout') and head.dropout.p == 0.1 - - # test set dropout - head = BaseDecodeHead(32, 16, num_classes=19, drop_out_ratio=0.2) - assert hasattr(head, 'dropout') and head.dropout.p == 0.2 - - # test no input_transform - inputs = [torch.randn(1, 32, 45, 45)] - head = BaseDecodeHead(32, 16, num_classes=19) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - assert head.in_channels == 32 - assert head.input_transform is None - transformed_inputs = head._transform_inputs(inputs) - assert transformed_inputs.shape == (1, 32, 45, 45) - - # test input_transform = resize_concat - inputs = [torch.randn(1, 32, 45, 45), torch.randn(1, 16, 21, 21)] - head = BaseDecodeHead([32, 16], - 16, - num_classes=19, - in_index=[0, 1], - input_transform='resize_concat') - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - assert head.in_channels == 48 - assert head.input_transform == 'resize_concat' - transformed_inputs = head._transform_inputs(inputs) - assert transformed_inputs.shape == (1, 48, 45, 45) - - -def test_fcn_head(): - - with pytest.raises(AssertionError): - # num_convs must be larger than 0 - FCNHead(num_classes=19, 
num_convs=0) - - # test no norm_cfg - head = FCNHead(in_channels=32, channels=16, num_classes=19) - for m in head.modules(): - if isinstance(m, ConvModule): - assert not m.with_norm - - # test with norm_cfg - head = FCNHead( - in_channels=32, - channels=16, - num_classes=19, - norm_cfg=dict(type='SyncBN')) - for m in head.modules(): - if isinstance(m, ConvModule): - assert m.with_norm and isinstance(m.bn, SyncBatchNorm) - - # test concat_input=False - inputs = [torch.randn(1, 32, 45, 45)] - head = FCNHead( - in_channels=32, channels=16, num_classes=19, concat_input=False) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - assert len(head.convs) == 2 - assert not head.concat_input and not hasattr(head, 'conv_cat') - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) - - # test concat_input=True - inputs = [torch.randn(1, 32, 45, 45)] - head = FCNHead( - in_channels=32, channels=16, num_classes=19, concat_input=True) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - assert len(head.convs) == 2 - assert head.concat_input - assert head.conv_cat.in_channels == 48 - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) - - # test kernel_size=3 - inputs = [torch.randn(1, 32, 45, 45)] - head = FCNHead(in_channels=32, channels=16, num_classes=19) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - for i in range(len(head.convs)): - assert head.convs[i].kernel_size == (3, 3) - assert head.convs[i].padding == 1 - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) - - # test kernel_size=1 - inputs = [torch.randn(1, 32, 45, 45)] - head = FCNHead(in_channels=32, channels=16, num_classes=19, kernel_size=1) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - for i in range(len(head.convs)): - assert head.convs[i].kernel_size == (1, 1) - assert head.convs[i].padding == 0 - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) - - # test num_conv - inputs = [torch.randn(1, 32, 45, 45)] - head = FCNHead(in_channels=32, channels=16, num_classes=19, num_convs=1) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - assert len(head.convs) == 1 - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) - - -def test_psp_head(): - - with pytest.raises(AssertionError): - # pool_scales must be list|tuple - PSPHead(in_channels=32, channels=16, num_classes=19, pool_scales=1) - - # test no norm_cfg - head = PSPHead(in_channels=32, channels=16, num_classes=19) - assert not _conv_has_norm(head, sync_bn=False) - - # test with norm_cfg - head = PSPHead( - in_channels=32, - channels=16, - num_classes=19, - norm_cfg=dict(type='SyncBN')) - assert _conv_has_norm(head, sync_bn=True) - - inputs = [torch.randn(1, 32, 45, 45)] - head = PSPHead( - in_channels=32, channels=16, num_classes=19, pool_scales=(1, 2, 3)) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - assert head.psp_modules[0][0].output_size == 1 - assert head.psp_modules[1][0].output_size == 2 - assert head.psp_modules[2][0].output_size == 3 - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) - - -def test_aspp_head(): - - with pytest.raises(AssertionError): - # pool_scales must be list|tuple - ASPPHead(in_channels=32, channels=16, num_classes=19, dilations=1) - - # test no norm_cfg - head = ASPPHead(in_channels=32, channels=16, num_classes=19) - assert not _conv_has_norm(head, 
sync_bn=False) - - # test with norm_cfg - head = ASPPHead( - in_channels=32, - channels=16, - num_classes=19, - norm_cfg=dict(type='SyncBN')) - assert _conv_has_norm(head, sync_bn=True) - - inputs = [torch.randn(1, 32, 45, 45)] - head = ASPPHead( - in_channels=32, channels=16, num_classes=19, dilations=(1, 12, 24)) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - assert head.aspp_modules[0].conv.dilation == (1, 1) - assert head.aspp_modules[1].conv.dilation == (12, 12) - assert head.aspp_modules[2].conv.dilation == (24, 24) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) - - -def test_psa_head(): - - with pytest.raises(AssertionError): - # psa_type must be in 'bi-direction', 'collect', 'distribute' - PSAHead( - in_channels=32, - channels=16, - num_classes=19, - mask_size=(39, 39), - psa_type='gather') - - # test no norm_cfg - head = PSAHead( - in_channels=32, channels=16, num_classes=19, mask_size=(39, 39)) - assert not _conv_has_norm(head, sync_bn=False) - - # test with norm_cfg - head = PSAHead( - in_channels=32, - channels=16, - num_classes=19, - mask_size=(39, 39), - norm_cfg=dict(type='SyncBN')) - assert _conv_has_norm(head, sync_bn=True) - - # test 'bi-direction' psa_type - inputs = [torch.randn(1, 32, 39, 39)] - head = PSAHead( - in_channels=32, channels=16, num_classes=19, mask_size=(39, 39)) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 39, 39) - - # test 'bi-direction' psa_type, shrink_factor=1 - inputs = [torch.randn(1, 32, 39, 39)] - head = PSAHead( - in_channels=32, - channels=16, - num_classes=19, - mask_size=(39, 39), - shrink_factor=1) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 39, 39) - - # test 'bi-direction' psa_type with soft_max - inputs = [torch.randn(1, 32, 39, 39)] - head = PSAHead( - in_channels=32, - channels=16, - num_classes=19, - mask_size=(39, 39), - psa_softmax=True) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 39, 39) - - # test 'collect' psa_type - inputs = [torch.randn(1, 32, 39, 39)] - head = PSAHead( - in_channels=32, - channels=16, - num_classes=19, - mask_size=(39, 39), - psa_type='collect') - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 39, 39) - - # test 'collect' psa_type, shrink_factor=1 - inputs = [torch.randn(1, 32, 39, 39)] - head = PSAHead( - in_channels=32, - channels=16, - num_classes=19, - mask_size=(39, 39), - shrink_factor=1, - psa_type='collect') - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 39, 39) - - # test 'collect' psa_type, shrink_factor=1, compact=True - inputs = [torch.randn(1, 32, 39, 39)] - head = PSAHead( - in_channels=32, - channels=16, - num_classes=19, - mask_size=(39, 39), - psa_type='collect', - shrink_factor=1, - compact=True) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 39, 39) - - # test 'distribute' psa_type - inputs = [torch.randn(1, 32, 39, 39)] - head = PSAHead( - in_channels=32, - channels=16, - num_classes=19, - mask_size=(39, 39), - psa_type='distribute') - if 
torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 39, 39) - - -def test_gc_head(): - head = GCHead(in_channels=32, channels=16, num_classes=19) - assert len(head.convs) == 2 - assert hasattr(head, 'gc_block') - inputs = [torch.randn(1, 32, 45, 45)] - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) - - -def test_nl_head(): - head = NLHead(in_channels=32, channels=16, num_classes=19) - assert len(head.convs) == 2 - assert hasattr(head, 'nl_block') - inputs = [torch.randn(1, 32, 45, 45)] - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) - - -def test_cc_head(): - head = CCHead(in_channels=32, channels=16, num_classes=19) - assert len(head.convs) == 2 - assert hasattr(head, 'cca') - if not torch.cuda.is_available(): - pytest.skip('CCHead requires CUDA') - inputs = [torch.randn(1, 32, 45, 45)] - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) - - -def test_uper_head(): - - with pytest.raises(AssertionError): - # fpn_in_channels must be list|tuple - UPerHead(in_channels=32, channels=16, num_classes=19) - - # test no norm_cfg - head = UPerHead( - in_channels=[32, 16], channels=16, num_classes=19, in_index=[-2, -1]) - assert not _conv_has_norm(head, sync_bn=False) - - # test with norm_cfg - head = UPerHead( - in_channels=[32, 16], - channels=16, - num_classes=19, - norm_cfg=dict(type='SyncBN'), - in_index=[-2, -1]) - assert _conv_has_norm(head, sync_bn=True) - - inputs = [torch.randn(1, 32, 45, 45), torch.randn(1, 16, 21, 21)] - head = UPerHead( - in_channels=[32, 16], channels=16, num_classes=19, in_index=[-2, -1]) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) - - -def test_ann_head(): - - inputs = [torch.randn(1, 16, 45, 45), torch.randn(1, 32, 21, 21)] - head = ANNHead( - in_channels=[16, 32], - channels=16, - num_classes=19, - in_index=[-2, -1], - project_channels=8) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 21, 21) - - -def test_da_head(): - - inputs = [torch.randn(1, 32, 45, 45)] - head = DAHead(in_channels=32, channels=16, num_classes=19, pam_channels=8) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert isinstance(outputs, tuple) and len(outputs) == 3 - for output in outputs: - assert output.shape == (1, head.num_classes, 45, 45) - test_output = head.forward_test(inputs, None, None) - assert test_output.shape == (1, head.num_classes, 45, 45) - - -def test_ocr_head(): - - inputs = [torch.randn(1, 32, 45, 45)] - ocr_head = OCRHead( - in_channels=32, channels=16, num_classes=19, ocr_channels=8) - fcn_head = FCNHead(in_channels=32, channels=16, num_classes=19) - if torch.cuda.is_available(): - head, inputs = to_cuda(ocr_head, inputs) - head, inputs = to_cuda(fcn_head, inputs) - prev_output = fcn_head(inputs) - output = ocr_head(inputs, prev_output) - assert output.shape == (1, ocr_head.num_classes, 45, 45) - - -def test_enc_head(): - # with se_loss, w.o. 
lateral - inputs = [torch.randn(1, 32, 21, 21)] - head = EncHead( - in_channels=[32], channels=16, num_classes=19, in_index=[-1]) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert isinstance(outputs, tuple) and len(outputs) == 2 - assert outputs[0].shape == (1, head.num_classes, 21, 21) - assert outputs[1].shape == (1, head.num_classes) - - # w.o se_loss, w.o. lateral - inputs = [torch.randn(1, 32, 21, 21)] - head = EncHead( - in_channels=[32], - channels=16, - use_se_loss=False, - num_classes=19, - in_index=[-1]) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 21, 21) - - # with se_loss, with lateral - inputs = [torch.randn(1, 16, 45, 45), torch.randn(1, 32, 21, 21)] - head = EncHead( - in_channels=[16, 32], - channels=16, - add_lateral=True, - num_classes=19, - in_index=[-2, -1]) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - outputs = head(inputs) - assert isinstance(outputs, tuple) and len(outputs) == 2 - assert outputs[0].shape == (1, head.num_classes, 21, 21) - assert outputs[1].shape == (1, head.num_classes) - test_output = head.forward_test(inputs, None, None) - assert test_output.shape == (1, head.num_classes, 21, 21) - - -def test_dw_aspp_head(): - - # test w.o. c1 - inputs = [torch.randn(1, 32, 45, 45)] - head = DepthwiseSeparableASPPHead( - c1_in_channels=0, - c1_channels=0, - in_channels=32, - channels=16, - num_classes=19, - dilations=(1, 12, 24)) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - assert head.c1_bottleneck is None - assert head.aspp_modules[0].conv.dilation == (1, 1) - assert head.aspp_modules[1].depthwise_conv.dilation == (12, 12) - assert head.aspp_modules[2].depthwise_conv.dilation == (24, 24) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) - - # test with c1 - inputs = [torch.randn(1, 8, 45, 45), torch.randn(1, 32, 21, 21)] - head = DepthwiseSeparableASPPHead( - c1_in_channels=8, - c1_channels=4, - in_channels=32, - channels=16, - num_classes=19, - dilations=(1, 12, 24)) - if torch.cuda.is_available(): - head, inputs = to_cuda(head, inputs) - assert head.c1_bottleneck.in_channels == 8 - assert head.c1_bottleneck.out_channels == 4 - assert head.aspp_modules[0].conv.dilation == (1, 1) - assert head.aspp_modules[1].depthwise_conv.dilation == (12, 12) - assert head.aspp_modules[2].depthwise_conv.dilation == (24, 24) - outputs = head(inputs) - assert outputs.shape == (1, head.num_classes, 45, 45) diff --git a/tests/test_models/test_heads/__init__.py b/tests/test_models/test_heads/__init__.py new file mode 100644 index 0000000000..ef101fec61 --- /dev/null +++ b/tests/test_models/test_heads/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/tests/test_models/test_heads/test_ann_head.py b/tests/test_models/test_heads/test_ann_head.py new file mode 100644 index 0000000000..c1e44bc685 --- /dev/null +++ b/tests/test_models/test_heads/test_ann_head.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + +from mmseg.models.decode_heads import ANNHead +from .utils import to_cuda + + +def test_ann_head(): + + inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 8, 21, 21)] + head = ANNHead( + in_channels=[4, 8], + channels=2, + num_classes=19, + in_index=[-2, -1], + project_channels=8) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 21, 21) diff --git a/tests/test_models/test_heads/test_apc_head.py b/tests/test_models/test_heads/test_apc_head.py new file mode 100644 index 0000000000..dc55ccc1d5 --- /dev/null +++ b/tests/test_models/test_heads/test_apc_head.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.decode_heads import APCHead +from .utils import _conv_has_norm, to_cuda + + +def test_apc_head(): + + with pytest.raises(AssertionError): + # pool_scales must be list|tuple + APCHead(in_channels=8, channels=2, num_classes=19, pool_scales=1) + + # test no norm_cfg + head = APCHead(in_channels=8, channels=2, num_classes=19) + assert not _conv_has_norm(head, sync_bn=False) + + # test with norm_cfg + head = APCHead( + in_channels=8, + channels=2, + num_classes=19, + norm_cfg=dict(type='SyncBN')) + assert _conv_has_norm(head, sync_bn=True) + + # fusion=True + inputs = [torch.randn(1, 8, 45, 45)] + head = APCHead( + in_channels=8, + channels=2, + num_classes=19, + pool_scales=(1, 2, 3), + fusion=True) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert head.fusion is True + assert head.acm_modules[0].pool_scale == 1 + assert head.acm_modules[1].pool_scale == 2 + assert head.acm_modules[2].pool_scale == 3 + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 45, 45) + + # fusion=False + inputs = [torch.randn(1, 8, 45, 45)] + head = APCHead( + in_channels=8, + channels=2, + num_classes=19, + pool_scales=(1, 2, 3), + fusion=False) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert head.fusion is False + assert head.acm_modules[0].pool_scale == 1 + assert head.acm_modules[1].pool_scale == 2 + assert head.acm_modules[2].pool_scale == 3 + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 45, 45) diff --git a/tests/test_models/test_heads/test_aspp_head.py b/tests/test_models/test_heads/test_aspp_head.py new file mode 100644 index 0000000000..db9e89324f --- /dev/null +++ b/tests/test_models/test_heads/test_aspp_head.py @@ -0,0 +1,76 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmseg.models.decode_heads import ASPPHead, DepthwiseSeparableASPPHead +from .utils import _conv_has_norm, to_cuda + + +def test_aspp_head(): + + with pytest.raises(AssertionError): + # pool_scales must be list|tuple + ASPPHead(in_channels=8, channels=4, num_classes=19, dilations=1) + + # test no norm_cfg + head = ASPPHead(in_channels=8, channels=4, num_classes=19) + assert not _conv_has_norm(head, sync_bn=False) + + # test with norm_cfg + head = ASPPHead( + in_channels=8, + channels=4, + num_classes=19, + norm_cfg=dict(type='SyncBN')) + assert _conv_has_norm(head, sync_bn=True) + + inputs = [torch.randn(1, 8, 45, 45)] + head = ASPPHead( + in_channels=8, channels=4, num_classes=19, dilations=(1, 12, 24)) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert head.aspp_modules[0].conv.dilation == (1, 1) + assert head.aspp_modules[1].conv.dilation == (12, 12) + assert head.aspp_modules[2].conv.dilation == (24, 24) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 45, 45) + + +def test_dw_aspp_head(): + + # test w.o. c1 + inputs = [torch.randn(1, 8, 45, 45)] + head = DepthwiseSeparableASPPHead( + c1_in_channels=0, + c1_channels=0, + in_channels=8, + channels=4, + num_classes=19, + dilations=(1, 12, 24)) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert head.c1_bottleneck is None + assert head.aspp_modules[0].conv.dilation == (1, 1) + assert head.aspp_modules[1].depthwise_conv.dilation == (12, 12) + assert head.aspp_modules[2].depthwise_conv.dilation == (24, 24) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 45, 45) + + # test with c1 + inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 16, 21, 21)] + head = DepthwiseSeparableASPPHead( + c1_in_channels=4, + c1_channels=2, + in_channels=16, + channels=8, + num_classes=19, + dilations=(1, 12, 24)) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert head.c1_bottleneck.in_channels == 4 + assert head.c1_bottleneck.out_channels == 2 + assert head.aspp_modules[0].conv.dilation == (1, 1) + assert head.aspp_modules[1].depthwise_conv.dilation == (12, 12) + assert head.aspp_modules[2].depthwise_conv.dilation == (24, 24) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 45, 45) diff --git a/tests/test_models/test_heads/test_cc_head.py b/tests/test_models/test_heads/test_cc_head.py new file mode 100644 index 0000000000..06304172db --- /dev/null +++ b/tests/test_models/test_heads/test_cc_head.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.decode_heads import CCHead +from .utils import to_cuda + + +def test_cc_head(): + head = CCHead(in_channels=16, channels=8, num_classes=19) + assert len(head.convs) == 2 + assert hasattr(head, 'cca') + if not torch.cuda.is_available(): + pytest.skip('CCHead requires CUDA') + inputs = [torch.randn(1, 16, 23, 23)] + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) diff --git a/tests/test_models/test_heads/test_decode_head.py b/tests/test_models/test_heads/test_decode_head.py new file mode 100644 index 0000000000..88e6bed10f --- /dev/null +++ b/tests/test_models/test_heads/test_decode_head.py @@ -0,0 +1,193 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest.mock import patch + +import pytest +import torch +from mmengine.structures import PixelData + +from mmseg.models.decode_heads.decode_head import BaseDecodeHead +from mmseg.structures import SegDataSample +from .utils import to_cuda + + +@patch.multiple(BaseDecodeHead, __abstractmethods__=set()) +def test_decode_head(): + + with pytest.raises(AssertionError): + # default input_transform doesn't accept multiple inputs + BaseDecodeHead([32, 16], 16, num_classes=19) + + with pytest.raises(AssertionError): + # default input_transform doesn't accept multiple inputs + BaseDecodeHead(32, 16, num_classes=19, in_index=[-1, -2]) + + with pytest.raises(AssertionError): + # supported mode is resize_concat only + BaseDecodeHead(32, 16, num_classes=19, input_transform='concat') + + with pytest.raises(AssertionError): + # in_channels should be list|tuple + BaseDecodeHead(32, 16, num_classes=19, input_transform='resize_concat') + + with pytest.raises(AssertionError): + # in_index should be list|tuple + BaseDecodeHead([32], + 16, + in_index=-1, + num_classes=19, + input_transform='resize_concat') + + with pytest.raises(AssertionError): + # len(in_index) should equal len(in_channels) + BaseDecodeHead([32, 16], + 16, + num_classes=19, + in_index=[-1], + input_transform='resize_concat') + + with pytest.raises(ValueError): + # out_channels should be equal to num_classes + BaseDecodeHead(32, 16, num_classes=19, out_channels=18) + + # test out_channels + head = BaseDecodeHead(32, 16, num_classes=2) + assert head.out_channels == 2 + + # test out_channels == 1 and num_classes == 2 + head = BaseDecodeHead(32, 16, num_classes=2, out_channels=1) + assert head.out_channels == 1 and head.num_classes == 2 + + # test default dropout + head = BaseDecodeHead(32, 16, num_classes=19) + assert hasattr(head, 'dropout') and head.dropout.p == 0.1 + + # test set dropout + head = BaseDecodeHead(32, 16, num_classes=19, dropout_ratio=0.2) + assert hasattr(head, 'dropout') and head.dropout.p == 0.2 + + # test no input_transform + inputs = [torch.randn(1, 32, 45, 45)] + head = BaseDecodeHead(32, 16, num_classes=19) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert head.in_channels == 32 + assert head.input_transform is None + transformed_inputs = head._transform_inputs(inputs) + assert transformed_inputs.shape == (1, 32, 45, 45) + + # test input_transform = resize_concat + inputs = [torch.randn(1, 32, 45, 45), torch.randn(1, 16, 21, 21)] + head = BaseDecodeHead([32, 16], + 16, + num_classes=19, + in_index=[0, 1], + input_transform='resize_concat') + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert head.in_channels == 48 + assert head.input_transform == 'resize_concat' + transformed_inputs = head._transform_inputs(inputs) + assert transformed_inputs.shape == (1, 48, 45, 45) + + # test multi-loss, loss_decode is dict + with pytest.raises(TypeError): + # loss_decode must be a dict or sequence of dict. 
+ BaseDecodeHead(3, 16, num_classes=19, loss_decode=['CrossEntropyLoss']) + + inputs = torch.randn(2, 19, 8, 8).float() + data_samples = [ + SegDataSample(gt_sem_seg=PixelData(data=torch.ones(64, 64).long())) + for _ in range(2) + ] + + head = BaseDecodeHead( + 3, + 16, + num_classes=19, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + loss = head.loss_by_feat( + seg_logits=inputs, batch_data_samples=data_samples) + assert 'loss_ce' in loss + + # test multi-loss, loss_decode is list of dict + inputs = torch.randn(2, 19, 8, 8).float() + data_samples = [ + SegDataSample(gt_sem_seg=PixelData(data=torch.ones(64, 64).long())) + for _ in range(2) + ] + head = BaseDecodeHead( + 3, + 16, + num_classes=19, + loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_1'), + dict(type='CrossEntropyLoss', loss_name='loss_2') + ]) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + + loss = head.loss_by_feat( + seg_logits=inputs, batch_data_samples=data_samples) + assert 'loss_1' in loss + assert 'loss_2' in loss + + # 'loss_decode' must be a dict or sequence of dict + with pytest.raises(TypeError): + BaseDecodeHead(3, 16, num_classes=19, loss_decode=['CrossEntropyLoss']) + with pytest.raises(TypeError): + BaseDecodeHead(3, 16, num_classes=19, loss_decode=0) + + # test multi-loss, loss_decode is list of dict + inputs = torch.randn(2, 19, 8, 8).float() + data_samples = [ + SegDataSample(gt_sem_seg=PixelData(data=torch.ones(64, 64).long())) + for _ in range(2) + ] + head = BaseDecodeHead( + 3, + 16, + num_classes=19, + loss_decode=(dict(type='CrossEntropyLoss', loss_name='loss_1'), + dict(type='CrossEntropyLoss', loss_name='loss_2'), + dict(type='CrossEntropyLoss', loss_name='loss_3'))) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + loss = head.loss_by_feat( + seg_logits=inputs, batch_data_samples=data_samples) + assert 'loss_1' in loss + assert 'loss_2' in loss + assert 'loss_3' in loss + + # test multi-loss, loss_decode is list of dict, names of them are identical + inputs = torch.randn(2, 19, 8, 8).float() + data_samples = [ + SegDataSample(gt_sem_seg=PixelData(data=torch.ones(64, 64).long())) + for _ in range(2) + ] + head = BaseDecodeHead( + 3, + 16, + num_classes=19, + loss_decode=(dict(type='CrossEntropyLoss', loss_name='loss_ce'), + dict(type='CrossEntropyLoss', loss_name='loss_ce'), + dict(type='CrossEntropyLoss', loss_name='loss_ce'))) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + loss_3 = head.loss_by_feat( + seg_logits=inputs, batch_data_samples=data_samples) + + head = BaseDecodeHead( + 3, + 16, + num_classes=19, + loss_decode=(dict(type='CrossEntropyLoss', loss_name='loss_ce'))) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + loss = head.loss_by_feat( + seg_logits=inputs, batch_data_samples=data_samples) + assert 'loss_ce' in loss + assert 'loss_ce' in loss_3 + assert loss_3['loss_ce'] == 3 * loss['loss_ce'] diff --git a/tests/test_models/test_heads/test_dm_head.py b/tests/test_models/test_heads/test_dm_head.py new file mode 100644 index 0000000000..a922ff7295 --- /dev/null +++ b/tests/test_models/test_heads/test_dm_head.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmseg.models.decode_heads import DMHead +from .utils import _conv_has_norm, to_cuda + + +def test_dm_head(): + + with pytest.raises(AssertionError): + # filter_sizes must be list|tuple + DMHead(in_channels=8, channels=4, num_classes=19, filter_sizes=1) + + # test no norm_cfg + head = DMHead(in_channels=8, channels=4, num_classes=19) + assert not _conv_has_norm(head, sync_bn=False) + + # test with norm_cfg + head = DMHead( + in_channels=8, + channels=4, + num_classes=19, + norm_cfg=dict(type='SyncBN')) + assert _conv_has_norm(head, sync_bn=True) + + # fusion=True + inputs = [torch.randn(1, 8, 23, 23)] + head = DMHead( + in_channels=8, + channels=4, + num_classes=19, + filter_sizes=(1, 3, 5), + fusion=True) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert head.fusion is True + assert head.dcm_modules[0].filter_size == 1 + assert head.dcm_modules[1].filter_size == 3 + assert head.dcm_modules[2].filter_size == 5 + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) + + # fusion=False + inputs = [torch.randn(1, 8, 23, 23)] + head = DMHead( + in_channels=8, + channels=4, + num_classes=19, + filter_sizes=(1, 3, 5), + fusion=False) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert head.fusion is False + assert head.dcm_modules[0].filter_size == 1 + assert head.dcm_modules[1].filter_size == 3 + assert head.dcm_modules[2].filter_size == 5 + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) diff --git a/tests/test_models/test_heads/test_dnl_head.py b/tests/test_models/test_heads/test_dnl_head.py new file mode 100644 index 0000000000..720cb07fc6 --- /dev/null +++ b/tests/test_models/test_heads/test_dnl_head.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + +from mmseg.models.decode_heads import DNLHead +from .utils import to_cuda + + +def test_dnl_head(): + # DNL with 'embedded_gaussian' mode + head = DNLHead(in_channels=8, channels=4, num_classes=19) + assert len(head.convs) == 2 + assert hasattr(head, 'dnl_block') + assert head.dnl_block.temperature == 0.05 + inputs = [torch.randn(1, 8, 23, 23)] + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) + + # NonLocal2d with 'dot_product' mode + head = DNLHead( + in_channels=8, channels=4, num_classes=19, mode='dot_product') + inputs = [torch.randn(1, 8, 23, 23)] + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) + + # NonLocal2d with 'gaussian' mode + head = DNLHead(in_channels=8, channels=4, num_classes=19, mode='gaussian') + inputs = [torch.randn(1, 8, 23, 23)] + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) + + # NonLocal2d with 'concatenation' mode + head = DNLHead( + in_channels=8, channels=4, num_classes=19, mode='concatenation') + inputs = [torch.randn(1, 8, 23, 23)] + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) diff --git a/tests/test_models/test_heads/test_dpt_head.py b/tests/test_models/test_heads/test_dpt_head.py new file mode 100644 index 0000000000..0a6af610e1 --- /dev/null +++ b/tests/test_models/test_heads/test_dpt_head.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.decode_heads import DPTHead + + +def test_dpt_head(): + + with pytest.raises(AssertionError): + # input_transform must be 'multiple_select' + head = DPTHead( + in_channels=[768, 768, 768, 768], + channels=4, + num_classes=19, + in_index=[0, 1, 2, 3]) + + head = DPTHead( + in_channels=[768, 768, 768, 768], + channels=4, + num_classes=19, + in_index=[0, 1, 2, 3], + input_transform='multiple_select') + + inputs = [[torch.randn(4, 768, 2, 2), + torch.randn(4, 768)] for _ in range(4)] + output = head(inputs) + assert output.shape == torch.Size((4, 19, 16, 16)) + + # test readout operation + head = DPTHead( + in_channels=[768, 768, 768, 768], + channels=4, + num_classes=19, + in_index=[0, 1, 2, 3], + input_transform='multiple_select', + readout_type='add') + output = head(inputs) + assert output.shape == torch.Size((4, 19, 16, 16)) + + head = DPTHead( + in_channels=[768, 768, 768, 768], + channels=4, + num_classes=19, + in_index=[0, 1, 2, 3], + input_transform='multiple_select', + readout_type='project') + output = head(inputs) + assert output.shape == torch.Size((4, 19, 16, 16)) diff --git a/tests/test_models/test_heads/test_ema_head.py b/tests/test_models/test_heads/test_ema_head.py new file mode 100644 index 0000000000..1811cd2bb2 --- /dev/null +++ b/tests/test_models/test_heads/test_ema_head.py @@ -0,0 +1,23 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + +from mmseg.models.decode_heads import EMAHead +from .utils import to_cuda + + +def test_emanet_head(): + head = EMAHead( + in_channels=4, + ema_channels=3, + channels=2, + num_stages=3, + num_bases=2, + num_classes=19) + for param in head.ema_mid_conv.parameters(): + assert not param.requires_grad + assert hasattr(head, 'ema_module') + inputs = [torch.randn(1, 4, 23, 23)] + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) diff --git a/tests/test_models/test_heads/test_fcn_head.py b/tests/test_models/test_heads/test_fcn_head.py new file mode 100644 index 0000000000..664b543e07 --- /dev/null +++ b/tests/test_models/test_heads/test_fcn_head.py @@ -0,0 +1,131 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmengine.utils.dl_utils.parrots_wrapper import SyncBatchNorm + +from mmseg.models.decode_heads import DepthwiseSeparableFCNHead, FCNHead +from .utils import to_cuda + + +def test_fcn_head(): + + with pytest.raises(AssertionError): + # num_convs must be not less than 0 + FCNHead(num_classes=19, num_convs=-1) + + # test no norm_cfg + head = FCNHead(in_channels=8, channels=4, num_classes=19) + for m in head.modules(): + if isinstance(m, ConvModule): + assert not m.with_norm + + # test with norm_cfg + head = FCNHead( + in_channels=8, + channels=4, + num_classes=19, + norm_cfg=dict(type='SyncBN')) + for m in head.modules(): + if isinstance(m, ConvModule): + assert m.with_norm and isinstance(m.bn, SyncBatchNorm) + + # test concat_input=False + inputs = [torch.randn(1, 8, 23, 23)] + head = FCNHead( + in_channels=8, channels=4, num_classes=19, concat_input=False) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert len(head.convs) == 2 + assert not head.concat_input and not hasattr(head, 'conv_cat') + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) + + # test concat_input=True + inputs = [torch.randn(1, 8, 23, 23)] + head = FCNHead( + in_channels=8, channels=4, num_classes=19, concat_input=True) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert len(head.convs) == 2 + assert head.concat_input + assert head.conv_cat.in_channels == 12 + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) + + # test kernel_size=3 + inputs = [torch.randn(1, 8, 23, 23)] + head = FCNHead(in_channels=8, channels=4, num_classes=19) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + for i in range(len(head.convs)): + assert head.convs[i].kernel_size == (3, 3) + assert head.convs[i].padding == 1 + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) + + # test kernel_size=1 + inputs = [torch.randn(1, 8, 23, 23)] + head = FCNHead(in_channels=8, channels=4, num_classes=19, kernel_size=1) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + for i in range(len(head.convs)): + assert head.convs[i].kernel_size == (1, 1) + assert head.convs[i].padding == 0 + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) + + # test num_conv + inputs = [torch.randn(1, 8, 23, 23)] + head = FCNHead(in_channels=8, channels=4, num_classes=19, num_convs=1) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert len(head.convs) == 1 + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 
23, 23) + + # test num_conv = 0 + inputs = [torch.randn(1, 8, 23, 23)] + head = FCNHead( + in_channels=8, + channels=8, + num_classes=19, + num_convs=0, + concat_input=False) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert isinstance(head.convs, torch.nn.Identity) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) + + +def test_sep_fcn_head(): + # test sep_fcn_head with concat_input=False + head = DepthwiseSeparableFCNHead( + in_channels=128, + channels=128, + concat_input=False, + num_classes=19, + in_index=-1, + norm_cfg=dict(type='BN', requires_grad=True, momentum=0.01)) + x = [torch.rand(2, 128, 8, 8)] + output = head(x) + assert output.shape == (2, head.num_classes, 8, 8) + assert not head.concat_input + assert isinstance(head.convs[0], DepthwiseSeparableConvModule) + assert isinstance(head.convs[1], DepthwiseSeparableConvModule) + assert head.conv_seg.kernel_size == (1, 1) + + head = DepthwiseSeparableFCNHead( + in_channels=64, + channels=64, + concat_input=True, + num_classes=19, + in_index=-1, + norm_cfg=dict(type='BN', requires_grad=True, momentum=0.01)) + x = [torch.rand(3, 64, 8, 8)] + output = head(x) + assert output.shape == (3, head.num_classes, 8, 8) + assert head.concat_input + assert isinstance(head.convs[0], DepthwiseSeparableConvModule) + assert isinstance(head.convs[1], DepthwiseSeparableConvModule) diff --git a/tests/test_models/test_heads/test_gc_head.py b/tests/test_models/test_heads/test_gc_head.py new file mode 100644 index 0000000000..c62ac9ae74 --- /dev/null +++ b/tests/test_models/test_heads/test_gc_head.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmseg.models.decode_heads import GCHead +from .utils import to_cuda + + +def test_gc_head(): + head = GCHead(in_channels=4, channels=4, num_classes=19) + assert len(head.convs) == 2 + assert hasattr(head, 'gc_block') + inputs = [torch.randn(1, 4, 23, 23)] + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) diff --git a/tests/test_models/test_heads/test_isa_head.py b/tests/test_models/test_heads/test_isa_head.py new file mode 100644 index 0000000000..b177f6d23e --- /dev/null +++ b/tests/test_models/test_heads/test_isa_head.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmseg.models.decode_heads import ISAHead +from .utils import to_cuda + + +def test_isa_head(): + + inputs = [torch.randn(1, 8, 23, 23)] + isa_head = ISAHead( + in_channels=8, + channels=4, + num_classes=19, + isa_channels=4, + down_factor=(8, 8)) + if torch.cuda.is_available(): + isa_head, inputs = to_cuda(isa_head, inputs) + output = isa_head(inputs) + assert output.shape == (1, isa_head.num_classes, 23, 23) diff --git a/tests/test_models/test_heads/test_lraspp_head.py b/tests/test_models/test_heads/test_lraspp_head.py new file mode 100644 index 0000000000..a46e6a19a2 --- /dev/null +++ b/tests/test_models/test_heads/test_lraspp_head.py @@ -0,0 +1,68 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmseg.models.decode_heads import LRASPPHead + + +def test_lraspp_head(): + with pytest.raises(ValueError): + # check invalid input_transform + LRASPPHead( + in_channels=(4, 4, 123), + in_index=(0, 1, 2), + channels=32, + input_transform='resize_concat', + dropout_ratio=0.1, + num_classes=19, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + + with pytest.raises(AssertionError): + # check invalid branch_channels + LRASPPHead( + in_channels=(4, 4, 123), + in_index=(0, 1, 2), + channels=32, + branch_channels=64, + input_transform='multiple_select', + dropout_ratio=0.1, + num_classes=19, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + + # test with default settings + lraspp_head = LRASPPHead( + in_channels=(4, 4, 123), + in_index=(0, 1, 2), + channels=32, + input_transform='multiple_select', + dropout_ratio=0.1, + num_classes=19, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + inputs = [ + torch.randn(2, 4, 45, 45), + torch.randn(2, 4, 28, 28), + torch.randn(2, 123, 14, 14) + ] + with pytest.raises(RuntimeError): + # check invalid inputs + output = lraspp_head(inputs) + + inputs = [ + torch.randn(2, 4, 111, 111), + torch.randn(2, 4, 77, 77), + torch.randn(2, 123, 55, 55) + ] + output = lraspp_head(inputs) + assert output.shape == (2, 19, 111, 111) diff --git a/tests/test_models/test_heads/test_mask2former_head.py b/tests/test_models/test_heads/test_mask2former_head.py new file mode 100644 index 0000000000..079e94ed97 --- /dev/null +++ b/tests/test_models/test_heads/test_mask2former_head.py @@ -0,0 +1,160 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +from mmengine import Config +from mmengine.structures import PixelData + +from mmseg.models.decode_heads import Mask2FormerHead +from mmseg.structures import SegDataSample +from mmseg.utils import SampleList +from .utils import to_cuda + + +def test_mask2former_head(): + num_classes = 19 + cfg = dict( + in_channels=[96, 192, 384, 768], + strides=[4, 8, 16, 32], + feat_channels=256, + out_channels=256, + num_classes=num_classes, + num_queries=100, + num_transformer_feat_level=3, + align_corners=False, + pixel_decoder=dict( + type='mmdet.MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='mmdet.DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='mmdet.BaseTransformerLayer', + attn_cfgs=dict( + type='mmdet.MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True)), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='mmdet.SinePositionalEncoding', + num_feats=128, + normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='mmdet.SinePositionalEncoding', num_feats=128, + normalize=True), + transformer_decoder=dict( + type='mmdet.DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='mmdet.DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='mmdet.MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[1.0] * num_classes + [0.1]), + loss_mask=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='mmdet.DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='mmdet.HungarianAssigner', + match_costs=[ + dict(type='mmdet.ClassificationCost', weight=2.0), + dict( + type='mmdet.CrossEntropyLossCost', + weight=5.0, + use_sigmoid=True), + dict( + type='mmdet.DiceCost', + weight=5.0, + pred_act=True, + eps=1.0) + ]), + sampler=dict(type='mmdet.MaskPseudoSampler'))) + cfg = Config(cfg) + head = Mask2FormerHead(**cfg) + + inputs = [ + torch.rand((2, 96, 8, 8)), + torch.rand((2, 192, 4, 4)), + torch.rand((2, 384, 2, 2)), + torch.rand((2, 768, 1, 1)) + ] + + data_samples: SampleList = [] + for i in range(2): + data_sample = SegDataSample() + img_meta = {} + img_meta['img_shape'] = (32, 32) + img_meta['ori_shape'] = (32, 32) + data_sample.gt_sem_seg = PixelData( + data=torch.randint(0, num_classes, (1, 32, 32))) + data_sample.set_metainfo(img_meta) + data_samples.append(data_sample) + + if 
torch.cuda.is_available():
+        head, inputs = to_cuda(head, inputs)
+        for data_sample in data_samples:
+            data_sample.gt_sem_seg.data = data_sample.gt_sem_seg.data.cuda()
+
+    loss_dict = head.loss(inputs, data_samples, None)
+    assert isinstance(loss_dict, dict)
+
+    batch_img_metas = []
+    for data_sample in data_samples:
+        batch_img_metas.append(data_sample.metainfo)
+
+    seg_logits = head.predict(inputs, batch_img_metas, None)
+    assert seg_logits.shape == torch.Size((2, num_classes, 32, 32))
diff --git a/tests/test_models/test_heads/test_maskformer_head.py b/tests/test_models/test_heads/test_maskformer_head.py
new file mode 100644
index 0000000000..fe4bf96fea
--- /dev/null
+++ b/tests/test_models/test_heads/test_maskformer_head.py
@@ -0,0 +1,54 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from os.path import dirname, join
+
+import torch
+from mmengine import Config
+from mmengine.structures import PixelData
+
+from mmseg.registry import MODELS
+from mmseg.structures import SegDataSample
+from mmseg.utils import register_all_modules
+
+
+def test_maskformer_head():
+    register_all_modules()
+    repo_dpath = dirname(dirname(__file__))
+    cfg = Config.fromfile(
+        join(
+            repo_dpath,
+            '../../configs/maskformer/maskformer_r50-d32_8xb2-160k_ade20k-512x512.py'  # noqa
+        ))
+    cfg.model.train_cfg = None
+    decode_head = MODELS.build(cfg.model.decode_head)
+    inputs = (torch.randn(1, 256, 32, 32), torch.randn(1, 512, 16, 16),
+              torch.randn(1, 1024, 8, 8), torch.randn(1, 2048, 4, 4))
+    # test inference
+    batch_img_metas = [
+        dict(
+            scale_factor=(1.0, 1.0),
+            img_shape=(512, 683),
+            ori_shape=(512, 683))
+    ]
+    test_cfg = dict(mode='whole')
+    output = decode_head.predict(inputs, batch_img_metas, test_cfg)
+    assert output.shape == (1, 150, 512, 683)
+
+    # test training
+    inputs = (torch.randn(2, 256, 32, 32), torch.randn(2, 512, 16, 16),
+              torch.randn(2, 1024, 8, 8), torch.randn(2, 2048, 4, 4))
+    batch_data_samples = []
+    img_meta = {
+        'img_shape': (512, 512),
+        'ori_shape': (480, 640),
+        'pad_shape': (512, 512),
+        'scale_factor': (1.425, 1.425),
+    }
+    for _ in range(2):
+        data_sample = SegDataSample(
+            gt_sem_seg=PixelData(data=torch.ones(512, 512).long()))
+        data_sample.set_metainfo(img_meta)
+        batch_data_samples.append(data_sample)
+    train_cfg = {}
+    losses = decode_head.loss(inputs, batch_data_samples, train_cfg)
+    assert all(loss in losses.keys()
+               for loss in ('loss_cls', 'loss_mask', 'loss_dice'))
diff --git a/tests/test_models/test_heads/test_nl_head.py b/tests/test_models/test_heads/test_nl_head.py
new file mode 100644
index 0000000000..d4ef0b9db3
--- /dev/null
+++ b/tests/test_models/test_heads/test_nl_head.py
@@ -0,0 +1,16 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+
+from mmseg.models.decode_heads import NLHead
+from .utils import to_cuda
+
+
+def test_nl_head():
+    head = NLHead(in_channels=8, channels=4, num_classes=19)
+    assert len(head.convs) == 2
+    assert hasattr(head, 'nl_block')
+    inputs = [torch.randn(1, 8, 23, 23)]
+    if torch.cuda.is_available():
+        head, inputs = to_cuda(head, inputs)
+    outputs = head(inputs)
+    assert outputs.shape == (1, head.num_classes, 23, 23)
diff --git a/tests/test_models/test_heads/test_ocr_head.py b/tests/test_models/test_heads/test_ocr_head.py
new file mode 100644
index 0000000000..5e5d669b14
--- /dev/null
+++ b/tests/test_models/test_heads/test_ocr_head.py
@@ -0,0 +1,19 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch + +from mmseg.models.decode_heads import FCNHead, OCRHead +from .utils import to_cuda + + +def test_ocr_head(): + + inputs = [torch.randn(1, 8, 23, 23)] + ocr_head = OCRHead( + in_channels=8, channels=4, num_classes=19, ocr_channels=8) + fcn_head = FCNHead(in_channels=8, channels=4, num_classes=19) + if torch.cuda.is_available(): + head, inputs = to_cuda(ocr_head, inputs) + head, inputs = to_cuda(fcn_head, inputs) + prev_output = fcn_head(inputs) + output = ocr_head(inputs, prev_output) + assert output.shape == (1, ocr_head.num_classes, 23, 23) diff --git a/tests/test_models/test_heads/test_psa_head.py b/tests/test_models/test_heads/test_psa_head.py new file mode 100644 index 0000000000..34f592b026 --- /dev/null +++ b/tests/test_models/test_heads/test_psa_head.py @@ -0,0 +1,122 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.decode_heads import PSAHead +from .utils import _conv_has_norm, to_cuda + + +def test_psa_head(): + + with pytest.raises(AssertionError): + # psa_type must be in 'bi-direction', 'collect', 'distribute' + PSAHead( + in_channels=4, + channels=2, + num_classes=19, + mask_size=(13, 13), + psa_type='gather') + + # test no norm_cfg + head = PSAHead( + in_channels=4, channels=2, num_classes=19, mask_size=(13, 13)) + assert not _conv_has_norm(head, sync_bn=False) + + # test with norm_cfg + head = PSAHead( + in_channels=4, + channels=2, + num_classes=19, + mask_size=(13, 13), + norm_cfg=dict(type='SyncBN')) + assert _conv_has_norm(head, sync_bn=True) + + # test 'bi-direction' psa_type + inputs = [torch.randn(1, 4, 13, 13)] + head = PSAHead( + in_channels=4, channels=2, num_classes=19, mask_size=(13, 13)) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 13, 13) + + # test 'bi-direction' psa_type, shrink_factor=1 + inputs = [torch.randn(1, 4, 13, 13)] + head = PSAHead( + in_channels=4, + channels=2, + num_classes=19, + mask_size=(13, 13), + shrink_factor=1) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 13, 13) + + # test 'bi-direction' psa_type with soft_max + inputs = [torch.randn(1, 4, 13, 13)] + head = PSAHead( + in_channels=4, + channels=2, + num_classes=19, + mask_size=(13, 13), + psa_softmax=True) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 13, 13) + + # test 'collect' psa_type + inputs = [torch.randn(1, 4, 13, 13)] + head = PSAHead( + in_channels=4, + channels=2, + num_classes=19, + mask_size=(13, 13), + psa_type='collect') + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 13, 13) + + # test 'collect' psa_type, shrink_factor=1 + inputs = [torch.randn(1, 4, 13, 13)] + head = PSAHead( + in_channels=4, + channels=2, + num_classes=19, + mask_size=(13, 13), + shrink_factor=1, + psa_type='collect') + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 13, 13) + + # test 'collect' psa_type, shrink_factor=1, compact=True + inputs = [torch.randn(1, 4, 13, 13)] + head = PSAHead( + in_channels=4, + channels=2, + num_classes=19, + mask_size=(13, 13), + psa_type='collect', + shrink_factor=1, + compact=True) + if torch.cuda.is_available(): + head, 
inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 13, 13) + + # test 'distribute' psa_type + inputs = [torch.randn(1, 4, 13, 13)] + head = PSAHead( + in_channels=4, + channels=2, + num_classes=19, + mask_size=(13, 13), + psa_type='distribute') + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 13, 13) diff --git a/tests/test_models/test_heads/test_psp_head.py b/tests/test_models/test_heads/test_psp_head.py new file mode 100644 index 0000000000..fde4087c8e --- /dev/null +++ b/tests/test_models/test_heads/test_psp_head.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.decode_heads import PSPHead +from .utils import _conv_has_norm, to_cuda + + +def test_psp_head(): + + with pytest.raises(AssertionError): + # pool_scales must be list|tuple + PSPHead(in_channels=4, channels=2, num_classes=19, pool_scales=1) + + # test no norm_cfg + head = PSPHead(in_channels=4, channels=2, num_classes=19) + assert not _conv_has_norm(head, sync_bn=False) + + # test with norm_cfg + head = PSPHead( + in_channels=4, + channels=2, + num_classes=19, + norm_cfg=dict(type='SyncBN')) + assert _conv_has_norm(head, sync_bn=True) + + inputs = [torch.randn(1, 4, 23, 23)] + head = PSPHead( + in_channels=4, channels=2, num_classes=19, pool_scales=(1, 2, 3)) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + assert head.psp_modules[0][0].output_size == 1 + assert head.psp_modules[1][0].output_size == 2 + assert head.psp_modules[2][0].output_size == 3 + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 23, 23) diff --git a/tests/test_models/test_heads/test_segformer_head.py b/tests/test_models/test_heads/test_segformer_head.py new file mode 100644 index 0000000000..73afaba2ca --- /dev/null +++ b/tests/test_models/test_heads/test_segformer_head.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.decode_heads import SegformerHead + + +def test_segformer_head(): + with pytest.raises(AssertionError): + # `in_channels` must have same length as `in_index` + SegformerHead( + in_channels=(1, 2, 3), in_index=(0, 1), channels=5, num_classes=2) + + H, W = (64, 64) + in_channels = (32, 64, 160, 256) + shapes = [(H // 2**(i + 2), W // 2**(i + 2)) + for i in range(len(in_channels))] + model = SegformerHead( + in_channels=in_channels, + in_index=[0, 1, 2, 3], + channels=256, + num_classes=19) + + with pytest.raises(IndexError): + # in_index must match the input feature maps. + inputs = [ + torch.randn((1, in_channel, *shape)) + for in_channel, shape in zip(in_channels, shapes) + ][:3] + temp = model(inputs) + + # Normal Input + # ((1, 32, 16, 16), (1, 64, 8, 8), (1, 160, 4, 4), (1, 256, 2, 2) + inputs = [ + torch.randn((1, in_channel, *shape)) + for in_channel, shape in zip(in_channels, shapes) + ] + temp = model(inputs) + + assert temp.shape == (1, 19, H // 4, W // 4) diff --git a/tests/test_models/test_heads/test_segmenter_mask_head.py b/tests/test_models/test_heads/test_segmenter_mask_head.py new file mode 100644 index 0000000000..7b681ac15c --- /dev/null +++ b/tests/test_models/test_heads/test_segmenter_mask_head.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + +from mmseg.models.decode_heads import SegmenterMaskTransformerHead +from .utils import _conv_has_norm, to_cuda + + +def test_segmenter_mask_transformer_head(): + head = SegmenterMaskTransformerHead( + in_channels=2, + channels=2, + num_classes=150, + num_layers=2, + num_heads=3, + embed_dims=192, + dropout_ratio=0.0) + assert _conv_has_norm(head, sync_bn=True) + head.init_weights() + + inputs = [torch.randn(1, 2, 32, 32)] + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 32, 32) diff --git a/tests/test_models/test_heads/test_setr_mla_head.py b/tests/test_models/test_heads/test_setr_mla_head.py new file mode 100644 index 0000000000..301bc0bff4 --- /dev/null +++ b/tests/test_models/test_heads/test_setr_mla_head.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.decode_heads import SETRMLAHead +from .utils import to_cuda + + +def test_setr_mla_head(capsys): + + with pytest.raises(AssertionError): + # MLA requires input multiple stage feature information. + SETRMLAHead(in_channels=8, channels=4, num_classes=19, in_index=1) + + with pytest.raises(AssertionError): + # multiple in_indexs requires multiple in_channels. + SETRMLAHead( + in_channels=8, channels=4, num_classes=19, in_index=(0, 1, 2, 3)) + + with pytest.raises(AssertionError): + # channels should be len(in_channels) * mla_channels + SETRMLAHead( + in_channels=(8, 8, 8, 8), + channels=8, + mla_channels=4, + in_index=(0, 1, 2, 3), + num_classes=19) + + # test inference of MLA head + img_size = (8, 8) + patch_size = 4 + head = SETRMLAHead( + in_channels=(8, 8, 8, 8), + channels=16, + mla_channels=4, + in_index=(0, 1, 2, 3), + num_classes=19, + norm_cfg=dict(type='BN')) + + h, w = img_size[0] // patch_size, img_size[1] // patch_size + # Input square NCHW format feature information + x = [ + torch.randn(1, 8, h, w), + torch.randn(1, 8, h, w), + torch.randn(1, 8, h, w), + torch.randn(1, 8, h, w) + ] + if torch.cuda.is_available(): + head, x = to_cuda(head, x) + out = head(x) + assert out.shape == (1, head.num_classes, h * 4, w * 4) + + # Input non-square NCHW format feature information + x = [ + torch.randn(1, 8, h, w * 2), + torch.randn(1, 8, h, w * 2), + torch.randn(1, 8, h, w * 2), + torch.randn(1, 8, h, w * 2) + ] + if torch.cuda.is_available(): + head, x = to_cuda(head, x) + out = head(x) + assert out.shape == (1, head.num_classes, h * 4, w * 8) diff --git a/tests/test_models/test_heads/test_setr_up_head.py b/tests/test_models/test_heads/test_setr_up_head.py new file mode 100644 index 0000000000..a05192229c --- /dev/null +++ b/tests/test_models/test_heads/test_setr_up_head.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.decode_heads import SETRUPHead +from .utils import to_cuda + + +def test_setr_up_head(capsys): + + with pytest.raises(AssertionError): + # kernel_size must be [1/3] + SETRUPHead(num_classes=19, kernel_size=2) + + with pytest.raises(AssertionError): + # in_channels must be int type and in_channels must be same + # as embed_dim. 
+ SETRUPHead(in_channels=(4, 4), channels=2, num_classes=19) + + # test init_cfg of head + head = SETRUPHead( + in_channels=4, + channels=2, + norm_cfg=dict(type='SyncBN'), + num_classes=19, + init_cfg=dict(type='Kaiming')) + super(SETRUPHead, head).init_weights() + + # test inference of Naive head + # the auxiliary head of Naive head is same as Naive head + img_size = (4, 4) + patch_size = 2 + head = SETRUPHead( + in_channels=4, + channels=2, + num_classes=19, + num_convs=1, + up_scale=4, + kernel_size=1, + norm_cfg=dict(type='BN')) + + h, w = img_size[0] // patch_size, img_size[1] // patch_size + + # Input square NCHW format feature information + x = [torch.randn(1, 4, h, w)] + if torch.cuda.is_available(): + head, x = to_cuda(head, x) + out = head(x) + assert out.shape == (1, head.num_classes, h * 4, w * 4) + + # Input non-square NCHW format feature information + x = [torch.randn(1, 4, h, w * 2)] + if torch.cuda.is_available(): + head, x = to_cuda(head, x) + out = head(x) + assert out.shape == (1, head.num_classes, h * 4, w * 8) diff --git a/tests/test_models/test_heads/test_uper_head.py b/tests/test_models/test_heads/test_uper_head.py new file mode 100644 index 0000000000..09456a80c4 --- /dev/null +++ b/tests/test_models/test_heads/test_uper_head.py @@ -0,0 +1,35 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.decode_heads import UPerHead +from .utils import _conv_has_norm, to_cuda + + +def test_uper_head(): + + with pytest.raises(AssertionError): + # fpn_in_channels must be list|tuple + UPerHead(in_channels=4, channels=2, num_classes=19) + + # test no norm_cfg + head = UPerHead( + in_channels=[4, 2], channels=2, num_classes=19, in_index=[-2, -1]) + assert not _conv_has_norm(head, sync_bn=False) + + # test with norm_cfg + head = UPerHead( + in_channels=[4, 2], + channels=2, + num_classes=19, + norm_cfg=dict(type='SyncBN'), + in_index=[-2, -1]) + assert _conv_has_norm(head, sync_bn=True) + + inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 2, 21, 21)] + head = UPerHead( + in_channels=[4, 2], channels=2, num_classes=19, in_index=[-2, -1]) + if torch.cuda.is_available(): + head, inputs = to_cuda(head, inputs) + outputs = head(inputs) + assert outputs.shape == (1, head.num_classes, 45, 45) diff --git a/tests/test_models/test_heads/utils.py b/tests/test_models/test_heads/utils.py new file mode 100644 index 0000000000..335e261a5e --- /dev/null +++ b/tests/test_models/test_heads/utils.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmcv.cnn import ConvModule +from mmengine.utils.dl_utils.parrots_wrapper import SyncBatchNorm + + +def _conv_has_norm(module, sync_bn): + for m in module.modules(): + if isinstance(m, ConvModule): + if not m.with_norm: + return False + if sync_bn: + if not isinstance(m.bn, SyncBatchNorm): + return False + return True + + +def to_cuda(module, data): + module = module.cuda() + if isinstance(data, list): + for i in range(len(data)): + data[i] = data[i].cuda() + return module, data diff --git a/tests/test_models/test_losses.py b/tests/test_models/test_losses.py deleted file mode 100644 index edae6bfd16..0000000000 --- a/tests/test_models/test_losses.py +++ /dev/null @@ -1,134 +0,0 @@ -import numpy as np -import pytest -import torch - -from mmseg.models.losses import Accuracy, reduce_loss, weight_reduce_loss - - -def test_utils(): - loss = torch.rand(1, 3, 4, 4) - weight = torch.zeros(1, 3, 4, 4) - weight[:, :, :2, :2] = 1 - - # test reduce_loss() - reduced = reduce_loss(loss, 'none') - assert reduced is loss - - reduced = reduce_loss(loss, 'mean') - np.testing.assert_almost_equal(reduced.numpy(), loss.mean()) - - reduced = reduce_loss(loss, 'sum') - np.testing.assert_almost_equal(reduced.numpy(), loss.sum()) - - # test weight_reduce_loss() - reduced = weight_reduce_loss(loss, weight=None, reduction='none') - assert reduced is loss - - reduced = weight_reduce_loss(loss, weight=weight, reduction='mean') - target = (loss * weight).mean() - np.testing.assert_almost_equal(reduced.numpy(), target) - - reduced = weight_reduce_loss(loss, weight=weight, reduction='sum') - np.testing.assert_almost_equal(reduced.numpy(), (loss * weight).sum()) - - with pytest.raises(AssertionError): - weight_wrong = weight[0, 0, ...] - weight_reduce_loss(loss, weight=weight_wrong, reduction='mean') - - with pytest.raises(AssertionError): - weight_wrong = weight[:, 0:2, ...] 
- weight_reduce_loss(loss, weight=weight_wrong, reduction='mean') - - -def test_ce_loss(): - from mmseg.models import build_loss - - # use_mask and use_sigmoid cannot be true at the same time - with pytest.raises(AssertionError): - loss_cfg = dict( - type='CrossEntropyLoss', - use_mask=True, - use_sigmoid=True, - loss_weight=1.0) - build_loss(loss_cfg) - - # test loss with class weights - loss_cls_cfg = dict( - type='CrossEntropyLoss', - use_sigmoid=False, - class_weight=[0.8, 0.2], - loss_weight=1.0) - loss_cls = build_loss(loss_cls_cfg) - fake_pred = torch.Tensor([[100, -100]]) - fake_label = torch.Tensor([1]).long() - assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.)) - - loss_cls_cfg = dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0) - loss_cls = build_loss(loss_cls_cfg) - assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(200.)) - - loss_cls_cfg = dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0) - loss_cls = build_loss(loss_cls_cfg) - assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(0.)) - - # TODO test use_mask - - -def test_accuracy(): - # test for empty pred - pred = torch.empty(0, 4) - label = torch.empty(0) - accuracy = Accuracy(topk=1) - acc = accuracy(pred, label) - assert acc.item() == 0 - - pred = torch.Tensor([[0.2, 0.3, 0.6, 0.5], [0.1, 0.1, 0.2, 0.6], - [0.9, 0.0, 0.0, 0.1], [0.4, 0.7, 0.1, 0.1], - [0.0, 0.0, 0.99, 0]]) - # test for top1 - true_label = torch.Tensor([2, 3, 0, 1, 2]).long() - accuracy = Accuracy(topk=1) - acc = accuracy(pred, true_label) - assert acc.item() == 100 - - # test for top1 with score thresh=0.8 - true_label = torch.Tensor([2, 3, 0, 1, 2]).long() - accuracy = Accuracy(topk=1, thresh=0.8) - acc = accuracy(pred, true_label) - assert acc.item() == 40 - - # test for top2 - accuracy = Accuracy(topk=2) - label = torch.Tensor([3, 2, 0, 0, 2]).long() - acc = accuracy(pred, label) - assert acc.item() == 100 - - # test for both top1 and top2 - accuracy = Accuracy(topk=(1, 2)) - true_label = torch.Tensor([2, 3, 0, 1, 2]).long() - acc = accuracy(pred, true_label) - for a in acc: - assert a.item() == 100 - - # topk is larger than pred class number - with pytest.raises(AssertionError): - accuracy = Accuracy(topk=5) - accuracy(pred, true_label) - - # wrong topk type - with pytest.raises(AssertionError): - accuracy = Accuracy(topk='wrong type') - accuracy(pred, true_label) - - # label size is larger than required - with pytest.raises(AssertionError): - label = torch.Tensor([2, 3, 0, 1, 2, 0]).long() # size mismatch - accuracy = Accuracy() - accuracy(pred, label) - - # wrong pred dimension - with pytest.raises(AssertionError): - accuracy = Accuracy() - accuracy(pred[:, :, None], true_label) diff --git a/tests/test_models/test_losses/test_tversky_loss.py b/tests/test_models/test_losses/test_tversky_loss.py new file mode 100644 index 0000000000..c5c581d8b4 --- /dev/null +++ b/tests/test_models/test_losses/test_tversky_loss.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest
+import torch
+
+
+def test_tversky_loss():
+    from mmseg.models import build_loss
+
+    # test alpha + beta != 1
+    with pytest.raises(AssertionError):
+        loss_cfg = dict(
+            type='TverskyLoss',
+            class_weight=[1.0, 2.0, 3.0],
+            loss_weight=1.0,
+            alpha=0.4,
+            beta=0.7,
+            loss_name='loss_tversky')
+        tversky_loss = build_loss(loss_cfg)
+        logits = torch.rand(8, 3, 4, 4)
+        labels = (torch.rand(8, 4, 4) * 3).long()
+        tversky_loss(logits, labels, ignore_index=1)
+
+    # test tversky loss
+    loss_cfg = dict(
+        type='TverskyLoss',
+        class_weight=[1.0, 2.0, 3.0],
+        loss_weight=1.0,
+        ignore_index=1,
+        loss_name='loss_tversky')
+    tversky_loss = build_loss(loss_cfg)
+    logits = torch.rand(8, 3, 4, 4)
+    labels = (torch.rand(8, 4, 4) * 3).long()
+    tversky_loss(logits, labels)
+
+    # test loss with class weights from file
+    import os
+    import tempfile
+
+    import mmengine
+    import numpy as np
+    tmp_file = tempfile.NamedTemporaryFile()
+
+    mmengine.dump([1.0, 2.0, 3.0], f'{tmp_file.name}.pkl',
+                  'pkl')  # from pkl file
+    loss_cfg = dict(
+        type='TverskyLoss',
+        class_weight=f'{tmp_file.name}.pkl',
+        loss_weight=1.0,
+        ignore_index=1,
+        loss_name='loss_tversky')
+    tversky_loss = build_loss(loss_cfg)
+    tversky_loss(logits, labels)
+
+    np.save(f'{tmp_file.name}.npy', np.array([1.0, 2.0, 3.0]))  # from npy file
+    loss_cfg = dict(
+        type='TverskyLoss',
+        class_weight=f'{tmp_file.name}.npy',
+        loss_weight=1.0,
+        ignore_index=1,
+        loss_name='loss_tversky')
+    tversky_loss = build_loss(loss_cfg)
+    tversky_loss(logits, labels)
+    tmp_file.close()
+    os.remove(f'{tmp_file.name}.pkl')
+    os.remove(f'{tmp_file.name}.npy')
+
+    # test tversky loss has name `loss_tversky`
+    loss_cfg = dict(
+        type='TverskyLoss',
+        smooth=2,
+        loss_weight=1.0,
+        ignore_index=1,
+        alpha=0.3,
+        beta=0.7,
+        loss_name='loss_tversky')
+    tversky_loss = build_loss(loss_cfg)
+    assert tversky_loss.loss_name == 'loss_tversky'
diff --git a/tests/test_models/test_necks/__init__.py b/tests/test_models/test_necks/__init__.py
new file mode 100644
index 0000000000..ef101fec61
--- /dev/null
+++ b/tests/test_models/test_necks/__init__.py
@@ -0,0 +1 @@
+# Copyright (c) OpenMMLab. All rights reserved.
diff --git a/tests/test_models/test_necks/test_feature2pyramid.py b/tests/test_models/test_necks/test_feature2pyramid.py
new file mode 100644
index 0000000000..44fd02c489
--- /dev/null
+++ b/tests/test_models/test_necks/test_feature2pyramid.py
@@ -0,0 +1,38 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import pytest +import torch + +from mmseg.models import Feature2Pyramid + + +def test_feature2pyramid(): + # test + rescales = [4, 2, 1, 0.5] + embed_dim = 64 + inputs = [torch.randn(1, embed_dim, 32, 32) for i in range(len(rescales))] + + fpn = Feature2Pyramid( + embed_dim, rescales, norm_cfg=dict(type='BN', requires_grad=True)) + outputs = fpn(inputs) + assert outputs[0].shape == torch.Size([1, 64, 128, 128]) + assert outputs[1].shape == torch.Size([1, 64, 64, 64]) + assert outputs[2].shape == torch.Size([1, 64, 32, 32]) + assert outputs[3].shape == torch.Size([1, 64, 16, 16]) + + # test rescales = [2, 1, 0.5, 0.25] + rescales = [2, 1, 0.5, 0.25] + inputs = [torch.randn(1, embed_dim, 32, 32) for i in range(len(rescales))] + + fpn = Feature2Pyramid( + embed_dim, rescales, norm_cfg=dict(type='BN', requires_grad=True)) + outputs = fpn(inputs) + assert outputs[0].shape == torch.Size([1, 64, 64, 64]) + assert outputs[1].shape == torch.Size([1, 64, 32, 32]) + assert outputs[2].shape == torch.Size([1, 64, 16, 16]) + assert outputs[3].shape == torch.Size([1, 64, 8, 8]) + + # test rescales = [4, 2, 0.25, 0] + rescales = [4, 2, 0.25, 0] + with pytest.raises(KeyError): + fpn = Feature2Pyramid( + embed_dim, rescales, norm_cfg=dict(type='BN', requires_grad=True)) diff --git a/tests/test_models/test_necks/test_fpn.py b/tests/test_models/test_necks/test_fpn.py new file mode 100644 index 0000000000..c29400602d --- /dev/null +++ b/tests/test_models/test_necks/test_fpn.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmseg.models import FPN + + +def test_fpn(): + in_channels = [64, 128, 256, 512] + inputs = [ + torch.randn(1, c, 56 // 2**i, 56 // 2**i) + for i, c in enumerate(in_channels) + ] + + fpn = FPN(in_channels, 64, len(in_channels)) + outputs = fpn(inputs) + assert outputs[0].shape == torch.Size([1, 64, 56, 56]) + assert outputs[1].shape == torch.Size([1, 64, 28, 28]) + assert outputs[2].shape == torch.Size([1, 64, 14, 14]) + assert outputs[3].shape == torch.Size([1, 64, 7, 7]) + + fpn = FPN( + in_channels, + 64, + len(in_channels), + upsample_cfg=dict(mode='nearest', scale_factor=2.0)) + outputs = fpn(inputs) + assert outputs[0].shape == torch.Size([1, 64, 56, 56]) + assert outputs[1].shape == torch.Size([1, 64, 28, 28]) + assert outputs[2].shape == torch.Size([1, 64, 14, 14]) + assert outputs[3].shape == torch.Size([1, 64, 7, 7]) diff --git a/tests/test_models/test_necks/test_ic_neck.py b/tests/test_models/test_necks/test_ic_neck.py new file mode 100644 index 0000000000..3d13008b5f --- /dev/null +++ b/tests/test_models/test_necks/test_ic_neck.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest
+import torch
+
+from mmseg.models.necks import ICNeck
+from mmseg.models.necks.ic_neck import CascadeFeatureFusion
+from ..test_heads.utils import _conv_has_norm, to_cuda
+
+
+def test_ic_neck():
+    # test with norm_cfg
+    neck = ICNeck(
+        in_channels=(4, 16, 16),
+        out_channels=8,
+        norm_cfg=dict(type='SyncBN'),
+        align_corners=False)
+    assert _conv_has_norm(neck, sync_bn=True)
+
+    inputs = [
+        torch.randn(1, 4, 32, 64),
+        torch.randn(1, 16, 16, 32),
+        torch.randn(1, 16, 8, 16)
+    ]
+    neck = ICNeck(
+        in_channels=(4, 16, 16),
+        out_channels=4,
+        norm_cfg=dict(type='BN', requires_grad=True),
+        align_corners=False)
+    if torch.cuda.is_available():
+        neck, inputs = to_cuda(neck, inputs)
+
+    outputs = neck(inputs)
+    assert outputs[0].shape == (1, 4, 16, 32)
+    assert outputs[1].shape == (1, 4, 32, 64)
+    assert outputs[2].shape == (1, 4, 32, 64)
+
+
+def test_ic_neck_cascade_feature_fusion():
+    cff = CascadeFeatureFusion(64, 64, 32)
+    assert cff.conv_low.in_channels == 64
+    assert cff.conv_low.out_channels == 32
+    assert cff.conv_high.in_channels == 64
+    assert cff.conv_high.out_channels == 32
+
+
+def test_ic_neck_input_channels():
+    with pytest.raises(AssertionError):
+        # ICNet Neck input channel constraints.
+        ICNeck(
+            in_channels=(16, 64, 64, 64),
+            out_channels=32,
+            norm_cfg=dict(type='BN', requires_grad=True),
+            align_corners=False)
diff --git a/tests/test_models/test_necks/test_jpu.py b/tests/test_models/test_necks/test_jpu.py
new file mode 100644
index 0000000000..4c3fa9f6bb
--- /dev/null
+++ b/tests/test_models/test_necks/test_jpu.py
@@ -0,0 +1,46 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import pytest
+import torch
+
+from mmseg.models.necks import JPU
+
+
+def test_fastfcn_neck():
+    # Test FastFCN Standard Forward
+    model = JPU(
+        in_channels=(64, 128, 256),
+        mid_channels=64,
+        start_level=0,
+        end_level=-1,
+        dilations=(1, 2, 4, 8),
+    )
+    model.init_weights()
+    model.train()
+    batch_size = 1
+    input = [
+        torch.randn(batch_size, 64, 64, 128),
+        torch.randn(batch_size, 128, 32, 64),
+        torch.randn(batch_size, 256, 16, 32)
+    ]
+    feat = model(input)
+
+    assert len(feat) == 3
+    assert feat[0].shape == torch.Size([batch_size, 64, 64, 128])
+    assert feat[1].shape == torch.Size([batch_size, 128, 32, 64])
+    assert feat[2].shape == torch.Size([batch_size, 256, 64, 128])
+
+    with pytest.raises(AssertionError):
+        # FastFCN input and in_channels constraints.
+        JPU(in_channels=(256, 64, 128), start_level=0, end_level=5)
+
+    # Test not default start_level
+    model = JPU(in_channels=(64, 128, 256), start_level=1, end_level=-1)
+    input = [
+        torch.randn(batch_size, 64, 64, 128),
+        torch.randn(batch_size, 128, 32, 64),
+        torch.randn(batch_size, 256, 16, 32)
+    ]
+    feat = model(input)
+    assert len(feat) == 2
+    assert feat[0].shape == torch.Size([batch_size, 128, 32, 64])
+    assert feat[1].shape == torch.Size([batch_size, 2048, 32, 64])
diff --git a/tests/test_models/test_necks/test_mla_neck.py b/tests/test_models/test_necks/test_mla_neck.py
new file mode 100644
index 0000000000..e385418949
--- /dev/null
+++ b/tests/test_models/test_necks/test_mla_neck.py
@@ -0,0 +1,16 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch + +from mmseg.models import MLANeck + + +def test_mla(): + in_channels = [4, 4, 4, 4] + mla = MLANeck(in_channels, 32) + + inputs = [torch.randn(1, c, 12, 12) for i, c in enumerate(in_channels)] + outputs = mla(inputs) + assert outputs[0].shape == torch.Size([1, 32, 12, 12]) + assert outputs[1].shape == torch.Size([1, 32, 12, 12]) + assert outputs[2].shape == torch.Size([1, 32, 12, 12]) + assert outputs[3].shape == torch.Size([1, 32, 12, 12]) diff --git a/tests/test_models/test_necks/test_multilevel_neck.py b/tests/test_models/test_necks/test_multilevel_neck.py new file mode 100644 index 0000000000..9c71d51563 --- /dev/null +++ b/tests/test_models/test_necks/test_multilevel_neck.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmseg.models import MultiLevelNeck + + +def test_multilevel_neck(): + + # Test init_weights + MultiLevelNeck([266], 32).init_weights() + + # Test multi feature maps + in_channels = [32, 64, 128, 256] + inputs = [torch.randn(1, c, 14, 14) for i, c in enumerate(in_channels)] + + neck = MultiLevelNeck(in_channels, 32) + outputs = neck(inputs) + assert outputs[0].shape == torch.Size([1, 32, 7, 7]) + assert outputs[1].shape == torch.Size([1, 32, 14, 14]) + assert outputs[2].shape == torch.Size([1, 32, 28, 28]) + assert outputs[3].shape == torch.Size([1, 32, 56, 56]) + + # Test one feature map + in_channels = [768] + inputs = [torch.randn(1, 768, 14, 14)] + + neck = MultiLevelNeck(in_channels, 32) + outputs = neck(inputs) + assert outputs[0].shape == torch.Size([1, 32, 7, 7]) + assert outputs[1].shape == torch.Size([1, 32, 14, 14]) + assert outputs[2].shape == torch.Size([1, 32, 28, 28]) + assert outputs[3].shape == torch.Size([1, 32, 56, 56]) diff --git a/tests/test_models/test_segmentor.py b/tests/test_models/test_segmentor.py deleted file mode 100644 index 67f7884bc8..0000000000 --- a/tests/test_models/test_segmentor.py +++ /dev/null @@ -1,212 +0,0 @@ -import mmcv -import numpy as np -import torch -from torch import nn - -from mmseg.models import BACKBONES, HEADS, build_segmentor -from mmseg.models.decode_heads.cascade_decode_head import BaseCascadeDecodeHead -from mmseg.models.decode_heads.decode_head import BaseDecodeHead - - -def _demo_mm_inputs(input_shape=(1, 3, 8, 16), num_classes=10): - """Create a superset of inputs needed to run test or train batches. 
- - Args: - input_shape (tuple): - input batch dimensions - - num_classes (int): - number of semantic classes - """ - (N, C, H, W) = input_shape - - rng = np.random.RandomState(0) - - imgs = rng.rand(*input_shape) - segs = rng.randint( - low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8) - - img_metas = [{ - 'img_shape': (H, W, C), - 'ori_shape': (H, W, C), - 'pad_shape': (H, W, C), - 'filename': '.png', - 'scale_factor': 1.0, - 'flip': False, - 'flip_direction': 'horizontal' - } for _ in range(N)] - - mm_inputs = { - 'imgs': torch.FloatTensor(imgs), - 'img_metas': img_metas, - 'gt_semantic_seg': torch.LongTensor(segs) - } - return mm_inputs - - -@BACKBONES.register_module() -class ExampleBackbone(nn.Module): - - def __init__(self): - super(ExampleBackbone, self).__init__() - self.conv = nn.Conv2d(3, 3, 3) - - def init_weights(self, pretrained=None): - pass - - def forward(self, x): - return [self.conv(x)] - - -@HEADS.register_module() -class ExampleDecodeHead(BaseDecodeHead): - - def __init__(self): - super(ExampleDecodeHead, self).__init__(3, 3, num_classes=19) - - def forward(self, inputs): - return self.cls_seg(inputs[0]) - - -@HEADS.register_module() -class ExampleCascadeDecodeHead(BaseCascadeDecodeHead): - - def __init__(self): - super(ExampleCascadeDecodeHead, self).__init__(3, 3, num_classes=19) - - def forward(self, inputs, prev_out): - return self.cls_seg(inputs[0]) - - -def _segmentor_forward_train_test(segmentor): - if isinstance(segmentor.decode_head, nn.ModuleList): - num_classes = segmentor.decode_head[-1].num_classes - else: - num_classes = segmentor.decode_head.num_classes - # batch_size=2 for BatchNorm - mm_inputs = _demo_mm_inputs(num_classes=num_classes) - - imgs = mm_inputs.pop('imgs') - img_metas = mm_inputs.pop('img_metas') - gt_semantic_seg = mm_inputs['gt_semantic_seg'] - - # convert to cuda Tensor if applicable - if torch.cuda.is_available(): - segmentor = segmentor.cuda() - imgs = imgs.cuda() - gt_semantic_seg = gt_semantic_seg.cuda() - - # Test forward train - losses = segmentor.forward( - imgs, img_metas, gt_semantic_seg=gt_semantic_seg, return_loss=True) - assert isinstance(losses, dict) - - # Test forward simple test - with torch.no_grad(): - segmentor.eval() - # pack into lists - img_list = [img[None, :] for img in imgs] - img_meta_list = [[img_meta] for img_meta in img_metas] - segmentor.forward(img_list, img_meta_list, return_loss=False) - - # Test forward aug test - with torch.no_grad(): - segmentor.eval() - # pack into lists - img_list = [img[None, :] for img in imgs] - img_list = img_list + img_list - img_meta_list = [[img_meta] for img_meta in img_metas] - img_meta_list = img_meta_list + img_meta_list - segmentor.forward(img_list, img_meta_list, return_loss=False) - - -def test_encoder_decoder(): - - # test 1 decode head, w.o. 
aux head - cfg = dict( - type='EncoderDecoder', - backbone=dict(type='ExampleBackbone'), - decode_head=dict(type='ExampleDecodeHead')) - test_cfg = mmcv.Config(dict(mode='whole')) - segmentor = build_segmentor(cfg, train_cfg=None, test_cfg=test_cfg) - _segmentor_forward_train_test(segmentor) - - # test slide mode - test_cfg = mmcv.Config(dict(mode='slide', crop_size=(3, 3), stride=(2, 2))) - segmentor = build_segmentor(cfg, train_cfg=None, test_cfg=test_cfg) - _segmentor_forward_train_test(segmentor) - - # test 1 decode head, 1 aux head - cfg = dict( - type='EncoderDecoder', - backbone=dict(type='ExampleBackbone'), - decode_head=dict(type='ExampleDecodeHead'), - auxiliary_head=dict(type='ExampleDecodeHead')) - test_cfg = mmcv.Config(dict(mode='whole')) - segmentor = build_segmentor(cfg, train_cfg=None, test_cfg=test_cfg) - _segmentor_forward_train_test(segmentor) - - # test 1 decode head, 2 aux head - cfg = dict( - type='EncoderDecoder', - backbone=dict(type='ExampleBackbone'), - decode_head=dict(type='ExampleDecodeHead'), - auxiliary_head=[ - dict(type='ExampleDecodeHead'), - dict(type='ExampleDecodeHead') - ]) - test_cfg = mmcv.Config(dict(mode='whole')) - segmentor = build_segmentor(cfg, train_cfg=None, test_cfg=test_cfg) - _segmentor_forward_train_test(segmentor) - - -def test_cascade_encoder_decoder(): - - # test 1 decode head, w.o. aux head - cfg = dict( - type='CascadeEncoderDecoder', - num_stages=2, - backbone=dict(type='ExampleBackbone'), - decode_head=[ - dict(type='ExampleDecodeHead'), - dict(type='ExampleCascadeDecodeHead') - ]) - test_cfg = mmcv.Config(dict(mode='whole')) - segmentor = build_segmentor(cfg, train_cfg=None, test_cfg=test_cfg) - _segmentor_forward_train_test(segmentor) - - # test slide mode - test_cfg = mmcv.Config(dict(mode='slide', crop_size=(3, 3), stride=(2, 2))) - segmentor = build_segmentor(cfg, train_cfg=None, test_cfg=test_cfg) - _segmentor_forward_train_test(segmentor) - - # test 1 decode head, 1 aux head - cfg = dict( - type='CascadeEncoderDecoder', - num_stages=2, - backbone=dict(type='ExampleBackbone'), - decode_head=[ - dict(type='ExampleDecodeHead'), - dict(type='ExampleCascadeDecodeHead') - ], - auxiliary_head=dict(type='ExampleDecodeHead')) - test_cfg = mmcv.Config(dict(mode='whole')) - segmentor = build_segmentor(cfg, train_cfg=None, test_cfg=test_cfg) - _segmentor_forward_train_test(segmentor) - - # test 1 decode head, 2 aux head - cfg = dict( - type='CascadeEncoderDecoder', - num_stages=2, - backbone=dict(type='ExampleBackbone'), - decode_head=[ - dict(type='ExampleDecodeHead'), - dict(type='ExampleCascadeDecodeHead') - ], - auxiliary_head=[ - dict(type='ExampleDecodeHead'), - dict(type='ExampleDecodeHead') - ]) - test_cfg = mmcv.Config(dict(mode='whole')) - segmentor = build_segmentor(cfg, train_cfg=None, test_cfg=test_cfg) - _segmentor_forward_train_test(segmentor) diff --git a/tests/test_models/test_segmentors/__init__.py b/tests/test_models/test_segmentors/__init__.py new file mode 100644 index 0000000000..ef101fec61 --- /dev/null +++ b/tests/test_models/test_segmentors/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/tests/test_models/test_segmentors/test_cascade_encoder_decoder.py b/tests/test_models/test_segmentors/test_cascade_encoder_decoder.py new file mode 100644 index 0000000000..941816d253 --- /dev/null +++ b/tests/test_models/test_segmentors/test_cascade_encoder_decoder.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmengine import ConfigDict + +from mmseg.models import build_segmentor +from .utils import _segmentor_forward_train_test + + +def test_cascade_encoder_decoder(): + + # test 1 decode head, w.o. aux head + cfg = ConfigDict( + type='CascadeEncoderDecoder', + num_stages=2, + backbone=dict(type='ExampleBackbone'), + decode_head=[ + dict(type='ExampleDecodeHead'), + dict(type='ExampleCascadeDecodeHead') + ]) + cfg.test_cfg = ConfigDict(mode='whole') + segmentor = build_segmentor(cfg) + _segmentor_forward_train_test(segmentor) + + # test slide mode + cfg.test_cfg = ConfigDict(mode='slide', crop_size=(3, 3), stride=(2, 2)) + segmentor = build_segmentor(cfg) + _segmentor_forward_train_test(segmentor) + + # test 1 decode head, 1 aux head + cfg = ConfigDict( + type='CascadeEncoderDecoder', + num_stages=2, + backbone=dict(type='ExampleBackbone'), + decode_head=[ + dict(type='ExampleDecodeHead'), + dict(type='ExampleCascadeDecodeHead') + ], + auxiliary_head=dict(type='ExampleDecodeHead')) + cfg.test_cfg = ConfigDict(mode='whole') + segmentor = build_segmentor(cfg) + _segmentor_forward_train_test(segmentor) + + # test 1 decode head, 2 aux head + cfg = ConfigDict( + type='CascadeEncoderDecoder', + num_stages=2, + backbone=dict(type='ExampleBackbone'), + decode_head=[ + dict(type='ExampleDecodeHead'), + dict(type='ExampleCascadeDecodeHead') + ], + auxiliary_head=[ + dict(type='ExampleDecodeHead'), + dict(type='ExampleDecodeHead') + ]) + cfg.test_cfg = ConfigDict(mode='whole') + segmentor = build_segmentor(cfg) + _segmentor_forward_train_test(segmentor) diff --git a/tests/test_models/test_segmentors/test_encoder_decoder.py b/tests/test_models/test_segmentors/test_encoder_decoder.py new file mode 100644 index 0000000000..5795f513d3 --- /dev/null +++ b/tests/test_models/test_segmentors/test_encoder_decoder.py @@ -0,0 +1,100 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine import ConfigDict +from mmengine.structures import PixelData + +from mmseg.models import build_segmentor +from mmseg.structures import SegDataSample +from .utils import _segmentor_forward_train_test + + +def test_encoder_decoder(): + + # test 1 decode head, w.o. 
aux head + + cfg = ConfigDict( + type='EncoderDecoder', + backbone=dict(type='ExampleBackbone'), + decode_head=dict(type='ExampleDecodeHead'), + train_cfg=None, + test_cfg=dict(mode='whole')) + segmentor = build_segmentor(cfg) + _segmentor_forward_train_test(segmentor) + + # test out_channels == 1 + cfg = ConfigDict( + type='EncoderDecoder', + backbone=dict(type='ExampleBackbone'), + decode_head=dict( + type='ExampleDecodeHead', num_classes=2, out_channels=1), + train_cfg=None, + test_cfg=dict(mode='whole')) + segmentor = build_segmentor(cfg) + _segmentor_forward_train_test(segmentor) + + # test slide mode + cfg.test_cfg = ConfigDict(mode='slide', crop_size=(3, 3), stride=(2, 2)) + segmentor = build_segmentor(cfg) + _segmentor_forward_train_test(segmentor) + + # test 1 decode head, 1 aux head + cfg = ConfigDict( + type='EncoderDecoder', + backbone=dict(type='ExampleBackbone'), + decode_head=dict(type='ExampleDecodeHead'), + auxiliary_head=dict(type='ExampleDecodeHead')) + cfg.test_cfg = ConfigDict(mode='whole') + segmentor = build_segmentor(cfg) + _segmentor_forward_train_test(segmentor) + + # test 1 decode head, 2 aux head + cfg = ConfigDict( + type='EncoderDecoder', + backbone=dict(type='ExampleBackbone'), + decode_head=dict(type='ExampleDecodeHead'), + auxiliary_head=[ + dict(type='ExampleDecodeHead'), + dict(type='ExampleDecodeHead') + ]) + cfg.test_cfg = ConfigDict(mode='whole') + segmentor = build_segmentor(cfg) + _segmentor_forward_train_test(segmentor) + + +def test_postprocess_result(): + cfg = ConfigDict( + type='EncoderDecoder', + backbone=dict(type='ExampleBackbone'), + decode_head=dict(type='ExampleDecodeHead'), + train_cfg=None, + test_cfg=dict(mode='whole')) + model = build_segmentor(cfg) + + # test postprocess + data_sample = SegDataSample() + data_sample.gt_sem_seg = PixelData( + **{'data': torch.randint(0, 10, (1, 8, 8))}) + data_sample.set_metainfo({ + 'padding_size': (0, 2, 0, 2), + 'ori_shape': (8, 8) + }) + seg_logits = torch.zeros((1, 2, 10, 10)) + seg_logits[:, :, :8, :8] = 1 + data_samples = [data_sample] + + outputs = model.postprocess_result(seg_logits, data_samples) + assert outputs[0].seg_logits.data.shape == torch.Size((2, 8, 8)) + assert torch.allclose(outputs[0].seg_logits.data, torch.ones((2, 8, 8))) + + data_sample = SegDataSample() + data_sample.gt_sem_seg = PixelData( + **{'data': torch.randint(0, 10, (1, 8, 8))}) + data_sample.set_metainfo({ + 'img_padding_size': (0, 2, 0, 2), + 'ori_shape': (8, 8) + }) + + data_samples = [data_sample] + outputs = model.postprocess_result(seg_logits, data_samples) + assert outputs[0].seg_logits.data.shape == torch.Size((2, 8, 8)) + assert torch.allclose(outputs[0].seg_logits.data, torch.ones((2, 8, 8))) diff --git a/tests/test_models/test_segmentors/utils.py b/tests/test_models/test_segmentors/utils.py new file mode 100644 index 0000000000..9b155c0961 --- /dev/null +++ b/tests/test_models/test_segmentors/utils.py @@ -0,0 +1,134 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +from mmengine.optim import OptimWrapper +from mmengine.structures import PixelData +from torch import nn +from torch.optim import SGD + +from mmseg.models import SegDataPreProcessor +from mmseg.models.decode_heads.cascade_decode_head import BaseCascadeDecodeHead +from mmseg.models.decode_heads.decode_head import BaseDecodeHead +from mmseg.registry import MODELS +from mmseg.structures import SegDataSample + + +def _demo_mm_inputs(input_shape=(1, 3, 8, 16), num_classes=10): + """Create a superset of inputs needed to run test or train batches. + + Args: + input_shape (tuple): + input batch dimensions + + num_classes (int): + number of semantic classes + """ + (N, C, H, W) = input_shape + + imgs = torch.randn(*input_shape) + segs = torch.randint( + low=0, high=num_classes - 1, size=(N, H, W), dtype=torch.long) + + img_metas = [{ + 'img_shape': (H, W), + 'ori_shape': (H, W), + 'pad_shape': (H, W, C), + 'filename': '.png', + 'scale_factor': 1.0, + 'flip': False, + 'flip_direction': 'horizontal' + } for _ in range(N)] + + data_samples = [ + SegDataSample( + gt_sem_seg=PixelData(data=segs[i]), metainfo=img_metas[i]) + for i in range(N) + ] + + mm_inputs = {'imgs': torch.FloatTensor(imgs), 'data_samples': data_samples} + + return mm_inputs + + +@MODELS.register_module() +class ExampleBackbone(nn.Module): + + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(3, 3, 3) + + def init_weights(self, pretrained=None): + pass + + def forward(self, x): + return [self.conv(x)] + + +@MODELS.register_module() +class ExampleDecodeHead(BaseDecodeHead): + + def __init__(self, num_classes=19, out_channels=None): + super().__init__( + 3, 3, num_classes=num_classes, out_channels=out_channels) + + def forward(self, inputs): + return self.cls_seg(inputs[0]) + + +@MODELS.register_module() +class ExampleCascadeDecodeHead(BaseCascadeDecodeHead): + + def __init__(self): + super().__init__(3, 3, num_classes=19) + + def forward(self, inputs, prev_out): + return self.cls_seg(inputs[0]) + + +def _segmentor_forward_train_test(segmentor): + if isinstance(segmentor.decode_head, nn.ModuleList): + num_classes = segmentor.decode_head[-1].num_classes + else: + num_classes = segmentor.decode_head.num_classes + # batch_size=2 for BatchNorm + mm_inputs = _demo_mm_inputs(num_classes=num_classes) + + # convert to cuda Tensor if applicable + if torch.cuda.is_available(): + segmentor = segmentor.cuda() + + # check data preprocessor + if not hasattr(segmentor, + 'data_preprocessor') or segmentor.data_preprocessor is None: + segmentor.data_preprocessor = SegDataPreProcessor() + + mm_inputs = segmentor.data_preprocessor(mm_inputs, True) + imgs = mm_inputs.pop('imgs') + data_samples = mm_inputs.pop('data_samples') + + # create optimizer wrapper + optimizer = SGD(segmentor.parameters(), lr=0.1) + optim_wrapper = OptimWrapper(optimizer) + + # Test forward train + losses = segmentor.forward(imgs, data_samples, mode='loss') + assert isinstance(losses, dict) + + # Test train_step + data_batch = dict(inputs=imgs, data_samples=data_samples) + outputs = segmentor.train_step(data_batch, optim_wrapper) + assert isinstance(outputs, dict) + assert 'loss' in outputs + + # Test val_step + with torch.no_grad(): + segmentor.eval() + data_batch = dict(inputs=imgs, data_samples=data_samples) + outputs = segmentor.val_step(data_batch) + assert isinstance(outputs, list) + + # Test forward simple test + with torch.no_grad(): + segmentor.eval() + data_batch = dict(inputs=imgs, data_samples=data_samples) + results = 
segmentor.forward(imgs, data_samples, mode='tensor') + assert isinstance(results, torch.Tensor) diff --git a/tests/test_models/test_utils/__init__.py b/tests/test_models/test_utils/__init__.py new file mode 100644 index 0000000000..ef101fec61 --- /dev/null +++ b/tests/test_models/test_utils/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/tests/test_models/test_utils/test_embed.py b/tests/test_models/test_utils/test_embed.py new file mode 100644 index 0000000000..be20c97b0d --- /dev/null +++ b/tests/test_models/test_utils/test_embed.py @@ -0,0 +1,461 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmseg.models.utils.embed import AdaptivePadding, PatchEmbed, PatchMerging + + +def test_adaptive_padding(): + + for padding in ('same', 'corner'): + kernel_size = 16 + stride = 16 + dilation = 1 + input = torch.rand(1, 1, 15, 17) + adap_pool = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + out = adap_pool(input) + # padding to divisible by 16 + assert (out.shape[2], out.shape[3]) == (16, 32) + input = torch.rand(1, 1, 16, 17) + out = adap_pool(input) + # padding to divisible by 16 + assert (out.shape[2], out.shape[3]) == (16, 32) + + kernel_size = (2, 2) + stride = (2, 2) + dilation = (1, 1) + + adap_pad = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + input = torch.rand(1, 1, 11, 13) + out = adap_pad(input) + # padding to divisible by 2 + assert (out.shape[2], out.shape[3]) == (12, 14) + + kernel_size = (2, 2) + stride = (10, 10) + dilation = (1, 1) + + adap_pad = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + input = torch.rand(1, 1, 10, 13) + out = adap_pad(input) + # no padding + assert (out.shape[2], out.shape[3]) == (10, 13) + + kernel_size = (11, 11) + adap_pad = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + input = torch.rand(1, 1, 11, 13) + out = adap_pad(input) + # all padding + assert (out.shape[2], out.shape[3]) == (21, 21) + + # test padding as kernel is (7,9) + input = torch.rand(1, 1, 11, 13) + stride = (3, 4) + kernel_size = (4, 5) + dilation = (2, 2) + # actually (7, 9) + adap_pad = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + dilation_out = adap_pad(input) + assert (dilation_out.shape[2], dilation_out.shape[3]) == (16, 21) + kernel_size = (7, 9) + dilation = (1, 1) + adap_pad = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + kernel79_out = adap_pad(input) + assert (kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21) + assert kernel79_out.shape == dilation_out.shape + + # assert only support "same" "corner" + with pytest.raises(AssertionError): + AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=1) + + +def test_patch_embed(): + B = 2 + H = 3 + W = 4 + C = 3 + embed_dims = 10 + kernel_size = 3 + stride = 1 + dummy_input = torch.rand(B, C, H, W) + patch_merge_1 = PatchEmbed( + in_channels=C, + embed_dims=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=0, + dilation=1, + norm_cfg=None) + + x1, shape = patch_merge_1(dummy_input) + # test out shape + assert x1.shape == (2, 2, 10) + # test outsize is correct + assert shape == (1, 2) + # test L = out_h * out_w + assert shape[0] * shape[1] == x1.shape[1] + + B = 
2 + H = 10 + W = 10 + C = 3 + embed_dims = 10 + kernel_size = 5 + stride = 2 + dummy_input = torch.rand(B, C, H, W) + # test dilation + patch_merge_2 = PatchEmbed( + in_channels=C, + embed_dims=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=0, + dilation=2, + norm_cfg=None, + ) + + x2, shape = patch_merge_2(dummy_input) + # test out shape + assert x2.shape == (2, 1, 10) + # test outsize is correct + assert shape == (1, 1) + # test L = out_h * out_w + assert shape[0] * shape[1] == x2.shape[1] + + stride = 2 + input_size = (10, 10) + + dummy_input = torch.rand(B, C, H, W) + # test stride and norm + patch_merge_3 = PatchEmbed( + in_channels=C, + embed_dims=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=0, + dilation=2, + norm_cfg=dict(type='LN'), + input_size=input_size) + + x3, shape = patch_merge_3(dummy_input) + # test out shape + assert x3.shape == (2, 1, 10) + # test outsize is correct + assert shape == (1, 1) + # test L = out_h * out_w + assert shape[0] * shape[1] == x3.shape[1] + + # test the init_out_size with nn.Unfold + assert patch_merge_3.init_out_size[1] == (input_size[0] - 2 * 4 - + 1) // 2 + 1 + assert patch_merge_3.init_out_size[0] == (input_size[0] - 2 * 4 - + 1) // 2 + 1 + H = 11 + W = 12 + input_size = (H, W) + dummy_input = torch.rand(B, C, H, W) + # test stride and norm + patch_merge_3 = PatchEmbed( + in_channels=C, + embed_dims=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=0, + dilation=2, + norm_cfg=dict(type='LN'), + input_size=input_size) + + _, shape = patch_merge_3(dummy_input) + # when input_size equal to real input + # the out_size should be equal to `init_out_size` + assert shape == patch_merge_3.init_out_size + + input_size = (H, W) + dummy_input = torch.rand(B, C, H, W) + # test stride and norm + patch_merge_3 = PatchEmbed( + in_channels=C, + embed_dims=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=0, + dilation=2, + norm_cfg=dict(type='LN'), + input_size=input_size) + + _, shape = patch_merge_3(dummy_input) + # when input_size equal to real input + # the out_size should be equal to `init_out_size` + assert shape == patch_merge_3.init_out_size + + # test adap padding + for padding in ('same', 'corner'): + in_c = 2 + embed_dims = 3 + B = 2 + + # test stride is 1 + input_size = (5, 5) + kernel_size = (5, 5) + stride = (1, 1) + dilation = 1 + bias = False + + x = torch.rand(B, in_c, *input_size) + patch_embed = PatchEmbed( + in_channels=in_c, + embed_dims=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + x_out, out_size = patch_embed(x) + assert x_out.size() == (B, 25, 3) + assert out_size == (5, 5) + assert x_out.size(1) == out_size[0] * out_size[1] + + # test kernel_size == stride + input_size = (5, 5) + kernel_size = (5, 5) + stride = (5, 5) + dilation = 1 + bias = False + + x = torch.rand(B, in_c, *input_size) + patch_embed = PatchEmbed( + in_channels=in_c, + embed_dims=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + x_out, out_size = patch_embed(x) + assert x_out.size() == (B, 1, 3) + assert out_size == (1, 1) + assert x_out.size(1) == out_size[0] * out_size[1] + + # test kernel_size == stride + input_size = (6, 5) + kernel_size = (5, 5) + stride = (5, 5) + dilation = 1 + bias = False + + x = torch.rand(B, in_c, *input_size) + patch_embed = PatchEmbed( + in_channels=in_c, + embed_dims=embed_dims, + kernel_size=kernel_size, + stride=stride, + 
padding=padding, + dilation=dilation, + bias=bias) + + x_out, out_size = patch_embed(x) + assert x_out.size() == (B, 2, 3) + assert out_size == (2, 1) + assert x_out.size(1) == out_size[0] * out_size[1] + + # test different kernel_size with different stride + input_size = (6, 5) + kernel_size = (6, 2) + stride = (6, 2) + dilation = 1 + bias = False + + x = torch.rand(B, in_c, *input_size) + patch_embed = PatchEmbed( + in_channels=in_c, + embed_dims=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + x_out, out_size = patch_embed(x) + assert x_out.size() == (B, 3, 3) + assert out_size == (1, 3) + assert x_out.size(1) == out_size[0] * out_size[1] + + +def test_patch_merging(): + + # Test the model with int padding + in_c = 3 + out_c = 4 + kernel_size = 3 + stride = 3 + padding = 1 + dilation = 1 + bias = False + # test the case `pad_to_stride` is False + patch_merge = PatchMerging( + in_channels=in_c, + out_channels=out_c, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + B, L, C = 1, 100, 3 + input_size = (10, 10) + x = torch.rand(B, L, C) + x_out, out_size = patch_merge(x, input_size) + assert x_out.size() == (1, 16, 4) + assert out_size == (4, 4) + # assert out size is consistent with real output + assert x_out.size(1) == out_size[0] * out_size[1] + in_c = 4 + out_c = 5 + kernel_size = 6 + stride = 3 + padding = 2 + dilation = 2 + bias = False + patch_merge = PatchMerging( + in_channels=in_c, + out_channels=out_c, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + B, L, C = 1, 100, 4 + input_size = (10, 10) + x = torch.rand(B, L, C) + x_out, out_size = patch_merge(x, input_size) + assert x_out.size() == (1, 4, 5) + assert out_size == (2, 2) + # assert out size is consistent with real output + assert x_out.size(1) == out_size[0] * out_size[1] + + # Test with adaptive padding + for padding in ('same', 'corner'): + in_c = 2 + out_c = 3 + B = 2 + + # test stride is 1 + input_size = (5, 5) + kernel_size = (5, 5) + stride = (1, 1) + dilation = 1 + bias = False + L = input_size[0] * input_size[1] + + x = torch.rand(B, L, in_c) + patch_merge = PatchMerging( + in_channels=in_c, + out_channels=out_c, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + x_out, out_size = patch_merge(x, input_size) + assert x_out.size() == (B, 25, 3) + assert out_size == (5, 5) + assert x_out.size(1) == out_size[0] * out_size[1] + + # test kernel_size == stride + input_size = (5, 5) + kernel_size = (5, 5) + stride = (5, 5) + dilation = 1 + bias = False + L = input_size[0] * input_size[1] + + x = torch.rand(B, L, in_c) + patch_merge = PatchMerging( + in_channels=in_c, + out_channels=out_c, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + x_out, out_size = patch_merge(x, input_size) + assert x_out.size() == (B, 1, 3) + assert out_size == (1, 1) + assert x_out.size(1) == out_size[0] * out_size[1] + + # test kernel_size == stride + input_size = (6, 5) + kernel_size = (5, 5) + stride = (5, 5) + dilation = 1 + bias = False + L = input_size[0] * input_size[1] + + x = torch.rand(B, L, in_c) + patch_merge = PatchMerging( + in_channels=in_c, + out_channels=out_c, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + x_out, out_size = patch_merge(x, input_size) + assert x_out.size() == (B, 2, 3) + assert out_size == (2, 1) + 
assert x_out.size(1) == out_size[0] * out_size[1] + + # test different kernel_size with different stride + input_size = (6, 5) + kernel_size = (6, 2) + stride = (6, 2) + dilation = 1 + bias = False + L = input_size[0] * input_size[1] + + x = torch.rand(B, L, in_c) + patch_merge = PatchMerging( + in_channels=in_c, + out_channels=out_c, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + x_out, out_size = patch_merge(x, input_size) + assert x_out.size() == (B, 3, 3) + assert out_size == (1, 3) + assert x_out.size(1) == out_size[0] * out_size[1] diff --git a/tests/test_models/test_utils/test_shape_convert.py b/tests/test_models/test_utils/test_shape_convert.py new file mode 100644 index 0000000000..60e87f38ed --- /dev/null +++ b/tests/test_models/test_utils/test_shape_convert.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmseg.models.utils import (nchw2nlc2nchw, nchw_to_nlc, nlc2nchw2nlc, + nlc_to_nchw) + + +def test_nchw2nlc2nchw(): + # Test nchw2nlc2nchw function + shape_nchw = (4, 2, 5, 5) + shape_nlc = (4, 25, 2) + + def test_func(x): + assert x.shape == torch.Size(shape_nlc) + return x + + x = torch.rand(*shape_nchw) + output = nchw2nlc2nchw(test_func, x) + assert output.shape == torch.Size(shape_nchw) + + def test_func2(x, arg): + assert x.shape == torch.Size(shape_nlc) + assert arg == 100 + return x + + x = torch.rand(*shape_nchw) + output = nchw2nlc2nchw(test_func2, x, arg=100) + assert output.shape == torch.Size(shape_nchw) + + def test_func3(x): + assert x.is_contiguous() + assert x.shape == torch.Size(shape_nlc) + return x + + x = torch.rand(*shape_nchw) + output = nchw2nlc2nchw(test_func3, x, contiguous=True) + assert output.shape == torch.Size(shape_nchw) + assert output.is_contiguous() + + +def test_nlc2nchw2nlc(): + # Test nlc2nchw2nlc function + shape_nchw = (4, 2, 5, 5) + shape_nlc = (4, 25, 2) + + def test_func(x): + assert x.shape == torch.Size(shape_nchw) + return x + + x = torch.rand(*shape_nlc) + output = nlc2nchw2nlc(test_func, x, shape_nchw[2:]) + assert output.shape == torch.Size(shape_nlc) + + def test_func2(x, arg): + assert x.shape == torch.Size(shape_nchw) + assert arg == 100 + return x + + x = torch.rand(*shape_nlc) + output = nlc2nchw2nlc(test_func2, x, shape_nchw[2:], arg=100) + assert output.shape == torch.Size(shape_nlc) + + def test_func3(x): + assert x.is_contiguous() + assert x.shape == torch.Size(shape_nchw) + return x + + x = torch.rand(*shape_nlc) + output = nlc2nchw2nlc(test_func3, x, shape_nchw[2:], contiguous=True) + assert output.shape == torch.Size(shape_nlc) + assert output.is_contiguous() + + +def test_nchw_to_nlc(): + # Test nchw_to_nlc function + shape_nchw = (4, 2, 5, 5) + shape_nlc = (4, 25, 2) + x = torch.rand(*shape_nchw) + y = nchw_to_nlc(x) + assert y.shape == torch.Size(shape_nlc) + + +def test_nlc_to_nchw(): + # Test nlc_to_nchw function + shape_nchw = (4, 2, 5, 5) + shape_nlc = (4, 25, 2) + x = torch.rand(*shape_nlc) + y = nlc_to_nchw(x, (5, 5)) + assert y.shape == torch.Size(shape_nchw) diff --git a/tests/test_ops/test_sep_conv_module.py b/tests/test_ops/test_sep_conv_module.py deleted file mode 100644 index 4eb650111c..0000000000 --- a/tests/test_ops/test_sep_conv_module.py +++ /dev/null @@ -1,71 +0,0 @@ -import pytest -import torch -import torch.nn as nn - -from mmseg.ops import DepthwiseSeparableConvModule - - -def test_depthwise_separable_conv(): - with pytest.raises(AssertionError): - # conv_cfg must be a dict or None - 
DepthwiseSeparableConvModule(4, 8, 2, groups=2) - - # test default config - conv = DepthwiseSeparableConvModule(3, 8, 2) - assert conv.depthwise_conv.conv.groups == 3 - assert conv.pointwise_conv.conv.kernel_size == (1, 1) - assert not conv.depthwise_conv.with_norm - assert not conv.pointwise_conv.with_norm - assert conv.depthwise_conv.activate.__class__.__name__ == 'ReLU' - assert conv.pointwise_conv.activate.__class__.__name__ == 'ReLU' - x = torch.rand(1, 3, 256, 256) - output = conv(x) - assert output.shape == (1, 8, 255, 255) - - # test - conv = DepthwiseSeparableConvModule(3, 8, 2, dw_norm_cfg=dict(type='BN')) - assert conv.depthwise_conv.norm_name == 'bn' - assert not conv.pointwise_conv.with_norm - x = torch.rand(1, 3, 256, 256) - output = conv(x) - assert output.shape == (1, 8, 255, 255) - - conv = DepthwiseSeparableConvModule(3, 8, 2, pw_norm_cfg=dict(type='BN')) - assert not conv.depthwise_conv.with_norm - assert conv.pointwise_conv.norm_name == 'bn' - x = torch.rand(1, 3, 256, 256) - output = conv(x) - assert output.shape == (1, 8, 255, 255) - - # add test for ['norm', 'conv', 'act'] - conv = DepthwiseSeparableConvModule(3, 8, 2, order=('norm', 'conv', 'act')) - x = torch.rand(1, 3, 256, 256) - output = conv(x) - assert output.shape == (1, 8, 255, 255) - - conv = DepthwiseSeparableConvModule( - 3, 8, 3, padding=1, with_spectral_norm=True) - assert hasattr(conv.depthwise_conv.conv, 'weight_orig') - assert hasattr(conv.pointwise_conv.conv, 'weight_orig') - output = conv(x) - assert output.shape == (1, 8, 256, 256) - - conv = DepthwiseSeparableConvModule( - 3, 8, 3, padding=1, padding_mode='reflect') - assert isinstance(conv.depthwise_conv.padding_layer, nn.ReflectionPad2d) - output = conv(x) - assert output.shape == (1, 8, 256, 256) - - conv = DepthwiseSeparableConvModule( - 3, 8, 3, padding=1, dw_act_cfg=dict(type='LeakyReLU')) - assert conv.depthwise_conv.activate.__class__.__name__ == 'LeakyReLU' - assert conv.pointwise_conv.activate.__class__.__name__ == 'ReLU' - output = conv(x) - assert output.shape == (1, 8, 256, 256) - - conv = DepthwiseSeparableConvModule( - 3, 8, 3, padding=1, pw_act_cfg=dict(type='LeakyReLU')) - assert conv.depthwise_conv.activate.__class__.__name__ == 'ReLU' - assert conv.pointwise_conv.activate.__class__.__name__ == 'LeakyReLU' - output = conv(x) - assert output.shape == (1, 8, 256, 256) diff --git a/tests/test_sampler.py b/tests/test_sampler.py index af26b8dd62..322be9579b 100644 --- a/tests/test_sampler.py +++ b/tests/test_sampler.py @@ -1,21 +1,78 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
import pytest import torch -from mmseg.core import OHEMPixelSampler +from mmseg.models.decode_heads import FCNHead +from mmseg.structures import OHEMPixelSampler + + +def _context_for_ohem(): + return FCNHead(in_channels=32, channels=16, num_classes=19) + + +def _context_for_ohem_multiple_loss(): + return FCNHead( + in_channels=32, + channels=16, + num_classes=19, + loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_1'), + dict(type='CrossEntropyLoss', loss_name='loss_2') + ]) def test_ohem_sampler(): with pytest.raises(AssertionError): # seg_logit and seg_label must be of the same size - sampler = OHEMPixelSampler() + sampler = OHEMPixelSampler(context=_context_for_ohem()) + seg_logit = torch.randn(1, 19, 45, 45) + seg_label = torch.randint(0, 19, size=(1, 1, 89, 89)) + sampler.sample(seg_logit, seg_label) + + # test with thresh + sampler = OHEMPixelSampler( + context=_context_for_ohem(), thresh=0.7, min_kept=200) + seg_logit = torch.randn(1, 19, 45, 45) + seg_label = torch.randint(0, 19, size=(1, 1, 45, 45)) + seg_weight = sampler.sample(seg_logit, seg_label) + assert seg_weight.shape[0] == seg_logit.shape[0] + assert seg_weight.shape[1:] == seg_logit.shape[2:] + assert seg_weight.sum() > 200 + + # test w.o thresh + sampler = OHEMPixelSampler(context=_context_for_ohem(), min_kept=200) + seg_logit = torch.randn(1, 19, 45, 45) + seg_label = torch.randint(0, 19, size=(1, 1, 45, 45)) + seg_weight = sampler.sample(seg_logit, seg_label) + assert seg_weight.shape[0] == seg_logit.shape[0] + assert seg_weight.shape[1:] == seg_logit.shape[2:] + assert seg_weight.sum() == 200 + + # test multiple losses case + with pytest.raises(AssertionError): + # seg_logit and seg_label must be of the same size + sampler = OHEMPixelSampler(context=_context_for_ohem_multiple_loss()) seg_logit = torch.randn(1, 19, 45, 45) seg_label = torch.randint(0, 19, size=(1, 1, 89, 89)) sampler.sample(seg_logit, seg_label) - sampler = OHEMPixelSampler() + # test with thresh in multiple losses case + sampler = OHEMPixelSampler( + context=_context_for_ohem_multiple_loss(), thresh=0.7, min_kept=200) + seg_logit = torch.randn(1, 19, 45, 45) + seg_label = torch.randint(0, 19, size=(1, 1, 45, 45)) + seg_weight = sampler.sample(seg_logit, seg_label) + assert seg_weight.shape[0] == seg_logit.shape[0] + assert seg_weight.shape[1:] == seg_logit.shape[2:] + assert seg_weight.sum() > 200 + + # test w.o thresh in multiple losses case + sampler = OHEMPixelSampler( + context=_context_for_ohem_multiple_loss(), min_kept=200) seg_logit = torch.randn(1, 19, 45, 45) seg_label = torch.randint(0, 19, size=(1, 1, 45, 45)) seg_weight = sampler.sample(seg_logit, seg_label) assert seg_weight.shape[0] == seg_logit.shape[0] assert seg_weight.shape[1:] == seg_logit.shape[2:] + assert seg_weight.sum() == 200 diff --git a/tests/test_structures/test_seg_data_sample.py b/tests/test_structures/test_seg_data_sample.py new file mode 100644 index 0000000000..37796b611d --- /dev/null +++ b/tests/test_structures/test_seg_data_sample.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
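For context, the OHEM sampler exercised in the tests above is normally enabled through the decode head config rather than constructed by hand; a minimal sketch under that assumption (the thresh and min_kept values are illustrative, not taken from this diff):

# Hypothetical decode head config enabling OHEM; values are illustrative.
decode_head = dict(
    type='FCNHead',
    in_channels=32,
    channels=16,
    num_classes=19,
    # OHEMPixelSampler keeps the pixels whose predicted probability for the
    # ground-truth class falls below `thresh`, but never fewer than
    # `min_kept` pixels per image.
    sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=100000))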
+from unittest import TestCase + +import numpy as np +import pytest +import torch +from mmengine.structures import PixelData + +from mmseg.structures import SegDataSample + + +def _equal(a, b): + if isinstance(a, (torch.Tensor, np.ndarray)): + return (a == b).all() + else: + return a == b + + +class TestSegDataSample(TestCase): + + def test_init(self): + meta_info = dict( + img_size=[256, 256], + scale_factor=np.array([1.5, 1.5]), + img_shape=torch.rand(4)) + + seg_data_sample = SegDataSample(metainfo=meta_info) + assert 'img_size' in seg_data_sample + assert seg_data_sample.img_size == [256, 256] + assert seg_data_sample.get('img_size') == [256, 256] + + def test_setter(self): + seg_data_sample = SegDataSample() + + # test gt_sem_seg + gt_sem_seg_data = dict(sem_seg=torch.rand(5, 4, 2)) + gt_sem_seg = PixelData(**gt_sem_seg_data) + seg_data_sample.gt_sem_seg = gt_sem_seg + assert 'gt_sem_seg' in seg_data_sample + assert _equal(seg_data_sample.gt_sem_seg.sem_seg, + gt_sem_seg_data['sem_seg']) + + # test pred_sem_seg + pred_sem_seg_data = dict(sem_seg=torch.rand(5, 4, 2)) + pred_sem_seg = PixelData(**pred_sem_seg_data) + seg_data_sample.pred_sem_seg = pred_sem_seg + assert 'pred_sem_seg' in seg_data_sample + assert _equal(seg_data_sample.pred_sem_seg.sem_seg, + pred_sem_seg_data['sem_seg']) + + # test seg_logits + seg_logits_data = dict(sem_seg=torch.rand(5, 4, 2)) + seg_logits = PixelData(**seg_logits_data) + seg_data_sample.seg_logits = seg_logits + assert 'seg_logits' in seg_data_sample + assert _equal(seg_data_sample.seg_logits.sem_seg, + seg_logits_data['sem_seg']) + + # test type error + with pytest.raises(AssertionError): + seg_data_sample.gt_sem_seg = torch.rand(2, 4) + + with pytest.raises(AssertionError): + seg_data_sample.pred_sem_seg = torch.rand(2, 4) + + with pytest.raises(AssertionError): + seg_data_sample.seg_logits = torch.rand(2, 4) + + def test_deleter(self): + seg_data_sample = SegDataSample() + + pred_sem_seg_data = dict(sem_seg=torch.rand(5, 4, 2)) + pred_sem_seg = PixelData(**pred_sem_seg_data) + seg_data_sample.pred_sem_seg = pred_sem_seg + assert 'pred_sem_seg' in seg_data_sample + del seg_data_sample.pred_sem_seg + assert 'pred_sem_seg' not in seg_data_sample diff --git a/tests/test_utils/test_io.py b/tests/test_utils/test_io.py new file mode 100644 index 0000000000..05abd275f8 --- /dev/null +++ b/tests/test_utils/test_io.py @@ -0,0 +1,33 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import numpy as np +import pytest +from mmengine import FileClient + +from mmseg.utils import datafrombytes + + +@pytest.mark.parametrize( + ['backend', 'suffix'], + [['nifti', '.nii.gz'], ['numpy', '.npy'], ['pickle', '.pkl']]) +def test_datafrombytes(backend, suffix): + + file_client = FileClient('disk') + file_path = osp.join(osp.dirname(__file__), '../data/biomedical' + suffix) + bytes = file_client.get(file_path) + data = datafrombytes(bytes, backend) + + if backend == 'pickle': + # test pickle loading + assert isinstance(data, dict) + else: + assert isinstance(data, np.ndarray) + if backend == 'nifti': + # test nifti file loading + assert len(data.shape) == 3 + else: + # test npy file loading + # testing data biomedical.npy includes data and label + assert len(data.shape) == 4 + assert data.shape[0] == 2 diff --git a/tests/test_utils/test_set_env.py b/tests/test_utils/test_set_env.py new file mode 100644 index 0000000000..86a2d29aec --- /dev/null +++ b/tests/test_utils/test_set_env.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +import datetime +import sys +from unittest import TestCase + +from mmengine import DefaultScope + +from mmseg.utils import register_all_modules + + +class TestSetupEnv(TestCase): + + def test_register_all_modules(self): + from mmseg.registry import DATASETS + + # not init default scope + sys.modules.pop('mmseg.datasets', None) + sys.modules.pop('mmseg.datasets.ade', None) + DATASETS._module_dict.pop('ADE20KDataset', None) + self.assertFalse('ADE20KDataset' in DATASETS.module_dict) + register_all_modules(init_default_scope=False) + self.assertTrue('ADE20KDataset' in DATASETS.module_dict) + + # init default scope + sys.modules.pop('mmseg.datasets') + sys.modules.pop('mmseg.datasets.ade') + DATASETS._module_dict.pop('ADE20KDataset', None) + self.assertFalse('ADE20KDataset' in DATASETS.module_dict) + register_all_modules(init_default_scope=True) + self.assertTrue('ADE20KDataset' in DATASETS.module_dict) + self.assertEqual(DefaultScope.get_current_instance().scope_name, + 'mmseg') + + # init default scope when another scope is init + name = f'test-{datetime.datetime.now()}' + DefaultScope.get_instance(name, scope_name='test') + with self.assertWarnsRegex( + Warning, 'The current default scope "test" is not "mmseg"'): + register_all_modules(init_default_scope=True) diff --git a/tests/test_visualization/test_local_visualizer.py b/tests/test_visualization/test_local_visualizer.py new file mode 100644 index 0000000000..b60a9b8750 --- /dev/null +++ b/tests/test_visualization/test_local_visualizer.py @@ -0,0 +1,157 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import tempfile +from unittest import TestCase + +import cv2 +import mmcv +import numpy as np +import torch +from mmengine.structures import PixelData + +from mmseg.structures import SegDataSample +from mmseg.visualization import SegLocalVisualizer + + +class TestSegLocalVisualizer(TestCase): + + def test_add_datasample(self): + h = 10 + w = 12 + num_class = 2 + out_file = 'out_file' + + image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8') + + # test gt_sem_seg + gt_sem_seg_data = dict(data=torch.randint(0, num_class, (1, h, w))) + gt_sem_seg = PixelData(**gt_sem_seg_data) + + def test_add_datasample_forward(gt_sem_seg): + data_sample = SegDataSample() + data_sample.gt_sem_seg = gt_sem_seg + + with tempfile.TemporaryDirectory() as tmp_dir: + seg_local_visualizer = SegLocalVisualizer( + vis_backends=[dict(type='LocalVisBackend')], + save_dir=tmp_dir) + seg_local_visualizer.dataset_meta = dict( + classes=('background', 'foreground'), + palette=[[120, 120, 120], [6, 230, 230]]) + + # test out_file + seg_local_visualizer.add_datasample(out_file, image, + data_sample) + + assert os.path.exists( + osp.join(tmp_dir, 'vis_data', 'vis_image', + out_file + '_0.png')) + drawn_img = cv2.imread( + osp.join(tmp_dir, 'vis_data', 'vis_image', + out_file + '_0.png')) + assert drawn_img.shape == (h, w, 3) + + # test gt_instances and pred_instances + pred_sem_seg_data = dict( + data=torch.randint(0, num_class, (1, h, w))) + pred_sem_seg = PixelData(**pred_sem_seg_data) + + data_sample.pred_sem_seg = pred_sem_seg + + seg_local_visualizer.add_datasample(out_file, image, + data_sample) + self._assert_image_and_shape( + osp.join(tmp_dir, 'vis_data', 'vis_image', + out_file + '_0.png'), (h, w * 2, 3)) + + seg_local_visualizer.add_datasample( + out_file, image, data_sample, draw_gt=False) + self._assert_image_and_shape( + osp.join(tmp_dir, 'vis_data', 'vis_image', + out_file + '_0.png'), (h, w, 
3)) + + if torch.cuda.is_available(): + test_add_datasample_forward(gt_sem_seg.cuda()) + test_add_datasample_forward(gt_sem_seg) + + def test_cityscapes_add_datasample(self): + h = 128 + w = 256 + num_class = 19 + out_file = 'out_file_cityscapes' + + image = mmcv.imread( + osp.join( + osp.dirname(__file__), + '../data/pseudo_cityscapes_dataset/leftImg8bit/val/frankfurt/frankfurt_000000_000294_leftImg8bit.png' # noqa + ), + 'color') + sem_seg = mmcv.imread( + osp.join( + osp.dirname(__file__), + '../data/pseudo_cityscapes_dataset/gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_labelTrainIds.png' # noqa + ), + 'unchanged') + sem_seg = torch.unsqueeze(torch.from_numpy(sem_seg), 0) + gt_sem_seg_data = dict(data=sem_seg) + gt_sem_seg = PixelData(**gt_sem_seg_data) + + def test_cityscapes_add_datasample_forward(gt_sem_seg): + data_sample = SegDataSample() + data_sample.gt_sem_seg = gt_sem_seg + + with tempfile.TemporaryDirectory() as tmp_dir: + seg_local_visualizer = SegLocalVisualizer( + vis_backends=[dict(type='LocalVisBackend')], + save_dir=tmp_dir) + seg_local_visualizer.dataset_meta = dict( + classes=('road', 'sidewalk', 'building', 'wall', 'fence', + 'pole', 'traffic light', 'traffic sign', + 'vegetation', 'terrain', 'sky', 'person', 'rider', + 'car', 'truck', 'bus', 'train', 'motorcycle', + 'bicycle'), + palette=[[128, 64, 128], [244, 35, 232], [70, 70, 70], + [102, 102, 156], [190, 153, 153], [153, 153, 153], + [250, 170, 30], [220, 220, 0], [107, 142, 35], + [152, 251, 152], [70, 130, 180], [220, 20, 60], + [255, 0, 0], [0, 0, 142], [0, 0, 70], + [0, 60, 100], [0, 80, 100], [0, 0, 230], + [119, 11, 32]]) + # test out_file + seg_local_visualizer.add_datasample( + out_file, + image, + data_sample, + out_file=osp.join(tmp_dir, 'test.png')) + self._assert_image_and_shape( + osp.join(tmp_dir, 'test.png'), (h, w, 3)) + + # test gt_instances and pred_instances + pred_sem_seg_data = dict( + data=torch.randint(0, num_class, (1, h, w))) + pred_sem_seg = PixelData(**pred_sem_seg_data) + + data_sample.pred_sem_seg = pred_sem_seg + + # test draw prediction with gt + seg_local_visualizer.add_datasample(out_file, image, + data_sample) + self._assert_image_and_shape( + osp.join(tmp_dir, 'vis_data', 'vis_image', + out_file + '_0.png'), (h, w * 2, 3)) + # test draw prediction without gt + seg_local_visualizer.add_datasample( + out_file, image, data_sample, draw_gt=False) + self._assert_image_and_shape( + osp.join(tmp_dir, 'vis_data', 'vis_image', + out_file + '_0.png'), (h, w, 3)) + + if torch.cuda.is_available(): + test_cityscapes_add_datasample_forward(gt_sem_seg.cuda()) + test_cityscapes_add_datasample_forward(gt_sem_seg) + + def _assert_image_and_shape(self, out_file, out_shape): + assert os.path.exists(out_file) + drawn_img = cv2.imread(out_file) + assert drawn_img.shape == out_shape diff --git a/tools/analysis_tools/analyze_logs.py b/tools/analysis_tools/analyze_logs.py new file mode 100644 index 0000000000..7464d23162 --- /dev/null +++ b/tools/analysis_tools/analyze_logs.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
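For reference, the log-analysis script that follows consumes training logs with one JSON object per line; a hedged sketch of the two kinds of records it can plot (key names other than step, loss and the mIoU/mAcc/aAcc metrics are illustrative):

import json

# Illustrative records; real log files contain one such object per line.
train_record = json.loads('{"lr": 0.01, "step": 50, "loss": 0.83}')
val_record = json.loads('{"mIoU": 0.42, "mAcc": 0.55, "aAcc": 0.90, "step": 0}')
assert train_record['step'] == 50
assert 'mIoU' in val_record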
+"""Modified from https://github.com/open- +mmlab/mmdetection/blob/master/tools/analysis_tools/analyze_logs.py.""" +import argparse +import json +from collections import defaultdict + +import matplotlib.pyplot as plt +import seaborn as sns + + +def plot_curve(log_dicts, args): + if args.backend is not None: + plt.switch_backend(args.backend) + sns.set_style(args.style) + # if legend is None, use {filename}_{key} as legend + legend = args.legend + if legend is None: + legend = [] + for json_log in args.json_logs: + for metric in args.keys: + legend.append(f'{json_log}_{metric}') + assert len(legend) == (len(args.json_logs) * len(args.keys)) + metrics = args.keys + + num_metrics = len(metrics) + for i, log_dict in enumerate(log_dicts): + epochs = list(log_dict.keys()) + for j, metric in enumerate(metrics): + print(f'plot curve of {args.json_logs[i]}, metric is {metric}') + plot_epochs = [] + plot_iters = [] + plot_values = [] + # In some log files exist lines of validation, + # `mode` list is used to only collect iter number + # of training line. + for epoch in epochs: + epoch_logs = log_dict[epoch] + if metric not in epoch_logs.keys(): + continue + if metric in ['mIoU', 'mAcc', 'aAcc']: + plot_epochs.append(epoch) + plot_values.append(epoch_logs[metric][0]) + else: + for idx in range(len(epoch_logs[metric])): + plot_iters.append(epoch_logs['step'][idx]) + plot_values.append(epoch_logs[metric][idx]) + ax = plt.gca() + label = legend[i * num_metrics + j] + if metric in ['mIoU', 'mAcc', 'aAcc']: + ax.set_xticks(plot_epochs) + plt.xlabel('step') + plt.plot(plot_epochs, plot_values, label=label, marker='o') + else: + plt.xlabel('iter') + plt.plot(plot_iters, plot_values, label=label, linewidth=0.5) + plt.legend() + if args.title is not None: + plt.title(args.title) + if args.out is None: + plt.show() + else: + print(f'save curve to: {args.out}') + plt.savefig(args.out) + plt.cla() + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + parser.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser.add_argument( + '--keys', + type=str, + nargs='+', + default=['mIoU'], + help='the metric that you want to plot') + parser.add_argument('--title', type=str, help='title of figure') + parser.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser.add_argument( + '--backend', type=str, default=None, help='backend of plt') + parser.add_argument( + '--style', type=str, default='dark', help='style of plt') + parser.add_argument('--out', type=str, default=None) + args = parser.parse_args() + return args + + +def load_json_logs(json_logs): + # load and convert json_logs to log_dict, key is step, value is a sub dict + # keys of sub dict is different metrics + # value of sub dict is a list of corresponding values of all iterations + log_dicts = [dict() for _ in json_logs] + prev_step = 0 + for json_log, log_dict in zip(json_logs, log_dicts): + with open(json_log) as log_file: + for line in log_file: + log = json.loads(line.strip()) + # the final step in json file is 0. 
+ if 'step' in log and log['step'] != 0: + step = log['step'] + prev_step = step + else: + step = prev_step + if step not in log_dict: + log_dict[step] = defaultdict(list) + for k, v in log.items(): + log_dict[step][k].append(v) + return log_dicts + + +def main(): + args = parse_args() + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + log_dicts = load_json_logs(json_logs) + plot_curve(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/benchmark.py b/tools/analysis_tools/benchmark.py new file mode 100644 index 0000000000..bcb3948a6e --- /dev/null +++ b/tools/analysis_tools/benchmark.py @@ -0,0 +1,119 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +import time + +import numpy as np +import torch +from mmengine import Config +from mmengine.fileio import dump +from mmengine.model.utils import revert_sync_batchnorm +from mmengine.runner import Runner, load_checkpoint +from mmengine.utils import mkdir_or_exist + +from mmseg.registry import MODELS +from mmseg.utils import register_all_modules + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMSeg benchmark a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument( + '--log-interval', type=int, default=50, help='interval of logging') + parser.add_argument( + '--work-dir', + help=('if specified, the results will be dumped ' + 'into the directory as json')) + parser.add_argument('--repeat-times', type=int, default=1) + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + register_all_modules() + cfg = Config.fromfile(args.config) + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + if args.work_dir is not None: + mkdir_or_exist(osp.abspath(args.work_dir)) + json_file = osp.join(args.work_dir, f'fps_{timestamp}.json') + else: + # use config filename as default work_dir if cfg.work_dir is None + work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + mkdir_or_exist(osp.abspath(work_dir)) + json_file = osp.join(work_dir, f'fps_{timestamp}.json') + + repeat_times = args.repeat_times + # set cudnn_benchmark + torch.backends.cudnn.benchmark = False + cfg.model.pretrained = None + + benchmark_dict = dict(config=args.config, unit='img / s') + overall_fps_list = [] + cfg.test_dataloader.batch_size = 1 + for time_index in range(repeat_times): + print(f'Run {time_index + 1}:') + # build the dataloader + data_loader = Runner.build_dataloader(cfg.test_dataloader) + + # build the model and load checkpoint + cfg.model.train_cfg = None + model = MODELS.build(cfg.model) + + if 'checkpoint' in args and osp.exists(args.checkpoint): + load_checkpoint(model, args.checkpoint, map_location='cpu') + + if torch.cuda.is_available(): + model = model.cuda() + + model = revert_sync_batchnorm(model) + + model.eval() + + # the first several iterations may be very slow so skip them + num_warmup = 5 + pure_inf_time = 0 + total_iters = 200 + + # benchmark with 200 batches and take the average + for i, data in enumerate(data_loader): + data = model.data_preprocessor(data, True) + inputs = data['inputs'] + data_samples = data['data_samples'] + if torch.cuda.is_available(): + torch.cuda.synchronize() + start_time = time.perf_counter() + + with torch.no_grad(): + model(inputs, data_samples, mode='predict') + + if torch.cuda.is_available(): + torch.cuda.synchronize() + 
elapsed = time.perf_counter() - start_time + + if i >= num_warmup: + pure_inf_time += elapsed + if (i + 1) % args.log_interval == 0: + fps = (i + 1 - num_warmup) / pure_inf_time + print(f'Done image [{i + 1:<3}/ {total_iters}], ' + f'fps: {fps:.2f} img / s') + + if (i + 1) == total_iters: + fps = (i + 1 - num_warmup) / pure_inf_time + print(f'Overall fps: {fps:.2f} img / s\n') + benchmark_dict[f'overall_fps_{time_index + 1}'] = round(fps, 2) + overall_fps_list.append(fps) + break + benchmark_dict['average_fps'] = round(np.mean(overall_fps_list), 2) + benchmark_dict['fps_variance'] = round(np.var(overall_fps_list), 4) + print(f'Average fps of {repeat_times} evaluations: ' + f'{benchmark_dict["average_fps"]}') + print(f'The variance of {repeat_times} evaluations: ' + f'{benchmark_dict["fps_variance"]}') + dump(benchmark_dict, json_file, indent=4) + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/confusion_matrix.py b/tools/analysis_tools/confusion_matrix.py new file mode 100644 index 0000000000..9a87bc14c9 --- /dev/null +++ b/tools/analysis_tools/confusion_matrix.py @@ -0,0 +1,184 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os + +import matplotlib.pyplot as plt +import numpy as np +from matplotlib.ticker import MultipleLocator +from mmengine import Config, DictAction +from mmengine.utils import ProgressBar, load + +from mmseg.datasets import build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Generate confusion matrix from segmentation results') + parser.add_argument('config', help='test config file path') + parser.add_argument( + 'prediction_path', help='prediction path where test .pkl result') + parser.add_argument( + 'save_dir', help='directory where confusion matrix will be saved') + parser.add_argument( + '--show', action='store_true', help='show confusion matrix') + parser.add_argument( + '--color-theme', + default='winter', + help='theme of the matrix color map') + parser.add_argument( + '--title', + default='Normalized Confusion Matrix', + help='title of the matrix color map') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def calculate_confusion_matrix(dataset, results): + """Calculate the confusion matrix. + + Args: + dataset (Dataset): Test or val dataset. + results (list[ndarray]): A list of segmentation results in each image. + """ + n = len(dataset.CLASSES) + confusion_matrix = np.zeros(shape=[n, n]) + assert len(dataset) == len(results) + prog_bar = ProgressBar(len(results)) + for idx, per_img_res in enumerate(results): + res_segm = per_img_res + gt_segm = dataset.get_gt_seg_map_by_idx(idx) + inds = n * gt_segm + res_segm + inds = inds.flatten() + mat = np.bincount(inds, minlength=n**2).reshape(n, n) + confusion_matrix += mat + prog_bar.update() + return confusion_matrix + + +def plot_confusion_matrix(confusion_matrix, + labels, + save_dir=None, + show=True, + title='Normalized Confusion Matrix', + color_theme='winter'): + """Draw confusion matrix with matplotlib. + + Args: + confusion_matrix (ndarray): The confusion matrix. 
+ labels (list[str]): List of class names. + save_dir (str|optional): If set, save the confusion matrix plot to the + given path. Default: None. + show (bool): Whether to show the plot. Default: True. + title (str): Title of the plot. Default: `Normalized Confusion Matrix`. + color_theme (str): Theme of the matrix color map. Default: `winter`. + """ + # normalize the confusion matrix + per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis] + confusion_matrix = \ + confusion_matrix.astype(np.float32) / per_label_sums * 100 + + num_classes = len(labels) + fig, ax = plt.subplots( + figsize=(2 * num_classes, 2 * num_classes * 0.8), dpi=180) + cmap = plt.get_cmap(color_theme) + im = ax.imshow(confusion_matrix, cmap=cmap) + plt.colorbar(mappable=im, ax=ax) + + title_font = {'weight': 'bold', 'size': 12} + ax.set_title(title, fontdict=title_font) + label_font = {'size': 10} + plt.ylabel('Ground Truth Label', fontdict=label_font) + plt.xlabel('Prediction Label', fontdict=label_font) + + # draw locator + xmajor_locator = MultipleLocator(1) + xminor_locator = MultipleLocator(0.5) + ax.xaxis.set_major_locator(xmajor_locator) + ax.xaxis.set_minor_locator(xminor_locator) + ymajor_locator = MultipleLocator(1) + yminor_locator = MultipleLocator(0.5) + ax.yaxis.set_major_locator(ymajor_locator) + ax.yaxis.set_minor_locator(yminor_locator) + + # draw grid + ax.grid(True, which='minor', linestyle='-') + + # draw label + ax.set_xticks(np.arange(num_classes)) + ax.set_yticks(np.arange(num_classes)) + ax.set_xticklabels(labels) + ax.set_yticklabels(labels) + + ax.tick_params( + axis='x', bottom=False, top=True, labelbottom=False, labeltop=True) + plt.setp( + ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor') + + # draw confusion matrix value + for i in range(num_classes): + for j in range(num_classes): + ax.text( + j, + i, + '{}%'.format( + round(confusion_matrix[i, j], 2 + ) if not np.isnan(confusion_matrix[i, j]) else -1), + ha='center', + va='center', + color='w', + size=7) + + ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1 + + fig.tight_layout() + if save_dir is not None: + plt.savefig( + os.path.join(save_dir, 'confusion_matrix.png'), format='png') + if show: + plt.show() + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + results = load(args.prediction_path) + + assert isinstance(results, list) + if isinstance(results[0], np.ndarray): + pass + else: + raise TypeError('invalid type of prediction results') + + if isinstance(cfg.data.test, dict): + cfg.data.test.test_mode = True + elif isinstance(cfg.data.test, list): + for ds_cfg in cfg.data.test: + ds_cfg.test_mode = True + + dataset = build_dataset(cfg.data.test) + confusion_matrix = calculate_confusion_matrix(dataset, results) + plot_confusion_matrix( + confusion_matrix, + dataset.CLASSES, + save_dir=args.save_dir, + show=args.show, + title=args.title, + color_theme=args.color_theme) + + +if __name__ == '__main__': + main() diff --git a/tools/get_flops.py b/tools/analysis_tools/get_flops.py similarity index 82% rename from tools/get_flops.py rename to tools/analysis_tools/get_flops.py index 86f1c5a9ef..1e8f188e18 100644 --- a/tools/get_flops.py +++ b/tools/analysis_tools/get_flops.py @@ -1,13 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
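As an aside, the bincount trick in calculate_confusion_matrix above folds every (ground truth, prediction) pair into the single index n * gt + pred before counting; a minimal standalone sketch (class count and label arrays are illustrative):

import numpy as np

n = 3  # number of classes, illustrative
gt_segm = np.array([0, 1, 2, 2])
res_segm = np.array([0, 1, 1, 2])
# rows index the ground-truth class, columns the predicted class
mat = np.bincount(n * gt_segm + res_segm, minlength=n**2).reshape(n, n)
assert mat[2, 1] == 1 and mat.sum() == gt_segm.size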
import argparse -from mmcv import Config from mmcv.cnn import get_model_complexity_info +from mmengine import Config from mmseg.models import build_segmentor def parse_args(): - parser = argparse.ArgumentParser(description='Train a segmentor') + parser = argparse.ArgumentParser( + description='Get the FLOPs of a segmentor') parser.add_argument('config', help='train config file path') parser.add_argument( '--shape', @@ -31,8 +33,11 @@ def main(): raise ValueError('invalid input shape') cfg = Config.fromfile(args.config) + cfg.model.pretrained = None model = build_segmentor( - cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg).cuda() + cfg.model, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')).cuda() model.eval() if hasattr(model, 'forward_dummy'): diff --git a/tools/benchmark.py b/tools/benchmark.py deleted file mode 100644 index bcb0d9580f..0000000000 --- a/tools/benchmark.py +++ /dev/null @@ -1,81 +0,0 @@ -import argparse -import time - -import torch -from mmcv import Config -from mmcv.parallel import MMDataParallel -from mmcv.runner import load_checkpoint - -from mmseg.datasets import build_dataloader, build_dataset -from mmseg.models import build_segmentor - - -def parse_args(): - parser = argparse.ArgumentParser(description='MMSeg benchmark a model') - parser.add_argument('config', help='test config file path') - parser.add_argument('checkpoint', help='checkpoint file') - parser.add_argument( - '--log-interval', type=int, default=50, help='interval of logging') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - - cfg = Config.fromfile(args.config) - # set cudnn_benchmark - torch.backends.cudnn.benchmark = False - cfg.model.pretrained = None - cfg.data.test.test_mode = True - - # build the dataloader - # TODO: support multiple images per gpu (only minor changes are needed) - dataset = build_dataset(cfg.data.test) - data_loader = build_dataloader( - dataset, - samples_per_gpu=1, - workers_per_gpu=cfg.data.workers_per_gpu, - dist=False, - shuffle=False) - - # build the model and load checkpoint - model = build_segmentor(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) - load_checkpoint(model, args.checkpoint, map_location='cpu') - - model = MMDataParallel(model, device_ids=[0]) - - model.eval() - - # the first several iterations may be very slow so skip them - num_warmup = 5 - pure_inf_time = 0 - total_iters = 200 - - # benchmark with 200 image and take the average - for i, data in enumerate(data_loader): - - torch.cuda.synchronize() - start_time = time.perf_counter() - - with torch.no_grad(): - model(return_loss=False, rescale=True, **data) - - torch.cuda.synchronize() - elapsed = time.perf_counter() - start_time - - if i >= num_warmup: - pure_inf_time += elapsed - if (i + 1) % args.log_interval == 0: - fps = (i + 1 - num_warmup) / pure_inf_time - print(f'Done image [{i + 1:<3}/ {total_iters}], ' - f'fps: {fps:.2f} img / s') - - if (i + 1) == total_iters: - fps = (i + 1 - num_warmup) / pure_inf_time - print(f'Overall fps: {fps:.2f} img / s') - break - - -if __name__ == '__main__': - main() diff --git a/tools/dataset_converters/chase_db1.py b/tools/dataset_converters/chase_db1.py new file mode 100644 index 0000000000..f4fefbd774 --- /dev/null +++ b/tools/dataset_converters/chase_db1.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
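The CHASE_DB1 and DRIVE converters below binarize annotation images by integer division with 128; a tiny sketch of why that matches the documented '1 if value >= 128 else 0' rule (the pixel values are illustrative):

import numpy as np

ann = np.array([0, 1, 127, 128, 200, 255], dtype=np.uint8)
binary = ann // 128  # what the converters write out as the label map
assert (binary == (ann >= 128).astype(np.uint8)).all()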
+import argparse +import os +import os.path as osp +import tempfile +import zipfile + +import mmcv +from mmengine.utils import mkdir_or_exist + +CHASE_DB1_LEN = 28 * 3 +TRAINING_LEN = 60 + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert CHASE_DB1 dataset to mmsegmentation format') + parser.add_argument('dataset_path', help='path of CHASEDB1.zip') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + dataset_path = args.dataset_path + if args.out_dir is None: + out_dir = osp.join('data', 'CHASE_DB1') + else: + out_dir = args.out_dir + + print('Making directories...') + mkdir_or_exist(out_dir) + mkdir_or_exist(osp.join(out_dir, 'images')) + mkdir_or_exist(osp.join(out_dir, 'images', 'training')) + mkdir_or_exist(osp.join(out_dir, 'images', 'validation')) + mkdir_or_exist(osp.join(out_dir, 'annotations')) + mkdir_or_exist(osp.join(out_dir, 'annotations', 'training')) + mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation')) + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + print('Extracting CHASEDB1.zip...') + zip_file = zipfile.ZipFile(dataset_path) + zip_file.extractall(tmp_dir) + + print('Generating training dataset...') + + assert len(os.listdir(tmp_dir)) == CHASE_DB1_LEN, \ + f'len(os.listdir(tmp_dir)) != {CHASE_DB1_LEN}' + + for img_name in sorted(os.listdir(tmp_dir))[:TRAINING_LEN]: + img = mmcv.imread(osp.join(tmp_dir, img_name)) + if osp.splitext(img_name)[1] == '.jpg': + mmcv.imwrite( + img, + osp.join(out_dir, 'images', 'training', + osp.splitext(img_name)[0] + '.png')) + else: + # The annotation img should be divided by 128, because some of + # the annotation imgs are not standard. We should set a + # threshold to convert the nonstandard annotation imgs. The + # value divided by 128 is equivalent to '1 if value >= 128 + # else 0' + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'training', + osp.splitext(img_name)[0] + '.png')) + + for img_name in sorted(os.listdir(tmp_dir))[TRAINING_LEN:]: + img = mmcv.imread(osp.join(tmp_dir, img_name)) + if osp.splitext(img_name)[1] == '.jpg': + mmcv.imwrite( + img, + osp.join(out_dir, 'images', 'validation', + osp.splitext(img_name)[0] + '.png')) + else: + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'validation', + osp.splitext(img_name)[0] + '.png')) + + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/tools/convert_datasets/cityscapes.py b/tools/dataset_converters/cityscapes.py similarity index 77% rename from tools/convert_datasets/cityscapes.py rename to tools/dataset_converters/cityscapes.py index 99d05b41f5..0d6a80135d 100644 --- a/tools/convert_datasets/cityscapes.py +++ b/tools/dataset_converters/cityscapes.py @@ -1,8 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
import argparse import os.path as osp -import mmcv from cityscapesscripts.preparation.json2labelImg import json2labelImg +from mmengine.utils import (mkdir_or_exist, scandir, track_parallel_progress, + track_progress) def convert_json_to_label(json_file): @@ -26,25 +28,24 @@ def main(): args = parse_args() cityscapes_path = args.cityscapes_path out_dir = args.out_dir if args.out_dir else cityscapes_path - mmcv.mkdir_or_exist(out_dir) + mkdir_or_exist(out_dir) gt_dir = osp.join(cityscapes_path, args.gt_dir) poly_files = [] - for poly in mmcv.scandir(gt_dir, '_polygons.json', recursive=True): + for poly in scandir(gt_dir, '_polygons.json', recursive=True): poly_file = osp.join(gt_dir, poly) poly_files.append(poly_file) if args.nproc > 1: - mmcv.track_parallel_progress(convert_json_to_label, poly_files, - args.nproc) + track_parallel_progress(convert_json_to_label, poly_files, args.nproc) else: - mmcv.track_progress(convert_json_to_label, poly_files) + track_progress(convert_json_to_label, poly_files) split_names = ['train', 'val', 'test'] for split in split_names: filenames = [] - for poly in mmcv.scandir( + for poly in scandir( osp.join(gt_dir, split), '_polygons.json', recursive=True): filenames.append(poly.replace('_gtFine_polygons.json', '')) with open(osp.join(out_dir, f'{split}.txt'), 'w') as f: diff --git a/tools/dataset_converters/coco_stuff10k.py b/tools/dataset_converters/coco_stuff10k.py new file mode 100644 index 0000000000..920127ee10 --- /dev/null +++ b/tools/dataset_converters/coco_stuff10k.py @@ -0,0 +1,308 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +import shutil +from functools import partial + +import numpy as np +from mmengine.utils import (mkdir_or_exist, track_parallel_progress, + track_progress) +from PIL import Image +from scipy.io import loadmat + +COCO_LEN = 10000 + +clsID_to_trID = { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + 4: 4, + 5: 5, + 6: 6, + 7: 7, + 8: 8, + 9: 9, + 10: 10, + 11: 11, + 13: 12, + 14: 13, + 15: 14, + 16: 15, + 17: 16, + 18: 17, + 19: 18, + 20: 19, + 21: 20, + 22: 21, + 23: 22, + 24: 23, + 25: 24, + 27: 25, + 28: 26, + 31: 27, + 32: 28, + 33: 29, + 34: 30, + 35: 31, + 36: 32, + 37: 33, + 38: 34, + 39: 35, + 40: 36, + 41: 37, + 42: 38, + 43: 39, + 44: 40, + 46: 41, + 47: 42, + 48: 43, + 49: 44, + 50: 45, + 51: 46, + 52: 47, + 53: 48, + 54: 49, + 55: 50, + 56: 51, + 57: 52, + 58: 53, + 59: 54, + 60: 55, + 61: 56, + 62: 57, + 63: 58, + 64: 59, + 65: 60, + 67: 61, + 70: 62, + 72: 63, + 73: 64, + 74: 65, + 75: 66, + 76: 67, + 77: 68, + 78: 69, + 79: 70, + 80: 71, + 81: 72, + 82: 73, + 84: 74, + 85: 75, + 86: 76, + 87: 77, + 88: 78, + 89: 79, + 90: 80, + 92: 81, + 93: 82, + 94: 83, + 95: 84, + 96: 85, + 97: 86, + 98: 87, + 99: 88, + 100: 89, + 101: 90, + 102: 91, + 103: 92, + 104: 93, + 105: 94, + 106: 95, + 107: 96, + 108: 97, + 109: 98, + 110: 99, + 111: 100, + 112: 101, + 113: 102, + 114: 103, + 115: 104, + 116: 105, + 117: 106, + 118: 107, + 119: 108, + 120: 109, + 121: 110, + 122: 111, + 123: 112, + 124: 113, + 125: 114, + 126: 115, + 127: 116, + 128: 117, + 129: 118, + 130: 119, + 131: 120, + 132: 121, + 133: 122, + 134: 123, + 135: 124, + 136: 125, + 137: 126, + 138: 127, + 139: 128, + 140: 129, + 141: 130, + 142: 131, + 143: 132, + 144: 133, + 145: 134, + 146: 135, + 147: 136, + 148: 137, + 149: 138, + 150: 139, + 151: 140, + 152: 141, + 153: 142, + 154: 143, + 155: 144, + 156: 145, + 157: 146, + 158: 147, + 159: 148, + 160: 149, + 161: 150, + 162: 151, + 163: 152, + 164: 153, + 165: 154, + 166: 155, + 
167: 156, + 168: 157, + 169: 158, + 170: 159, + 171: 160, + 172: 161, + 173: 162, + 174: 163, + 175: 164, + 176: 165, + 177: 166, + 178: 167, + 179: 168, + 180: 169, + 181: 170, + 182: 171 +} + + +def convert_to_trainID(tuple_path, in_img_dir, in_ann_dir, out_img_dir, + out_mask_dir, is_train): + imgpath, maskpath = tuple_path + shutil.copyfile( + osp.join(in_img_dir, imgpath), + osp.join(out_img_dir, 'train2014', imgpath) if is_train else osp.join( + out_img_dir, 'test2014', imgpath)) + annotate = loadmat(osp.join(in_ann_dir, maskpath)) + mask = annotate['S'].astype(np.uint8) + mask_copy = mask.copy() + for clsID, trID in clsID_to_trID.items(): + mask_copy[mask == clsID] = trID + seg_filename = osp.join(out_mask_dir, 'train2014', + maskpath.split('.')[0] + + '_labelTrainIds.png') if is_train else osp.join( + out_mask_dir, 'test2014', + maskpath.split('.')[0] + '_labelTrainIds.png') + Image.fromarray(mask_copy).save(seg_filename, 'PNG') + + +def generate_coco_list(folder): + train_list = osp.join(folder, 'imageLists', 'train.txt') + test_list = osp.join(folder, 'imageLists', 'test.txt') + train_paths = [] + test_paths = [] + + with open(train_list) as f: + for filename in f: + basename = filename.strip() + imgpath = basename + '.jpg' + maskpath = basename + '.mat' + train_paths.append((imgpath, maskpath)) + + with open(test_list) as f: + for filename in f: + basename = filename.strip() + imgpath = basename + '.jpg' + maskpath = basename + '.mat' + test_paths.append((imgpath, maskpath)) + + return train_paths, test_paths + + +def parse_args(): + parser = argparse.ArgumentParser( + description=\ + 'Convert COCO Stuff 10k annotations to mmsegmentation format') # noqa + parser.add_argument('coco_path', help='coco stuff path') + parser.add_argument('-o', '--out_dir', help='output path') + parser.add_argument( + '--nproc', default=16, type=int, help='number of process') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + coco_path = args.coco_path + nproc = args.nproc + + out_dir = args.out_dir or coco_path + out_img_dir = osp.join(out_dir, 'images') + out_mask_dir = osp.join(out_dir, 'annotations') + + mkdir_or_exist(osp.join(out_img_dir, 'train2014')) + mkdir_or_exist(osp.join(out_img_dir, 'test2014')) + mkdir_or_exist(osp.join(out_mask_dir, 'train2014')) + mkdir_or_exist(osp.join(out_mask_dir, 'test2014')) + + train_list, test_list = generate_coco_list(coco_path) + assert (len(train_list) + + len(test_list)) == COCO_LEN, 'Wrong length of list {} & {}'.format( + len(train_list), len(test_list)) + + if args.nproc > 1: + track_parallel_progress( + partial( + convert_to_trainID, + in_img_dir=osp.join(coco_path, 'images'), + in_ann_dir=osp.join(coco_path, 'annotations'), + out_img_dir=out_img_dir, + out_mask_dir=out_mask_dir, + is_train=True), + train_list, + nproc=nproc) + track_parallel_progress( + partial( + convert_to_trainID, + in_img_dir=osp.join(coco_path, 'images'), + in_ann_dir=osp.join(coco_path, 'annotations'), + out_img_dir=out_img_dir, + out_mask_dir=out_mask_dir, + is_train=False), + test_list, + nproc=nproc) + else: + track_progress( + partial( + convert_to_trainID, + in_img_dir=osp.join(coco_path, 'images'), + in_ann_dir=osp.join(coco_path, 'annotations'), + out_img_dir=out_img_dir, + out_mask_dir=out_mask_dir, + is_train=True), train_list) + track_progress( + partial( + convert_to_trainID, + in_img_dir=osp.join(coco_path, 'images'), + in_ann_dir=osp.join(coco_path, 'annotations'), + out_img_dir=out_img_dir, + out_mask_dir=out_mask_dir, + 
is_train=False), test_list) + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/coco_stuff164k.py b/tools/dataset_converters/coco_stuff164k.py new file mode 100644 index 0000000000..a13114ab1e --- /dev/null +++ b/tools/dataset_converters/coco_stuff164k.py @@ -0,0 +1,265 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +import shutil +from functools import partial +from glob import glob + +import numpy as np +from mmengine.utils import (mkdir_or_exist, track_parallel_progress, + track_progress) +from PIL import Image + +COCO_LEN = 123287 + +clsID_to_trID = { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + 4: 4, + 5: 5, + 6: 6, + 7: 7, + 8: 8, + 9: 9, + 10: 10, + 12: 11, + 13: 12, + 14: 13, + 15: 14, + 16: 15, + 17: 16, + 18: 17, + 19: 18, + 20: 19, + 21: 20, + 22: 21, + 23: 22, + 24: 23, + 26: 24, + 27: 25, + 30: 26, + 31: 27, + 32: 28, + 33: 29, + 34: 30, + 35: 31, + 36: 32, + 37: 33, + 38: 34, + 39: 35, + 40: 36, + 41: 37, + 42: 38, + 43: 39, + 45: 40, + 46: 41, + 47: 42, + 48: 43, + 49: 44, + 50: 45, + 51: 46, + 52: 47, + 53: 48, + 54: 49, + 55: 50, + 56: 51, + 57: 52, + 58: 53, + 59: 54, + 60: 55, + 61: 56, + 62: 57, + 63: 58, + 64: 59, + 66: 60, + 69: 61, + 71: 62, + 72: 63, + 73: 64, + 74: 65, + 75: 66, + 76: 67, + 77: 68, + 78: 69, + 79: 70, + 80: 71, + 81: 72, + 83: 73, + 84: 74, + 85: 75, + 86: 76, + 87: 77, + 88: 78, + 89: 79, + 91: 80, + 92: 81, + 93: 82, + 94: 83, + 95: 84, + 96: 85, + 97: 86, + 98: 87, + 99: 88, + 100: 89, + 101: 90, + 102: 91, + 103: 92, + 104: 93, + 105: 94, + 106: 95, + 107: 96, + 108: 97, + 109: 98, + 110: 99, + 111: 100, + 112: 101, + 113: 102, + 114: 103, + 115: 104, + 116: 105, + 117: 106, + 118: 107, + 119: 108, + 120: 109, + 121: 110, + 122: 111, + 123: 112, + 124: 113, + 125: 114, + 126: 115, + 127: 116, + 128: 117, + 129: 118, + 130: 119, + 131: 120, + 132: 121, + 133: 122, + 134: 123, + 135: 124, + 136: 125, + 137: 126, + 138: 127, + 139: 128, + 140: 129, + 141: 130, + 142: 131, + 143: 132, + 144: 133, + 145: 134, + 146: 135, + 147: 136, + 148: 137, + 149: 138, + 150: 139, + 151: 140, + 152: 141, + 153: 142, + 154: 143, + 155: 144, + 156: 145, + 157: 146, + 158: 147, + 159: 148, + 160: 149, + 161: 150, + 162: 151, + 163: 152, + 164: 153, + 165: 154, + 166: 155, + 167: 156, + 168: 157, + 169: 158, + 170: 159, + 171: 160, + 172: 161, + 173: 162, + 174: 163, + 175: 164, + 176: 165, + 177: 166, + 178: 167, + 179: 168, + 180: 169, + 181: 170, + 255: 255 +} + + +def convert_to_trainID(maskpath, out_mask_dir, is_train): + mask = np.array(Image.open(maskpath)) + mask_copy = mask.copy() + for clsID, trID in clsID_to_trID.items(): + mask_copy[mask == clsID] = trID + seg_filename = osp.join( + out_mask_dir, 'train2017', + osp.basename(maskpath).split('.')[0] + + '_labelTrainIds.png') if is_train else osp.join( + out_mask_dir, 'val2017', + osp.basename(maskpath).split('.')[0] + '_labelTrainIds.png') + Image.fromarray(mask_copy).save(seg_filename, 'PNG') + + +def parse_args(): + parser = argparse.ArgumentParser( + description=\ + 'Convert COCO Stuff 164k annotations to mmsegmentation format') # noqa + parser.add_argument('coco_path', help='coco stuff path') + parser.add_argument('-o', '--out_dir', help='output path') + parser.add_argument( + '--nproc', default=16, type=int, help='number of process') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + coco_path = args.coco_path + nproc = args.nproc + + out_dir = args.out_dir or coco_path + out_img_dir = 
osp.join(out_dir, 'images') + out_mask_dir = osp.join(out_dir, 'annotations') + + mkdir_or_exist(osp.join(out_mask_dir, 'train2017')) + mkdir_or_exist(osp.join(out_mask_dir, 'val2017')) + + if out_dir != coco_path: + shutil.copytree(osp.join(coco_path, 'images'), out_img_dir) + + train_list = glob(osp.join(coco_path, 'annotations', 'train2017', '*.png')) + train_list = [file for file in train_list if '_labelTrainIds' not in file] + test_list = glob(osp.join(coco_path, 'annotations', 'val2017', '*.png')) + test_list = [file for file in test_list if '_labelTrainIds' not in file] + assert (len(train_list) + + len(test_list)) == COCO_LEN, 'Wrong length of list {} & {}'.format( + len(train_list), len(test_list)) + + if args.nproc > 1: + track_parallel_progress( + partial( + convert_to_trainID, out_mask_dir=out_mask_dir, is_train=True), + train_list, + nproc=nproc) + track_parallel_progress( + partial( + convert_to_trainID, out_mask_dir=out_mask_dir, is_train=False), + test_list, + nproc=nproc) + else: + track_progress( + partial( + convert_to_trainID, out_mask_dir=out_mask_dir, is_train=True), + train_list) + track_progress( + partial( + convert_to_trainID, out_mask_dir=out_mask_dir, is_train=False), + test_list) + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/drive.py b/tools/dataset_converters/drive.py new file mode 100644 index 0000000000..076fd05a20 --- /dev/null +++ b/tools/dataset_converters/drive.py @@ -0,0 +1,114 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +import tempfile +import zipfile + +import cv2 +import mmcv +from mmengine.utils import mkdir_or_exist + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert DRIVE dataset to mmsegmentation format') + parser.add_argument( + 'training_path', help='the training part of DRIVE dataset') + parser.add_argument( + 'testing_path', help='the testing part of DRIVE dataset') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + training_path = args.training_path + testing_path = args.testing_path + if args.out_dir is None: + out_dir = osp.join('data', 'DRIVE') + else: + out_dir = args.out_dir + + print('Making directories...') + mkdir_or_exist(out_dir) + mkdir_or_exist(osp.join(out_dir, 'images')) + mkdir_or_exist(osp.join(out_dir, 'images', 'training')) + mkdir_or_exist(osp.join(out_dir, 'images', 'validation')) + mkdir_or_exist(osp.join(out_dir, 'annotations')) + mkdir_or_exist(osp.join(out_dir, 'annotations', 'training')) + mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation')) + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + print('Extracting training.zip...') + zip_file = zipfile.ZipFile(training_path) + zip_file.extractall(tmp_dir) + + print('Generating training dataset...') + now_dir = osp.join(tmp_dir, 'training', 'images') + for img_name in os.listdir(now_dir): + img = mmcv.imread(osp.join(now_dir, img_name)) + mmcv.imwrite( + img, + osp.join( + out_dir, 'images', 'training', + osp.splitext(img_name)[0].replace('_training', '') + + '.png')) + + now_dir = osp.join(tmp_dir, 'training', '1st_manual') + for img_name in os.listdir(now_dir): + cap = cv2.VideoCapture(osp.join(now_dir, img_name)) + ret, img = cap.read() + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'training', + 
osp.splitext(img_name)[0] + '.png')) + + print('Extracting test.zip...') + zip_file = zipfile.ZipFile(testing_path) + zip_file.extractall(tmp_dir) + + print('Generating validation dataset...') + now_dir = osp.join(tmp_dir, 'test', 'images') + for img_name in os.listdir(now_dir): + img = mmcv.imread(osp.join(now_dir, img_name)) + mmcv.imwrite( + img, + osp.join( + out_dir, 'images', 'validation', + osp.splitext(img_name)[0].replace('_test', '') + '.png')) + + now_dir = osp.join(tmp_dir, 'test', '1st_manual') + if osp.exists(now_dir): + for img_name in os.listdir(now_dir): + cap = cv2.VideoCapture(osp.join(now_dir, img_name)) + ret, img = cap.read() + # The annotation img should be divided by 128, because some of + # the annotation imgs are not standard. We should set a + # threshold to convert the nonstandard annotation imgs. The + # value divided by 128 is equivalent to '1 if value >= 128 + # else 0' + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'validation', + osp.splitext(img_name)[0] + '.png')) + + now_dir = osp.join(tmp_dir, 'test', '2nd_manual') + if osp.exists(now_dir): + for img_name in os.listdir(now_dir): + cap = cv2.VideoCapture(osp.join(now_dir, img_name)) + ret, img = cap.read() + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'validation', + osp.splitext(img_name)[0] + '.png')) + + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/hrf.py b/tools/dataset_converters/hrf.py new file mode 100644 index 0000000000..3bfd80c9ee --- /dev/null +++ b/tools/dataset_converters/hrf.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +import tempfile +import zipfile + +import mmcv +from mmengine.utils import mkdir_or_exist + +HRF_LEN = 15 +TRAINING_LEN = 5 + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert HRF dataset to mmsegmentation format') + parser.add_argument('healthy_path', help='the path of healthy.zip') + parser.add_argument( + 'healthy_manualsegm_path', help='the path of healthy_manualsegm.zip') + parser.add_argument('glaucoma_path', help='the path of glaucoma.zip') + parser.add_argument( + 'glaucoma_manualsegm_path', help='the path of glaucoma_manualsegm.zip') + parser.add_argument( + 'diabetic_retinopathy_path', + help='the path of diabetic_retinopathy.zip') + parser.add_argument( + 'diabetic_retinopathy_manualsegm_path', + help='the path of diabetic_retinopathy_manualsegm.zip') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + images_path = [ + args.healthy_path, args.glaucoma_path, args.diabetic_retinopathy_path + ] + annotations_path = [ + args.healthy_manualsegm_path, args.glaucoma_manualsegm_path, + args.diabetic_retinopathy_manualsegm_path + ] + if args.out_dir is None: + out_dir = osp.join('data', 'HRF') + else: + out_dir = args.out_dir + + print('Making directories...') + mkdir_or_exist(out_dir) + mkdir_or_exist(osp.join(out_dir, 'images')) + mkdir_or_exist(osp.join(out_dir, 'images', 'training')) + mkdir_or_exist(osp.join(out_dir, 'images', 'validation')) + mkdir_or_exist(osp.join(out_dir, 'annotations')) + mkdir_or_exist(osp.join(out_dir, 'annotations', 'training')) + mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation')) + + print('Generating images...') + 
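+    # Each zip holds HRF_LEN (15) images; the first TRAINING_LEN (5) files in
+    # sorted order form the training split and the remaining ten the
+    # validation split, for both the images here and the annotations below.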
for now_path in images_path: + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + zip_file = zipfile.ZipFile(now_path) + zip_file.extractall(tmp_dir) + + assert len(os.listdir(tmp_dir)) == HRF_LEN, \ + f'len(os.listdir(tmp_dir)) != {HRF_LEN}' + + for filename in sorted(os.listdir(tmp_dir))[:TRAINING_LEN]: + img = mmcv.imread(osp.join(tmp_dir, filename)) + mmcv.imwrite( + img, + osp.join(out_dir, 'images', 'training', + osp.splitext(filename)[0] + '.png')) + for filename in sorted(os.listdir(tmp_dir))[TRAINING_LEN:]: + img = mmcv.imread(osp.join(tmp_dir, filename)) + mmcv.imwrite( + img, + osp.join(out_dir, 'images', 'validation', + osp.splitext(filename)[0] + '.png')) + + print('Generating annotations...') + for now_path in annotations_path: + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + zip_file = zipfile.ZipFile(now_path) + zip_file.extractall(tmp_dir) + + assert len(os.listdir(tmp_dir)) == HRF_LEN, \ + f'len(os.listdir(tmp_dir)) != {HRF_LEN}' + + for filename in sorted(os.listdir(tmp_dir))[:TRAINING_LEN]: + img = mmcv.imread(osp.join(tmp_dir, filename)) + # The annotation img should be divided by 128, because some of + # the annotation imgs are not standard. We should set a + # threshold to convert the nonstandard annotation imgs. The + # value divided by 128 is equivalent to '1 if value >= 128 + # else 0' + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'training', + osp.splitext(filename)[0] + '.png')) + for filename in sorted(os.listdir(tmp_dir))[TRAINING_LEN:]: + img = mmcv.imread(osp.join(tmp_dir, filename)) + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'validation', + osp.splitext(filename)[0] + '.png')) + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/isaid.py b/tools/dataset_converters/isaid.py new file mode 100644 index 0000000000..1da264d975 --- /dev/null +++ b/tools/dataset_converters/isaid.py @@ -0,0 +1,246 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
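+# Maps iSAID RGB masks to grayscale class indices via iSAID_palette and slices
+# images/labels into patch_height x patch_width patches with `overlap_area`
+# overlap; test images are moved as-is, without labels.
+# Example invocation (dataset path is illustrative):
+#   python tools/dataset_converters/isaid.py /path/to/iSAID -o data/iSAID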
+import argparse +import glob +import os +import os.path as osp +import shutil +import tempfile +import zipfile + +import mmcv +import numpy as np +from mmengine.utils import ProgressBar, mkdir_or_exist +from PIL import Image + +iSAID_palette = \ + { + 0: (0, 0, 0), + 1: (0, 0, 63), + 2: (0, 63, 63), + 3: (0, 63, 0), + 4: (0, 63, 127), + 5: (0, 63, 191), + 6: (0, 63, 255), + 7: (0, 127, 63), + 8: (0, 127, 127), + 9: (0, 0, 127), + 10: (0, 0, 191), + 11: (0, 0, 255), + 12: (0, 191, 127), + 13: (0, 127, 191), + 14: (0, 127, 255), + 15: (0, 100, 155) + } + +iSAID_invert_palette = {v: k for k, v in iSAID_palette.items()} + + +def iSAID_convert_from_color(arr_3d, palette=iSAID_invert_palette): + """RGB-color encoding to grayscale labels.""" + arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8) + + for c, i in palette.items(): + m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2) + arr_2d[m] = i + + return arr_2d + + +def slide_crop_image(src_path, out_dir, mode, patch_H, patch_W, overlap): + img = np.asarray(Image.open(src_path).convert('RGB')) + + img_H, img_W, _ = img.shape + + if img_H < patch_H and img_W > patch_W: + + img = mmcv.impad(img, shape=(patch_H, img_W), pad_val=0) + + img_H, img_W, _ = img.shape + + elif img_H > patch_H and img_W < patch_W: + + img = mmcv.impad(img, shape=(img_H, patch_W), pad_val=0) + + img_H, img_W, _ = img.shape + + elif img_H < patch_H and img_W < patch_W: + + img = mmcv.impad(img, shape=(patch_H, patch_W), pad_val=0) + + img_H, img_W, _ = img.shape + + for x in range(0, img_W, patch_W - overlap): + for y in range(0, img_H, patch_H - overlap): + x_str = x + x_end = x + patch_W + if x_end > img_W: + diff_x = x_end - img_W + x_str -= diff_x + x_end = img_W + y_str = y + y_end = y + patch_H + if y_end > img_H: + diff_y = y_end - img_H + y_str -= diff_y + y_end = img_H + + img_patch = img[y_str:y_end, x_str:x_end, :] + img_patch = Image.fromarray(img_patch.astype(np.uint8)) + image = osp.basename(src_path).split('.')[0] + '_' + str( + y_str) + '_' + str(y_end) + '_' + str(x_str) + '_' + str( + x_end) + '.png' + # print(image) + save_path_image = osp.join(out_dir, 'img_dir', mode, str(image)) + img_patch.save(save_path_image) + + +def slide_crop_label(src_path, out_dir, mode, patch_H, patch_W, overlap): + label = mmcv.imread(src_path, channel_order='rgb') + label = iSAID_convert_from_color(label) + img_H, img_W = label.shape + + if img_H < patch_H and img_W > patch_W: + + label = mmcv.impad(label, shape=(patch_H, img_W), pad_val=255) + + img_H = patch_H + + elif img_H > patch_H and img_W < patch_W: + + label = mmcv.impad(label, shape=(img_H, patch_W), pad_val=255) + + img_W = patch_W + + elif img_H < patch_H and img_W < patch_W: + + label = mmcv.impad(label, shape=(patch_H, patch_W), pad_val=255) + + img_H = patch_H + img_W = patch_W + + for x in range(0, img_W, patch_W - overlap): + for y in range(0, img_H, patch_H - overlap): + x_str = x + x_end = x + patch_W + if x_end > img_W: + diff_x = x_end - img_W + x_str -= diff_x + x_end = img_W + y_str = y + y_end = y + patch_H + if y_end > img_H: + diff_y = y_end - img_H + y_str -= diff_y + y_end = img_H + + lab_patch = label[y_str:y_end, x_str:x_end] + lab_patch = Image.fromarray(lab_patch.astype(np.uint8), mode='P') + + image = osp.basename(src_path).split('.')[0].split( + '_')[0] + '_' + str(y_str) + '_' + str(y_end) + '_' + str( + x_str) + '_' + str(x_end) + '_instance_color_RGB' + '.png' + lab_patch.save(osp.join(out_dir, 'ann_dir', mode, str(image))) + + +def parse_args(): + parser = 
argparse.ArgumentParser( + description='Convert iSAID dataset to mmsegmentation format') + parser.add_argument('dataset_path', help='iSAID folder path') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + + parser.add_argument( + '--patch_width', + default=896, + type=int, + help='Width of the cropped image patch') + parser.add_argument( + '--patch_height', + default=896, + type=int, + help='Height of the cropped image patch') + parser.add_argument( + '--overlap_area', default=384, type=int, help='Overlap area') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + dataset_path = args.dataset_path + # image patch width and height + patch_H, patch_W = args.patch_width, args.patch_height + + overlap = args.overlap_area # overlap area + + if args.out_dir is None: + out_dir = osp.join('data', 'iSAID') + else: + out_dir = args.out_dir + + print('Making directories...') + mkdir_or_exist(osp.join(out_dir, 'img_dir', 'train')) + mkdir_or_exist(osp.join(out_dir, 'img_dir', 'val')) + mkdir_or_exist(osp.join(out_dir, 'img_dir', 'test')) + + mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'train')) + mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'val')) + mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'test')) + + assert os.path.exists(os.path.join(dataset_path, 'train')), \ + f'train is not in {dataset_path}' + assert os.path.exists(os.path.join(dataset_path, 'val')), \ + f'val is not in {dataset_path}' + assert os.path.exists(os.path.join(dataset_path, 'test')), \ + f'test is not in {dataset_path}' + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + for dataset_mode in ['train', 'val', 'test']: + + # for dataset_mode in [ 'test']: + print(f'Extracting {dataset_mode}ing.zip...') + img_zipp_list = glob.glob( + os.path.join(dataset_path, dataset_mode, 'images', '*.zip')) + print('Find the data', img_zipp_list) + for img_zipp in img_zipp_list: + zip_file = zipfile.ZipFile(img_zipp) + zip_file.extractall(os.path.join(tmp_dir, dataset_mode, 'img')) + src_path_list = glob.glob( + os.path.join(tmp_dir, dataset_mode, 'img', 'images', '*.png')) + + src_prog_bar = ProgressBar(len(src_path_list)) + for i, img_path in enumerate(src_path_list): + if dataset_mode != 'test': + slide_crop_image(img_path, out_dir, dataset_mode, patch_H, + patch_W, overlap) + + else: + shutil.move(img_path, + os.path.join(out_dir, 'img_dir', dataset_mode)) + src_prog_bar.update() + + if dataset_mode != 'test': + label_zipp_list = glob.glob( + os.path.join(dataset_path, dataset_mode, 'Semantic_masks', + '*.zip')) + for label_zipp in label_zipp_list: + zip_file = zipfile.ZipFile(label_zipp) + zip_file.extractall( + os.path.join(tmp_dir, dataset_mode, 'lab')) + + lab_path_list = glob.glob( + os.path.join(tmp_dir, dataset_mode, 'lab', 'images', + '*.png')) + lab_prog_bar = ProgressBar(len(lab_path_list)) + for i, lab_path in enumerate(lab_path_list): + slide_crop_label(lab_path, out_dir, dataset_mode, patch_H, + patch_W, overlap) + lab_prog_bar.update() + + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/loveda.py b/tools/dataset_converters/loveda.py new file mode 100644 index 0000000000..5b0ef4bb8b --- /dev/null +++ b/tools/dataset_converters/loveda.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
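+# Unzips the LoveDA Train/Val/Test archives and moves the Rural/Urban
+# images_png and masks_png files into img_dir/{train,val,test} and
+# ann_dir/{train,val}; the test split ships without masks, so no ann_dir/test
+# is created.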
+import argparse +import os +import os.path as osp +import shutil +import tempfile +import zipfile + +from mmengine.utils import mkdir_or_exist + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert LoveDA dataset to mmsegmentation format') + parser.add_argument('dataset_path', help='LoveDA folder path') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + dataset_path = args.dataset_path + if args.out_dir is None: + out_dir = osp.join('data', 'loveDA') + else: + out_dir = args.out_dir + + print('Making directories...') + mkdir_or_exist(out_dir) + mkdir_or_exist(osp.join(out_dir, 'img_dir')) + mkdir_or_exist(osp.join(out_dir, 'img_dir', 'train')) + mkdir_or_exist(osp.join(out_dir, 'img_dir', 'val')) + mkdir_or_exist(osp.join(out_dir, 'img_dir', 'test')) + mkdir_or_exist(osp.join(out_dir, 'ann_dir')) + mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'train')) + mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'val')) + + assert 'Train.zip' in os.listdir(dataset_path), \ + f'Train.zip is not in {dataset_path}' + assert 'Val.zip' in os.listdir(dataset_path), \ + f'Val.zip is not in {dataset_path}' + assert 'Test.zip' in os.listdir(dataset_path), \ + f'Test.zip is not in {dataset_path}' + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + for dataset in ['Train', 'Val', 'Test']: + zip_file = zipfile.ZipFile( + os.path.join(dataset_path, dataset + '.zip')) + zip_file.extractall(tmp_dir) + data_type = dataset.lower() + for location in ['Rural', 'Urban']: + for image_type in ['images_png', 'masks_png']: + if image_type == 'images_png': + dst = osp.join(out_dir, 'img_dir', data_type) + else: + dst = osp.join(out_dir, 'ann_dir', data_type) + if dataset == 'Test' and image_type == 'masks_png': + continue + else: + src_dir = osp.join(tmp_dir, dataset, location, + image_type) + src_lst = os.listdir(src_dir) + for file in src_lst: + shutil.move(osp.join(src_dir, file), dst) + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/pascal_context.py b/tools/dataset_converters/pascal_context.py new file mode 100644 index 0000000000..a92d1dc641 --- /dev/null +++ b/tools/dataset_converters/pascal_context.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
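+# Requires the PASCAL-Context Detail API (the `detail` package) together with
+# the annotation JSON passed as json_path. Raw label values are remapped to
+# the 60 IDs kept in `_mapping` before the masks are written out, and
+# train/val split files are saved under ImageSets/SegmentationContext.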
+import argparse +import os.path as osp +from functools import partial + +import numpy as np +from detail import Detail +from mmengine.utils import mkdir_or_exist, track_progress +from PIL import Image + +_mapping = np.sort( + np.array([ + 0, 2, 259, 260, 415, 324, 9, 258, 144, 18, 19, 22, 23, 397, 25, 284, + 158, 159, 416, 33, 162, 420, 454, 295, 296, 427, 44, 45, 46, 308, 59, + 440, 445, 31, 232, 65, 354, 424, 68, 326, 72, 458, 34, 207, 80, 355, + 85, 347, 220, 349, 360, 98, 187, 104, 105, 366, 189, 368, 113, 115 + ])) +_key = np.array(range(len(_mapping))).astype('uint8') + + +def generate_labels(img_id, detail, out_dir): + + def _class_to_index(mask, _mapping, _key): + # assert the values + values = np.unique(mask) + for i in range(len(values)): + assert (values[i] in _mapping) + index = np.digitize(mask.ravel(), _mapping, right=True) + return _key[index].reshape(mask.shape) + + mask = Image.fromarray( + _class_to_index(detail.getMask(img_id), _mapping=_mapping, _key=_key)) + filename = img_id['file_name'] + mask.save(osp.join(out_dir, filename.replace('jpg', 'png'))) + return osp.splitext(osp.basename(filename))[0] + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert PASCAL VOC annotations to mmsegmentation format') + parser.add_argument('devkit_path', help='pascal voc devkit path') + parser.add_argument('json_path', help='annoation json filepath') + parser.add_argument('-o', '--out_dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + devkit_path = args.devkit_path + if args.out_dir is None: + out_dir = osp.join(devkit_path, 'VOC2010', 'SegmentationClassContext') + else: + out_dir = args.out_dir + json_path = args.json_path + mkdir_or_exist(out_dir) + img_dir = osp.join(devkit_path, 'VOC2010', 'JPEGImages') + + train_detail = Detail(json_path, img_dir, 'train') + train_ids = train_detail.getImgs() + + val_detail = Detail(json_path, img_dir, 'val') + val_ids = val_detail.getImgs() + + mkdir_or_exist( + osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext')) + + train_list = track_progress( + partial(generate_labels, detail=train_detail, out_dir=out_dir), + train_ids) + with open( + osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext', + 'train.txt'), 'w') as f: + f.writelines(line + '\n' for line in sorted(train_list)) + + val_list = track_progress( + partial(generate_labels, detail=val_detail, out_dir=out_dir), val_ids) + with open( + osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext', + 'val.txt'), 'w') as f: + f.writelines(line + '\n' for line in sorted(val_list)) + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/potsdam.py b/tools/dataset_converters/potsdam.py new file mode 100644 index 0000000000..f3c713ee2a --- /dev/null +++ b/tools/dataset_converters/potsdam.py @@ -0,0 +1,158 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
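+# Unzips the Potsdam tiles and clips both the images and the RGB-encoded
+# label maps into clip_size x clip_size patches (see clip_big_image); label
+# colors are converted to class indices via `color_map` before saving.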
+import argparse +import glob +import math +import os +import os.path as osp +import tempfile +import zipfile + +import mmcv +import numpy as np +from mmengine.utils import ProgressBar, mkdir_or_exist + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert potsdam dataset to mmsegmentation format') + parser.add_argument('dataset_path', help='potsdam folder path') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + parser.add_argument( + '--clip_size', + type=int, + help='clipped size of image after preparation', + default=512) + parser.add_argument( + '--stride_size', + type=int, + help='stride of clipping original images', + default=256) + args = parser.parse_args() + return args + + +def clip_big_image(image_path, clip_save_dir, args, to_label=False): + # Original image of Potsdam dataset is very large, thus pre-processing + # of them is adopted. Given fixed clip size and stride size to generate + # clipped image, the intersection of width and height is determined. + # For example, given one 5120 x 5120 original image, the clip size is + # 512 and stride size is 256, thus it would generate 20x20 = 400 images + # whose size are all 512x512. + image = mmcv.imread(image_path) + + h, w, c = image.shape + clip_size = args.clip_size + stride_size = args.stride_size + + num_rows = math.ceil((h - clip_size) / stride_size) if math.ceil( + (h - clip_size) / + stride_size) * stride_size + clip_size >= h else math.ceil( + (h - clip_size) / stride_size) + 1 + num_cols = math.ceil((w - clip_size) / stride_size) if math.ceil( + (w - clip_size) / + stride_size) * stride_size + clip_size >= w else math.ceil( + (w - clip_size) / stride_size) + 1 + + x, y = np.meshgrid(np.arange(num_cols + 1), np.arange(num_rows + 1)) + xmin = x * clip_size + ymin = y * clip_size + + xmin = xmin.ravel() + ymin = ymin.ravel() + xmin_offset = np.where(xmin + clip_size > w, w - xmin - clip_size, + np.zeros_like(xmin)) + ymin_offset = np.where(ymin + clip_size > h, h - ymin - clip_size, + np.zeros_like(ymin)) + boxes = np.stack([ + xmin + xmin_offset, ymin + ymin_offset, + np.minimum(xmin + clip_size, w), + np.minimum(ymin + clip_size, h) + ], + axis=1) + + if to_label: + color_map = np.array([[0, 0, 0], [255, 255, 255], [255, 0, 0], + [255, 255, 0], [0, 255, 0], [0, 255, 255], + [0, 0, 255]]) + flatten_v = np.matmul( + image.reshape(-1, c), + np.array([2, 3, 4]).reshape(3, 1)) + out = np.zeros_like(flatten_v) + for idx, class_color in enumerate(color_map): + value_idx = np.matmul(class_color, + np.array([2, 3, 4]).reshape(3, 1)) + out[flatten_v == value_idx] = idx + image = out.reshape(h, w) + + for box in boxes: + start_x, start_y, end_x, end_y = box + clipped_image = image[start_y:end_y, + start_x:end_x] if to_label else image[ + start_y:end_y, start_x:end_x, :] + idx_i, idx_j = osp.basename(image_path).split('_')[2:4] + mmcv.imwrite( + clipped_image.astype(np.uint8), + osp.join( + clip_save_dir, + f'{idx_i}_{idx_j}_{start_x}_{start_y}_{end_x}_{end_y}.png')) + + +def main(): + args = parse_args() + splits = { + 'train': [ + '2_10', '2_11', '2_12', '3_10', '3_11', '3_12', '4_10', '4_11', + '4_12', '5_10', '5_11', '5_12', '6_10', '6_11', '6_12', '6_7', + '6_8', '6_9', '7_10', '7_11', '7_12', '7_7', '7_8', '7_9' + ], + 'val': [ + '5_15', '6_15', '6_13', '3_13', '4_14', '6_14', '5_14', '2_13', + '4_15', '2_14', '5_13', '4_13', '3_14', '7_13' + ] + } + + dataset_path = args.dataset_path + if args.out_dir is None: + 
out_dir = osp.join('data', 'potsdam') + else: + out_dir = args.out_dir + + print('Making directories...') + mkdir_or_exist(osp.join(out_dir, 'img_dir', 'train')) + mkdir_or_exist(osp.join(out_dir, 'img_dir', 'val')) + mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'train')) + mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'val')) + + zipp_list = glob.glob(os.path.join(dataset_path, '*.zip')) + print('Find the data', zipp_list) + + for zipp in zipp_list: + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + zip_file = zipfile.ZipFile(zipp) + zip_file.extractall(tmp_dir) + src_path_list = glob.glob(os.path.join(tmp_dir, '*.tif')) + if not len(src_path_list): + sub_tmp_dir = os.path.join(tmp_dir, os.listdir(tmp_dir)[0]) + src_path_list = glob.glob(os.path.join(sub_tmp_dir, '*.tif')) + + prog_bar = ProgressBar(len(src_path_list)) + for i, src_path in enumerate(src_path_list): + idx_i, idx_j = osp.basename(src_path).split('_')[2:4] + data_type = 'train' if f'{idx_i}_{idx_j}' in splits[ + 'train'] else 'val' + if 'label' in src_path: + dst_dir = osp.join(out_dir, 'ann_dir', data_type) + clip_big_image(src_path, dst_dir, args, to_label=True) + else: + dst_dir = osp.join(out_dir, 'img_dir', data_type) + clip_big_image(src_path, dst_dir, args, to_label=False) + prog_bar.update() + + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/stare.py b/tools/dataset_converters/stare.py new file mode 100644 index 0000000000..4a23ba4dd8 --- /dev/null +++ b/tools/dataset_converters/stare.py @@ -0,0 +1,167 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import gzip +import os +import os.path as osp +import tarfile +import tempfile + +import mmcv +from mmengine.utils import mkdir_or_exist + +STARE_LEN = 20 +TRAINING_LEN = 10 + + +def un_gz(src, dst): + g_file = gzip.GzipFile(src) + with open(dst, 'wb+') as f: + f.write(g_file.read()) + g_file.close() + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert STARE dataset to mmsegmentation format') + parser.add_argument('image_path', help='the path of stare-images.tar') + parser.add_argument('labels_ah', help='the path of labels-ah.tar') + parser.add_argument('labels_vk', help='the path of labels-vk.tar') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + image_path = args.image_path + labels_ah = args.labels_ah + labels_vk = args.labels_vk + if args.out_dir is None: + out_dir = osp.join('data', 'STARE') + else: + out_dir = args.out_dir + + print('Making directories...') + mkdir_or_exist(out_dir) + mkdir_or_exist(osp.join(out_dir, 'images')) + mkdir_or_exist(osp.join(out_dir, 'images', 'training')) + mkdir_or_exist(osp.join(out_dir, 'images', 'validation')) + mkdir_or_exist(osp.join(out_dir, 'annotations')) + mkdir_or_exist(osp.join(out_dir, 'annotations', 'training')) + mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation')) + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + mkdir_or_exist(osp.join(tmp_dir, 'gz')) + mkdir_or_exist(osp.join(tmp_dir, 'files')) + + print('Extracting stare-images.tar...') + with tarfile.open(image_path) as f: + f.extractall(osp.join(tmp_dir, 'gz')) + + for filename in os.listdir(osp.join(tmp_dir, 'gz')): + un_gz( + osp.join(tmp_dir, 'gz', filename), + osp.join(tmp_dir, 'files', + 
osp.splitext(filename)[0])) + + now_dir = osp.join(tmp_dir, 'files') + + assert len(os.listdir(now_dir)) == STARE_LEN, \ + f'len(os.listdir(now_dir)) != {STARE_LEN}' + + for filename in sorted(os.listdir(now_dir))[:TRAINING_LEN]: + img = mmcv.imread(osp.join(now_dir, filename)) + mmcv.imwrite( + img, + osp.join(out_dir, 'images', 'training', + osp.splitext(filename)[0] + '.png')) + + for filename in sorted(os.listdir(now_dir))[TRAINING_LEN:]: + img = mmcv.imread(osp.join(now_dir, filename)) + mmcv.imwrite( + img, + osp.join(out_dir, 'images', 'validation', + osp.splitext(filename)[0] + '.png')) + + print('Removing the temporary files...') + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + mkdir_or_exist(osp.join(tmp_dir, 'gz')) + mkdir_or_exist(osp.join(tmp_dir, 'files')) + + print('Extracting labels-ah.tar...') + with tarfile.open(labels_ah) as f: + f.extractall(osp.join(tmp_dir, 'gz')) + + for filename in os.listdir(osp.join(tmp_dir, 'gz')): + un_gz( + osp.join(tmp_dir, 'gz', filename), + osp.join(tmp_dir, 'files', + osp.splitext(filename)[0])) + + now_dir = osp.join(tmp_dir, 'files') + + assert len(os.listdir(now_dir)) == STARE_LEN, \ + f'len(os.listdir(now_dir)) != {STARE_LEN}' + + for filename in sorted(os.listdir(now_dir))[:TRAINING_LEN]: + img = mmcv.imread(osp.join(now_dir, filename)) + # The annotation img should be divided by 128, because some of + # the annotation imgs are not standard. We should set a threshold + # to convert the nonstandard annotation imgs. The value divided by + # 128 equivalent to '1 if value >= 128 else 0' + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'training', + osp.splitext(filename)[0] + '.png')) + + for filename in sorted(os.listdir(now_dir))[TRAINING_LEN:]: + img = mmcv.imread(osp.join(now_dir, filename)) + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'validation', + osp.splitext(filename)[0] + '.png')) + + print('Removing the temporary files...') + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + mkdir_or_exist(osp.join(tmp_dir, 'gz')) + mkdir_or_exist(osp.join(tmp_dir, 'files')) + + print('Extracting labels-vk.tar...') + with tarfile.open(labels_vk) as f: + f.extractall(osp.join(tmp_dir, 'gz')) + + for filename in os.listdir(osp.join(tmp_dir, 'gz')): + un_gz( + osp.join(tmp_dir, 'gz', filename), + osp.join(tmp_dir, 'files', + osp.splitext(filename)[0])) + + now_dir = osp.join(tmp_dir, 'files') + + assert len(os.listdir(now_dir)) == STARE_LEN, \ + f'len(os.listdir(now_dir)) != {STARE_LEN}' + + for filename in sorted(os.listdir(now_dir))[:TRAINING_LEN]: + img = mmcv.imread(osp.join(now_dir, filename)) + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'training', + osp.splitext(filename)[0] + '.png')) + + for filename in sorted(os.listdir(now_dir))[TRAINING_LEN:]: + img = mmcv.imread(osp.join(now_dir, filename)) + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'validation', + osp.splitext(filename)[0] + '.png')) + + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/synapse.py b/tools/dataset_converters/synapse.py new file mode 100644 index 0000000000..9d49af11f3 --- /dev/null +++ b/tools/dataset_converters/synapse.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
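+# Slices the Synapse abdominal CT volumes (img*.nii.gz / label*.nii.gz) into
+# 2D training/validation slices: intensities are clipped to [-125, 275] and
+# rescaled to [0, 255], organ labels are remapped to 1-8 via label_mapping,
+# and slices are saved as case{id}_slice{idx}.jpg / .png.
+# Example invocation (dataset path is illustrative):
+#   python tools/dataset_converters/synapse.py --dataset-path /path/to/synapse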
+import argparse +import os.path as osp + +import nibabel as nib +import numpy as np +from mmengine.utils import mkdir_or_exist +from PIL import Image + + +def read_files_from_txt(txt_path): + with open(txt_path) as f: + files = f.readlines() + files = [file.strip() for file in files] + return files + + +def read_nii_file(nii_path): + img = nib.load(nii_path).get_fdata() + return img + + +def split_3d_image(img): + c, _, _ = img.shape + res = [] + for i in range(c): + res.append(img[i, :, :]) + return res + + +def label_mapping(label): + maped_label = np.zeros_like(label) + maped_label[label == 8] = 1 + maped_label[label == 4] = 2 + maped_label[label == 3] = 3 + maped_label[label == 2] = 4 + maped_label[label == 6] = 5 + maped_label[label == 11] = 6 + maped_label[label == 1] = 7 + maped_label[label == 7] = 8 + return maped_label + + +def pares_args(): + parser = argparse.ArgumentParser( + description='Convert synapse dataset to mmsegmentation format') + parser.add_argument( + '--dataset-path', type=str, help='synapse dataset path.') + parser.add_argument( + '--save-path', + default='data/synapse', + type=str, + help='save path of the dataset.') + args = parser.parse_args() + return args + + +def main(): + args = pares_args() + dataset_path = args.dataset_path + save_path = args.save_path + + if not osp.exists(dataset_path): + raise ValueError('The dataset path does not exist. ' + 'Please enter a correct dataset path.') + if not osp.exists(osp.join(dataset_path, 'img')) \ + or not osp.exists(osp.join(dataset_path, 'label')): + raise FileNotFoundError('The dataset structure is incorrect. ' + 'Please check your dataset.') + + train_id = read_files_from_txt(osp.join(dataset_path, 'train.txt')) + train_id = [idx[3:7] for idx in train_id] + + test_id = read_files_from_txt(osp.join(dataset_path, 'val.txt')) + test_id = [idx[3:7] for idx in test_id] + + mkdir_or_exist(osp.join(save_path, 'img_dir/train')) + mkdir_or_exist(osp.join(save_path, 'img_dir/val')) + mkdir_or_exist(osp.join(save_path, 'ann_dir/train')) + mkdir_or_exist(osp.join(save_path, 'ann_dir/val')) + + for i, idx in enumerate(train_id): + img_3d = read_nii_file( + osp.join(dataset_path, 'img', 'img' + idx + '.nii.gz')) + label_3d = read_nii_file( + osp.join(dataset_path, 'label', 'label' + idx + '.nii.gz')) + + img_3d = np.clip(img_3d, -125, 275) + img_3d = (img_3d + 125) / 400 + img_3d *= 255 + img_3d = np.transpose(img_3d, [2, 0, 1]) + img_3d = np.flip(img_3d, 2) + + label_3d = np.transpose(label_3d, [2, 0, 1]) + label_3d = np.flip(label_3d, 2) + label_3d = label_mapping(label_3d) + + for c in range(img_3d.shape[0]): + img = img_3d[c] + label = label_3d[c] + + img = Image.fromarray(img).convert('RGB') + label = Image.fromarray(label).convert('L') + img.save( + osp.join( + save_path, 'img_dir/train', 'case' + idx.zfill(4) + + '_slice' + str(c).zfill(3) + '.jpg')) + label.save( + osp.join( + save_path, 'ann_dir/train', 'case' + idx.zfill(4) + + '_slice' + str(c).zfill(3) + '.png')) + + for i, idx in enumerate(test_id): + img_3d = read_nii_file( + osp.join(dataset_path, 'img', 'img' + idx + '.nii.gz')) + label_3d = read_nii_file( + osp.join(dataset_path, 'label', 'label' + idx + '.nii.gz')) + + img_3d = np.clip(img_3d, -125, 275) + img_3d = (img_3d + 125) / 400 + img_3d *= 255 + img_3d = np.transpose(img_3d, [2, 0, 1]) + img_3d = np.flip(img_3d, 2) + + label_3d = np.transpose(label_3d, [2, 0, 1]) + label_3d = np.flip(label_3d, 2) + label_3d = label_mapping(label_3d) + + for c in range(img_3d.shape[0]): + img = img_3d[c] + label = 
label_3d[c] + + img = Image.fromarray(img).convert('RGB') + label = Image.fromarray(label).convert('L') + img.save( + osp.join( + save_path, 'img_dir/val', 'case' + idx.zfill(4) + + '_slice' + str(c).zfill(3) + '.jpg')) + label.save( + osp.join( + save_path, 'ann_dir/val', 'case' + idx.zfill(4) + + '_slice' + str(c).zfill(3) + '.png')) + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/vaihingen.py b/tools/dataset_converters/vaihingen.py new file mode 100644 index 0000000000..db980144eb --- /dev/null +++ b/tools/dataset_converters/vaihingen.py @@ -0,0 +1,156 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import glob +import math +import os +import os.path as osp +import tempfile +import zipfile + +import mmcv +import numpy as np +from mmengine.utils import ProgressBar, mkdir_or_exist + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert vaihingen dataset to mmsegmentation format') + parser.add_argument('dataset_path', help='vaihingen folder path') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + parser.add_argument( + '--clip_size', + type=int, + help='clipped size of image after preparation', + default=512) + parser.add_argument( + '--stride_size', + type=int, + help='stride of clipping original images', + default=256) + args = parser.parse_args() + return args + + +def clip_big_image(image_path, clip_save_dir, to_label=False): + # Original image of Vaihingen dataset is very large, thus pre-processing + # of them is adopted. Given fixed clip size and stride size to generate + # clipped image, the intersection of width and height is determined. + # For example, given one 5120 x 5120 original image, the clip size is + # 512 and stride size is 256, thus it would generate 20x20 = 400 images + # whose size are all 512x512. 
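+    # Candidate boxes below are laid out every clip_size pixels; any box that
+    # would run past the image border is shifted back via xmin_offset /
+    # ymin_offset so every saved patch is exactly clip_size x clip_size.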
+ image = mmcv.imread(image_path) + + h, w, c = image.shape + cs = args.clip_size + ss = args.stride_size + + num_rows = math.ceil((h - cs) / ss) if math.ceil( + (h - cs) / ss) * ss + cs >= h else math.ceil((h - cs) / ss) + 1 + num_cols = math.ceil((w - cs) / ss) if math.ceil( + (w - cs) / ss) * ss + cs >= w else math.ceil((w - cs) / ss) + 1 + + x, y = np.meshgrid(np.arange(num_cols + 1), np.arange(num_rows + 1)) + xmin = x * cs + ymin = y * cs + + xmin = xmin.ravel() + ymin = ymin.ravel() + xmin_offset = np.where(xmin + cs > w, w - xmin - cs, np.zeros_like(xmin)) + ymin_offset = np.where(ymin + cs > h, h - ymin - cs, np.zeros_like(ymin)) + boxes = np.stack([ + xmin + xmin_offset, ymin + ymin_offset, + np.minimum(xmin + cs, w), + np.minimum(ymin + cs, h) + ], + axis=1) + + if to_label: + color_map = np.array([[0, 0, 0], [255, 255, 255], [255, 0, 0], + [255, 255, 0], [0, 255, 0], [0, 255, 255], + [0, 0, 255]]) + flatten_v = np.matmul( + image.reshape(-1, c), + np.array([2, 3, 4]).reshape(3, 1)) + out = np.zeros_like(flatten_v) + for idx, class_color in enumerate(color_map): + value_idx = np.matmul(class_color, + np.array([2, 3, 4]).reshape(3, 1)) + out[flatten_v == value_idx] = idx + image = out.reshape(h, w) + + for box in boxes: + start_x, start_y, end_x, end_y = box + clipped_image = image[start_y:end_y, + start_x:end_x] if to_label else image[ + start_y:end_y, start_x:end_x, :] + area_idx = osp.basename(image_path).split('_')[3].strip('.tif') + mmcv.imwrite( + clipped_image.astype(np.uint8), + osp.join(clip_save_dir, + f'{area_idx}_{start_x}_{start_y}_{end_x}_{end_y}.png')) + + +def main(): + splits = { + 'train': [ + 'area1', 'area11', 'area13', 'area15', 'area17', 'area21', + 'area23', 'area26', 'area28', 'area3', 'area30', 'area32', + 'area34', 'area37', 'area5', 'area7' + ], + 'val': [ + 'area6', 'area24', 'area35', 'area16', 'area14', 'area22', + 'area10', 'area4', 'area2', 'area20', 'area8', 'area31', 'area33', + 'area27', 'area38', 'area12', 'area29' + ], + } + + dataset_path = args.dataset_path + if args.out_dir is None: + out_dir = osp.join('data', 'vaihingen') + else: + out_dir = args.out_dir + + print('Making directories...') + mkdir_or_exist(osp.join(out_dir, 'img_dir', 'train')) + mkdir_or_exist(osp.join(out_dir, 'img_dir', 'val')) + mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'train')) + mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'val')) + + zipp_list = glob.glob(os.path.join(dataset_path, '*.zip')) + print('Find the data', zipp_list) + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + for zipp in zipp_list: + zip_file = zipfile.ZipFile(zipp) + zip_file.extractall(tmp_dir) + src_path_list = glob.glob(os.path.join(tmp_dir, '*.tif')) + if 'ISPRS_semantic_labeling_Vaihingen' in zipp: + src_path_list = glob.glob( + os.path.join(os.path.join(tmp_dir, 'top'), '*.tif')) + if 'ISPRS_semantic_labeling_Vaihingen_ground_truth_eroded_COMPLETE' in zipp: # noqa + src_path_list = glob.glob(os.path.join(tmp_dir, '*.tif')) + # delete unused area9 ground truth + for area_ann in src_path_list: + if 'area9' in area_ann: + src_path_list.remove(area_ann) + prog_bar = ProgressBar(len(src_path_list)) + for i, src_path in enumerate(src_path_list): + area_idx = osp.basename(src_path).split('_')[3].strip('.tif') + data_type = 'train' if area_idx in splits['train'] else 'val' + if 'noBoundary' in src_path: + dst_dir = osp.join(out_dir, 'ann_dir', data_type) + clip_big_image(src_path, dst_dir, to_label=True) + else: + dst_dir = osp.join(out_dir, 'img_dir', data_type) + 
clip_big_image(src_path, dst_dir, to_label=False) + prog_bar.update() + + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + args = parse_args() + main() diff --git a/tools/convert_datasets/voc_aug.py b/tools/dataset_converters/voc_aug.py similarity index 84% rename from tools/convert_datasets/voc_aug.py rename to tools/dataset_converters/voc_aug.py index fd5400361f..a536f4290d 100644 --- a/tools/convert_datasets/voc_aug.py +++ b/tools/dataset_converters/voc_aug.py @@ -1,9 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp from functools import partial -import mmcv import numpy as np +from mmengine.utils import mkdir_or_exist, scandir, track_parallel_progress from PIL import Image from scipy.io import loadmat @@ -42,16 +43,20 @@ def main(): out_dir = osp.join(devkit_path, 'VOC2012', 'SegmentationClassAug') else: out_dir = args.out_dir - mmcv.mkdir_or_exist(out_dir) + mkdir_or_exist(out_dir) in_dir = osp.join(aug_path, 'dataset', 'cls') - mmcv.track_parallel_progress( + track_parallel_progress( partial(convert_mat, in_dir=in_dir, out_dir=out_dir), - list(mmcv.scandir(in_dir, suffix='.mat')), + list(scandir(in_dir, suffix='.mat')), nproc=nproc) - with open(osp.join(aug_path, 'dataset', 'trainval.txt')) as f: - full_aug_list = [line.strip() for line in f] + full_aug_list = [] + with open(osp.join(aug_path, 'dataset', 'train.txt')) as f: + full_aug_list += [line.strip() for line in f] + with open(osp.join(aug_path, 'dataset', 'val.txt')) as f: + full_aug_list += [line.strip() for line in f] + with open( osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'train.txt')) as f: diff --git a/tools/deployment/pytorch2torchscript.py b/tools/deployment/pytorch2torchscript.py new file mode 100644 index 0000000000..e69e705bb1 --- /dev/null +++ b/tools/deployment/pytorch2torchscript.py @@ -0,0 +1,185 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +import numpy as np +import torch +import torch._C +import torch.serialization +from mmengine import Config +from mmengine.runner import load_checkpoint +from torch import nn + +from mmseg.models import build_segmentor + +torch.manual_seed(3) + + +def digit_version(version_str): + digit_version = [] + for x in version_str.split('.'): + if x.isdigit(): + digit_version.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + digit_version.append(int(patch_version[0]) - 1) + digit_version.append(int(patch_version[1])) + return digit_version + + +def check_torch_version(): + torch_minimum_version = '1.8.0' + torch_version = digit_version(torch.__version__) + + assert (torch_version >= digit_version(torch_minimum_version)), \ + f'Torch=={torch.__version__} is not support for converting to ' \ + f'torchscript. Please install pytorch>={torch_minimum_version}.' 
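+# _convert_batchnorm below replaces SyncBatchNorm layers with equivalent
+# BatchNorm2d layers, copying weights and running statistics, so that the
+# model can be traced in a single, non-distributed process.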
+ + +def _convert_batchnorm(module): + module_output = module + if isinstance(module, torch.nn.SyncBatchNorm): + module_output = torch.nn.BatchNorm2d(module.num_features, module.eps, + module.momentum, module.affine, + module.track_running_stats) + if module.affine: + module_output.weight.data = module.weight.data.clone().detach() + module_output.bias.data = module.bias.data.clone().detach() + # keep requires_grad unchanged + module_output.weight.requires_grad = module.weight.requires_grad + module_output.bias.requires_grad = module.bias.requires_grad + module_output.running_mean = module.running_mean + module_output.running_var = module.running_var + module_output.num_batches_tracked = module.num_batches_tracked + for name, child in module.named_children(): + module_output.add_module(name, _convert_batchnorm(child)) + del module + return module_output + + +def _demo_mm_inputs(input_shape, num_classes): + """Create a superset of inputs needed to run test or train batches. + + Args: + input_shape (tuple): + input batch dimensions + num_classes (int): + number of semantic classes + """ + (N, C, H, W) = input_shape + rng = np.random.RandomState(0) + imgs = rng.rand(*input_shape) + segs = rng.randint( + low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8) + img_metas = [{ + 'img_shape': (H, W, C), + 'ori_shape': (H, W, C), + 'pad_shape': (H, W, C), + 'filename': '.png', + 'scale_factor': 1.0, + 'flip': False, + } for _ in range(N)] + mm_inputs = { + 'imgs': torch.FloatTensor(imgs).requires_grad_(True), + 'img_metas': img_metas, + 'gt_semantic_seg': torch.LongTensor(segs) + } + return mm_inputs + + +def pytorch2libtorch(model, + input_shape, + show=False, + output_file='tmp.pt', + verify=False): + """Export Pytorch model to TorchScript model and verify the outputs are + same between Pytorch and TorchScript. + + Args: + model (nn.Module): Pytorch model we want to export. + input_shape (tuple): Use this input shape to construct + the corresponding dummy input and execute the model. + show (bool): Whether print the computation graph. Default: False. + output_file (string): The path to where we store the + output TorchScript model. Default: `tmp.pt`. + verify (bool): Whether compare the outputs between + Pytorch and TorchScript. Default: False. 
+ """ + if isinstance(model.decode_head, nn.ModuleList): + num_classes = model.decode_head[-1].num_classes + else: + num_classes = model.decode_head.num_classes + + mm_inputs = _demo_mm_inputs(input_shape, num_classes) + + imgs = mm_inputs.pop('imgs') + + # replace the original forword with forward_dummy + model.forward = model.forward_dummy + model.eval() + traced_model = torch.jit.trace( + model, + example_inputs=imgs, + check_trace=verify, + ) + + if show: + print(traced_model.graph) + + traced_model.save(output_file) + print(f'Successfully exported TorchScript model: {output_file}') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert MMSeg to TorchScript') + parser.add_argument('config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file', default=None) + parser.add_argument( + '--show', action='store_true', help='show TorchScript graph') + parser.add_argument( + '--verify', action='store_true', help='verify the TorchScript model') + parser.add_argument('--output-file', type=str, default='tmp.pt') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[512, 512], + help='input image size (height, width)') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + check_torch_version() + + if len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = ( + 1, + 3, + ) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + cfg = Config.fromfile(args.config) + cfg.model.pretrained = None + + # build the model and load checkpoint + cfg.model.train_cfg = None + segmentor = build_segmentor( + cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg')) + # convert SyncBN to BN + segmentor = _convert_batchnorm(segmentor) + + if args.checkpoint: + load_checkpoint(segmentor, args.checkpoint, map_location='cpu') + + # convert the PyTorch model to LibTorch model + pytorch2libtorch( + segmentor, + input_shape, + show=args.show, + output_file=args.output_file, + verify=args.verify) diff --git a/tools/dist_test.sh b/tools/dist_test.sh index 7381dfb1d7..89711fd5c0 100755 --- a/tools/dist_test.sh +++ b/tools/dist_test.sh @@ -1,10 +1,20 @@ -#!/usr/bin/env bash - CONFIG=$1 CHECKPOINT=$2 GPUS=$3 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} PORT=${PORT:-29500} -$CONFIG\/$GPUS/ +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ - $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/test.py \ + $CONFIG \ + $CHECKPOINT \ + --launcher pytorch \ + ${@:4} diff --git a/tools/dist_train.sh b/tools/dist_train.sh index 5b43fffbf2..a857df7878 100755 --- a/tools/dist_train.sh +++ b/tools/dist_train.sh @@ -1,9 +1,17 @@ -#!/usr/bin/env bash - CONFIG=$1 GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ - $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + 
--master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --launcher pytorch ${@:3} diff --git a/tools/misc/browse_dataset.py b/tools/misc/browse_dataset.py new file mode 100644 index 0000000000..b2852c21ab --- /dev/null +++ b/tools/misc/browse_dataset.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp + +from mmengine import Config, DictAction +from mmengine.utils import ProgressBar + +from mmseg.registry import DATASETS, VISUALIZERS +from mmseg.utils import register_all_modules + + +def parse_args(): + parser = argparse.ArgumentParser(description='Browse a dataset') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--output-dir', + default=None, + type=str, + help='If there is no display interface, you can save it') + parser.add_argument('--not-show', default=False, action='store_true') + parser.add_argument( + '--show-interval', + type=float, + default=2, + help='the interval of show (s)') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # register all modules in mmseg into the registries + register_all_modules() + + dataset = DATASETS.build(cfg.train_dataloader.dataset) + cfg.visualizer['save_dir'] = args.output_dir + visualizer = VISUALIZERS.build(cfg.visualizer) + visualizer.dataset_meta = dataset.METAINFO + + progress_bar = ProgressBar(len(dataset)) + for item in dataset: + img = item['inputs'].permute(1, 2, 0).numpy() + data_sample = item['data_samples'].numpy() + img_path = osp.basename(item['data_samples'].img_path) + + img = img[..., [2, 1, 0]] # bgr to rgb + + visualizer.add_datasample( + osp.basename(img_path), + img, + data_sample, + show=not args.not_show, + wait_time=args.show_interval) + + progress_bar.update() + + +if __name__ == '__main__': + main() diff --git a/tools/misc/print_config.py b/tools/misc/print_config.py new file mode 100644 index 0000000000..2a1c024a6a --- /dev/null +++ b/tools/misc/print_config.py @@ -0,0 +1,69 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import warnings + +from mmengine import Config, DictAction + +from mmseg.apis import init_model + + +def parse_args(): + parser = argparse.ArgumentParser(description='Print the whole config') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--graph', action='store_true', help='print the models graph') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help="--options is deprecated in favor of --cfg_options' and it will " + 'not be supported in version v0.22.0. Override some settings in the ' + 'used config, the key-value pair in xxx=yyy format will be merged ' + 'into config file. If the value to be overwritten is a list, it ' + 'should be like key="[a,b]" or key=a,b It also allows nested ' + 'list/tuple values, e.g. 
key="[(a,b),(c,d)]" Note that the quotation ' + 'marks are necessary and that no white space is allowed.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both ' + 'specified, --options is deprecated in favor of --cfg-options. ' + '--options will not be supported in version v0.22.0.') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options, ' + '--options will not be supported in version v0.22.0.') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + print(f'Config:\n{cfg.pretty_text}') + # dump config + cfg.dump('example.py') + # dump models graph + if args.graph: + model = init_model(args.config, device='cpu') + print(f'Model graph:\n{str(model)}') + with open('example-graph.txt', 'w') as f: + f.writelines(str(model)) + + +if __name__ == '__main__': + main() diff --git a/tools/publish_model.py b/tools/misc/publish_model.py similarity index 89% rename from tools/publish_model.py rename to tools/misc/publish_model.py index a049f17674..c1bbc9ac1a 100644 --- a/tools/publish_model.py +++ b/tools/misc/publish_model.py @@ -1,3 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. import argparse import subprocess @@ -22,7 +23,7 @@ def process_checkpoint(in_file, out_file): # add the code here. torch.save(checkpoint, out_file) sha = subprocess.check_output(['sha256sum', out_file]).decode() - final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) + final_file = out_file.rstrip('.pth') + f'-{sha[:8]}.pth' subprocess.Popen(['mv', out_file, final_file]) diff --git a/tools/model_converters/beit2mmseg.py b/tools/model_converters/beit2mmseg.py new file mode 100644 index 0000000000..20f8f0f450 --- /dev/null +++ b/tools/model_converters/beit2mmseg.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_beit(ckpt): + new_ckpt = OrderedDict() + + for k, v in ckpt.items(): + if k.startswith('patch_embed'): + new_key = k.replace('patch_embed.proj', 'patch_embed.projection') + new_ckpt[new_key] = v + if k.startswith('blocks'): + new_key = k.replace('blocks', 'layers') + if 'norm' in new_key: + new_key = new_key.replace('norm', 'ln') + elif 'mlp.fc1' in new_key: + new_key = new_key.replace('mlp.fc1', 'ffn.layers.0.0') + elif 'mlp.fc2' in new_key: + new_key = new_key.replace('mlp.fc2', 'ffn.layers.1') + new_ckpt[new_key] = v + else: + new_key = k + new_ckpt[new_key] = v + + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in official pretrained beit models to' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. 
+ parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + weight = convert_beit(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/mit2mmseg.py b/tools/model_converters/mit2mmseg.py new file mode 100644 index 0000000000..f10cbbf9d4 --- /dev/null +++ b/tools/model_converters/mit2mmseg.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_mit(ckpt): + new_ckpt = OrderedDict() + # Process the concat between q linear weights and kv linear weights + for k, v in ckpt.items(): + if k.startswith('head'): + continue + # patch embedding conversion + elif k.startswith('patch_embed'): + stage_i = int(k.split('.')[0].replace('patch_embed', '')) + new_k = k.replace(f'patch_embed{stage_i}', f'layers.{stage_i-1}.0') + new_v = v + if 'proj.' in new_k: + new_k = new_k.replace('proj.', 'projection.') + # transformer encoder layer conversion + elif k.startswith('block'): + stage_i = int(k.split('.')[0].replace('block', '')) + new_k = k.replace(f'block{stage_i}', f'layers.{stage_i-1}.1') + new_v = v + if 'attn.q.' in new_k: + sub_item_k = k.replace('q.', 'kv.') + new_k = new_k.replace('q.', 'attn.in_proj_') + new_v = torch.cat([v, ckpt[sub_item_k]], dim=0) + elif 'attn.kv.' in new_k: + continue + elif 'attn.proj.' in new_k: + new_k = new_k.replace('proj.', 'attn.out_proj.') + elif 'attn.sr.' in new_k: + new_k = new_k.replace('sr.', 'sr.') + elif 'mlp.' in new_k: + string = f'{new_k}-' + new_k = new_k.replace('mlp.', 'ffn.layers.') + if 'fc1.weight' in new_k or 'fc2.weight' in new_k: + new_v = v.reshape((*v.shape, 1, 1)) + new_k = new_k.replace('fc1.', '0.') + new_k = new_k.replace('dwconv.dwconv.', '1.') + new_k = new_k.replace('fc2.', '4.') + string += f'{new_k} {v.shape}-{new_v.shape}' + # norm layer conversion + elif k.startswith('norm'): + stage_i = int(k.split('.')[0].replace('norm', '')) + new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i-1}.2') + new_v = v + else: + new_k = k + new_v = v + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in official pretrained segformer to ' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + weight = convert_mit(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/stdc2mmseg.py b/tools/model_converters/stdc2mmseg.py new file mode 100644 index 0000000000..6ea3b8342f --- /dev/null +++ b/tools/model_converters/stdc2mmseg.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
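+# Renames backbone keys of official STDC1/STDC2 checkpoints (features.* ->
+# stages.*, conv_list -> layers, avd_layer.* -> downsample.*) so that they
+# load into the MMSegmentation STDC backbone; keys that do not match any of
+# these patterns are dropped.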
+import argparse +import os.path as osp + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_stdc(ckpt, stdc_type): + new_state_dict = {} + if stdc_type == 'STDC1': + stage_lst = ['0', '1', '2.0', '2.1', '3.0', '3.1', '4.0', '4.1'] + else: + stage_lst = [ + '0', '1', '2.0', '2.1', '2.2', '2.3', '3.0', '3.1', '3.2', '3.3', + '3.4', '4.0', '4.1', '4.2' + ] + for k, v in ckpt.items(): + ori_k = k + flag = False + if 'cp.' in k: + k = k.replace('cp.', '') + if 'features.' in k: + num_layer = int(k.split('.')[1]) + feature_key_lst = 'features.' + str(num_layer) + '.' + stages_key_lst = 'stages.' + stage_lst[num_layer] + '.' + k = k.replace(feature_key_lst, stages_key_lst) + flag = True + if 'conv_list' in k: + k = k.replace('conv_list', 'layers') + flag = True + if 'avd_layer.' in k: + if 'avd_layer.0' in k: + k = k.replace('avd_layer.0', 'downsample.conv') + elif 'avd_layer.1' in k: + k = k.replace('avd_layer.1', 'downsample.bn') + flag = True + if flag: + new_state_dict[k] = ckpt[ori_k] + + return new_state_dict + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in official pretrained STDC1/2 to ' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + parser.add_argument('type', help='model type: STDC1 or STDC2') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + + assert args.type in ['STDC1', + 'STDC2'], 'STD type should be STDC1 or STDC2!' + weight = convert_stdc(state_dict, args.type) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/swin2mmseg.py b/tools/model_converters/swin2mmseg.py new file mode 100644 index 0000000000..d434f9465b --- /dev/null +++ b/tools/model_converters/swin2mmseg.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_swin(ckpt): + new_ckpt = OrderedDict() + + def correct_unfold_reduction_order(x): + out_channel, in_channel = x.shape + x = x.reshape(out_channel, 4, in_channel // 4) + x = x[:, [0, 2, 1, 3], :].transpose(1, + 2).reshape(out_channel, in_channel) + return x + + def correct_unfold_norm_order(x): + in_channel = x.shape[0] + x = x.reshape(4, in_channel // 4) + x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) + return x + + for k, v in ckpt.items(): + if k.startswith('head'): + continue + elif k.startswith('layers'): + new_v = v + if 'attn.' in k: + new_k = k.replace('attn.', 'attn.w_msa.') + elif 'mlp.' in k: + if 'mlp.fc1.' in k: + new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') + elif 'mlp.fc2.' in k: + new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') + else: + new_k = k.replace('mlp.', 'ffn.') + elif 'downsample' in k: + new_k = k + if 'reduction.' in k: + new_v = correct_unfold_reduction_order(v) + elif 'norm.' 
in k: + new_v = correct_unfold_norm_order(v) + else: + new_k = k + new_k = new_k.replace('layers', 'stages', 1) + elif k.startswith('patch_embed'): + new_v = v + if 'proj' in k: + new_k = k.replace('proj', 'projection') + else: + new_k = k + else: + new_v = v + new_k = k + + new_ckpt[new_k] = new_v + + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in official pretrained swin models to' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + weight = convert_swin(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/twins2mmseg.py b/tools/model_converters/twins2mmseg.py new file mode 100644 index 0000000000..647d41784a --- /dev/null +++ b/tools/model_converters/twins2mmseg.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_twins(args, ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('head'): + continue + elif k.startswith('patch_embeds'): + if 'proj.' in k: + new_k = k.replace('proj.', 'projection.') + else: + new_k = k + elif k.startswith('blocks'): + # Union + if 'attn.q.' in k: + new_k = k.replace('q.', 'attn.in_proj_') + new_v = torch.cat([v, ckpt[k.replace('attn.q.', 'attn.kv.')]], + dim=0) + elif 'mlp.fc1' in k: + new_k = k.replace('mlp.fc1', 'ffn.layers.0.0') + elif 'mlp.fc2' in k: + new_k = k.replace('mlp.fc2', 'ffn.layers.1') + # Only pcpvt + elif args.model == 'pcpvt': + if 'attn.proj.' in k: + new_k = k.replace('proj.', 'attn.out_proj.') + else: + new_k = k + + # Only svt + else: + if 'attn.proj.' in k: + k_lst = k.split('.') + if int(k_lst[2]) % 2 == 1: + new_k = k.replace('proj.', 'attn.out_proj.') + else: + new_k = k + else: + new_k = k + new_k = new_k.replace('blocks.', 'layers.') + elif k.startswith('pos_block'): + new_k = k.replace('pos_block', 'position_encodings') + if 'proj.0.' in new_k: + new_k = new_k.replace('proj.0.', 'proj.') + else: + new_k = k + if 'attn.kv.' not in k: + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in timm pretrained vit models to ' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. 
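The two correct_unfold_* helpers in swin2mmseg.py above exist, presumably, because the official Swin checkpoints and the MMSegmentation patch-merging layer concatenate the four 2x2 merging windows in a different order, so the input channels of the reduction and norm weights must be permuted. A toy run showing the permutation, using eight input channels labelled 0..7 so the reordering is visible:

import torch

def correct_unfold_reduction_order(x):
    # Same logic as the helper in swin2mmseg.py above.
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x

w = torch.arange(8.).reshape(1, 8)
print(correct_unfold_reduction_order(w))
# tensor([[0., 4., 2., 6., 1., 5., 3., 7.]])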
+ parser.add_argument('dst', help='save path') + parser.add_argument('model', help='model: pcpvt or svt') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'state_dict' in checkpoint: + # timm checkpoint + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + weight = convert_twins(args, state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/vit2mmseg.py b/tools/model_converters/vit2mmseg.py new file mode 100644 index 0000000000..1d1f8a427e --- /dev/null +++ b/tools/model_converters/vit2mmseg.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_vit(ckpt): + + new_ckpt = OrderedDict() + + for k, v in ckpt.items(): + if k.startswith('head'): + continue + if k.startswith('norm'): + new_k = k.replace('norm.', 'ln1.') + elif k.startswith('patch_embed'): + if 'proj' in k: + new_k = k.replace('proj', 'projection') + else: + new_k = k + elif k.startswith('blocks'): + if 'norm' in k: + new_k = k.replace('norm', 'ln') + elif 'mlp.fc1' in k: + new_k = k.replace('mlp.fc1', 'ffn.layers.0.0') + elif 'mlp.fc2' in k: + new_k = k.replace('mlp.fc2', 'ffn.layers.1') + elif 'attn.qkv' in k: + new_k = k.replace('attn.qkv.', 'attn.attn.in_proj_') + elif 'attn.proj' in k: + new_k = k.replace('attn.proj', 'attn.attn.out_proj') + else: + new_k = k + new_k = new_k.replace('blocks.', 'layers.') + else: + new_k = k + new_ckpt[new_k] = v + + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in timm pretrained vit models to ' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + if 'state_dict' in checkpoint: + # timm checkpoint + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + # deit checkpoint + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + weight = convert_vit(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/vitjax2mmseg.py b/tools/model_converters/vitjax2mmseg.py new file mode 100644 index 0000000000..81bc2ea020 --- /dev/null +++ b/tools/model_converters/vitjax2mmseg.py @@ -0,0 +1,123 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +import os.path as osp + +import mmengine +import numpy as np +import torch + + +def vit_jax_to_torch(jax_weights, num_layer=12): + torch_weights = dict() + + # patch embedding + conv_filters = jax_weights['embedding/kernel'] + conv_filters = conv_filters.permute(3, 2, 0, 1) + torch_weights['patch_embed.projection.weight'] = conv_filters + torch_weights['patch_embed.projection.bias'] = jax_weights[ + 'embedding/bias'] + + # pos embedding + torch_weights['pos_embed'] = jax_weights[ + 'Transformer/posembed_input/pos_embedding'] + + # cls token + torch_weights['cls_token'] = jax_weights['cls'] + + # head + torch_weights['ln1.weight'] = jax_weights['Transformer/encoder_norm/scale'] + torch_weights['ln1.bias'] = jax_weights['Transformer/encoder_norm/bias'] + + # transformer blocks + for i in range(num_layer): + jax_block = f'Transformer/encoderblock_{i}' + torch_block = f'layers.{i}' + + # attention norm + torch_weights[f'{torch_block}.ln1.weight'] = jax_weights[ + f'{jax_block}/LayerNorm_0/scale'] + torch_weights[f'{torch_block}.ln1.bias'] = jax_weights[ + f'{jax_block}/LayerNorm_0/bias'] + + # attention + query_weight = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/query/kernel'] + query_bias = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/query/bias'] + key_weight = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/key/kernel'] + key_bias = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/key/bias'] + value_weight = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/value/kernel'] + value_bias = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/value/bias'] + + qkv_weight = torch.from_numpy( + np.stack((query_weight, key_weight, value_weight), 1)) + qkv_weight = torch.flatten(qkv_weight, start_dim=1) + qkv_bias = torch.from_numpy( + np.stack((query_bias, key_bias, value_bias), 0)) + qkv_bias = torch.flatten(qkv_bias, start_dim=0) + + torch_weights[f'{torch_block}.attn.attn.in_proj_weight'] = qkv_weight + torch_weights[f'{torch_block}.attn.attn.in_proj_bias'] = qkv_bias + to_out_weight = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/out/kernel'] + to_out_weight = torch.flatten(to_out_weight, start_dim=0, end_dim=1) + torch_weights[ + f'{torch_block}.attn.attn.out_proj.weight'] = to_out_weight + torch_weights[f'{torch_block}.attn.attn.out_proj.bias'] = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/out/bias'] + + # mlp norm + torch_weights[f'{torch_block}.ln2.weight'] = jax_weights[ + f'{jax_block}/LayerNorm_2/scale'] + torch_weights[f'{torch_block}.ln2.bias'] = jax_weights[ + f'{jax_block}/LayerNorm_2/bias'] + + # mlp + torch_weights[f'{torch_block}.ffn.layers.0.0.weight'] = jax_weights[ + f'{jax_block}/MlpBlock_3/Dense_0/kernel'] + torch_weights[f'{torch_block}.ffn.layers.0.0.bias'] = jax_weights[ + f'{jax_block}/MlpBlock_3/Dense_0/bias'] + torch_weights[f'{torch_block}.ffn.layers.1.weight'] = jax_weights[ + f'{jax_block}/MlpBlock_3/Dense_1/kernel'] + torch_weights[f'{torch_block}.ffn.layers.1.bias'] = jax_weights[ + f'{jax_block}/MlpBlock_3/Dense_1/bias'] + + # transpose weights + for k, v in torch_weights.items(): + if 'weight' in k and 'patch_embed' not in k and 'ln' not in k: + v = v.permute(1, 0) + torch_weights[k] = v + + return torch_weights + + +def main(): + # stole refactoring code from Robin Strudel, thanks + parser = argparse.ArgumentParser( + description='Convert keys from jax official pretrained vit models to ' + 'MMSegmentation style.') + parser.add_argument('src', 
help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + jax_weights = np.load(args.src) + jax_weights_tensor = {} + for key in jax_weights.files: + value = torch.from_numpy(jax_weights[key]) + jax_weights_tensor[key] = value + if 'L_16-i21k' in args.src: + num_layer = 24 + else: + num_layer = 12 + torch_weights = vit_jax_to_torch(jax_weights_tensor, num_layer) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(torch_weights, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/print_config.py b/tools/print_config.py deleted file mode 100644 index 2a0c67780a..0000000000 --- a/tools/print_config.py +++ /dev/null @@ -1,28 +0,0 @@ -import argparse - -from mmcv import Config, DictAction - - -def parse_args(): - parser = argparse.ArgumentParser(description='Print the whole config') - parser.add_argument('config', help='config file path') - parser.add_argument( - '--options', nargs='+', action=DictAction, help='arguments in dict') - args = parser.parse_args() - - return args - - -def main(): - args = parse_args() - - cfg = Config.fromfile(args.config) - if args.options is not None: - cfg.merge_from_dict(args.options) - print(f'Config:\n{cfg.pretty_text}') - # dump config - cfg.dump('example.py') - - -if __name__ == '__main__': - main() diff --git a/tools/test.py b/tools/test.py index 3910f1f0bb..ea1917d182 100644 --- a/tools/test.py +++ b/tools/test.py @@ -1,55 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. import argparse import os +import os.path as osp -import mmcv -import torch -from mmcv.parallel import MMDataParallel, MMDistributedDataParallel -from mmcv.runner import get_dist_info, init_dist, load_checkpoint -from mmcv.utils import DictAction +from mmengine.config import Config, DictAction +from mmengine.runner import Runner -from mmseg.apis import multi_gpu_test, single_gpu_test -from mmseg.datasets import build_dataloader, build_dataset -from mmseg.models import build_segmentor +from mmseg.utils import register_all_modules +# TODO: support fuse_conv_bn, visualization, and format_only def parse_args(): parser = argparse.ArgumentParser( - description='mmseg test (and eval) a model') - parser.add_argument('config', help='test config file path') + description='MMSeg test (and eval) a model') + parser.add_argument('config', help='train config file path') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument( - '--aug-test', action='store_true', help='Use Flip and Multi scale aug') - parser.add_argument('--out', help='output result file in pickle format') + '--work-dir', + help=('if specified, the evaluation metric results will be dumped' + 'into the directory as json')) parser.add_argument( - '--format-only', - action='store_true', - help='Format the output results without perform evaluation. 
It is' - 'useful when you want to format the result to a specific format and ' - 'submit it to the test server') + '--show', action='store_true', help='show prediction results') parser.add_argument( - '--eval', - type=str, - nargs='+', - help='evaluation metrics, which depends on the dataset, e.g., "mIoU"' - ' for generic datasets, and "cityscapes" for Cityscapes') - parser.add_argument('--show', action='store_true', help='show results') - parser.add_argument( - '--show-dir', help='directory where painted images will be saved') - parser.add_argument( - '--gpu-collect', - action='store_true', - help='whether to use gpu to collect results.') + '--show-dir', + help='directory where painted images will be saved. ' + 'If specified, it will be automatically saved ' + 'to the work_dir/timestamp/show_dir') parser.add_argument( - '--tmpdir', - help='tmp directory used for collecting results from multiple ' - 'workers, available when gpu_collect is not specified') + '--wait-time', type=float, default=2, help='the interval of show (s)') parser.add_argument( - '--options', nargs='+', action=DictAction, help='custom options') - parser.add_argument( - '--eval-options', + '--cfg-options', nargs='+', action=DictAction, - help='custom options for evaluation') + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], @@ -59,83 +47,63 @@ def parse_args(): args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) + return args +def trigger_visualization_hook(cfg, args): + default_hooks = cfg.default_hooks + if 'visualization' in default_hooks: + visualization_hook = default_hooks['visualization'] + # Turn on visualization + visualization_hook['draw'] = True + if args.show: + visualization_hook['show'] = True + visualization_hook['wait_time'] = args.wait_time + if args.show_dir: + visulizer = cfg.visualizer + visulizer['save_dir'] = args.show_dir + else: + raise RuntimeError( + 'VisualizationHook must be included in default_hooks.' + 'refer to usage ' + '"visualization=dict(type=\'VisualizationHook\')"') + + return cfg + + def main(): args = parse_args() - assert args.out or args.eval or args.format_only or args.show \ - or args.show_dir, \ - ('Please specify at least one operation (save/eval/format/show the ' - 'results / save the results) with the argument "--out", "--eval"' - ', "--format-only", "--show" or "--show-dir"') - - if args.eval and args.format_only: - raise ValueError('--eval and --format_only cannot be both specified') - - if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): - raise ValueError('The output file must be a pkl file.') - - cfg = mmcv.Config.fromfile(args.config) - if args.options is not None: - cfg.merge_from_dict(args.options) - # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): - torch.backends.cudnn.benchmark = True - if args.aug_test: - # hard code index - cfg.data.test.pipeline[1].img_ratios = [ - 0.5, 0.75, 1.0, 1.25, 1.5, 1.75 - ] - cfg.data.test.pipeline[1].flip = True - cfg.model.pretrained = None - cfg.data.test.test_mode = True - - # init distributed env first, since logger depends on the dist info. 
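trigger_visualization_hook above assumes the test config already carries a visualization entry under default_hooks; a minimal sketch of the fields it touches is shown below. The hook type name is taken from the error message in the code, the visualizer type is a placeholder, and the values are illustrative only.

# Config fragment the helper expects (illustrative values only).
default_hooks = dict(
    visualization=dict(type='VisualizationHook', draw=False))
visualizer = dict(type='Visualizer', save_dir=None)  # type name is a placeholder

# Running tools/test.py with `--show --show-dir out/` makes the helper set:
#   default_hooks['visualization'] -> draw=True, show=True, wait_time=2 (the default)
#   visualizer['save_dir'] -> 'out/'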
- if args.launcher == 'none': - distributed = False - else: - distributed = True - init_dist(args.launcher, **cfg.dist_params) - - # build the dataloader - # TODO: support multiple images per gpu (only minor changes are needed) - dataset = build_dataset(cfg.data.test) - data_loader = build_dataloader( - dataset, - samples_per_gpu=1, - workers_per_gpu=cfg.data.workers_per_gpu, - dist=distributed, - shuffle=False) - - # build the model and load checkpoint - model = build_segmentor(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) - checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') - model.CLASSES = checkpoint['meta']['CLASSES'] - model.PALETTE = checkpoint['meta']['PALETTE'] - - if not distributed: - model = MMDataParallel(model, device_ids=[0]) - outputs = single_gpu_test(model, data_loader, args.show, args.show_dir) - else: - model = MMDistributedDataParallel( - model.cuda(), - device_ids=[torch.cuda.current_device()], - broadcast_buffers=False) - outputs = multi_gpu_test(model, data_loader, args.tmpdir, - args.gpu_collect) - - rank, _ = get_dist_info() - if rank == 0: - if args.out: - print(f'\nwriting results to {args.out}') - mmcv.dump(outputs, args.out) - kwargs = {} if args.eval_options is None else args.eval_options - if args.format_only: - dataset.format_results(outputs, **kwargs) - if args.eval: - dataset.evaluate(outputs, args.eval, **kwargs) + # register all modules in mmseg into the registries + # do not init the default scope here because it will be init in the runner + register_all_modules(init_default_scope=False) + + # load config + cfg = Config.fromfile(args.config) + cfg.launcher = args.launcher + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + cfg.load_from = args.checkpoint + + if args.show or args.show_dir: + cfg = trigger_visualization_hook(cfg, args) + + # build the runner from config + runner = Runner.from_cfg(cfg) + + # start testing + runner.test() if __name__ == '__main__': diff --git a/tools/torchserve/mmseg2torchserve.py b/tools/torchserve/mmseg2torchserve.py new file mode 100644 index 0000000000..23f99638e7 --- /dev/null +++ b/tools/torchserve/mmseg2torchserve.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser, Namespace +from pathlib import Path +from tempfile import TemporaryDirectory + +from mmengine import Config +from mmengine.utils import mkdir_or_exist + +try: + from model_archiver.model_packaging import package_model + from model_archiver.model_packaging_utils import ModelExportUtils +except ImportError: + package_model = None + + +def mmseg2torchserve( + config_file: str, + checkpoint_file: str, + output_folder: str, + model_name: str, + model_version: str = '1.0', + force: bool = False, +): + """Converts mmsegmentation model (config + checkpoint) to TorchServe + `.mar`. + + Args: + config_file: + In MMSegmentation config format. + The contents vary for each task repository. + checkpoint_file: + In MMSegmentation checkpoint format. + The contents vary for each task repository. + output_folder: + Folder where `{model_name}.mar` will be created. 
+ The file created will be in TorchServe archive format. + model_name: + If not None, used for naming the `{model_name}.mar` file + that will be created under `output_folder`. + If None, `{Path(checkpoint_file).stem}` will be used. + model_version: + Model's version. + force: + If True, if there is an existing `{model_name}.mar` + file under `output_folder` it will be overwritten. + """ + mkdir_or_exist(output_folder) + + config = Config.fromfile(config_file) + + with TemporaryDirectory() as tmpdir: + config.dump(f'{tmpdir}/config.py') + + args = Namespace( + **{ + 'model_file': f'{tmpdir}/config.py', + 'serialized_file': checkpoint_file, + 'handler': f'{Path(__file__).parent}/mmseg_handler.py', + 'model_name': model_name or Path(checkpoint_file).stem, + 'version': model_version, + 'export_path': output_folder, + 'force': force, + 'requirements_file': None, + 'extra_files': None, + 'runtime': 'python', + 'archive_format': 'default' + }) + manifest = ModelExportUtils.generate_manifest_json(args) + package_model(args, manifest) + + +def parse_args(): + parser = ArgumentParser( + description='Convert mmseg models to TorchServe `.mar` format.') + parser.add_argument('config', type=str, help='config file path') + parser.add_argument('checkpoint', type=str, help='checkpoint file path') + parser.add_argument( + '--output-folder', + type=str, + required=True, + help='Folder where `{model_name}.mar` will be created.') + parser.add_argument( + '--model-name', + type=str, + default=None, + help='If not None, used for naming the `{model_name}.mar`' + 'file that will be created under `output_folder`.' + 'If None, `{Path(checkpoint_file).stem}` will be used.') + parser.add_argument( + '--model-version', + type=str, + default='1.0', + help='Number used for versioning.') + parser.add_argument( + '-f', + '--force', + action='store_true', + help='overwrite the existing `{model_name}.mar`') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + + if package_model is None: + raise ImportError('`torch-model-archiver` is required.' + 'Try: pip install torch-model-archiver') + + mmseg2torchserve(args.config, args.checkpoint, args.output_folder, + args.model_name, args.model_version, args.force) diff --git a/tools/torchserve/mmseg_handler.py b/tools/torchserve/mmseg_handler.py new file mode 100644 index 0000000000..dbe5ded848 --- /dev/null +++ b/tools/torchserve/mmseg_handler.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import base64 +import os + +import cv2 +import mmcv +import torch +from mmengine.model.utils import revert_sync_batchnorm +from ts.torch_handler.base_handler import BaseHandler + +from mmseg.apis import inference_model, init_model + + +class MMsegHandler(BaseHandler): + + def initialize(self, context): + properties = context.system_properties + self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu' + self.device = torch.device(self.map_location + ':' + + str(properties.get('gpu_id')) if torch.cuda. 
+ is_available() else self.map_location) + self.manifest = context.manifest + + model_dir = properties.get('model_dir') + serialized_file = self.manifest['model']['serializedFile'] + checkpoint = os.path.join(model_dir, serialized_file) + self.config_file = os.path.join(model_dir, 'config.py') + + self.model = init_model(self.config_file, checkpoint, self.device) + self.model = revert_sync_batchnorm(self.model) + self.initialized = True + + def preprocess(self, data): + images = [] + + for row in data: + image = row.get('data') or row.get('body') + if isinstance(image, str): + image = base64.b64decode(image) + image = mmcv.imfrombytes(image) + images.append(image) + + return images + + def inference(self, data, *args, **kwargs): + results = [inference_model(self.model, img) for img in data] + return results + + def postprocess(self, data): + output = [] + + for image_result in data: + _, buffer = cv2.imencode('.png', image_result[0].astype('uint8')) + content = buffer.tobytes() + output.append(content) + return output diff --git a/tools/torchserve/test_torchserve.py b/tools/torchserve/test_torchserve.py new file mode 100644 index 0000000000..b015b66585 --- /dev/null +++ b/tools/torchserve/test_torchserve.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser +from io import BytesIO + +import matplotlib.pyplot as plt +import mmcv +import requests + +from mmseg.apis import inference_model, init_model + + +def parse_args(): + parser = ArgumentParser( + description='Compare result of torchserve and pytorch,' + 'and visualize them.') + parser.add_argument('img', help='Image file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('model_name', help='The model name in the server') + parser.add_argument( + '--inference-addr', + default='127.0.0.1:8080', + help='Address and port of the inference server') + parser.add_argument( + '--result-image', + type=str, + default=None, + help='save server output in result-image') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + + args = parser.parse_args() + return args + + +def main(args): + url = 'http://' + args.inference_addr + '/predictions/' + args.model_name + with open(args.img, 'rb') as image: + tmp_res = requests.post(url, image) + content = tmp_res.content + if args.result_image: + with open(args.result_image, 'wb') as out_image: + out_image.write(content) + plt.imshow(mmcv.imread(args.result_image, 'grayscale')) + plt.show() + else: + plt.imshow(plt.imread(BytesIO(content))) + plt.show() + model = init_model(args.config, args.checkpoint, args.device) + image = mmcv.imread(args.img) + result = inference_model(model, image) + plt.imshow(result[0]) + plt.show() + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/tools/train.py b/tools/train.py index 26e8274b1f..172815a9fe 100644 --- a/tools/train.py +++ b/tools/train.py @@ -1,50 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
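For context on how the TorchServe packaging helper above is meant to be used, here is a hypothetical programmatic call; the import path, all file paths, and the model name are placeholders. The resulting `{model_name}.mar` archive is what mmseg_handler.py later serves.

from tools.torchserve.mmseg2torchserve import mmseg2torchserve  # hypothetical import path

mmseg2torchserve(
    config_file='path/to/config.py',
    checkpoint_file='path/to/checkpoint.pth',
    output_folder='model_store',
    model_name='my_segmentor',
    model_version='1.0',
    force=True)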
import argparse -import copy +import logging import os import os.path as osp -import time -import mmcv -import torch -from mmcv.runner import init_dist -from mmcv.utils import Config, DictAction +from mmengine.config import Config, DictAction +from mmengine.logging import print_log +from mmengine.registry import RUNNERS +from mmengine.runner import Runner -from mmseg import __version__ -from mmseg.apis import set_random_seed, train_segmentor -from mmseg.datasets import build_dataset -from mmseg.models import build_segmentor -from mmseg.utils import collect_env, get_root_logger +from mmseg.utils import register_all_modules def parse_args(): parser = argparse.ArgumentParser(description='Train a segmentor') parser.add_argument('config', help='train config file path') - parser.add_argument('--work_dir', help='the dir to save logs and models') + parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument( - '--resume-from', help='the checkpoint file to resume from') - parser.add_argument( - '--no-validate', + '--resume', action='store_true', - help='whether not to evaluate the checkpoint during training') - group_gpus = parser.add_mutually_exclusive_group() - group_gpus.add_argument( - '--gpus', - type=int, - help='number of gpus to use ' - '(only applicable to non-distributed training)') - group_gpus.add_argument( - '--gpu-ids', - type=int, - nargs='+', - help='ids of gpus to use ' - '(only applicable to non-distributed training)') - parser.add_argument('--seed', type=int, default=None, help='random seed') + default=False, + help='resume from the latest checkpoint in the work_dir automatically') parser.add_argument( - '--deterministic', + '--amp', action='store_true', - help='whether to set deterministic options for CUDNN backend.') + default=False, + help='enable automatic-mixed-precision training') parser.add_argument( - '--options', nargs='+', action=DictAction, help='custom options') + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], @@ -61,12 +52,15 @@ def parse_args(): def main(): args = parse_args() + # register all modules in mmseg into the registries + # do not init the default scope here because it will be init in the runner + register_all_modules(init_default_scope=False) + + # load config cfg = Config.fromfile(args.config) - if args.options is not None: - cfg.merge_from_dict(args.options) - # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): - torch.backends.cudnn.benchmark = True + cfg.launcher = args.launcher + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) # work_dir is determined in this priority: CLI > segment in file > filename if args.work_dir is not None: @@ -76,81 +70,36 @@ def main(): # use config filename as default work_dir if cfg.work_dir is None cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0]) - if args.resume_from is not None: - cfg.resume_from = args.resume_from - if args.gpu_ids is not None: - cfg.gpu_ids = args.gpu_ids - else: - cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) - # init distributed env first, since logger depends on the dist info. - if args.launcher == 'none': - distributed = False + # enable automatic-mixed-precision training + if args.amp is True: + optim_wrapper = cfg.optim_wrapper.type + if optim_wrapper == 'AmpOptimWrapper': + print_log( + 'AMP training is already enabled in your config.', + logger='current', + level=logging.WARNING) + else: + assert optim_wrapper == 'OptimWrapper', ( + '`--amp` is only supported when the optimizer wrapper type is ' + f'`OptimWrapper` but got {optim_wrapper}.') + cfg.optim_wrapper.type = 'AmpOptimWrapper' + cfg.optim_wrapper.loss_scale = 'dynamic' + + # resume training + cfg.resume = args.resume + + # build the runner from config + if 'runner_type' not in cfg: + # build the default runner + runner = Runner.from_cfg(cfg) else: - distributed = True - init_dist(args.launcher, **cfg.dist_params) - - # create work_dir - mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) - # dump config - cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) - # init the logger before other steps - timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) - log_file = osp.join(cfg.work_dir, f'{timestamp}.log') - logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) - - # init the meta dict to record some important information such as - # environment info and seed, which will be logged - meta = dict() - # log env info - env_info_dict = collect_env() - env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()]) - dash_line = '-' * 60 + '\n' - logger.info('Environment info:\n' + dash_line + env_info + '\n' + - dash_line) - meta['env_info'] = env_info - - # log some basic info - logger.info(f'Distributed training: {distributed}') - logger.info(f'Config:\n{cfg.pretty_text}') - - # set random seeds - if args.seed is not None: - logger.info(f'Set random seed to {args.seed}, deterministic: ' - f'{args.deterministic}') - set_random_seed(args.seed, deterministic=args.deterministic) - cfg.seed = args.seed - meta['seed'] = args.seed - meta['exp_name'] = osp.basename(args.config) - - model = build_segmentor( - cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) - - logger.info(model) - - datasets = [build_dataset(cfg.data.train)] - if len(cfg.workflow) == 2: - 
val_dataset = copy.deepcopy(cfg.data.val) - val_dataset.pipeline = cfg.data.train.pipeline - datasets.append(build_dataset(val_dataset)) - if cfg.checkpoint_config is not None: - # save mmseg version, config file content and class names in - # checkpoints as meta data - cfg.checkpoint_config.meta = dict( - mmseg_version=__version__, - config=cfg.pretty_text, - CLASSES=datasets[0].CLASSES, - PALETTE=datasets[0].PALETTE) - # add an attribute for visualization convenience - model.CLASSES = datasets[0].CLASSES - train_segmentor( - model, - datasets, - cfg, - distributed=distributed, - validate=(not args.no_validate), - timestamp=timestamp, - meta=meta) + # build customized runner from the registry + # if 'runner_type' is set in the cfg + runner = RUNNERS.build(cfg) + + # start training + runner.train() if __name__ == '__main__':
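The `--amp` branch in the new train.py above only rewrites two config fields. A sketch on a hypothetical optim_wrapper follows; the SGD optimizer settings are placeholders, while the wrapper type and loss_scale values come from the code above.

# Before: a plain optimizer wrapper in the config (placeholder optimizer settings).
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.01, momentum=0.9))

# After `--amp`, train.py switches it to mixed-precision training:
optim_wrapper = dict(
    type='AmpOptimWrapper',
    loss_scale='dynamic',
    optimizer=dict(type='SGD', lr=0.01, momentum=0.9))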