diff --git a/configs/_base_/datasets/decompression_test_config.py b/configs/_base_/datasets/decompression_test_config.py
new file mode 100644
index 0000000000..0a52247a2e
--- /dev/null
+++ b/configs/_base_/datasets/decompression_test_config.py
@@ -0,0 +1,66 @@
+quality = 10
+test_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='RandomJPEGCompression',
+ params=dict(quality=[quality, quality], color_type='color'),
+ bgr2rgb=True,
+ keys=['img']),
+ dict(type='PackEditInputs')
+]
+
+classic5_data_root = 'data/Classic5'
+classic5_dataloader = dict(
+ num_workers=4,
+ persistent_workers=False,
+ drop_last=False,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type='BasicImageDataset',
+ metainfo=dict(dataset_type='classic5', task_name='CAR'),
+ data_root=classic5_data_root,
+ data_prefix=dict(img='', gt=''),
+ pipeline=test_pipeline))
+classic5_evaluator = [
+ dict(type='PSNR', prefix='Classic5'),
+ dict(type='SSIM', prefix='Classic5'),
+]
+
+live1_data_root = 'data/LIVE1'
+live1_dataloader = dict(
+ num_workers=4,
+ persistent_workers=False,
+ drop_last=False,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type='BasicImageDataset',
+ metainfo=dict(dataset_type='live1', task_name='CAR'),
+ data_root=live1_data_root,
+ data_prefix=dict(img='', gt=''),
+ pipeline=test_pipeline))
+live1_evaluator = [
+ dict(type='PSNR', prefix='LIVE1'),
+ dict(type='SSIM', prefix='LIVE1'),
+]
+
+# test config
+test_cfg = dict(type='MultiTestLoop')
+test_dataloader = [
+ classic5_dataloader,
+ live1_dataloader,
+]
+test_evaluator = [
+ classic5_evaluator,
+ live1_evaluator,
+]
diff --git a/configs/_base_/datasets/denoising-gaussian_color_test_config.py b/configs/_base_/datasets/denoising-gaussian_color_test_config.py
index c06f0a4c3d..7950bebf35 100644
--- a/configs/_base_/datasets/denoising-gaussian_color_test_config.py
+++ b/configs/_base_/datasets/denoising-gaussian_color_test_config.py
@@ -17,7 +17,7 @@
params=dict(
noise_type=['gaussian'],
noise_prob=[1],
- gaussian_sigma=[sigma * 255, sigma * 255],
+ gaussian_sigma=[sigma, sigma],
gaussian_gray_noise_prob=0),
keys=['img']),
dict(type='PackEditInputs')
diff --git a/configs/_base_/datasets/denoising-gaussian_gray_test_config.py b/configs/_base_/datasets/denoising-gaussian_gray_test_config.py
index 326aed8a3b..774869fd07 100644
--- a/configs/_base_/datasets/denoising-gaussian_gray_test_config.py
+++ b/configs/_base_/datasets/denoising-gaussian_gray_test_config.py
@@ -19,7 +19,7 @@
params=dict(
noise_type=['gaussian'],
noise_prob=[1],
- gaussian_sigma=[sigma * 255, sigma * 255],
+ gaussian_sigma=[sigma, sigma],
gaussian_gray_noise_prob=1),
keys=['img']),
dict(type='PackEditInputs')
diff --git a/configs/swinir/README.md b/configs/swinir/README.md
new file mode 100644
index 0000000000..2ada8e12e6
--- /dev/null
+++ b/configs/swinir/README.md
@@ -0,0 +1,512 @@
+# SwinIR (ICCVW'2021)
+
+> [SwinIR: Image Restoration Using Swin Transformer](https://arxiv.org/abs/2108.10257)
+
+> **Task**: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+
+
+
+## Abstract
+
+
+
+Image restoration is a long-standing low-level vision problem that aims to restore high-quality images from low-quality images (e.g., downscaled, noisy and compressed images). While state-of-the-art image restoration methods are based on convolutional neural networks, few attempts have been made with Transformers which show impressive performance on high-level vision tasks. In this paper, we propose a strong baseline model SwinIR for image restoration based on the Swin Transformer. SwinIR consists of three parts: shallow feature extraction, deep feature extraction and high-quality image reconstruction. In particular, the deep feature extraction module is composed of several residual Swin Transformer blocks (RSTB), each of which has several Swin Transformer layers together with a residual connection. We conduct experiments on three representative tasks: image super-resolution (including classical, lightweight and real-world image super-resolution), image denoising (including grayscale and color image denoising) and JPEG compression artifact reduction. Experimental results demonstrate that SwinIR outperforms state-of-the-art methods on different tasks by up to 0.14~0.45dB, while the total number of parameters can be reduced by up to 67%.
+
+
+
+
+
+![](https://user-images.githubusercontent.com/40970489/204525707-272fb8c6-1bb3-41f2-9a9b-612c48ddd9b4.png)
+
+
+## Results and models
+
+### **Classical Image Super-Resolution**
+
+Evaluated on Y channels, `scale` pixels in each border are cropped before evaluation.
+The metrics are `PSNR / SSIM` .
+
+| Method | Set5 PSNR | Set14 PSNR | DIV2K PSNR | Set5 SSIM | Set14 SSIM | DIV2K SSIM | GPU Info | Download |
+| :----------------------------------------------------------------: | :-------: | :--------: | :--------: | :-------: | :--------: | :--------: | :------: | :------------------------------------------------------------------: |
+| [swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k](/configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py) | 38.3240 | 34.1174 | 37.8921 | 0.9626 | 0.9230 | 0.9481 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k-ed2d419e.pth) \| log |
+| [swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k](/configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py) | 34.8640 | 30.7669 | 34.1397 | 0.9317 | 0.8508 | 0.8917 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k-926950f1.pth) \| log |
+| [swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k](/configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py) | 32.7315 | 28.9065 | 32.0953 | 0.9029 | 0.7915 | 0.8418 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k-88e4903d.pth) \| log |
+| [swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k](/configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py) | 38.3971 | 34.4149 | 37.9473 | 0.9629 | 0.9252 | 0.9488 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k-69e15fb6.pth) \| log |
+| [swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k](/configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py) | 34.9335 | 30.9258 | 34.2830 | 0.9323 | 0.8540 | 0.8939 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k-d6982f7b.pth) \| log |
+| [swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k](/configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py) | 32.9214 | 29.0792 | 32.3021 | 0.9053 | 0.7953 | 0.8451 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k-0502d775.pth) \| log |
+
+### **Lightweight Image Super-Resolution**
+
+Evaluated on Y channels, `scale` pixels in each border are cropped before evaluation.
+The metrics are `PSNR / SSIM` .
+
+| Method | Set5 PSNR | Set14 PSNR | DIV2K PSNR | Set5 SSIM | Set14 SSIM | DIV2K SSIM | GPU Info | Download |
+| :----------------------------------------------------------------: | :-------: | :--------: | :--------: | :-------: | :--------: | :--------: | :------: | :------------------------------------------------------------------: |
+| [swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k](/configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py) | 38.1289 | 33.8404 | 37.5844 | 0.9617 | 0.9207 | 0.9459 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k-131d3f64.pth) \| log |
+| [swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k](/configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py) | 34.6037 | 30.5340 | 33.8394 | 0.9293 | 0.8468 | 0.8867 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k-309cb239.pth) \| log |
+| [swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k](/configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py) | 32.4343 | 28.7441 | 31.8636 | 0.8984 | 0.7861 | 0.8353 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k-d6622d03.pth) \| log |
+
+### **Real-World Image Super-Resolution**
+
+Evaluated on Y channels.
+The metrics are `NIQE` .
+
+| Method | RealSRSet+5images NIQE | GPU Info | Download |
+| :----------------------------------------------------------------------------------: | :--------------------: | :------: | :------------------------------------------------------------------------------------: |
+| [swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost](/configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py) | 5.7975 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-c6425057.pth) \| log |
+| [swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost](/configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py) | 7.2738 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-6f0c425f.pth) \| log |
+| [swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost](/configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py) | 5.2329 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-36960d18.pth) \| log |
+| [swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost](/configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py) | 7.7460 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-a016a72f.pth) \| log |
+| [swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost](/configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py) | 5.1464 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-9f1599b5.pth) \| log |
+| [swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost](/configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py) | 7.6378 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-25f1722a.pth) \| log |
+
+### **Grayscale Image Denoising**
+
+Evaluated on grayscale images.
+The metrics are `PSNR` .
+
+| Method | Set12 PSNR | BSD68 PSNR | Urban100 PSNR | GPU Info | Download |
+| :----------------------------------------------------------------------------: | :--------: | :--------: | :-----------: | :------: | :-------------------------------------------------------------------------------: |
+| [swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15](/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py) | 33.9731 | 32.5203 | 34.3424 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15-6782691b.pth) \| log |
+| [swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25](/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py) | 31.6434 | 30.1377 | 31.9493 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25-d0d8d4da.pth) \| log |
+| [swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50](/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py) | 28.5651 | 27.3157 | 28.6626 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50-54c9968a.pth) \| log |
+
+### **Color Image Denoising**
+
+Evaluated on RGB channels.
+The metrics are `PSNR` .
+
+| Method | CBSD68 PSNR | Kodak24 PSNR | McMaster PSNR | Urban100 PSNR | GPU Info | Download |
+| :--------------------------------------------------------------------: | :---------: | :----------: | :-----------: | :-----------: | :------: | :-----------------------------------------------------------------------: |
+| [swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15](/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py) | 34.4136 | 35.3555 | 35.6205 | 35.1836 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15-c74a2cee.pth) \| log |
+| [swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25](/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py) | 31.7626 | 32.9003 | 33.3198 | 32.9458 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25-df2b1c0c.pth) \| log |
+| [swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50](/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py) | 28.5346 | 29.8058 | 30.2027 | 29.8832 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50-e369874c.pth) \| log |
+
+### **JPEG Compression Artifact Reduction (grayscale)**
+
+Evaluated on grayscale images.
+The metrics are `PSNR / SSIM` .
+
+| Method | Classic5 PSNR | Classic5 SSIM | LIVE1 PSNR | LIVE1 SSIM | GPU Info | Download |
+| :----------------------------------------------------------------------: | :-----------: | :-----------: | :--------: | :--------: | :------: | :------------------------------------------------------------------------: |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py) | 30.2746 | 0.8254 | 29.8611 | 0.8292 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10-da93c8e9.pth) \| log |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py) | 32.5331 | 0.8753 | 32.2667 | 0.8914 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20-d47367b1.pth) \| log |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py) | 33.7504 | 0.8966 | 33.7001 | 0.9179 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30-52c083cf.pth) \| log |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py) | 34.5377 | 0.9087 | 34.6846 | 0.9322 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40-803e8d9b.pth) \| log |
+
+### **JPEG Compression Artifact Reduction (color)**
+
+Evaluated on RGB channels.
+The metrics are `PSNR / SSIM` .
+
+| Method | Classic5 PSNR | Classic5 SSIM | LIVE1 PSNR | LIVE1 SSIM | GPU Info | Download |
+| :----------------------------------------------------------------------: | :-----------: | :-----------: | :--------: | :--------: | :------: | :------------------------------------------------------------------------: |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py) | 30.1019 | 0.8217 | 28.0676 | 0.8094 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10-09aafadc.pth) \| log |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py) | 32.3489 | 0.8727 | 30.4514 | 0.8745 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20-b8a42b5e.pth) \| log |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py) | 33.6028 | 0.8949 | 31.8235 | 0.9023 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30-e9fe6859.pth) \| log |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py) | 34.4344 | 0.9076 | 32.7610 | 0.9179 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40-5b77a6e6.pth) \| log |
+
+## Quick Start
+
+**Train**
+
+
+Train Instructions
+
+You can use the following commands to train a model with cpu or single/multiple GPUs.
+
+```shell
+# cpu train
+# 001 Classical Image Super-Resolution (middle size)
+# (setting1: when model is trained on DIV2K and with training_patch_size=48)
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+
+# (setting2: when model is trained on DIV2K+Flickr2K and with training_patch_size=64)
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+
+# 002 Lightweight Image Super-Resolution (small size)
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+
+# 003 Real-World Image Super-Resolution
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
+
+# 004 Grayscale Image Denoising (middle size)
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py
+
+# 005 Color Image Denoising (middle size)
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py
+
+# 006 JPEG Compression Artifact Reduction (middle size, using window_size=7 because JPEG encoding uses 8x8 blocks)
+# grayscale
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py
+
+# color
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py
+
+
+# single-gpu train
+# 001 Classical Image Super-Resolution (middle size)
+# (setting1: when model is trained on DIV2K and with training_patch_size=48)
+python tools/train.py configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+python tools/train.py configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+python tools/train.py configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+
+# (setting2: when model is trained on DIV2K+Flickr2K and with training_patch_size=64)
+python tools/train.py configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+python tools/train.py configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+python tools/train.py configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+
+# 002 Lightweight Image Super-Resolution (small size)
+python tools/train.py configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+python tools/train.py configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+python tools/train.py configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+
+# 003 Real-World Image Super-Resolution
+python tools/train.py configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+python tools/train.py configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+python tools/train.py configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+python tools/train.py configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+python tools/train.py configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
+python tools/train.py configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
+
+# 004 Grayscale Image Denoising (middle size)
+python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py
+python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py
+python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py
+
+# 005 Color Image Denoising (middle size)
+python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py
+python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py
+python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py
+
+# 006 JPEG Compression Artifact Reduction (middle size, using window_size=7 because JPEG encoding uses 8x8 blocks)
+# grayscale
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py
+
+# color
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py
+
+
+# multi-gpu train
+# 001 Classical Image Super-Resolution (middle size)
+# (setting1: when model is trained on DIV2K and with training_patch_size=48)
+./tools/dist_train.sh configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py 8
+./tools/dist_train.sh configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py 8
+./tools/dist_train.sh configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py 8
+
+# (setting2: when model is trained on DIV2K+Flickr2K and with training_patch_size=64)
+./tools/dist_train.sh configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py 8
+./tools/dist_train.sh configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py 8
+./tools/dist_train.sh configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py 8
+
+# 002 Lightweight Image Super-Resolution (small size)
+./tools/dist_train.sh configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py 8
+./tools/dist_train.sh configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py 8
+./tools/dist_train.sh configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py 8
+
+# 003 Real-World Image Super-Resolution
+./tools/dist_train.sh configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py 8
+./tools/dist_train.sh configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py 8
+./tools/dist_train.sh configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py 8
+./tools/dist_train.sh configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py 8
+./tools/dist_train.sh configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py 8
+./tools/dist_train.sh configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py 8
+
+# 004 Grayscale Image Denoising (middle size)
+./tools/dist_train.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py 8
+./tools/dist_train.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py 8
+./tools/dist_train.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py 8
+
+# 005 Color Image Denoising (middle size)
+./tools/dist_train.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py 8
+./tools/dist_train.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py 8
+./tools/dist_train.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py 8
+
+# 006 JPEG Compression Artifact Reduction (middle size, using window_size=7 because JPEG encoding uses 8x8 blocks)
+# grayscale
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py 8
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py 8
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py 8
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py 8
+
+# color
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py 8
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py 8
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py 8
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py 8
+```
+
+For more details, you can refer to **Train a model** part in [train_test.md](/docs/en/user_guides/train_test.md#Train-a-model-in-MMEditing).
+
+
+
+**Test**
+
+
+Test Instructions
+
+You can use the following commands to test a model with cpu or single/multiple GPUs.
+
+```shell
+# cpu test
+# 001 Classical Image Super-Resolution (middle size)
+# (setting1: when model is trained on DIV2K and with training_patch_size=48)
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k-ed2d419e.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k-926950f1.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k-88e4903d.pth
+
+# (setting2: when model is trained on DIV2K+Flickr2K and with training_patch_size=64)
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k-69e15fb6.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k-d6982f7b.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k-0502d775.pth
+
+
+# 002 Lightweight Image Super-Resolution (small size)
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k-131d3f64.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k-309cb239.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k-d6622d03.pth
+
+# 003 Real-World Image Super-Resolution
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-c6425057.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-6f0c425f.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-36960d18.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-a016a72f.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-9f1599b5.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-25f1722a.pth
+
+# 004 Grayscale Image Denoising (middle size)
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15-6782691b.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25-d0d8d4da.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50-54c9968a.pth
+
+# 005 Color Image Denoising (middle size)
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15-c74a2cee.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25-df2b1c0c.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50-e369874c.pth
+
+# 006 JPEG Compression Artifact Reduction (middle size, using window_size=7 because JPEG encoding uses 8x8 blocks)
+# grayscale
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10-da93c8e9.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20-d47367b1.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30-52c083cf.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40-803e8d9b.pth
+
+
+# color
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10-09aafadc.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20-b8a42b5e.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30-e9fe6859.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40-5b77a6e6.pth
+
+
+
+# single-gpu test
+# 001 Classical Image Super-Resolution (middle size)
+# (setting1: when model is trained on DIV2K and with training_patch_size=48)
+python tools/test.py configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k-ed2d419e.pth
+
+python tools/test.py configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k-926950f1.pth
+
+python tools/test.py configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k-88e4903d.pth
+
+# (setting2: when model is trained on DIV2K+Flickr2K and with training_patch_size=64)
+python tools/test.py configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k-69e15fb6.pth
+
+python tools/test.py configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k-d6982f7b.pth
+
+python tools/test.py configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k-0502d775.pth
+
+
+# 002 Lightweight Image Super-Resolution (small size)
+python tools/test.py configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k-131d3f64.pth
+
+python tools/test.py configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k-309cb239.pth
+
+python tools/test.py configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k-d6622d03.pth
+
+
+# 003 Real-World Image Super-Resolution
+python tools/test.py configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-c6425057.pth
+
+python tools/test.py configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-6f0c425f.pth
+
+python tools/test.py configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-36960d18.pth
+
+python tools/test.py configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-a016a72f.pth
+
+python tools/test.py configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-9f1599b5.pth
+
+python tools/test.py configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-25f1722a.pth
+
+
+# 004 Grayscale Image Denoising (middle size)
+python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15-6782691b.pth
+
+python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25-d0d8d4da.pth
+
+python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50-54c9968a.pth
+
+
+# 005 Color Image Denoising (middle size)
+python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15-c74a2cee.pth
+
+python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25-df2b1c0c.pth
+
+python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50-e369874c.pth
+
+
+# 006 JPEG Compression Artifact Reduction (middle size, using window_size=7 because JPEG encoding uses 8x8 blocks)
+# grayscale
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10-da93c8e9.pth
+
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20-d47367b1.pth
+
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30-52c083cf.pth
+
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40-803e8d9b.pth
+
+
+# color
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10-09aafadc.pth
+
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20-b8a42b5e.pth
+
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30-e9fe6859.pth
+
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40-5b77a6e6.pth
+
+
+
+# multi-gpu test
+# 001 Classical Image Super-Resolution (middle size)
+# (setting1: when model is trained on DIV2K and with training_patch_size=48)
+./tools/dist_test.sh configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k-ed2d419e.pth
+
+./tools/dist_test.sh configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k-926950f1.pth
+
+./tools/dist_test.sh configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k-88e4903d.pth
+
+# (setting2: when model is trained on DIV2K+Flickr2K and with training_patch_size=64)
+./tools/dist_test.sh configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k-69e15fb6.pth
+
+./tools/dist_test.sh configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k-d6982f7b.pth
+
+./tools/dist_test.sh configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k-0502d775.pth
+
+# 002 Lightweight Image Super-Resolution (small size)
+./tools/dist_test.sh configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k-131d3f64.pth
+
+./tools/dist_test.sh configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k-309cb239.pth
+
+./tools/dist_test.sh configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k-d6622d03.pth
+
+# 003 Real-World Image Super-Resolution
+./tools/dist_test.sh configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-c6425057.pth
+
+./tools/dist_test.sh configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-6f0c425f.pth
+
+./tools/dist_test.sh configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-36960d18.pth
+
+./tools/dist_test.sh configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-a016a72f.pth
+
+./tools/dist_test.sh configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-9f1599b5.pth
+
+./tools/dist_test.sh configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-25f1722a.pth
+
+# 004 Grayscale Image Denoising (middle size)
+./tools/dist_test.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15-6782691b.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25-d0d8d4da.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50-54c9968a.pth
+
+# 005 Color Image Denoising (middle size)
+./tools/dist_test.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15-c74a2cee.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25-df2b1c0c.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50-e369874c.pth
+
+# 006 JPEG Compression Artifact Reduction (middle size, using window_size=7 because JPEG encoding uses 8x8 blocks)
+# grayscale
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10-da93c8e9.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20-d47367b1.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30-52c083cf.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40-803e8d9b.pth
+
+# color
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10-09aafadc.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20-b8a42b5e.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30-e9fe6859.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40-5b77a6e6.pth
+
+```
+
+For more details, you can refer to **Test a pre-trained model** part in [train_test.md](/docs/en/user_guides/train_test.md#Test-a-pre-trained-model-in-MMEditing).
+
+
+
+## Citation
+
+```bibtex
+@inproceedings{liang2021swinir,
+ title={Swinir: Image restoration using swin transformer},
+ author={Liang, Jingyun and Cao, Jiezhang and Sun, Guolei and Zhang, Kai and Van Gool, Luc and Timofte, Radu},
+ booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
+ pages={1833--1844},
+ year={2021}
+}
+```
diff --git a/configs/swinir/README_zh-CN.md b/configs/swinir/README_zh-CN.md
new file mode 100644
index 0000000000..4fccfcbaac
--- /dev/null
+++ b/configs/swinir/README_zh-CN.md
@@ -0,0 +1,503 @@
+# SwinIR (ICCVW'2021)
+
+> **任务**: 图像超分辨率, 图像去噪, JPEG压缩伪影移除
+
+
+
+
+SwinIR (ICCVW'2021)
+
+```bibtex
+@inproceedings{liang2021swinir,
+ title={Swinir: Image restoration using swin transformer},
+ author={Liang, Jingyun and Cao, Jiezhang and Sun, Guolei and Zhang, Kai and Van Gool, Luc and Timofte, Radu},
+ booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
+ pages={1833--1844},
+ year={2021}
+}
+```
+
+
+
+
+
+### **Classical Image Super-Resolution**
+
+在 Y 通道上进行评估,在评估之前裁剪每个边界中的 `scale` 像素。
+我们使用 `PSNR` 和 `SSIM` 作为指标。
+
+| 算法 | Set5 PSNR | Set14 PSNR | DIV2K PSNR | Set5 SSIM | Set14 SSIM | DIV2K SSIM | GPU 信息 | 下载 |
+| :-----------------------------------------------------------------: | :-------: | :--------: | :--------: | :-------: | :--------: | :--------: | :------: | :-----------------------------------------------------------------: |
+| [swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k](/configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py) | 38.3240 | 34.1174 | 37.8921 | 0.9626 | 0.9230 | 0.9481 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k-ed2d419e.pth) \| log |
+| [swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k](/configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py) | 34.8640 | 30.7669 | 34.1397 | 0.9317 | 0.8508 | 0.8917 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k-926950f1.pth) \| log |
+| [swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k](/configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py) | 32.7315 | 28.9065 | 32.0953 | 0.9029 | 0.7915 | 0.8418 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k-88e4903d.pth) \| log |
+| [swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k](/configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py) | 38.3971 | 34.4149 | 37.9473 | 0.9629 | 0.9252 | 0.9488 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k-69e15fb6.pth) \| log |
+| [swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k](/configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py) | 34.9335 | 30.9258 | 34.2830 | 0.9323 | 0.8540 | 0.8939 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k-d6982f7b.pth) \| log |
+| [swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k](/configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py) | 32.9214 | 29.0792 | 32.3021 | 0.9053 | 0.7953 | 0.8451 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k-0502d775.pth) \| log |
+
+### **Lightweight Image Super-Resolution**
+
+在 Y 通道上进行评估,在评估之前裁剪每个边界中的 `scale` 像素。
+我们使用 `PSNR` 和 `SSIM` 作为指标。
+
+| 算法 | Set5 PSNR | Set14 PSNR | DIV2K PSNR | Set5 SSIM | Set14 SSIM | DIV2K SSIM | GPU 信息 | 下载 |
+| :-----------------------------------------------------------------: | :-------: | :--------: | :--------: | :-------: | :--------: | :--------: | :------: | :-----------------------------------------------------------------: |
+| [swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k](/configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py) | 38.1289 | 33.8404 | 37.5844 | 0.9617 | 0.9207 | 0.9459 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k-131d3f64.pth) \| log |
+| [swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k](/configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py) | 34.6037 | 30.5340 | 33.8394 | 0.9293 | 0.8468 | 0.8867 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k-309cb239.pth) \| log |
+| [swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k](/configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py) | 32.4343 | 28.7441 | 31.8636 | 0.8984 | 0.7861 | 0.8353 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k-d6622d03.pth) \| log |
+
+### **Real-World Image Super-Resolution**
+
+在 Y 通道上进行评估。
+我们使用 NIQE 作为指标。
+
+| 算法 | RealSRSet+5images NIQE | GPU 信息 | 下载 |
+| :-----------------------------------------------------------------------------------: | :--------------------: | :------: | :-----------------------------------------------------------------------------------: |
+| [swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost](/configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py) | 5.7975 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-c6425057.pth) \| log |
+| [swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost](/configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py) | 7.2738 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-6f0c425f.pth) \| log |
+| [swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost](/configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py) | 5.2329 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-36960d18.pth) \| log |
+| [swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost](/configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py) | 7.7460 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-a016a72f.pth) \| log |
+| [swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost](/configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py) | 5.1464 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-9f1599b5.pth) \| log |
+| [swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost](/configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py) | 7.6378 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-25f1722a.pth) \| log |
+
+### **Grayscale Image Denoising**
+
+在灰度图上进行评估。
+我们使用 PSNR 作为指标。
+
+| 算法 | Set12 PSNR | BSD68 PSNR | Urban100 PSNR | GPU 信息 | 下载 |
+| :-----------------------------------------------------------------------------: | :--------: | :--------: | :-----------: | :------: | :------------------------------------------------------------------------------: |
+| [swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15](/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py) | 33.9731 | 32.5203 | 34.3424 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15-6782691b.pth) \| log |
+| [swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25](/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py) | 31.6434 | 30.1377 | 31.9493 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25-d0d8d4da.pth) \| log |
+| [swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50](/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py) | 28.5651 | 27.3157 | 28.6626 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50-54c9968a.pth) \| log |
+
+### **Color Image Denoising**
+
+在 RGB 通道上进行评估。
+我们使用 PSNR 作为指标。
+
+| 算法 | CBSD68 PSNR | Kodak24 PSNR | McMaster PSNR | Urban100 PSNR | GPU 信息 | 下载 |
+| :---------------------------------------------------------------------: | :---------: | :----------: | :-----------: | :-----------: | :------: | :----------------------------------------------------------------------: |
+| [swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15](/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py) | 34.4136 | 35.3555 | 35.6205 | 35.1836 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15-c74a2cee.pth) \| log |
+| [swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25](/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py) | 31.7626 | 32.9003 | 33.3198 | 32.9458 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25-df2b1c0c.pth) \| log |
+| [swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50](/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py) | 28.5346 | 29.8058 | 30.2027 | 29.8832 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50-e369874c.pth) \| log |
+
+### **JPEG Compression Artifact Reduction (grayscale)**
+
+在灰度图上进行评估。
+我们使用 PSNR 和 SSIM 作为指标。
+
+| 算法 | Classic5 PSNR | Classic5 SSIM | LIVE1 PSNR | LIVE1 SSIM | GPU 信息 | 下载 |
+| :-----------------------------------------------------------------------: | :-----------: | :-----------: | :--------: | :--------: | :------: | :-----------------------------------------------------------------------: |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py) | 30.2746 | 0.8254 | 29.8611 | 0.8292 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10-da93c8e9.pth) \| log |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py) | 32.5331 | 0.8753 | 32.2667 | 0.8914 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20-d47367b1.pth) \| log |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py) | 33.7504 | 0.8966 | 33.7001 | 0.9179 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30-52c083cf.pth) \| log |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py) | 34.5377 | 0.9087 | 34.6846 | 0.9322 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40-803e8d9b.pth) \| log |
+
+### **JPEG Compression Artifact Reduction (color)**
+
+在 RGB 通道上进行评估。
+我们使用 PSNR 和 SSIM 作为指标。
+
+| 算法 | Classic5 PSNR | Classic5 SSIM | LIVE1 PSNR | LIVE1 SSIM | GPU 信息 | 下载 |
+| :-----------------------------------------------------------------------: | :-----------: | :-----------: | :--------: | :--------: | :------: | :-----------------------------------------------------------------------: |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py) | 30.1019 | 0.8217 | 28.0676 | 0.8094 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10-09aafadc.pth) \| log |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py) | 32.3489 | 0.8727 | 30.3489 | 0.8745 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20-b8a42b5e.pth) \| log |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py) | 33.6028 | 0.8949 | 31.8235 | 0.9023 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30-e9fe6859.pth) \| log |
+| [swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40](/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py) | 34.4344 | 0.9076 | 32.7610 | 0.9179 | 8 | [model](https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40-5b77a6e6.pth) \| log |
+
+## 快速开始
+
+**训练**
+
+
+训练说明
+
+您可以使用以下命令来训练模型。
+
+```shell
+# CPU上训练
+# 001 Classical Image Super-Resolution (middle size)
+# (setting1: when model is trained on DIV2K and with training_patch_size=48)
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+
+# (setting2: when model is trained on DIV2K+Flickr2K and with training_patch_size=64)
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+
+# 002 Lightweight Image Super-Resolution (small size)
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+
+# 003 Real-World Image Super-Resolution
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
+
+# 004 Grayscale Image Denoising (middle size)
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py
+
+# 005 Color Image Denoising (middle size)
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py
+
+# 006 JPEG Compression Artifact Reduction (middle size, using window_size=7 because JPEG encoding uses 8x8 blocks)
+# grayscale
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py
+
+# color
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py
+CUDA_VISIBLE_DEVICES=-1 python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py
+
+
+
+# 单个GPU上训练
+# 001 Classical Image Super-Resolution (middle size)
+# (setting1: when model is trained on DIV2K and with training_patch_size=48)
+python tools/train.py configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+python tools/train.py configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+python tools/train.py configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+
+# (setting2: when model is trained on DIV2K+Flickr2K and with training_patch_size=64)
+python tools/train.py configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+python tools/train.py configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+python tools/train.py configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+
+# 002 Lightweight Image Super-Resolution (small size)
+python tools/train.py configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+python tools/train.py configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+python tools/train.py configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+
+# 003 Real-World Image Super-Resolution
+python tools/train.py configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+python tools/train.py configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+python tools/train.py configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+python tools/train.py configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+python tools/train.py configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
+python tools/train.py configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
+
+# 004 Grayscale Image Denoising (middle size)
+python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py
+python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py
+python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py
+
+# 005 Color Image Denoising (middle size)
+python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py
+python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py
+python tools/train.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py
+
+# 006 JPEG Compression Artifact Reduction (middle size, using window_size=7 because JPEG encoding uses 8x8 blocks)
+# grayscale
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py
+
+# color
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py
+python tools/train.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py
+
+
+
+# 多个GPU上训练
+# 001 Classical Image Super-Resolution (middle size)
+# (setting1: when model is trained on DIV2K and with training_patch_size=48)
+./tools/dist_train.sh configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py 8
+./tools/dist_train.sh configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py 8
+./tools/dist_train.sh configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py 8
+
+# (setting2: when model is trained on DIV2K+Flickr2K and with training_patch_size=64)
+./tools/dist_train.sh configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py 8
+./tools/dist_train.sh configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py 8
+./tools/dist_train.sh configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py 8
+
+# 002 Lightweight Image Super-Resolution (small size)
+./tools/dist_train.sh configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py 8
+./tools/dist_train.sh configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py 8
+./tools/dist_train.sh configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py 8
+
+# 003 Real-World Image Super-Resolution
+./tools/dist_train.sh configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py 8
+./tools/dist_train.sh configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py 8
+./tools/dist_train.sh configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py 8
+./tools/dist_train.sh configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py 8
+./tools/dist_train.sh configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py 8
+./tools/dist_train.sh configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py 8
+
+# 004 Grayscale Image Denoising (middle size)
+./tools/dist_train.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py 8
+./tools/dist_train.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py 8
+./tools/dist_train.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py 8
+
+# 005 Color Image Denoising (middle size)
+./tools/dist_train.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py 8
+./tools/dist_train.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py 8
+./tools/dist_train.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py 8
+
+# 006 JPEG Compression Artifact Reduction (middle size, using window_size=7 because JPEG encoding uses 8x8 blocks)
+# grayscale
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py 8
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py 8
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py 8
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py 8
+
+# color
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py 8
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py 8
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py 8
+./tools/dist_train.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py 8
+```
+
+更多细节可以参考 [train_test.md](/docs/zh_cn/user_guides/train_test.md) 中的 **Train a model** 部分。
+
+
+
+**测试**
+
+
+测试说明
+
+您可以使用以下命令来测试模型。
+
+```shell
+# CPU上测试
+# 001 Classical Image Super-Resolution (middle size)
+# (setting1: when model is trained on DIV2K and with training_patch_size=48)
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k-ed2d419e.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k-926950f1.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k-88e4903d.pth
+
+# (setting2: when model is trained on DIV2K+Flickr2K and with training_patch_size=64)
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k-69e15fb6.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k-d6982f7b.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k-0502d775.pth
+
+
+# 002 Lightweight Image Super-Resolution (small size)
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k-131d3f64.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k-309cb239.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k-d6622d03.pth
+
+# 003 Real-World Image Super-Resolution
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-c6425057.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-6f0c425f.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-36960d18.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-a016a72f.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-9f1599b5.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-25f1722a.pth
+
+# 004 Grayscale Image Denoising (middle size)
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15-6782691b.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25-d0d8d4da.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50-54c9968a.pth
+
+# 005 Color Image Denoising (middle size)
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15-c74a2cee.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25-df2b1c0c.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50-e369874c.pth
+
+# 006 JPEG Compression Artifact Reduction (middle size, using window_size=7 because JPEG encoding uses 8x8 blocks)
+# grayscale
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10-da93c8e9.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20-d47367b1.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30-52c083cf.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40-803e8d9b.pth
+
+
+# color
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10-09aafadc.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20-b8a42b5e.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30-e9fe6859.pth
+
+CUDA_VISIBLE_DEVICES=-1 python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40-5b77a6e6.pth
+
+
+
+# 单个GPU上测试
+# 001 Classical Image Super-Resolution (middle size)
+# (setting1: when model is trained on DIV2K and with training_patch_size=48)
+python tools/test.py configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k-ed2d419e.pth
+
+python tools/test.py configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k-926950f1.pth
+
+python tools/test.py configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k-88e4903d.pth
+
+# (setting2: when model is trained on DIV2K+Flickr2K and with training_patch_size=64)
+python tools/test.py configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k-69e15fb6.pth
+
+python tools/test.py configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k-d6982f7b.pth
+
+python tools/test.py configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k-0502d775.pth
+
+
+# 002 Lightweight Image Super-Resolution (small size)
+python tools/test.py configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k-131d3f64.pth
+
+python tools/test.py configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k-309cb239.pth
+
+python tools/test.py configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k-d6622d03.pth
+
+
+# 003 Real-World Image Super-Resolution
+python tools/test.py configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-c6425057.pth
+
+python tools/test.py configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-6f0c425f.pth
+
+python tools/test.py configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-36960d18.pth
+
+python tools/test.py configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-a016a72f.pth
+
+python tools/test.py configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-9f1599b5.pth
+
+python tools/test.py configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-25f1722a.pth
+
+
+# 004 Grayscale Image Denoising (middle size)
+python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15-6782691b.pth
+
+python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25-d0d8d4da.pth
+
+python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50-54c9968a.pth
+
+
+# 005 Color Image Denoising (middle size)
+python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15-c74a2cee.pth
+
+python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25-df2b1c0c.pth
+
+python tools/test.py configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50-e369874c.pth
+
+
+# 006 JPEG Compression Artifact Reduction (middle size, using window_size=7 because JPEG encoding uses 8x8 blocks)
+# grayscale
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10-da93c8e9.pth
+
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20-d47367b1.pth
+
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30-52c083cf.pth
+
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40-803e8d9b.pth
+
+
+# color
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10-09aafadc.pth
+
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20-b8a42b5e.pth
+
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30-e9fe6859.pth
+
+python tools/test.py configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40-5b77a6e6.pth
+
+
+
+# 多GPU测试
+# 001 Classical Image Super-Resolution (middle size)
+# (setting1: when model is trained on DIV2K and with training_patch_size=48)
+./tools/dist_test.sh configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k-ed2d419e.pth
+
+./tools/dist_test.sh configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k-926950f1.pth
+
+./tools/dist_test.sh configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k-88e4903d.pth
+
+# (setting2: when model is trained on DIV2K+Flickr2K and with training_patch_size=64)
+./tools/dist_test.sh configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k-69e15fb6.pth
+
+./tools/dist_test.sh configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k-d6982f7b.pth
+
+./tools/dist_test.sh configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k-0502d775.pth
+
+# 002 Lightweight Image Super-Resolution (small size)
+./tools/dist_test.sh configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k-131d3f64.pth
+
+./tools/dist_test.sh configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k-309cb239.pth
+
+./tools/dist_test.sh configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k-d6622d03.pth
+
+# 003 Real-World Image Super-Resolution
+./tools/dist_test.sh configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-c6425057.pth
+
+./tools/dist_test.sh configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-6f0c425f.pth
+
+./tools/dist_test.sh configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-36960d18.pth
+
+./tools/dist_test.sh configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-a016a72f.pth
+
+./tools/dist_test.sh configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-9f1599b5.pth
+
+./tools/dist_test.sh configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-25f1722a.pth
+
+# 004 Grayscale Image Denoising (middle size)
+./tools/dist_test.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15-6782691b.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25-d0d8d4da.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50-54c9968a.pth
+
+# 005 Color Image Denoising (middle size)
+./tools/dist_test.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15-c74a2cee.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25-df2b1c0c.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50-e369874c.pth
+
+# 006 JPEG Compression Artifact Reduction (middle size, using window_size=7 because JPEG encoding uses 8x8 blocks)
+# grayscale
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10-da93c8e9.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20-d47367b1.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30-52c083cf.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40-803e8d9b.pth
+
+# color
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10-09aafadc.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20-b8a42b5e.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30-e9fe6859.pth
+
+./tools/dist_test.sh configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40-5b77a6e6.pth
+
+```
+
+更多细节可以参考 [train_test.md](/docs/zh_cn/user_guides/train_test.md) 中的 **Test a pre-trained model** 部分。
+
+
diff --git a/configs/swinir/metafile.yml b/configs/swinir/metafile.yml
new file mode 100644
index 0000000000..f6a4a58617
--- /dev/null
+++ b/configs/swinir/metafile.yml
@@ -0,0 +1,446 @@
+Collections:
+- Metadata:
+ Architecture:
+ - SwinIR
+ Name: SwinIR
+ Paper:
+ - https://arxiv.org/abs/2108.10257
+ README: configs/swinir/README.md
+ Task:
+ - image super-resolution
+ - image denoising
+ - jpeg compression artifact reduction
+ Year: 2021
+Models:
+- Config: configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: DIV2K
+ Name: swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k
+ Results:
+ - Dataset: DIV2K
+ Metrics:
+ DIV2K PSNR: 37.8921
+ DIV2K SSIM: 0.9481
+ Set14 PSNR: 34.1174
+ Set14 SSIM: 0.923
+ Set5 PSNR: 38.324
+ Set5 SSIM: 0.9626
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k-ed2d419e.pth
+- Config: configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: DIV2K
+ Name: swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k
+ Results:
+ - Dataset: DIV2K
+ Metrics:
+ DIV2K PSNR: 34.1397
+ DIV2K SSIM: 0.8917
+ Set14 PSNR: 30.7669
+ Set14 SSIM: 0.8508
+ Set5 PSNR: 34.864
+ Set5 SSIM: 0.9317
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k-926950f1.pth
+- Config: configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: DIV2K
+ Name: swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k
+ Results:
+ - Dataset: DIV2K
+ Metrics:
+ DIV2K PSNR: 32.0953
+ DIV2K SSIM: 0.8418
+ Set14 PSNR: 28.9065
+ Set14 SSIM: 0.7915
+ Set5 PSNR: 32.7315
+ Set5 SSIM: 0.9029
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k-88e4903d.pth
+- Config: configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k
+ Results:
+ - Dataset: Others
+ Metrics:
+ DIV2K PSNR: 37.9473
+ DIV2K SSIM: 0.9488
+ Set14 PSNR: 34.4149
+ Set14 SSIM: 0.9252
+ Set5 PSNR: 38.3971
+ Set5 SSIM: 0.9629
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k-69e15fb6.pth
+- Config: configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k
+ Results:
+ - Dataset: Others
+ Metrics:
+ DIV2K PSNR: 34.283
+ DIV2K SSIM: 0.8939
+ Set14 PSNR: 30.9258
+ Set14 SSIM: 0.854
+ Set5 PSNR: 34.9335
+ Set5 SSIM: 0.9323
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k-d6982f7b.pth
+- Config: configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k
+ Results:
+ - Dataset: Others
+ Metrics:
+ DIV2K PSNR: 32.3021
+ DIV2K SSIM: 0.8451
+ Set14 PSNR: 29.0792
+ Set14 SSIM: 0.7953
+ Set5 PSNR: 32.9214
+ Set5 SSIM: 0.9053
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k-0502d775.pth
+- Config: configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: DIV2K
+ Name: swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k
+ Results:
+ - Dataset: DIV2K
+ Metrics:
+ DIV2K PSNR: 37.5844
+ DIV2K SSIM: 0.9459
+ Set14 PSNR: 33.8404
+ Set14 SSIM: 0.9207
+ Set5 PSNR: 38.1289
+ Set5 SSIM: 0.9617
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k-131d3f64.pth
+- Config: configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: DIV2K
+ Name: swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k
+ Results:
+ - Dataset: DIV2K
+ Metrics:
+ DIV2K PSNR: 33.8394
+ DIV2K SSIM: 0.8867
+ Set14 PSNR: 30.534
+ Set14 SSIM: 0.8468
+ Set5 PSNR: 34.6037
+ Set5 SSIM: 0.9293
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k-309cb239.pth
+- Config: configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: DIV2K
+ Name: swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k
+ Results:
+ - Dataset: DIV2K
+ Metrics:
+ DIV2K PSNR: 31.8636
+ DIV2K SSIM: 0.8353
+ Set14 PSNR: 28.7441
+ Set14 SSIM: 0.7861
+ Set5 PSNR: 32.4343
+ Set5 SSIM: 0.8984
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k-d6622d03.pth
+- Config: configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost
+ Results:
+ - Dataset: Others
+ Metrics:
+ RealSRSet+5images NIQE: 5.7975
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-c6425057.pth
+- Config: configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost
+ Results:
+ - Dataset: Others
+ Metrics:
+ RealSRSet+5images NIQE: 7.2738
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-6f0c425f.pth
+- Config: configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost
+ Results:
+ - Dataset: Others
+ Metrics:
+ RealSRSet+5images NIQE: 5.2329
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-36960d18.pth
+- Config: configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost
+ Results:
+ - Dataset: Others
+ Metrics:
+ RealSRSet+5images NIQE: 7.746
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-os-a016a72f.pth
+- Config: configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost
+ Results:
+ - Dataset: Others
+ Metrics:
+ RealSRSet+5images NIQE: 5.1464
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-9f1599b5.pth
+- Config: configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost
+ Results:
+ - Dataset: Others
+ Metrics:
+ RealSRSet+5images NIQE: 7.6378
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-os-9f1599b5.pth
+- Config: configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15
+ Results:
+ - Dataset: Others
+ Metrics:
+ BSD68 PSNR: 32.5203
+ Set12 PSNR: 33.9731
+ Urban100 PSNR: 34.3424
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15-6782691b.pth
+- Config: configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25
+ Results:
+ - Dataset: Others
+ Metrics:
+ BSD68 PSNR: 30.1377
+ Set12 PSNR: 31.6434
+ Urban100 PSNR: 31.9493
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25-d0d8d4da.pth
+- Config: configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50
+ Results:
+ - Dataset: Others
+ Metrics:
+ BSD68 PSNR: 27.3157
+ Set12 PSNR: 28.5651
+ Urban100 PSNR: 28.6626
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50-54c9968a.pth
+- Config: configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15
+ Results:
+ - Dataset: Others
+ Metrics:
+ CBSD68 PSNR: 34.4136
+ Kodak24 PSNR: 35.3555
+ McMaster PSNR: 35.6205
+ Urban100 PSNR: 35.1836
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15-c74a2cee.pth
+- Config: configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25
+ Results:
+ - Dataset: Others
+ Metrics:
+ CBSD68 PSNR: 31.7626
+ Kodak24 PSNR: 32.9003
+ McMaster PSNR: 33.3198
+ Urban100 PSNR: 32.9458
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25-df2b1c0c.pth
+- Config: configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50
+ Results:
+ - Dataset: Others
+ Metrics:
+ CBSD68 PSNR: 28.5346
+ Kodak24 PSNR: 29.8058
+ McMaster PSNR: 30.2027
+ Urban100 PSNR: 29.8832
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50-e369874c.pth
+- Config: configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10
+ Results:
+ - Dataset: Others
+ Metrics:
+ Classic5 PSNR: 30.2746
+ Classic5 SSIM: 0.8254
+ LIVE1 PSNR: 29.8611
+ LIVE1 SSIM: 0.8292
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10-da93c8e9.pth
+- Config: configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20
+ Results:
+ - Dataset: Others
+ Metrics:
+ Classic5 PSNR: 32.5331
+ Classic5 SSIM: 0.8753
+ LIVE1 PSNR: 32.2667
+ LIVE1 SSIM: 0.8914
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20-d47367b1.pth
+- Config: configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30
+ Results:
+ - Dataset: Others
+ Metrics:
+ Classic5 PSNR: 33.7504
+ Classic5 SSIM: 0.8966
+ LIVE1 PSNR: 33.7001
+ LIVE1 SSIM: 0.9179
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30-52c083cf.pth
+- Config: configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40
+ Results:
+ - Dataset: Others
+ Metrics:
+ Classic5 PSNR: 34.5377
+ Classic5 SSIM: 0.9087
+ LIVE1 PSNR: 34.6846
+ LIVE1 SSIM: 0.9322
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40-803e8d9b.pth
+- Config: configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10
+ Results:
+ - Dataset: Others
+ Metrics:
+ Classic5 PSNR: 30.1019
+ Classic5 SSIM: 0.8217
+ LIVE1 PSNR: 28.0676
+ LIVE1 SSIM: 0.8094
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10-09aafadc.pth
+- Config: configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20
+ Results:
+ - Dataset: Others
+ Metrics:
+ Classic5 PSNR: 32.3489
+ Classic5 SSIM: 0.8727
+ LIVE1 PSNR: 30.4514
+ LIVE1 SSIM: 0.8745
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20-b8a42b5e.pth
+- Config: configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30
+ Results:
+ - Dataset: Others
+ Metrics:
+ Classic5 PSNR: 33.6028
+ Classic5 SSIM: 0.8949
+ LIVE1 PSNR: 31.8235
+ LIVE1 SSIM: 0.9023
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30-e9fe6859.pth
+- Config: configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py
+ In Collection: SwinIR
+ Metadata:
+ GPUs: '8'
+ Training Data: Others
+ Name: swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40
+ Results:
+ - Dataset: Others
+ Metrics:
+ Classic5 PSNR: 34.4344
+ Classic5 SSIM: 0.9076
+ LIVE1 PSNR: 32.761
+ LIVE1 SSIM: 0.9179
+ Task: Image Super-Resolution, Image denoising, JPEG compression artifact reduction
+ Weights: https://download.openmmlab.com/mmediting/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40-5b77a6e6.pth
diff --git a/configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py b/configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
new file mode 100644
index 0000000000..b527c7b70a
--- /dev/null
+++ b/configs/swinir/swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
@@ -0,0 +1,5 @@
+_base_ = ['swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py']
+
+experiment_name = 'swinir_gan-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
diff --git a/configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py b/configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
new file mode 100644
index 0000000000..fff5e6b4a5
--- /dev/null
+++ b/configs/swinir/swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
@@ -0,0 +1,5 @@
+_base_ = ['swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py']
+
+experiment_name = 'swinir_gan-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
diff --git a/configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py b/configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
new file mode 100644
index 0000000000..86bd2a9175
--- /dev/null
+++ b/configs/swinir/swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
@@ -0,0 +1,5 @@
+_base_ = ['swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py']
+
+experiment_name = 'swinir_gan-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
diff --git a/configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py b/configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
new file mode 100644
index 0000000000..ee1dc77bfb
--- /dev/null
+++ b/configs/swinir/swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
@@ -0,0 +1,61 @@
+_base_ = ['../_base_/default_runtime.py']
+
+experiment_name = 'swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+scale = 2
+img_size = 64
+
+# model settings
+model = dict(
+ type='BaseEditModel',
+ generator=dict(
+ type='SwinIRNet',
+ upscale=scale,
+ in_chans=3,
+ img_size=img_size,
+ window_size=8,
+ img_range=1.0,
+ depths=[6, 6, 6, 6, 6, 6],
+ embed_dim=180,
+ num_heads=[6, 6, 6, 6, 6, 6],
+ mlp_ratio=2,
+ upsampler='nearest+conv',
+ resi_connection='1conv'),
+ pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
+ data_preprocessor=dict(
+ type='EditDataPreprocessor', mean=[0., 0., 0.], std=[255., 255.,
+ 255.]))
+
+test_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(type='PackEditInputs')
+]
+
+test_dataloader = dict(
+ num_workers=4,
+ persistent_workers=False,
+ drop_last=False,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type='BasicImageDataset',
+ metainfo=dict(dataset_type='realsrset', task_name='realsr'),
+ data_root='data/RealSRSet+5images',
+ data_prefix=dict(img='', gt=''),
+ pipeline=test_pipeline))
+
+test_evaluator = [dict(type='NIQE', input_order='CHW', convert_to='Y')]
+
+test_cfg = dict(type='TestLoop')
diff --git a/configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py b/configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
new file mode 100644
index 0000000000..2f8e0451ec
--- /dev/null
+++ b/configs/swinir/swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py
@@ -0,0 +1,10 @@
+_base_ = ['swinir_psnr-x2s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py']
+
+experiment_name = 'swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+scale = 4
+
+# model settings
+model = dict(generator=dict(upscale=scale))
diff --git a/configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py b/configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
new file mode 100644
index 0000000000..d84a851aa3
--- /dev/null
+++ b/configs/swinir/swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost.py
@@ -0,0 +1,13 @@
+_base_ = ['swinir_psnr-x4s64w8d6e180_8xb4-lr1e-4-600k_df2k-ost.py']
+
+experiment_name = 'swinir_psnr-x4s64w8d9e240_8xb4-lr1e-4-600k_df2k-ost'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# model settings
+model = dict(
+ generator=dict(
+ depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],
+ embed_dim=240,
+ num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
+ resi_connection='3conv'))
diff --git a/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py
new file mode 100644
index 0000000000..7eea2d978c
--- /dev/null
+++ b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py
@@ -0,0 +1,134 @@
+_base_ = [
+ '../_base_/default_runtime.py',
+ '../_base_/datasets/decompression_test_config.py'
+]
+
+experiment_name = 'swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+quality = 10
+
+# model settings
+model = dict(
+ type='BaseEditModel',
+ generator=dict(
+ type='SwinIRNet',
+ upscale=1,
+ in_chans=3,
+ img_size=126,
+ window_size=7,
+ img_range=255.0,
+ depths=[6, 6, 6, 6, 6, 6],
+ embed_dim=180,
+ num_heads=[6, 6, 6, 6, 6, 6],
+ mlp_ratio=2,
+ upsampler='',
+ resi_connection='1conv'),
+ pixel_loss=dict(type='CharbonnierLoss', eps=1e-9),
+ data_preprocessor=dict(
+ type='EditDataPreprocessor', mean=[0., 0., 0.], std=[255., 255.,
+ 255.]))
+
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(type='SetValues', dictionary=dict(scale=1)),
+ dict(type='PairedRandomCrop', gt_patch_size=126),
+ dict(
+ type='Flip',
+ keys=['img', 'gt'],
+ flip_ratio=0.5,
+ direction='horizontal'),
+ dict(
+ type='Flip', keys=['img', 'gt'], flip_ratio=0.5, direction='vertical'),
+ dict(type='RandomTransposeHW', keys=['img', 'gt'], transpose_ratio=0.5),
+ dict(
+ type='RandomJPEGCompression',
+ params=dict(quality=[quality, quality], color_type='color'),
+ bgr2rgb=True, keys=['img']),
+ dict(type='PackEditInputs')
+]
+
+val_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='RandomJPEGCompression',
+ params=dict(quality=[quality, quality], color_type='color'),
+ bgr2rgb=True, keys=['img']),
+ dict(type='PackEditInputs')
+]
+
+# dataset settings
+dataset_type = 'BasicImageDataset'
+data_root = 'data'
+
+train_dataloader = dict(
+ num_workers=2,
+ batch_size=1,
+ drop_last=True,
+ persistent_workers=False,
+ sampler=dict(type='InfiniteSampler', shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ ann_file='meta_info_DFWB8550sub_GT.txt',
+ metainfo=dict(dataset_type='dfwb', task_name='CAR'),
+ data_root=data_root + '/DFWB',
+ data_prefix=dict(img='', gt=''),
+ filename_tmpl=dict(img='{}', gt='{}'),
+ pipeline=train_pipeline))
+
+val_dataloader = dict(
+ num_workers=2,
+ persistent_workers=False,
+ drop_last=False,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ metainfo=dict(dataset_type='live1', task_name='CAR'),
+ data_root=data_root + '/LIVE1',
+ data_prefix=dict(img='', gt=''),
+ pipeline=val_pipeline))
+
+val_evaluator = [
+ dict(type='PSNR', prefix='LIVE1'),
+ dict(type='SSIM', prefix='LIVE1'),
+]
+
+train_cfg = dict(
+ type='IterBasedTrainLoop', max_iters=1_600_000, val_interval=5000)
+val_cfg = dict(type='ValLoop')
+
+# optimizer
+optim_wrapper = dict(
+ constructor='DefaultOptimWrapperConstructor',
+ type='OptimWrapper',
+ optimizer=dict(type='Adam', lr=2e-4, betas=(0.9, 0.999)))
+
+# learning policy
+param_scheduler = dict(
+ type='MultiStepLR',
+ by_epoch=False,
+ milestones=[800000, 1200000, 1400000, 1500000, 1600000],
+ gamma=0.5)
diff --git a/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py
new file mode 100644
index 0000000000..f83071f0d0
--- /dev/null
+++ b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20.py
@@ -0,0 +1,20 @@
+_base_ = ['swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py']
+
+experiment_name = 'swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR20'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# modify JPEG quality factor of RandomJPEGCompression
+quality = 20
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[-2]['params']['quality'] = [quality, quality]
+
+val_dataloader = _base_.val_dataloader
+val_pipeline = val_dataloader['dataset']['pipeline']
+val_pipeline[2]['params']['quality'] = [quality, quality]
+
+test_dataloader = _base_.test_dataloader
+for dataloader in test_dataloader:
+ test_pipeline = dataloader['dataset']['pipeline']
+ test_pipeline[2]['params']['quality'] = [quality, quality]
diff --git a/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py
new file mode 100644
index 0000000000..44872b9de8
--- /dev/null
+++ b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30.py
@@ -0,0 +1,20 @@
+_base_ = ['swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py']
+
+experiment_name = 'swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR30'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# modify JPEG quality factor of RandomJPEGCompression
+quality = 30
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[-2]['params']['quality'] = [quality, quality]
+
+val_dataloader = _base_.val_dataloader
+val_pipeline = val_dataloader['dataset']['pipeline']
+val_pipeline[2]['params']['quality'] = [quality, quality]
+
+test_dataloader = _base_.test_dataloader
+for dataloader in test_dataloader:
+ test_pipeline = dataloader['dataset']['pipeline']
+ test_pipeline[2]['params']['quality'] = [quality, quality]
diff --git a/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py
new file mode 100644
index 0000000000..69ae48385a
--- /dev/null
+++ b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40.py
@@ -0,0 +1,20 @@
+_base_ = ['swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR10.py']
+
+experiment_name = 'swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-colorCAR40'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# modify JPEG quality factor of RandomJPEGCompression
+quality = 40
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[-2]['params']['quality'] = [quality, quality]
+
+val_dataloader = _base_.val_dataloader
+val_pipeline = val_dataloader['dataset']['pipeline']
+val_pipeline[2]['params']['quality'] = [quality, quality]
+
+test_dataloader = _base_.test_dataloader
+for dataloader in test_dataloader:
+ test_pipeline = dataloader['dataset']['pipeline']
+ test_pipeline[2]['params']['quality'] = [quality, quality]
diff --git a/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py
new file mode 100644
index 0000000000..d4206be1c5
--- /dev/null
+++ b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py
@@ -0,0 +1,139 @@
+_base_ = [
+ '../_base_/default_runtime.py',
+ '../_base_/datasets/decompression_test_config.py'
+]
+
+experiment_name = 'swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+quality = 10
+
+# model settings
+model = dict(
+ type='BaseEditModel',
+ generator=dict(
+ type='SwinIRNet',
+ upscale=1,
+ in_chans=1,
+ img_size=126,
+ window_size=7,
+ img_range=255.0,
+ depths=[6, 6, 6, 6, 6, 6],
+ embed_dim=180,
+ num_heads=[6, 6, 6, 6, 6, 6],
+ mlp_ratio=2,
+ upsampler='',
+ resi_connection='1conv'),
+ pixel_loss=dict(type='CharbonnierLoss', eps=1e-9),
+ data_preprocessor=dict(type='EditDataPreprocessor', mean=[0.], std=[255.]))
+
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='grayscale',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='grayscale',
+ imdecode_backend='cv2'),
+ dict(type='SetValues', dictionary=dict(scale=1)),
+ dict(type='PairedRandomCrop', gt_patch_size=126),
+ dict(
+ type='Flip',
+ keys=['img', 'gt'],
+ flip_ratio=0.5,
+ direction='horizontal'),
+ dict(
+ type='Flip', keys=['img', 'gt'], flip_ratio=0.5, direction='vertical'),
+ dict(type='RandomTransposeHW', keys=['img', 'gt'], transpose_ratio=0.5),
+ dict(
+ type='RandomJPEGCompression',
+ params=dict(quality=[quality, quality], color_type='grayscale'),
+ keys=['img']),
+ dict(type='PackEditInputs')
+]
+
+val_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='grayscale',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='grayscale',
+ imdecode_backend='cv2'),
+ dict(
+ type='RandomJPEGCompression',
+ params=dict(quality=[quality, quality], color_type='grayscale'),
+ keys=['img']),
+ dict(type='PackEditInputs')
+]
+
+# dataset settings
+dataset_type = 'BasicImageDataset'
+data_root = 'data'
+
+train_dataloader = dict(
+ num_workers=4,
+ batch_size=1,
+ drop_last=True,
+ persistent_workers=False,
+ sampler=dict(type='InfiniteSampler', shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ ann_file='meta_info_DFWB8550sub_GT.txt',
+ metainfo=dict(dataset_type='dfwb', task_name='CAR'),
+ data_root=data_root + '/DFWB',
+ data_prefix=dict(img='', gt=''),
+ filename_tmpl=dict(img='{}', gt='{}'),
+ pipeline=train_pipeline))
+
+val_dataloader = dict(
+ num_workers=4,
+ persistent_workers=False,
+ drop_last=False,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ metainfo=dict(dataset_type='classic5', task_name='CAR'),
+ data_root=data_root + '/Classic5',
+ data_prefix=dict(img='', gt=''),
+ pipeline=val_pipeline))
+
+val_evaluator = [
+ dict(type='PSNR', prefix='classic5'),
+ dict(type='SSIM', prefix='classic5'),
+]
+
+train_cfg = dict(
+ type='IterBasedTrainLoop', max_iters=1_600_000, val_interval=5000)
+val_cfg = dict(type='ValLoop')
+
+test_dataloader = _base_.test_dataloader
+for idx in range(len(test_dataloader)):
+ test_pipeline = test_dataloader[idx]['dataset']['pipeline']
+ if idx > 0:
+ test_pipeline[0]['to_y_channel'] = True
+ test_pipeline[1]['to_y_channel'] = True
+ else:
+ test_pipeline[0]['color_type'] = 'grayscale'
+ test_pipeline[1]['color_type'] = 'grayscale'
+ test_pipeline[2]['params']['color_type'] = 'grayscale'
+
+# optimizer
+optim_wrapper = dict(
+ constructor='DefaultOptimWrapperConstructor',
+ type='OptimWrapper',
+ optimizer=dict(type='Adam', lr=2e-4, betas=(0.9, 0.999)))
+
+# learning policy
+param_scheduler = dict(
+ type='MultiStepLR',
+ by_epoch=False,
+ milestones=[800000, 1200000, 1400000, 1500000, 1600000],
+ gamma=0.5)
diff --git a/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py
new file mode 100644
index 0000000000..f3ea47b95f
--- /dev/null
+++ b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20.py
@@ -0,0 +1,20 @@
+_base_ = ['swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py']
+
+experiment_name = 'swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR20'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# modify JPEG quality factor of RandomJPEGCompression
+quality = 20
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[-2]['params']['quality'] = [quality, quality]
+
+val_dataloader = _base_.val_dataloader
+val_pipeline = val_dataloader['dataset']['pipeline']
+val_pipeline[2]['params']['quality'] = [quality, quality]
+
+test_dataloader = _base_.test_dataloader
+for dataloader in test_dataloader:
+ test_pipeline = dataloader['dataset']['pipeline']
+ test_pipeline[2]['params']['quality'] = [quality, quality]
diff --git a/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py
new file mode 100644
index 0000000000..6054939a18
--- /dev/null
+++ b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30.py
@@ -0,0 +1,20 @@
+_base_ = ['swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py']
+
+experiment_name = 'swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR30'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# modify JPEG quality factor of RandomJPEGCompression
+quality = 30
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[-2]['params']['quality'] = [quality, quality]
+
+val_dataloader = _base_.val_dataloader
+val_pipeline = val_dataloader['dataset']['pipeline']
+val_pipeline[2]['params']['quality'] = [quality, quality]
+
+test_dataloader = _base_.test_dataloader
+for dataloader in test_dataloader:
+ test_pipeline = dataloader['dataset']['pipeline']
+ test_pipeline[2]['params']['quality'] = [quality, quality]
diff --git a/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py
new file mode 100644
index 0000000000..4b0d6d6f02
--- /dev/null
+++ b/configs/swinir/swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40.py
@@ -0,0 +1,20 @@
+_base_ = ['swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR10.py']
+
+experiment_name = 'swinir_s126w7d6e180_8xb1-lr2e-4-1600k_dfwb-grayCAR40'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# modify JPEG quality factor of RandomJPEGCompression
+quality = 40
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[-2]['params']['quality'] = [quality, quality]
+
+val_dataloader = _base_.val_dataloader
+val_pipeline = val_dataloader['dataset']['pipeline']
+val_pipeline[2]['params']['quality'] = [quality, quality]
+
+test_dataloader = _base_.test_dataloader
+for dataloader in test_dataloader:
+ test_pipeline = dataloader['dataset']['pipeline']
+ test_pipeline[2]['params']['quality'] = [quality, quality]
diff --git a/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py b/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py
new file mode 100644
index 0000000000..469e4c6e8b
--- /dev/null
+++ b/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py
@@ -0,0 +1,147 @@
+_base_ = [
+ '../_base_/default_runtime.py',
+ '../_base_/datasets/denoising-gaussian_color_test_config.py'
+]
+
+experiment_name = 'swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# modify sigma of RandomNoise
+sigma = 15
+test_dataloader = _base_.test_dataloader
+for dataloader in test_dataloader:
+ test_pipeline = dataloader['dataset']['pipeline']
+ test_pipeline[2]['params']['gaussian_sigma'] = [sigma * 255, sigma * 255]
+
+# model settings
+model = dict(
+ type='BaseEditModel',
+ generator=dict(
+ type='SwinIRNet',
+ upscale=1,
+ in_chans=3,
+ img_size=128,
+ window_size=8,
+ img_range=1.0,
+ depths=[6, 6, 6, 6, 6, 6],
+ embed_dim=180,
+ num_heads=[6, 6, 6, 6, 6, 6],
+ mlp_ratio=2,
+ upsampler='',
+ resi_connection='1conv'),
+ pixel_loss=dict(type='CharbonnierLoss', eps=1e-9),
+ data_preprocessor=dict(
+ type='EditDataPreprocessor', mean=[0., 0., 0.], std=[255., 255.,
+ 255.]))
+
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(type='SetValues', dictionary=dict(scale=1)),
+ dict(type='PairedRandomCrop', gt_patch_size=128),
+ dict(
+ type='Flip',
+ keys=['img', 'gt'],
+ flip_ratio=0.5,
+ direction='horizontal'),
+ dict(
+ type='Flip', keys=['img', 'gt'], flip_ratio=0.5, direction='vertical'),
+ dict(type='RandomTransposeHW', keys=['img', 'gt'], transpose_ratio=0.5),
+ dict(
+ type='RandomNoise',
+ params=dict(
+ noise_type=['gaussian'],
+ noise_prob=[1],
+ gaussian_sigma=[sigma * 255, sigma * 255],
+ gaussian_gray_noise_prob=0),
+ keys=['img']),
+ dict(type='PackEditInputs')
+]
+
+val_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='RandomNoise',
+ params=dict(
+ noise_type=['gaussian'],
+ noise_prob=[1],
+ gaussian_sigma=[sigma * 255, sigma * 255],
+ gaussian_gray_noise_prob=0),
+ keys=['img']),
+ dict(type='PackEditInputs')
+]
+
+# dataset settings
+dataset_type = 'BasicImageDataset'
+data_root = 'data'
+
+train_dataloader = dict(
+ num_workers=4,
+ batch_size=1,
+ drop_last=True,
+ persistent_workers=False,
+ sampler=dict(type='InfiniteSampler', shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ ann_file='meta_info_DFWB8550sub_GT.txt',
+ metainfo=dict(dataset_type='dfwb', task_name='denoising'),
+ data_root=data_root + '/DFWB',
+ data_prefix=dict(img='', gt=''),
+ filename_tmpl=dict(img='{}', gt='{}'),
+ pipeline=train_pipeline))
+
+val_dataloader = dict(
+ num_workers=4,
+ persistent_workers=False,
+ drop_last=False,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ metainfo=dict(dataset_type='mcmaster', task_name='denoising'),
+ data_root=data_root + '/McMaster',
+ data_prefix=dict(img='', gt=''),
+ pipeline=val_pipeline))
+
+val_evaluator = [
+ dict(type='PSNR', prefix='McMaster'),
+ dict(type='SSIM', prefix='McMaster'),
+]
+
+train_cfg = dict(
+ type='IterBasedTrainLoop', max_iters=1_600_000, val_interval=5000)
+val_cfg = dict(type='ValLoop')
+
+# optimizer
+optim_wrapper = dict(
+ constructor='DefaultOptimWrapperConstructor',
+ type='OptimWrapper',
+ optimizer=dict(type='Adam', lr=2e-4, betas=(0.9, 0.999)))
+
+# learning policy
+param_scheduler = dict(
+ type='MultiStepLR',
+ by_epoch=False,
+ milestones=[800000, 1200000, 1400000, 1500000, 1600000],
+ gamma=0.5)
diff --git a/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py b/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py
new file mode 100644
index 0000000000..1b11cb3c80
--- /dev/null
+++ b/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25.py
@@ -0,0 +1,20 @@
+_base_ = ['swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py']
+
+experiment_name = 'swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN25'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# modify sigma of RandomNoise
+sigma = 25
+test_dataloader = _base_.test_dataloader
+for dataloader in test_dataloader:
+ test_pipeline = dataloader['dataset']['pipeline']
+ test_pipeline[2]['params']['gaussian_sigma'] = [sigma, sigma]
+
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[-2]['params']['gaussian_sigma'] = [sigma, sigma]
+
+val_dataloader = _base_.val_dataloader
+val_pipeline = val_dataloader['dataset']['pipeline']
+val_pipeline[2]['params']['gaussian_sigma'] = [sigma, sigma]
diff --git a/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py b/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py
new file mode 100644
index 0000000000..c57d636a71
--- /dev/null
+++ b/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50.py
@@ -0,0 +1,20 @@
+_base_ = ['swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN15.py']
+
+experiment_name = 'swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-colorDN50'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# modify sigma of RandomNoise
+sigma = 50
+test_dataloader = _base_.test_dataloader
+for dataloader in test_dataloader:
+ test_pipeline = dataloader['dataset']['pipeline']
+ test_pipeline[2]['params']['gaussian_sigma'] = [sigma, sigma]
+
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[-2]['params']['gaussian_sigma'] = [sigma, sigma]
+
+val_dataloader = _base_.val_dataloader
+val_pipeline = val_dataloader['dataset']['pipeline']
+val_pipeline[2]['params']['gaussian_sigma'] = [sigma, sigma]
diff --git a/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py b/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py
new file mode 100644
index 0000000000..2f765c3c3a
--- /dev/null
+++ b/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py
@@ -0,0 +1,141 @@
+_base_ = [
+ '../_base_/default_runtime.py',
+ '../_base_/datasets/denoising-gaussian_gray_test_config.py'
+]
+
+experiment_name = 'swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# modify sigma of RandomNoise
+sigma = 15
+test_dataloader = _base_.test_dataloader
+for dataloader in test_dataloader:
+ test_pipeline = dataloader['dataset']['pipeline']
+ test_pipeline[2]['params']['gaussian_sigma'] = [sigma, sigma]
+
+# model settings
+model = dict(
+ type='BaseEditModel',
+ generator=dict(
+ type='SwinIRNet',
+ upscale=1,
+ in_chans=1,
+ img_size=128,
+ window_size=8,
+ img_range=1.0,
+ depths=[6, 6, 6, 6, 6, 6],
+ embed_dim=180,
+ num_heads=[6, 6, 6, 6, 6, 6],
+ mlp_ratio=2,
+ upsampler='',
+ resi_connection='1conv'),
+ pixel_loss=dict(type='CharbonnierLoss', eps=1e-9),
+ data_preprocessor=dict(type='EditDataPreprocessor', mean=[0.], std=[255.]))
+
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='grayscale',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='grayscale',
+ imdecode_backend='cv2'),
+ dict(type='SetValues', dictionary=dict(scale=1)),
+ dict(type='PairedRandomCrop', gt_patch_size=128),
+ dict(
+ type='Flip',
+ keys=['img', 'gt'],
+ flip_ratio=0.5,
+ direction='horizontal'),
+ dict(
+ type='Flip', keys=['img', 'gt'], flip_ratio=0.5, direction='vertical'),
+ dict(type='RandomTransposeHW', keys=['img', 'gt'], transpose_ratio=0.5),
+ dict(
+ type='RandomNoise',
+ params=dict(
+ noise_type=['gaussian'],
+ noise_prob=[1],
+ gaussian_sigma=[sigma, sigma],
+ gaussian_gray_noise_prob=0),
+ keys=['img']),
+ dict(type='PackEditInputs')
+]
+
+val_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='grayscale',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='grayscale',
+ imdecode_backend='cv2'),
+ dict(
+ type='RandomNoise',
+ params=dict(
+ noise_type=['gaussian'],
+ noise_prob=[1],
+ gaussian_sigma=[sigma, sigma],
+ gaussian_gray_noise_prob=0),
+ keys=['img']),
+ dict(type='PackEditInputs')
+]
+
+# dataset settings
+dataset_type = 'BasicImageDataset'
+data_root = 'data'
+
+train_dataloader = dict(
+ num_workers=4,
+ batch_size=1,
+ drop_last=True,
+ persistent_workers=False,
+ sampler=dict(type='InfiniteSampler', shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ ann_file='meta_info_DFWB8550sub_GT.txt',
+ metainfo=dict(dataset_type='dfwb', task_name='denoising'),
+ data_root=data_root + '/DFWB',
+ data_prefix=dict(img='', gt=''),
+ filename_tmpl=dict(img='{}', gt='{}'),
+ pipeline=train_pipeline))
+
+val_dataloader = dict(
+ num_workers=4,
+ persistent_workers=False,
+ drop_last=False,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ metainfo=dict(dataset_type='set12', task_name='denoising'),
+ data_root=data_root + '/Set12',
+ data_prefix=dict(img='', gt=''),
+ pipeline=val_pipeline))
+
+val_evaluator = [
+ dict(type='PSNR', prefix='Set12'),
+ dict(type='SSIM', prefix='Set12'),
+]
+
+train_cfg = dict(
+ type='IterBasedTrainLoop', max_iters=1_600_000, val_interval=5000)
+val_cfg = dict(type='ValLoop')
+
+# optimizer
+optim_wrapper = dict(
+ constructor='DefaultOptimWrapperConstructor',
+ type='OptimWrapper',
+ optimizer=dict(type='Adam', lr=2e-4, betas=(0.9, 0.999)))
+
+# learning policy
+param_scheduler = dict(
+ type='MultiStepLR',
+ by_epoch=False,
+ milestones=[800000, 1200000, 1400000, 1500000, 1600000],
+ gamma=0.5)
diff --git a/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py b/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py
new file mode 100644
index 0000000000..34f364400b
--- /dev/null
+++ b/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25.py
@@ -0,0 +1,20 @@
+_base_ = ['swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py']
+
+experiment_name = 'swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN25'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# modify sigma of RandomNoise
+sigma = 25
+test_dataloader = _base_.test_dataloader
+for dataloader in test_dataloader:
+ test_pipeline = dataloader['dataset']['pipeline']
+ test_pipeline[2]['params']['gaussian_sigma'] = [sigma, sigma]
+
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[-2]['params']['gaussian_sigma'] = [sigma, sigma]
+
+val_dataloader = _base_.val_dataloader
+val_pipeline = val_dataloader['dataset']['pipeline']
+val_pipeline[2]['params']['gaussian_sigma'] = [sigma, sigma]
diff --git a/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py b/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py
new file mode 100644
index 0000000000..5f96a5ae6d
--- /dev/null
+++ b/configs/swinir/swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50.py
@@ -0,0 +1,20 @@
+_base_ = ['swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN15.py']
+
+experiment_name = 'swinir_s128w8d6e180_8xb1-lr2e-4-1600k_dfwb-grayDN50'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+# modify sigma of RandomNoise
+sigma = 50
+test_dataloader = _base_.test_dataloader
+for dataloader in test_dataloader:
+ test_pipeline = dataloader['dataset']['pipeline']
+ test_pipeline[2]['params']['gaussian_sigma'] = [sigma, sigma]
+
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[-2]['params']['gaussian_sigma'] = [sigma, sigma]
+
+val_dataloader = _base_.val_dataloader
+val_pipeline = val_dataloader['dataset']['pipeline']
+val_pipeline[2]['params']['gaussian_sigma'] = [sigma, sigma]
diff --git a/configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py b/configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
new file mode 100644
index 0000000000..7abface52f
--- /dev/null
+++ b/configs/swinir/swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
@@ -0,0 +1,133 @@
+_base_ = [
+ '../_base_/default_runtime.py', '../_base_/datasets/sisr_x2_test_config.py'
+]
+
+experiment_name = 'swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+scale = 2
+img_size = 48
+
+# evaluated on Y channels
+test_evaluator = _base_.test_evaluator
+for evaluator in test_evaluator:
+ for metric in evaluator:
+ metric['convert_to'] = 'Y'
+
+# model settings
+model = dict(
+ type='BaseEditModel',
+ generator=dict(
+ type='SwinIRNet',
+ upscale=scale,
+ in_chans=3,
+ img_size=img_size,
+ window_size=8,
+ img_range=1.0,
+ depths=[6, 6, 6, 6, 6, 6],
+ embed_dim=180,
+ num_heads=[6, 6, 6, 6, 6, 6],
+ mlp_ratio=2,
+ upsampler='pixelshuffle',
+ resi_connection='1conv'),
+ pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
+ data_preprocessor=dict(
+ type='EditDataPreprocessor', mean=[0., 0., 0.], std=[255., 255.,
+ 255.]))
+
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(type='SetValues', dictionary=dict(scale=scale)),
+ dict(type='PairedRandomCrop', gt_patch_size=img_size * scale),
+ dict(
+ type='Flip',
+ keys=['img', 'gt'],
+ flip_ratio=0.5,
+ direction='horizontal'),
+ dict(
+ type='Flip', keys=['img', 'gt'], flip_ratio=0.5, direction='vertical'),
+ dict(type='RandomTransposeHW', keys=['img', 'gt'], transpose_ratio=0.5),
+ dict(type='PackEditInputs')
+]
+
+val_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(type='PackEditInputs')
+]
+
+# dataset settings
+dataset_type = 'BasicImageDataset'
+data_root = 'data'
+
+train_dataloader = dict(
+ num_workers=4,
+ batch_size=4,
+ drop_last=True,
+ persistent_workers=False,
+ sampler=dict(type='InfiniteSampler', shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ ann_file='meta_info_DIV2K800sub_GT.txt',
+ metainfo=dict(dataset_type='div2k', task_name='sisr'),
+ data_root=data_root + '/DIV2K',
+ data_prefix=dict(
+ img='DIV2K_train_LR_bicubic/X2_sub', gt='DIV2K_train_HR_sub'),
+ filename_tmpl=dict(img='{}', gt='{}'),
+ pipeline=train_pipeline))
+
+val_dataloader = dict(
+ num_workers=4,
+ persistent_workers=False,
+ drop_last=False,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ metainfo=dict(dataset_type='set5', task_name='sisr'),
+ data_root=data_root + '/Set5',
+ data_prefix=dict(img='LRbicx2', gt='GTmod12'),
+ pipeline=val_pipeline))
+
+val_evaluator = [
+ dict(type='PSNR', crop_border=scale),
+ dict(type='SSIM', crop_border=scale),
+]
+
+train_cfg = dict(
+ type='IterBasedTrainLoop', max_iters=500_000, val_interval=5000)
+val_cfg = dict(type='ValLoop')
+
+# optimizer
+optim_wrapper = dict(
+ constructor='DefaultOptimWrapperConstructor',
+ type='OptimWrapper',
+ optimizer=dict(type='Adam', lr=2e-4, betas=(0.9, 0.999)))
+
+# learning policy
+param_scheduler = dict(
+ type='MultiStepLR',
+ by_epoch=False,
+ milestones=[250000, 400000, 450000, 475000],
+ gamma=0.5)
diff --git a/configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py b/configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
new file mode 100644
index 0000000000..88f5d001bf
--- /dev/null
+++ b/configs/swinir/swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
@@ -0,0 +1,22 @@
+_base_ = ['swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py']
+
+experiment_name = 'swinir_x2s64w8d4e60_8xb4-lr2e-4-500k_div2k'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+scale = 2
+img_size = 64
+
+# model settings
+model = dict(
+ generator=dict(
+ img_size=img_size,
+ depths=[6, 6, 6, 6],
+ embed_dim=60,
+ num_heads=[6, 6, 6, 6],
+ upsampler='pixelshuffledirect'))
+
+# modify patch size of train_dataloader
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[3]['gt_patch_size'] = img_size * scale
diff --git a/configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py b/configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
new file mode 100644
index 0000000000..475e42eef5
--- /dev/null
+++ b/configs/swinir/swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
@@ -0,0 +1,35 @@
+_base_ = ['swinir_x2s48w8d6e180_8xb4-lr2e-4-500k_div2k.py']
+
+experiment_name = 'swinir_x2s64w8d6e180_8xb4-lr2e-4-500k_df2k'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+scale = 2
+img_size = 64
+
+# model settings
+model = dict(generator=dict(img_size=img_size))
+
+# modify patch size of train_pipeline
+train_pipeline = _base_.train_pipeline
+train_pipeline[3]['gt_patch_size'] = img_size * scale
+
+# dataset settings
+dataset_type = 'BasicImageDataset'
+data_root = 'data'
+
+train_dataloader = dict(
+ num_workers=4,
+ batch_size=4,
+ drop_last=True,
+ persistent_workers=False,
+ sampler=dict(type='InfiniteSampler', shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ ann_file='meta_info_DF2K3450sub_GT.txt',
+ metainfo=dict(dataset_type='div2k', task_name='sisr'),
+ data_root=data_root + '/DF2K',
+ data_prefix=dict(
+ img='DF2K_train_LR_bicubic/X2_sub', gt='DF2K_train_HR_sub'),
+ filename_tmpl=dict(img='{}', gt='{}'),
+ pipeline=train_pipeline))
diff --git a/configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py b/configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
new file mode 100644
index 0000000000..20f8e1c533
--- /dev/null
+++ b/configs/swinir/swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
@@ -0,0 +1,133 @@
+_base_ = [
+ '../_base_/default_runtime.py', '../_base_/datasets/sisr_x3_test_config.py'
+]
+
+experiment_name = 'swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+scale = 3
+img_size = 48
+
+# evaluated on Y channels
+test_evaluator = _base_.test_evaluator
+for evaluator in test_evaluator:
+ for metric in evaluator:
+ metric['convert_to'] = 'Y'
+
+# model settings
+model = dict(
+ type='BaseEditModel',
+ generator=dict(
+ type='SwinIRNet',
+ upscale=scale,
+ in_chans=3,
+ img_size=img_size,
+ window_size=8,
+ img_range=1.0,
+ depths=[6, 6, 6, 6, 6, 6],
+ embed_dim=180,
+ num_heads=[6, 6, 6, 6, 6, 6],
+ mlp_ratio=2,
+ upsampler='pixelshuffle',
+ resi_connection='1conv'),
+ pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
+ data_preprocessor=dict(
+ type='EditDataPreprocessor', mean=[0., 0., 0.], std=[255., 255.,
+ 255.]))
+
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(type='SetValues', dictionary=dict(scale=scale)),
+ dict(type='PairedRandomCrop', gt_patch_size=img_size * scale),
+ dict(
+ type='Flip',
+ keys=['img', 'gt'],
+ flip_ratio=0.5,
+ direction='horizontal'),
+ dict(
+ type='Flip', keys=['img', 'gt'], flip_ratio=0.5, direction='vertical'),
+ dict(type='RandomTransposeHW', keys=['img', 'gt'], transpose_ratio=0.5),
+ dict(type='PackEditInputs')
+]
+
+val_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(type='PackEditInputs')
+]
+
+# dataset settings
+dataset_type = 'BasicImageDataset'
+data_root = 'data'
+
+train_dataloader = dict(
+ num_workers=4,
+ batch_size=4,
+ drop_last=True,
+ persistent_workers=False,
+ sampler=dict(type='InfiniteSampler', shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ ann_file='meta_info_DIV2K800sub_GT.txt',
+ metainfo=dict(dataset_type='div2k', task_name='sisr'),
+ data_root=data_root + '/DIV2K',
+ data_prefix=dict(
+ img='DIV2K_train_LR_bicubic/X3_sub', gt='DIV2K_train_HR_sub'),
+ filename_tmpl=dict(img='{}', gt='{}'),
+ pipeline=train_pipeline))
+
+val_dataloader = dict(
+ num_workers=4,
+ persistent_workers=False,
+ drop_last=False,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ metainfo=dict(dataset_type='set5', task_name='sisr'),
+ data_root=data_root + '/Set5',
+ data_prefix=dict(img='LRbicx3', gt='GTmod12'),
+ pipeline=val_pipeline))
+
+val_evaluator = [
+ dict(type='PSNR', crop_border=scale),
+ dict(type='SSIM', crop_border=scale),
+]
+
+train_cfg = dict(
+ type='IterBasedTrainLoop', max_iters=500_000, val_interval=5000)
+val_cfg = dict(type='ValLoop')
+
+# optimizer
+optim_wrapper = dict(
+ constructor='DefaultOptimWrapperConstructor',
+ type='OptimWrapper',
+ optimizer=dict(type='Adam', lr=2e-4, betas=(0.9, 0.999)))
+
+# learning policy
+param_scheduler = dict(
+ type='MultiStepLR',
+ by_epoch=False,
+ milestones=[250000, 400000, 450000, 475000],
+ gamma=0.5)
diff --git a/configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py b/configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
new file mode 100644
index 0000000000..6f929ec8d7
--- /dev/null
+++ b/configs/swinir/swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
@@ -0,0 +1,22 @@
+_base_ = ['swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py']
+
+experiment_name = 'swinir_x3s64w8d4e60_8xb4-lr2e-4-500k_div2k'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+scale = 3
+img_size = 64
+
+# model settings
+model = dict(
+ generator=dict(
+ img_size=img_size,
+ depths=[6, 6, 6, 6],
+ embed_dim=60,
+ num_heads=[6, 6, 6, 6],
+ upsampler='pixelshuffledirect'))
+
+# modify patch size of train_dataloader
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[3]['gt_patch_size'] = img_size * scale
diff --git a/configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py b/configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
new file mode 100644
index 0000000000..656046e9d1
--- /dev/null
+++ b/configs/swinir/swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
@@ -0,0 +1,35 @@
+_base_ = ['swinir_x3s48w8d6e180_8xb4-lr2e-4-500k_div2k.py']
+
+experiment_name = 'swinir_x3s64w8d6e180_8xb4-lr2e-4-500k_df2k'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+scale = 3
+img_size = 64
+
+# model settings
+model = dict(generator=dict(img_size=img_size))
+
+# modify patch size of train_pipeline
+train_pipeline = _base_.train_pipeline
+train_pipeline[3]['gt_patch_size'] = img_size * scale
+
+# dataset settings
+dataset_type = 'BasicImageDataset'
+data_root = 'data'
+
+train_dataloader = dict(
+ num_workers=4,
+ batch_size=4,
+ drop_last=True,
+ persistent_workers=False,
+ sampler=dict(type='InfiniteSampler', shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ ann_file='meta_info_DF2K3450sub_GT.txt',
+ metainfo=dict(dataset_type='div2k', task_name='sisr'),
+ data_root=data_root + '/DF2K',
+ data_prefix=dict(
+ img='DF2K_train_LR_bicubic/X3_sub', gt='DF2K_train_HR_sub'),
+ filename_tmpl=dict(img='{}', gt='{}'),
+ pipeline=train_pipeline))
diff --git a/configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py b/configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
new file mode 100644
index 0000000000..7409802acd
--- /dev/null
+++ b/configs/swinir/swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py
@@ -0,0 +1,133 @@
+_base_ = [
+ '../_base_/default_runtime.py', '../_base_/datasets/sisr_x4_test_config.py'
+]
+
+experiment_name = 'swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+scale = 4
+img_size = 48
+
+# evaluated on Y channels
+test_evaluator = _base_.test_evaluator
+for evaluator in test_evaluator:
+ for metric in evaluator:
+ metric['convert_to'] = 'Y'
+
+# model settings
+model = dict(
+ type='BaseEditModel',
+ generator=dict(
+ type='SwinIRNet',
+ upscale=scale,
+ in_chans=3,
+ img_size=img_size,
+ window_size=8,
+ img_range=1.0,
+ depths=[6, 6, 6, 6, 6, 6],
+ embed_dim=180,
+ num_heads=[6, 6, 6, 6, 6, 6],
+ mlp_ratio=2,
+ upsampler='pixelshuffle',
+ resi_connection='1conv'),
+ pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
+ data_preprocessor=dict(
+ type='EditDataPreprocessor', mean=[0., 0., 0.], std=[255., 255.,
+ 255.]))
+
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(type='SetValues', dictionary=dict(scale=scale)),
+ dict(type='PairedRandomCrop', gt_patch_size=img_size * scale),
+ dict(
+ type='Flip',
+ keys=['img', 'gt'],
+ flip_ratio=0.5,
+ direction='horizontal'),
+ dict(
+ type='Flip', keys=['img', 'gt'], flip_ratio=0.5, direction='vertical'),
+ dict(type='RandomTransposeHW', keys=['img', 'gt'], transpose_ratio=0.5),
+ dict(type='PackEditInputs')
+]
+
+val_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ key='img',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(
+ type='LoadImageFromFile',
+ key='gt',
+ color_type='color',
+ channel_order='rgb',
+ imdecode_backend='cv2'),
+ dict(type='PackEditInputs')
+]
+
+# dataset settings
+dataset_type = 'BasicImageDataset'
+data_root = 'data'
+
+train_dataloader = dict(
+ num_workers=4,
+ batch_size=4,
+ drop_last=True,
+ persistent_workers=False,
+ sampler=dict(type='InfiniteSampler', shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ ann_file='meta_info_DIV2K800sub_GT.txt',
+ metainfo=dict(dataset_type='div2k', task_name='sisr'),
+ data_root=data_root + '/DIV2K',
+ data_prefix=dict(
+ img='DIV2K_train_LR_bicubic/X4_sub', gt='DIV2K_train_HR_sub'),
+ filename_tmpl=dict(img='{}', gt='{}'),
+ pipeline=train_pipeline))
+
+val_dataloader = dict(
+ num_workers=4,
+ persistent_workers=False,
+ drop_last=False,
+ sampler=dict(type='DefaultSampler', shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ metainfo=dict(dataset_type='set5', task_name='sisr'),
+ data_root=data_root + '/Set5',
+ data_prefix=dict(img='LRbicx4', gt='GTmod12'),
+ pipeline=val_pipeline))
+
+val_evaluator = [
+ dict(type='PSNR', crop_border=scale),
+ dict(type='SSIM', crop_border=scale),
+]
+
+train_cfg = dict(
+ type='IterBasedTrainLoop', max_iters=500_000, val_interval=5000)
+val_cfg = dict(type='ValLoop')
+
+# optimizer
+optim_wrapper = dict(
+ constructor='DefaultOptimWrapperConstructor',
+ type='OptimWrapper',
+ optimizer=dict(type='Adam', lr=2e-4, betas=(0.9, 0.999)))
+
+# learning policy
+param_scheduler = dict(
+ type='MultiStepLR',
+ by_epoch=False,
+ milestones=[250000, 400000, 450000, 475000],
+ gamma=0.5)
diff --git a/configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py b/configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
new file mode 100644
index 0000000000..09a08ed95a
--- /dev/null
+++ b/configs/swinir/swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k.py
@@ -0,0 +1,22 @@
+_base_ = ['swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py']
+
+experiment_name = 'swinir_x4s64w8d4e60_8xb4-lr2e-4-500k_div2k'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+scale = 4
+img_size = 64
+
+# model settings
+model = dict(
+ generator=dict(
+ img_size=img_size,
+ depths=[6, 6, 6, 6],
+ embed_dim=60,
+ num_heads=[6, 6, 6, 6],
+ upsampler='pixelshuffledirect'))
+
+# modify patch size of train_dataloader
+train_dataloader = _base_.train_dataloader
+train_pipeline = train_dataloader['dataset']['pipeline']
+train_pipeline[3]['gt_patch_size'] = img_size * scale
diff --git a/configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py b/configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
new file mode 100644
index 0000000000..87cdf5e609
--- /dev/null
+++ b/configs/swinir/swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k.py
@@ -0,0 +1,35 @@
+_base_ = ['swinir_x4s48w8d6e180_8xb4-lr2e-4-500k_div2k.py']
+
+experiment_name = 'swinir_x4s64w8d6e180_8xb4-lr2e-4-500k_df2k'
+work_dir = f'./work_dirs/{experiment_name}'
+save_dir = './work_dirs/'
+
+scale = 4
+img_size = 64
+
+# model settings
+model = dict(generator=dict(img_size=img_size))
+
+# modify patch size of train_pipeline
+train_pipeline = _base_.train_pipeline
+train_pipeline[3]['gt_patch_size'] = img_size * scale
+
+# dataset settings
+dataset_type = 'BasicImageDataset'
+data_root = 'data'
+
+train_dataloader = dict(
+ num_workers=4,
+ batch_size=4,
+ drop_last=True,
+ persistent_workers=False,
+ sampler=dict(type='InfiniteSampler', shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ ann_file='meta_info_DF2K3450sub_GT.txt',
+ metainfo=dict(dataset_type='div2k', task_name='sisr'),
+ data_root=data_root + '/DF2K',
+ data_prefix=dict(
+ img='DF2K_train_LR_bicubic/X4_sub', gt='DF2K_train_HR_sub'),
+ filename_tmpl=dict(img='{}', gt='{}'),
+ pipeline=train_pipeline))
diff --git a/mmedit/datasets/transforms/random_degradations.py b/mmedit/datasets/transforms/random_degradations.py
index 65e7fb7849..1c202125aa 100644
--- a/mmedit/datasets/transforms/random_degradations.py
+++ b/mmedit/datasets/transforms/random_degradations.py
@@ -162,11 +162,13 @@ class RandomJPEGCompression:
params (dict): A dictionary specifying the degradation settings.
keys (list[str]): A list specifying the keys whose values are
modified.
+ bgr2rgb (bool): Whether to convert the channel order from BGR to RGB. Default: False.
"""
- def __init__(self, params, keys):
+ def __init__(self, params, keys, bgr2rgb=False):
self.keys = keys
self.params = params
+ self.bgr2rgb = bgr2rgb
def _apply_random_compression(self, imgs):
is_single_image = False
@@ -176,6 +178,7 @@ def _apply_random_compression(self, imgs):
# determine initial compression level and the step size
quality = self.params['quality']
+ color_type = self.params['color_type']
quality_step = self.params.get('quality_step', 0)
jpeg_param = round(np.random.uniform(quality[0], quality[1]))
@@ -183,8 +186,17 @@ def _apply_random_compression(self, imgs):
outputs = []
for img in imgs:
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_param]
- _, img_encoded = cv2.imencode('.jpg', img * 255., encode_param)
- outputs.append(np.float32(cv2.imdecode(img_encoded, 1)) / 255.)
+ if self.bgr2rgb and color_type == 'color':
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ _, img_encoded = cv2.imencode('.jpg', img, encode_param)
+
+ if color_type == 'color':
+ img_encoded = cv2.imdecode(img_encoded, 1)
+ if self.bgr2rgb:
+ img_encoded = cv2.cvtColor(img_encoded, cv2.COLOR_BGR2RGB)
+ outputs.append(img_encoded)
+ else:
+ outputs.append(cv2.imdecode(img_encoded, 0))
# update compression level
jpeg_param += np.random.uniform(-quality_step, quality_step)
@@ -238,7 +250,7 @@ def _apply_gaussian_noise(self, imgs):
Tensor: images applied gaussian noise
"""
sigma_range = self.params['gaussian_sigma']
- sigma = np.random.uniform(sigma_range[0], sigma_range[1]) / 255.
+ sigma = np.random.uniform(sigma_range[0], sigma_range[1])
sigma_step = self.params.get('gaussian_sigma_step', 0)
@@ -253,9 +265,8 @@ def _apply_gaussian_noise(self, imgs):
outputs.append(img + noise)
# update noise level
- sigma += np.random.uniform(-sigma_step, sigma_step) / 255.
- sigma = np.clip(sigma, sigma_range[0] / 255.,
- sigma_range[1] / 255.)
+ sigma += np.random.uniform(-sigma_step, sigma_step)
+ sigma = np.clip(sigma, sigma_range[0], sigma_range[1])
return outputs
@@ -274,7 +285,7 @@ def _apply_poisson_noise(self, imgs):
if is_gray_noise:
noise = cv2.cvtColor(noise[..., [2, 1, 0]], cv2.COLOR_BGR2GRAY)
noise = noise[..., np.newaxis]
- noise = np.clip((noise * 255.0).round(), 0, 255) / 255.
+ noise = np.clip((noise).round(), 0, 255)
unique_val = 2**np.ceil(np.log2(len(np.unique(noise))))
noise = np.random.poisson(noise * unique_val) / unique_val - noise
diff --git a/mmedit/models/editors/__init__.py b/mmedit/models/editors/__init__.py
index a59ad82b0a..b1add6f756 100644
--- a/mmedit/models/editors/__init__.py
+++ b/mmedit/models/editors/__init__.py
@@ -55,6 +55,7 @@
from .stylegan1 import StyleGAN1
from .stylegan2 import StyleGAN2
from .stylegan3 import StyleGAN3, StyleGAN3Generator
+from .swinir import SwinIRNet
from .tdan import TDAN, TDANNet
from .tof import TOFlowVFINet, TOFlowVSRNet, ToFResBlock
from .ttsr import LTE, TTSR, SearchTransformer, TTSRDiscriminator, TTSRNet
@@ -86,5 +87,5 @@
'StyleGAN3Generator', 'InstColorization', 'NAFBaseline',
'NAFBaselineLocal', 'NAFNet', 'NAFNetLocal', 'DDIMScheduler',
'DDPMScheduler', 'DenoisingUnet', 'ClipWrapper', 'EG3D', 'Restormer',
- 'StableDiffusion'
+ 'SwinIRNet', 'StableDiffusion'
]
diff --git a/mmedit/models/editors/swinir/__init__.py b/mmedit/models/editors/swinir/__init__.py
new file mode 100644
index 0000000000..18c102944a
--- /dev/null
+++ b/mmedit/models/editors/swinir/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .swinir_net import SwinIRNet
+
+__all__ = ['SwinIRNet']
diff --git a/mmedit/models/editors/swinir/swinir_modules.py b/mmedit/models/editors/swinir/swinir_modules.py
new file mode 100644
index 0000000000..42d6a9c6fc
--- /dev/null
+++ b/mmedit/models/editors/swinir/swinir_modules.py
@@ -0,0 +1,148 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import math
+
+import torch.nn as nn
+
+from .swinir_utils import to_2tuple
+
+
+class PatchEmbed(nn.Module):
+ r""" Image to Patch Embedding
+ Args:
+ img_size (int): Image size. Default: 224.
+ patch_size (int): Patch token size. Default: 4.
+ in_chans (int): Number of input image channels. Default: 3.
+ embed_dim (int): Number of linear projection output channels.
+ Default: 96.
+ norm_layer (nn.Module, optional): Normalization layer. Default: None
+ """
+
+ def __init__(self,
+ img_size=224,
+ patch_size=4,
+ in_chans=3,
+ embed_dim=96,
+ norm_layer=None):
+ super().__init__()
+ img_size = to_2tuple(img_size)
+ patch_size = to_2tuple(patch_size)
+ patches_resolution = [
+ img_size[0] // patch_size[0], img_size[1] // patch_size[1]
+ ]
+ self.img_size = img_size
+ self.patch_size = patch_size
+ self.patches_resolution = patches_resolution
+ self.num_patches = patches_resolution[0] * patches_resolution[1]
+
+ self.in_chans = in_chans
+ self.embed_dim = embed_dim
+
+ if norm_layer is not None:
+ self.norm = norm_layer(embed_dim)
+ else:
+ self.norm = None
+
+ def forward(self, x):
+ """Forward function.
+
+ Args:
+ x (Tensor): Input tensor with shape (B, C, Ph, Pw).
+
+ Returns:
+ Tensor: Forward results.
+ """
+ x = x.flatten(2).transpose(1, 2) # B Ph*Pw C
+ if self.norm is not None:
+ x = self.norm(x)
+ return x
+
+
+class PatchUnEmbed(nn.Module):
+ r""" Image to Patch Unembedding
+ Args:
+ img_size (int): Image size. Default: 224.
+ patch_size (int): Patch token size. Default: 4.
+ in_chans (int): Number of input image channels. Default: 3.
+ embed_dim (int): Number of linear projection output channels.
+ Default: 96.
+ norm_layer (nn.Module, optional): Normalization layer. Default: None
+ """
+
+ def __init__(self,
+ img_size=224,
+ patch_size=4,
+ in_chans=3,
+ embed_dim=96,
+ norm_layer=None):
+ super().__init__()
+ img_size = to_2tuple(img_size)
+ patch_size = to_2tuple(patch_size)
+ patches_resolution = [
+ img_size[0] // patch_size[0], img_size[1] // patch_size[1]
+ ]
+ self.img_size = img_size
+ self.patch_size = patch_size
+ self.patches_resolution = patches_resolution
+ self.num_patches = patches_resolution[0] * patches_resolution[1]
+
+ self.in_chans = in_chans
+ self.embed_dim = embed_dim
+
+ def forward(self, x, x_size):
+ """Forward function.
+
+ Args:
+ x (Tensor): Input tensor with shape (B, L, C).
+ x_size (tuple[int]): Resolution of input feature.
+
+ Returns:
+ Tensor: Forward results.
+ """
+ B, HW, C = x.shape
+ x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0],
+ x_size[1]) # B C Ph Pw
+ return x
+
+
+class Upsample(nn.Sequential):
+ """Upsample module.
+
+ Args:
+ scale (int): Scale factor. Supported scales: 2^n and 3.
+ num_feat (int): Channel number of intermediate features.
+ """
+
+ def __init__(self, scale, num_feat):
+ m = []
+ if (scale & (scale - 1)) == 0: # scale = 2^n
+ for _ in range(int(math.log(scale, 2))):
+ m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
+ m.append(nn.PixelShuffle(2))
+ elif scale == 3:
+ m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
+ m.append(nn.PixelShuffle(3))
+ else:
+ raise ValueError(f'scale {scale} is not supported. '
+ 'Supported scales: 2^n and 3.')
+ super(Upsample, self).__init__(*m)
+
+
+class UpsampleOneStep(nn.Sequential):
+ """UpsampleOneStep module (the difference with Upsample is that it always
+ only has 1conv + 1pixelshuffle) Used in lightweight SR to save parameters.
+
+ Args:
+ scale (int): Scale factor. Supported scales: 2^n and 3.
+ num_feat (int): Channel number of intermediate features.
+ num_out_ch (int): Channel number for PixelShuffle.
+ input_resolution (tuple[int], optional): Input resolution.
+ Default: None
+ """
+
+ def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
+ self.num_feat = num_feat
+ self.input_resolution = input_resolution
+ m = []
+ m.append(nn.Conv2d(num_feat, (scale**2) * num_out_ch, 3, 1, 1))
+ m.append(nn.PixelShuffle(scale))
+ super(UpsampleOneStep, self).__init__(*m)
diff --git a/mmedit/models/editors/swinir/swinir_net.py b/mmedit/models/editors/swinir/swinir_net.py
new file mode 100644
index 0000000000..859cfa634b
--- /dev/null
+++ b/mmedit/models/editors/swinir/swinir_net.py
@@ -0,0 +1,307 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmengine.model import BaseModule
+from mmengine.model.weight_init import trunc_normal_
+
+from mmedit.registry import MODELS
+from .swinir_modules import PatchEmbed, PatchUnEmbed, Upsample, UpsampleOneStep
+from .swinir_rstb import RSTB
+
+
@MODELS.register_module()
class SwinIRNet(BaseModule):
    r""" SwinIR
    A PyTorch impl of: `SwinIR: Image Restoration Using Swin Transformer`,
    based on Swin Transformer.
    Ref repo: https://github.com/JingyunLiang/SwinIR

    Args:
        img_size (int | tuple(int)): Input image size. Default 64
        patch_size (int | tuple(int)): Patch size. Default: 1
        in_chans (int): Number of input image channels. Default: 3
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
            Default: (6, 6, 6, 6)
        num_heads (tuple(int)): Number of attention heads in different layers.
            Default: (6, 6, 6, 6)
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value.
            Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
            Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the
            patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding.
            Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory.
            Default: False
        upscale (int): Upscale factor. 2/3/4/8 for image SR, 1 for denoising
            and compress artifact reduction. Default: 2
        img_range (float): Image range. 1. or 255. Default: 1.0
        upsampler (string, optional): The reconstruction module.
            'pixelshuffle' / 'pixelshuffledirect' /'nearest+conv'/None.
            Default: ''
        resi_connection (string): The convolutional block before residual
            connection. '1conv'/'3conv'. Default: '1conv'
    """

    def __init__(self,
                 img_size=64,
                 patch_size=1,
                 in_chans=3,
                 embed_dim=96,
                 depths=(6, 6, 6, 6),
                 num_heads=(6, 6, 6, 6),
                 window_size=7,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm,
                 ape=False,
                 patch_norm=True,
                 use_checkpoint=False,
                 upscale=2,
                 img_range=1.,
                 upsampler='',
                 resi_connection='1conv',
                 **kwargs):
        # NOTE: **kwargs silently ignores unrecognized config keys.
        super(SwinIRNet, self).__init__()
        num_in_ch = in_chans
        num_out_ch = in_chans
        num_feat = 64
        self.img_range = img_range
        if in_chans == 3:
            # RGB mean used to center inputs before processing
            # (restored in forward()).
            rgb_mean = (0.4488, 0.4371, 0.4040)
            self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
        else:
            self.mean = torch.zeros(1, 1, 1, 1)
        self.upscale = upscale
        self.upsampler = upsampler
        self.window_size = window_size

        # 1, shallow feature extraction
        self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)

        # 2, deep feature extraction
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.num_features = embed_dim
        self.mlp_ratio = mlp_ratio

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=embed_dim,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution

        # merge non-overlapping patches into image
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=embed_dim,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)

        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(
                torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth decay rule: drop-path rate increases linearly
        # from 0 to drop_path_rate over all blocks of all layers
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
        ]

        # build Residual Swin Transformer blocks (RSTB)
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = RSTB(
                dim=embed_dim,
                input_resolution=(patches_resolution[0],
                                  patches_resolution[1]),
                depth=depths[i_layer],
                num_heads=num_heads[i_layer],
                window_size=window_size,
                mlp_ratio=self.mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                # slice of the global decay schedule for this layer's blocks
                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                norm_layer=norm_layer,
                downsample=None,
                use_checkpoint=use_checkpoint,
                img_size=img_size,
                patch_size=patch_size,
                resi_connection=resi_connection)
            self.layers.append(layer)
        self.norm = norm_layer(self.num_features)

        # build the last conv layer in deep feature extraction
        if resi_connection == '1conv':
            self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
        elif resi_connection == '3conv':
            # to save parameters and memory
            self.conv_after_body = nn.Sequential(
                nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))

        # 3, high quality image reconstruction
        if self.upsampler == 'pixelshuffle':
            # for classical SR
            self.conv_before_upsample = nn.Sequential(
                nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
                nn.LeakyReLU(inplace=True))
            self.upsample = Upsample(upscale, num_feat)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR (to save parameters)
            self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
                                            (patches_resolution[0],
                                             patches_resolution[1]))
        elif self.upsampler == 'nearest+conv':
            # for real-world SR (less artifacts)
            self.conv_before_upsample = nn.Sequential(
                nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
                nn.LeakyReLU(inplace=True))
            self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            # NOTE(review): this branch only builds conv_up2 for x4; other
            # scales fall through to a single x2 nearest-upsample in forward.
            if self.upscale == 4:
                self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
            self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        else:
            # for image denoising and JPEG compression artifact reduction
            self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Initialize Linear and LayerNorm sub-modules (applied recursively)."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names that should be excluded from weight decay."""
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        """Parameter-name keywords that should be excluded from weight decay."""
        return {'relative_position_bias_table'}

    def check_image_size(self, x):
        """Pad images so both spatial dims are multiples of the window size.

        Args:
            x (Tensor): Input tensor image with (B, C, H, W) shape.

        Returns:
            Tensor: Reflect-padded image.
        """
        _, _, h, w = x.size()
        mod_pad_h = (self.window_size -
                     h % self.window_size) % self.window_size
        mod_pad_w = (self.window_size -
                     w % self.window_size) % self.window_size
        # NOTE(review): 'reflect' padding requires pad < input dim; inputs
        # smaller than window_size would fail here — confirm callers' minimum.
        x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
        return x

    def forward_features(self, x):
        """Forward function of Deep Feature Extraction.

        Args:
            x (Tensor): Input tensor with shape (B, C, H, W).

        Returns:
            Tensor: Forward results.
        """
        x_size = (x.shape[2], x.shape[3])
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)

        for layer in self.layers:
            x = layer(x, x_size)

        x = self.norm(x)  # B L C
        x = self.patch_unembed(x, x_size)

        return x

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (B, C, H, W).

        Returns:
            Tensor: Forward results.
        """
        H, W = x.shape[2:]
        x = self.check_image_size(x)

        # normalize input; the mean tensor follows x's device/dtype lazily
        self.mean = self.mean.type_as(x)
        x = (x - self.mean) * self.img_range

        if self.upsampler == 'pixelshuffle':
            # for classical SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.conv_last(self.upsample(x))
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.upsample(x)
        elif self.upsampler == 'nearest+conv':
            # for real-world SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.lrelu(
                self.conv_up1(
                    torch.nn.functional.interpolate(
                        x, scale_factor=2, mode='nearest')))
            if self.upscale == 4:
                x = self.lrelu(
                    self.conv_up2(
                        torch.nn.functional.interpolate(
                            x, scale_factor=2, mode='nearest')))
            x = self.conv_last(self.lrelu(self.conv_hr(x)))
        else:
            # for image denoising and JPEG compression artifact reduction:
            # global residual connection around the whole network
            x_first = self.conv_first(x)
            res = self.conv_after_body(
                self.forward_features(x_first)) + x_first
            x = x + self.conv_last(res)

        # undo input normalization
        x = x / self.img_range + self.mean

        # crop away the window-size padding added by check_image_size
        return x[:, :, :H * self.upscale, :W * self.upscale]
diff --git a/mmedit/models/editors/swinir/swinir_rstb.py b/mmedit/models/editors/swinir/swinir_rstb.py
new file mode 100644
index 0000000000..48b0b56d3b
--- /dev/null
+++ b/mmedit/models/editors/swinir/swinir_rstb.py
@@ -0,0 +1,583 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+import torch
+import torch.nn as nn
+import torch.utils.checkpoint as checkpoint
+from mmengine.model.weight_init import trunc_normal_
+
+from .swinir_modules import PatchEmbed, PatchUnEmbed
+from .swinir_utils import (drop_path, to_2tuple, window_partition,
+ window_reverse)
+
+
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of
    residual blocks)."""

    def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
        super().__init__()
        self.drop_prob = drop_prob
        self.scale_by_keep = scale_by_keep

    def forward(self, x):
        """Randomly zero whole samples of ``x`` during training.

        Args:
            x (Tensor): Input tensor with shape (B, ...).

        Returns:
            Tensor: Tensor with the same shape as ``x``.
        """
        # Identity at inference time or when no drop probability is set.
        if self.drop_prob == 0. or not self.training:
            return x
        keep_prob = 1 - self.drop_prob
        # One Bernoulli draw per sample, broadcast over all remaining dims.
        mask_shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
        mask = x.new_empty(mask_shape).bernoulli_(keep_prob)
        if keep_prob > 0.0 and self.scale_by_keep:
            # Rescale so the expected activation magnitude is preserved.
            mask.div_(keep_prob)
        return x * mask

    def extra_repr(self):
        return f'drop_prob={round(self.drop_prob, 3):0.3f}'
+
+
class Mlp(nn.Module):
    """Multilayer Perceptron layer.

    Two linear projections with an activation in between; dropout follows
    each projection.

    Args:
        in_features (int): Number of input channels.
        hidden_features (int | None, optional): Number of hidden layer
            channels. Default: None
        out_features (int | None, optional): Number of output channels.
            Default: None
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        drop (float, optional): Dropout ratio of attention weight. Default: 0.0
    """

    def __init__(self,
                 in_features,
                 hidden_features=None,
                 out_features=None,
                 act_layer=nn.GELU,
                 drop=0.):
        super().__init__()
        # Falsy (None) hidden/output sizes default to the input size.
        out_dim = out_features or in_features
        hidden_dim = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_dim)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_dim, out_dim)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Apply fc1 -> activation -> dropout -> fc2 -> dropout.

        Args:
            x (Tensor): Input tensor with shape (B, L, C).

        Returns:
            Tensor: Forward results.
        """
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
+
+
class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA)
    module with relative position bias.
    It supports both of shifted and non-shifted window.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to
            query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale
            of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight.
            Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self,
                 dim,
                 window_size,
                 num_heads,
                 qkv_bias=True,
                 qk_scale=None,
                 attn_drop=0.,
                 proj_drop=0.):

        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # per-head attention scale; qk_scale overrides the default if given
        self.scale = qk_scale or head_dim**-0.5

        # define a parameter table of relative position bias
        # 2*Wh-1 * 2*Ww-1, nH
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1),
                        num_heads))

        # get pair-wise relative position index
        # for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        # pairwise (dh, dw) offsets between every two tokens in the window
        relative_coords = \
            coords_flatten[:, :, None] - coords_flatten[:, None, :]
        # Wh*Ww, Wh*Ww, 2
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()
        # shift to start from 0
        relative_coords[:, :, 0] += self.window_size[0] - 1
        relative_coords[:, :, 1] += self.window_size[1] - 1
        # flatten (dh, dw) into a single index into the bias table
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # buffer (not a parameter): the index map is fixed for a window size
        self.register_buffer('relative_position_index',
                             relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)

        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of
                (num_windows, Wh*Ww, Wh*Ww) or None

        Returns:
            Tensor: Forward results with shape (num_windows*B, N, C).
        """
        B_, N, C = x.shape
        # project to q, k, v in one matmul, then split heads:
        # (3, B_, num_heads, N, C // num_heads)
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads,
                                  C // self.num_heads).permute(2, 0, 3, 1, 4)
        # make torchscript happy (cannot use tensor as tuple)
        q, k, v = qkv[0], qkv[1], qkv[2]

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        # look up the learned bias for every token pair in the window
        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)].view(
                self.window_size[0] * self.window_size[1],
                self.window_size[0] * self.window_size[1],
                -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # broadcast the per-window mask over batch and heads
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N,
                             N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, ' \
               f'num_heads={self.num_heads}'
+
+
class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias
            to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk
            scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer.
            Default: nn.LayerNorm
    """

    def __init__(self,
                 dim,
                 input_resolution,
                 num_heads,
                 window_size=7,
                 shift_size=0,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop=0.,
                 attn_drop=0.,
                 drop_path=0.,
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution,
            # we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, \
            'shift_size must in 0-window_size'

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim,
            window_size=to_2tuple(self.window_size),
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop)

        self.drop_path = DropPath(
            drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop)

        # Precompute the mask for the configured resolution; forward()
        # recomputes it on the fly when the actual input size differs.
        if self.shift_size > 0:
            attn_mask = self.calculate_mask(self.input_resolution)
        else:
            attn_mask = None

        self.register_buffer('attn_mask', attn_mask)

    def calculate_mask(self, x_size):
        """Calculate attention mask for SW-MSA.

        Only called when shift_size > 0; the slices below rely on a
        non-zero shift.

        Args:
            x_size (tuple[int]): Resolution of input feature.

        Returns:
            Tensor: Attention mask
        """
        H, W = x_size
        img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
        # label each of the 9 shifted regions with a distinct integer
        h_slices = (slice(0, -self.window_size),
                    slice(-self.window_size,
                          -self.shift_size), slice(-self.shift_size, None))
        w_slices = (slice(0, -self.window_size),
                    slice(-self.window_size,
                          -self.shift_size), slice(-self.shift_size, None))
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1

        mask_windows = window_partition(
            img_mask, self.window_size)  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1,
                                         self.window_size * self.window_size)
        # token pairs from different regions get -100 (suppressed by softmax)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0,
                                          float(-100.0)).masked_fill(
                                              attn_mask == 0, float(0.0))

        return attn_mask

    def forward(self, x, x_size):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (B, L, C).
            x_size (tuple[int]): Resolution of input feature.

        Returns:
            Tensor: Forward results.
        """
        H, W = x_size
        B, L, C = x.shape
        # assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(
                x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x

        # partition windows
        x_windows = window_partition(
            shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size,
                                   C)  # nW*B, window_size*window_size, C

        # W-MSA/SW-MSA (to be compatible for testing on images
        # whose shapes are the multiple of window size
        if self.input_resolution == x_size:
            # cached mask matches the actual resolution
            attn_windows = self.attn(
                x_windows,
                mask=self.attn_mask)  # nW*B, window_size*window_size, C
        else:
            # resolution differs from the build-time one: recompute mask
            attn_windows = self.attn(
                x_windows, mask=self.calculate_mask(x_size).to(x.device))

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size,
                                         self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H,
                                   W)  # B H' W' C

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(
                shifted_x,
                shifts=(self.shift_size, self.shift_size),
                dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)

        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x

    def extra_repr(self) -> str:
        return f'dim={self.dim}, ' \
               f'input_resolution={self.input_resolution}, ' \
               f'num_heads={self.num_heads}, ' \
               f'window_size={self.window_size}, ' \
               f'shift_size={self.shift_size}, ' \
               f'mlp_ratio={self.mlp_ratio}'
+
+
class BasicLayer(nn.Module):
    """A basic Swin Transformer layer for one stage.

    Stacks ``depth`` SwinTransformerBlocks, alternating between regular
    (W-MSA) and shifted (SW-MSA) window attention, optionally followed by a
    downsample layer.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias
            to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk
            scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate.
            Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer.
            Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the
            end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory.
            Default: False.
    """

    def __init__(self,
                 dim,
                 input_resolution,
                 depth,
                 num_heads,
                 window_size,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop=0.,
                 attn_drop=0.,
                 drop_path=0.,
                 norm_layer=nn.LayerNorm,
                 downsample=None,
                 use_checkpoint=False):

        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks: even indices use W-MSA, odd indices use SW-MSA
        blocks = []
        for idx in range(depth):
            if isinstance(drop_path, list):
                path_rate = drop_path[idx]
            else:
                path_rate = drop_path
            blocks.append(
                SwinTransformerBlock(
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    window_size=window_size,
                    shift_size=window_size // 2 if idx % 2 else 0,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path=path_rate,
                    norm_layer=norm_layer))
        self.blocks = nn.ModuleList(blocks)

        # patch merging layer
        if downsample is None:
            self.downsample = None
        else:
            self.downsample = downsample(
                input_resolution, dim=dim, norm_layer=norm_layer)

    def forward(self, x, x_size):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (B, L, C).
            x_size (tuple[int]): Resolution of input feature.

        Returns:
            Tensor: Forward results.
        """
        for block in self.blocks:
            if self.use_checkpoint:
                # trade compute for memory during training
                x = checkpoint.checkpoint(block, x, x_size)
            else:
                x = block(x, x_size)
        if self.downsample is not None:
            x = self.downsample(x)
        return x

    def extra_repr(self) -> str:
        return f'dim={self.dim}, ' \
               f'input_resolution={self.input_resolution}, ' \
               f'depth={self.depth}'
+
+
class RSTB(nn.Module):
    """Residual Swin Transformer Block (RSTB).

    A BasicLayer followed by a convolution, wrapped in a residual
    connection. Features are unembedded to 2D for the convolution and
    re-embedded afterwards.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            Default: 4.0
        qkv_bias (bool, optional): If True, add a learnable bias to
            query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale
            of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate.
            Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer.
            Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the
            end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory.
            Default: False.
        img_size (int): Input image size. Default: 224
        patch_size (int): Patch size. Default: 4
        resi_connection (string): The convolutional block before
            residual connection. Default: '1conv'
    """

    def __init__(self,
                 dim,
                 input_resolution,
                 depth,
                 num_heads,
                 window_size,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop=0.,
                 attn_drop=0.,
                 drop_path=0.,
                 norm_layer=nn.LayerNorm,
                 downsample=None,
                 use_checkpoint=False,
                 img_size=224,
                 patch_size=4,
                 resi_connection='1conv'):
        super().__init__()

        self.dim = dim
        self.input_resolution = input_resolution

        self.residual_group = BasicLayer(
            dim=dim,
            input_resolution=input_resolution,
            depth=depth,
            num_heads=num_heads,
            window_size=window_size,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            drop=drop,
            attn_drop=attn_drop,
            drop_path=drop_path,
            norm_layer=norm_layer,
            downsample=downsample,
            use_checkpoint=use_checkpoint)

        if resi_connection == '1conv':
            self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
        elif resi_connection == '3conv':
            # bottleneck variant: saves parameters and memory
            mid_chans = dim // 4
            self.conv = nn.Sequential(
                nn.Conv2d(dim, mid_chans, 3, 1, 1),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(mid_chans, mid_chans, 1, 1, 0),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(mid_chans, dim, 3, 1, 1))

        # embed/unembed convert between (B, L, C) tokens and 2D maps
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=0,
            embed_dim=dim,
            norm_layer=None)

        self.patch_unembed = PatchUnEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=0,
            embed_dim=dim,
            norm_layer=None)

    def forward(self, x, x_size):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (B, L, C).
            x_size (tuple[int]): Resolution of input feature.

        Returns:
            Tensor: Forward results.
        """
        feat = self.residual_group(x, x_size)
        feat = self.patch_unembed(feat, x_size)
        feat = self.conv(feat)
        # residual connection around the whole group
        return self.patch_embed(feat) + x
diff --git a/mmedit/models/editors/swinir/swinir_utils.py b/mmedit/models/editors/swinir/swinir_utils.py
new file mode 100644
index 0000000000..d0f6e69051
--- /dev/null
+++ b/mmedit/models/editors/swinir/swinir_utils.py
@@ -0,0 +1,84 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import collections.abc
+from itertools import repeat
+
+
+# From PyTorch internals
+def _ntuple(n):
+ """A `to_tuple` function generator. It returns a function, this function
+ will repeat the input to a tuple of length ``n`` if the input is not an
+ Iterable object, otherwise, return the input directly.
+
+ Args:
+ n (int): The number of the target length.
+ """
+
+ def parse(x):
+ if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
+ return x
+ return tuple(repeat(x, n))
+
+ return parse
+
+
+to_2tuple = _ntuple(2)
+
+
def drop_path(x,
              drop_prob: float = 0.,
              training: bool = False,
              scale_by_keep: bool = True):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of
    residual blocks).

    This is the same as the DropConnect impl I created for
    EfficientNet, etc networks, however, the original name is misleading
    as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion:
    https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956
    I've opted for changing the layer and argument names to 'drop path'
    rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.
    """
    # Identity outside of training or when nothing should be dropped.
    if not training or drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample; broadcast over all trailing dims
    # (works for any tensor rank, not just 2D ConvNets).
    mask_shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
    mask = x.new_empty(mask_shape).bernoulli_(keep_prob)
    if keep_prob > 0.0 and scale_by_keep:
        # Rescale so the expected activation magnitude is preserved.
        mask.div_(keep_prob)
    return x * mask
+
+
def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C)
        window_size (int): window size
    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    grid_h = H // window_size
    grid_w = W // window_size
    tiles = x.view(B, grid_h, window_size, grid_w, window_size, C)
    # bring the two window axes next to each other, then flatten the grid
    tiles = tiles.permute(0, 1, 3, 2, 4, 5).contiguous()
    return tiles.view(-1, window_size, window_size, C)
+
+
def window_reverse(windows, window_size, H, W):
    """Merge non-overlapping windows back into a feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, H, W, C)
    """
    # recover the batch size from the total window count
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    grid_h = H // window_size
    grid_w = W // window_size
    x = windows.view(B, grid_h, grid_w, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(B, H, W, -1)
diff --git a/model-index.yml b/model-index.yml
index bedf04aac2..f1de04f77e 100644
--- a/model-index.yml
+++ b/model-index.yml
@@ -44,6 +44,7 @@ Import:
- configs/styleganv1/metafile.yml
- configs/styleganv2/metafile.yml
- configs/styleganv3/metafile.yml
+- configs/swinir/metafile.yml
- configs/tdan/metafile.yml
- configs/tof/metafile.yml
- configs/ttsr/metafile.yml
diff --git a/tests/test_datasets/test_transforms/test_random_degradations.py b/tests/test_datasets/test_transforms/test_random_degradations.py
index 2008033322..12546ac5eb 100644
--- a/tests/test_datasets/test_transforms/test_random_degradations.py
+++ b/tests/test_datasets/test_transforms/test_random_degradations.py
@@ -9,7 +9,7 @@
def test_random_noise():
results = {}
- results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
+ results['lq'] = np.ones((8, 8, 3)).astype(np.uint8)
# Gaussian noise
model = RandomNoise(
@@ -49,17 +49,21 @@ def test_random_noise():
def test_random_jpeg_compression():
results = {}
- results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
+ results['lq'] = np.ones((8, 8, 3)).astype(np.uint8)
- model = RandomJPEGCompression(params=dict(quality=[5, 50]), keys=['lq'])
+ model = RandomJPEGCompression(
+ params=dict(quality=[5, 50], color_type='color'), keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# skip degradations with prob < 1
- params = dict(quality=[5, 50], prob=0)
+ params = dict(quality=[5, 50], color_type='color', prob=0)
model = RandomJPEGCompression(params=params, keys=['lq'])
assert model(results) == results
+ model = RandomJPEGCompression(params=params, keys=['lq'], bgr2rgb=True)
+ assert model(results)['lq'].shape == results['lq'].shape
+
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
@@ -334,7 +338,7 @@ def test_random_blur():
def test_degradations_with_shuffle():
results = {}
- results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
+ results['lq'] = np.ones((8, 8, 3)).astype(np.uint8)
# shuffle all
model = DegradationsWithShuffle(
@@ -360,10 +364,10 @@ def test_degradations_with_shuffle():
[
dict(
type='RandomJPEGCompression',
- params=dict(quality=[5, 10])),
+ params=dict(quality=[5, 10], color_type='color')),
dict(
type='RandomJPEGCompression',
- params=dict(quality=[15, 20]))
+ params=dict(quality=[15, 20], color_type='color'))
]
],
keys=['lq'],
@@ -391,8 +395,12 @@ def test_degradations_with_shuffle():
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
target_size=(16, 16))),
[
- dict(type='RandomJPEGCompression', params=dict(quality=[5, 10])),
- dict(type='RandomJPEGCompression', params=dict(quality=[15, 20]))
+ dict(
+ type='RandomJPEGCompression',
+ params=dict(quality=[5, 10], color_type='color')),
+ dict(
+ type='RandomJPEGCompression',
+ params=dict(quality=[15, 20], color_type='color'))
]
]
model = DegradationsWithShuffle(
diff --git a/tests/test_models/test_editors/test_swinir/test_swinir_modules.py b/tests/test_models/test_editors/test_swinir/test_swinir_modules.py
new file mode 100644
index 0000000000..3f02556236
--- /dev/null
+++ b/tests/test_models/test_editors/test_swinir/test_swinir_modules.py
@@ -0,0 +1,84 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import platform
+
+import pytest
+import torch
+
+from mmedit.models.editors.swinir.swinir_modules import (PatchEmbed,
+ PatchUnEmbed,
+ Upsample,
+ UpsampleOneStep)
+
+
+@pytest.mark.skipif(
+ 'win' in platform.system().lower() and 'cu' in torch.__version__,
+ reason='skip on windows-cuda due to limited RAM.')
+def test_patchEmbed():
+
+ net = PatchEmbed(
+ img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None)
+
+ img = torch.randn(1, 3, 4, 4)
+ output = net(img)
+ assert output.shape == (1, 16, 3)
+
+ if torch.cuda.is_available():
+ net = net.cuda()
+ output = net(img.cuda())
+ assert output.shape == (1, 16, 3)
+
+
+@pytest.mark.skipif(
+ 'win' in platform.system().lower() and 'cu' in torch.__version__,
+ reason='skip on windows-cuda due to limited RAM.')
+def test_patchUnEmbed():
+
+ net = PatchUnEmbed(
+ img_size=16, patch_size=4, in_chans=3, embed_dim=3, norm_layer=None)
+
+ img = torch.randn(1, 64, 3)
+ output = net(img, (8, 8))
+ assert output.shape == (1, 3, 8, 8)
+
+ if torch.cuda.is_available():
+ net = net.cuda()
+ output = net(img.cuda(), (8, 8))
+ assert output.shape == (1, 3, 8, 8)
+
+
+@pytest.mark.skipif(
+ 'win' in platform.system().lower() and 'cu' in torch.__version__,
+ reason='skip on windows-cuda due to limited RAM.')
+def test_upsample():
+
+ net = Upsample(scale=2, num_feat=3)
+
+ img = torch.randn(1, 3, 8, 8)
+ output = net(img)
+ assert output.shape == (1, 3, 16, 16)
+
+ if torch.cuda.is_available():
+ net = net.cuda()
+ output = net(img.cuda())
+ assert output.shape == (1, 3, 16, 16)
+
+
+@pytest.mark.skipif(
+ 'win' in platform.system().lower() and 'cu' in torch.__version__,
+ reason='skip on windows-cuda due to limited RAM.')
+def test_upsampleOneStep():
+
+ net = UpsampleOneStep(
+ scale=2,
+ num_feat=3,
+ num_out_ch=4,
+ )
+
+ img = torch.randn(1, 3, 8, 8)
+ output = net(img)
+ assert output.shape == (1, 4, 16, 16)
+
+ if torch.cuda.is_available():
+ net = net.cuda()
+ output = net(img.cuda())
+ assert output.shape == (1, 4, 16, 16)
diff --git a/tests/test_models/test_editors/test_swinir/test_swinir_net.py b/tests/test_models/test_editors/test_swinir/test_swinir_net.py
new file mode 100644
index 0000000000..b2944baa89
--- /dev/null
+++ b/tests/test_models/test_editors/test_swinir/test_swinir_net.py
@@ -0,0 +1,108 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import platform
+
+import pytest
+import torch
+
+from mmedit.models.editors import SwinIRNet
+
+
+def test_swinir_cpu():
+ """Test SwinIRNet."""
+
+ # x2 model lightweight SR (pixelshuffledirect upsampler)
+ net = SwinIRNet(
+ upscale=2,
+ in_channels=3,
+ img_size=48,
+ window_size=8,
+ img_range=1.0,
+ depths=[6],
+ embed_dim=60,
+ num_heads=[6],
+ mlp_ratio=2,
+ upsampler='pixelshuffledirect',
+ resi_connection='3conv')
+ img = torch.rand(1, 3, 16, 16)
+ output = net(img)
+ assert isinstance(output, torch.Tensor)
+ assert output.shape == (1, 3, 32, 32)
+
+ net = SwinIRNet(
+ upscale=1,
+ in_channels=3,
+ img_size=48,
+ window_size=8,
+ img_range=1.0,
+ depths=[6],
+ embed_dim=60,
+ num_heads=[6],
+ mlp_ratio=2,
+ upsampler='',
+ resi_connection='1conv')
+ img = torch.rand(1, 3, 16, 16)
+ output = net(img)
+ assert isinstance(output, torch.Tensor)
+ assert output.shape == (1, 3, 16, 16)
+
+ # x3 model classical SR, initialization and forward (cpu)
+ net = SwinIRNet(
+ upscale=3,
+ in_channels=3,
+ img_size=16,
+ window_size=8,
+ img_range=1.0,
+ depths=[2],
+ embed_dim=8,
+ num_heads=[2],
+ mlp_ratio=2,
+ upsampler='pixelshuffle',
+ resi_connection='1conv')
+ img = torch.rand(1, 3, 16, 16)
+ output = net(img)
+ assert isinstance(output, torch.Tensor)
+ assert output.shape == (1, 3, 48, 48)
+
+ # x4 model with nearest+conv upsampler (real-world SR style), forward (cpu)
+ net = SwinIRNet(
+ upscale=4,
+ in_channels=3,
+ img_size=16,
+ window_size=8,
+ img_range=1.0,
+ depths=[2],
+ embed_dim=8,
+ num_heads=[2],
+ mlp_ratio=2,
+ ape=True,
+ upsampler='nearest+conv',
+ resi_connection='1conv')
+ output = net(img)
+ assert isinstance(output, torch.Tensor)
+ assert output.shape == (1, 3, 64, 64)
+
+
+@pytest.mark.skipif(
+ 'win' in platform.system().lower() and 'cu' in torch.__version__,
+ reason='skip on windows-cuda due to limited RAM.')
+def test_swinir_cuda():
+ net = SwinIRNet(
+ upscale=4,
+ in_channels=3,
+ img_size=16,
+ window_size=8,
+ img_range=1.0,
+ depths=[2],
+ embed_dim=8,
+ num_heads=[2],
+ mlp_ratio=2,
+ upsampler='pixelshuffledirect',
+ resi_connection='1conv')
+ img = torch.rand(1, 3, 16, 16)
+
+ # x4 model lightweight SR forward (gpu)
+ if torch.cuda.is_available():
+ net = net.cuda()
+ output = net(img.cuda())
+ assert isinstance(output, torch.Tensor)
+ assert output.shape == (1, 3, 64, 64)
diff --git a/tests/test_models/test_editors/test_swinir/test_swinir_rstb.py b/tests/test_models/test_editors/test_swinir/test_swinir_rstb.py
new file mode 100644
index 0000000000..b5f9c1db1d
--- /dev/null
+++ b/tests/test_models/test_editors/test_swinir/test_swinir_rstb.py
@@ -0,0 +1,25 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import platform
+
+import pytest
+import torch
+
+from mmedit.models.editors.swinir.swinir_rstb import RSTB
+
+
+@pytest.mark.skipif(
+ 'win' in platform.system().lower() and 'cu' in torch.__version__,
+ reason='skip on windows-cuda due to limited RAM.')
+def test_rstb():
+
+ net = RSTB(
+ dim=6, input_resolution=(8, 8), depth=6, num_heads=6, window_size=8)
+
+ img = torch.randn(1, 64, 6)
+ output = net(img, (8, 8))
+ assert output.shape == (1, 64, 6)
+
+ if torch.cuda.is_available():
+ net = net.cuda()
+ output = net(img.cuda(), (8, 8))
+ assert output.shape == (1, 64, 6)
diff --git a/tests/test_models/test_editors/test_swinir/test_swinir_utils.py b/tests/test_models/test_editors/test_swinir/test_swinir_utils.py
new file mode 100644
index 0000000000..0971960401
--- /dev/null
+++ b/tests/test_models/test_editors/test_swinir/test_swinir_utils.py
@@ -0,0 +1,26 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+
+from mmedit.models.editors.swinir.swinir_utils import (drop_path, to_2tuple,
+ window_partition,
+ window_reverse)
+
+
+def test_drop_path():
+ x = torch.randn(1, 3, 8, 8)
+ x = drop_path(x)
+ assert x.shape == (1, 3, 8, 8)
+
+
+def test_to_2tuple():
+ x = 8
+ x = to_2tuple(x)
+ assert x == (8, 8)
+
+
+def test_window():
+ x = torch.randn(1, 8, 8, 3)
+ x = window_partition(x, 4)
+ assert x.shape == (4, 4, 4, 3)
+ x = window_reverse(x, 4, 8, 8)
+ assert x.shape == (1, 8, 8, 3)