[Feature] Support CIHP dataset #1493

Open · wants to merge 6 commits into master
59 changes: 59 additions & 0 deletions configs/_base_/datasets/cihp.py
@@ -0,0 +1,59 @@
# dataset settings
dataset_type = 'CIHPDataset'
data_root = 'data/CIHP'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (520, 520)
crop_size = (473, 473)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='Resize',
img_scale=img_scale,
keep_ratio=False,
ratio_range=(0.75, 1.25)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='RandomRotate', prob=0.6, degree=30),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/training',
ann_dir='annotations/training',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline))
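A minimal usage sketch of this base file, assuming an mmseg 0.x checkout with the `data/CIHP` layout from the dataset-prepare docs already in place (not part of the PR, just an illustration):

```python
# Load the base dataset config and build the training set from it.
from mmcv import Config

from mmseg.datasets import build_dataset

cfg = Config.fromfile('configs/_base_/datasets/cihp.py')
# cfg.data.train points at images/training + annotations/training and uses
# the train_pipeline defined above.
train_set = build_dataset(cfg.data.train)
print(len(train_set), train_set.CLASSES[:3])
```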
@@ -0,0 +1,6 @@
_base_ = [
'../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/cihp.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
decode_head=dict(num_classes=20), auxiliary_head=dict(num_classes=20))
@@ -0,0 +1,6 @@
_base_ = [
'../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/cihp.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
decode_head=dict(num_classes=20), auxiliary_head=dict(num_classes=20))
2 changes: 2 additions & 0 deletions configs/pspnet/pspnet_r101-d8_473x473_160k_CIHP.py
@@ -0,0 +1,2 @@
_base_ = './pspnet_r50-d8_473x473_160k_CIHP.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
2 changes: 2 additions & 0 deletions configs/pspnet/pspnet_r101-d8_473x473_80k_CIHP.py
@@ -0,0 +1,2 @@
_base_ = './pspnet_r50-d8_473x473_80k_CIHP.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
6 changes: 6 additions & 0 deletions configs/pspnet/pspnet_r50-d8_473x473_160k_CIHP.py
@@ -0,0 +1,6 @@
_base_ = [
'../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cihp.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
decode_head=dict(num_classes=20), auxiliary_head=dict(num_classes=20))
6 changes: 6 additions & 0 deletions configs/pspnet/pspnet_r50-d8_473x473_80k_CIHP.py
@@ -0,0 +1,6 @@
_base_ = [
'../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cihp.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
decode_head=dict(num_classes=20), auxiliary_head=dict(num_classes=20))
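A hedged sanity check of how the `_base_` composition resolves for these new configs (assuming `mmcv.Config` is available and the file paths above are unchanged):

```python
# Loading one of the new configs should yield 20 classes on both heads
# and the CIHP dataset settings inherited from cihp.py.
from mmcv import Config

cfg = Config.fromfile('configs/pspnet/pspnet_r50-d8_473x473_80k_CIHP.py')
assert cfg.model.decode_head.num_classes == 20
assert cfg.model.auxiliary_head.num_classes == 20
assert cfg.data.train.type == 'CIHPDataset'
```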
25 changes: 25 additions & 0 deletions docs/en/dataset_prepare.md
@@ -138,6 +138,13 @@ mmsegmentation
│ │ ├── ann_dir
│ │ │ ├── train
│ │ │ ├── val
│ ├── CIHP
│ │ ├── annotations
│ │ │ ├── training
│ │ │ ├── validation
│ │ ├── images
│ │ │ ├── training
│ │ │ ├── validation
```

### Cityscapes
@@ -375,3 +382,21 @@ python tools/convert_datasets/isaid.py /path/to/iSAID
```

In our default setting (`patch_width`=896, `patch_height`=896, `overlap_area`=384), it will generate 33978 images for training and 11644 images for validation.

### CIHP

The images and annotations can be downloaded from [CIHP](https://lip.sysuhcp.com/overview.php) (train/val/test).

CIHP is a multi-human parsing dataset with 19 semantic classes plus 1 background class. It is split into 28K/5K/5K images for training, validation, and testing.

After downloading CIHP, you may need to rename the training and validation sets (images and annotations) so that they match the structure below; a renaming sketch in Python follows the directory tree.

```
│ ├── CIHP
│ │ ├── annotations
│ │ │ ├── training
│ │ │ ├── validation
│ │ ├── images
│ │ │ ├── training
│ │ │ ├── validation
```
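One way to do the renaming is sketched below. The source folder names (`instance-level_human_parsing`, `Training/Images`, `Training/Category_ids`, ...) are assumptions about the extracted download and may differ in your copy.

```python
# Hedged sketch: copy an extracted CIHP download into the expected layout.
import os
import shutil

src = 'instance-level_human_parsing'  # assumed name of the extracted archive
dst = 'data/CIHP'

mapping = {
    ('Training', 'Images'): ('images', 'training'),
    ('Training', 'Category_ids'): ('annotations', 'training'),
    ('Validation', 'Images'): ('images', 'validation'),
    ('Validation', 'Category_ids'): ('annotations', 'validation'),
}
for (split, kind), (out_kind, out_split) in mapping.items():
    src_dir = os.path.join(src, split, kind)
    dst_dir = os.path.join(dst, out_kind, out_split)
    os.makedirs(os.path.dirname(dst_dir), exist_ok=True)
    shutil.copytree(src_dir, dst_dir)
```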
25 changes: 25 additions & 0 deletions docs/zh_cn/dataset_prepare.md
@@ -119,6 +119,13 @@ mmsegmentation
│ │ ├── ann_dir
│ │ │ ├── train
│ │ │ ├── val
│ ├── CIHP
│ │ ├── annotations
│ │ │ ├── training
│ │ │ ├── validation
│ │ ├── images
│ │ │ ├── training
│ │ │ ├── validation
```

### Cityscapes
@@ -316,3 +323,21 @@ python tools/convert_datasets/isaid.py /path/to/iSAID
```

With our default setting (`patch_width`=896, `patch_height`=896, `overlap_area`=384), it generates 33978 training images and 11644 validation images.

### CIHP

The images and annotations of the CIHP dataset (train/val/test) can be downloaded from [CIHP](https://lip.sysuhcp.com/overview.php).

CIHP is a multi-human parsing dataset with 19 semantic classes plus one background class, split into 28K/5K/5K images for training/validation/testing.

After downloading, and before training on the dataset, rename the training and validation sets (images and annotations) inside the dataset folder to match the following layout.

```
│ ├── CIHP
│ │ ├── annotations
│ │ │ ├── training
│ │ │ ├── validation
│ │ ├── images
│ │ │ ├── training
│ │ │ ├── validation
```
22 changes: 21 additions & 1 deletion mmseg/core/evaluation/class_names.py
@@ -126,6 +126,16 @@ def stare_classes():
return ['background', 'vessel']


def cihp_classes():
"""CIHP class names for external use."""
return [
'background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes',
'dress', 'coat', 'socks', 'pants', 'torsoSkin', 'scarf', 'skirt',
'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe',
'rightShoe'
]


def cityscapes_palette():
"""Cityscapes palette for external use."""
return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
@@ -265,6 +275,15 @@ def stare_palette():
return [[120, 120, 120], [6, 230, 230]]


def cihp_palette():
"""CIHP palette for external use."""
return [[0, 0, 0], [128, 0, 0], [255, 0, 0], [0, 85, 0], [170, 0, 51],
[255, 85, 0], [0, 0, 85], [0, 119, 221], [85, 85, 0], [0, 85, 85],
[85, 51, 0], [52, 86, 128], [0, 128, 0], [0, 0, 255],
[51, 170, 221], [0, 255, 255], [85, 255, 170], [170, 255, 85],
[255, 255, 0], [255, 170, 0]]


dataset_aliases = {
'cityscapes': ['cityscapes'],
'ade': ['ade', 'ade20k'],
@@ -278,7 +297,8 @@ def stare_palette():
'coco_stuff164k'
],
'isaid': ['isaid', 'iSAID'],
'stare': ['stare', 'STARE']
'stare': ['stare', 'STARE'],
'cihp': ['cihp']
}


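With the `'cihp'` alias registered above, the generic lookup helpers resolve to `cihp_classes()` / `cihp_palette()`. A short usage sketch (consistent with the assertions in the updated tests):

```python
from mmseg.core.evaluation import get_classes, get_palette

classes = get_classes('cihp')
palette = get_palette('cihp')
assert len(classes) == 20 and classes[0] == 'background'
assert len(palette) == 20
```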
3 changes: 2 additions & 1 deletion mmseg/datasets/__init__.py
@@ -2,6 +2,7 @@
from .ade import ADE20KDataset
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .chase_db1 import ChaseDB1Dataset
from .cihp import CIHPDataset
from .cityscapes import CityscapesDataset
from .coco_stuff import COCOStuffDataset
from .custom import CustomDataset
@@ -26,5 +27,5 @@
'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset',
'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset',
'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset',
'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset'
'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset', 'CIHPDataset'
]
33 changes: 33 additions & 0 deletions mmseg/datasets/cihp.py
@@ -0,0 +1,33 @@
# Copyright (c) OpenMMLab. All rights reserved.

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class CIHPDataset(CustomDataset):
"""CIHP dataset.

In segmentation map annotation for CIHP, 0 stands for background, which is
included in 20 categories. ``reduce_zero_label`` is fixed to False. The
``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
'.png'.
"""
CLASSES = ('background', 'hat', 'hair', 'glove', 'sunglasses',
'upperclothes', 'dress', 'coat', 'socks', 'pants', 'torsoSkin',
'scarf', 'skirt', 'face', 'leftArm', 'rightArm', 'leftLeg',
'rightLeg', 'leftShoe', 'rightShoe')

PALETTE = [[0, 0, 0], [128, 0, 0], [255, 0, 0], [0, 85, 0], [170, 0, 51],
[255, 85, 0], [0, 0, 85], [0, 119, 221], [85, 85, 0], [0, 85, 85],
[85, 51, 0], [52, 86, 128], [0, 128, 0], [0, 0, 255],
[51, 170, 221], [0, 255, 255], [85, 255, 170], [170, 255, 85],
[255, 255, 0], [255, 170, 0]]

def __init__(self, **kwargs):
super(CIHPDataset, self).__init__(
img_suffix='.jpg',
seg_map_suffix='.png',
reduce_zero_label=False,
**kwargs)
Binary file added tests/data/pseudo_cihp_dataset/images/0038267.jpg
32 changes: 27 additions & 5 deletions tests/test_data/test_dataset.py
@@ -12,11 +12,12 @@
from PIL import Image

from mmseg.core.evaluation import get_classes, get_palette
from mmseg.datasets import (DATASETS, ADE20KDataset, CityscapesDataset,
COCOStuffDataset, ConcatDataset, CustomDataset,
ISPRSDataset, LoveDADataset, MultiImageMixDataset,
PascalVOCDataset, PotsdamDataset, RepeatDataset,
build_dataset, iSAIDDataset)
from mmseg.datasets import (DATASETS, ADE20KDataset, CIHPDataset,
CityscapesDataset, COCOStuffDataset, ConcatDataset,
CustomDataset, ISPRSDataset, LoveDADataset,
MultiImageMixDataset, PascalVOCDataset,
PotsdamDataset, RepeatDataset, build_dataset,
iSAIDDataset)


def test_classes():
@@ -30,6 +31,7 @@ def test_classes():
assert list(PotsdamDataset.CLASSES) == get_classes('potsdam')
assert list(ISPRSDataset.CLASSES) == get_classes('vaihingen')
assert list(iSAIDDataset.CLASSES) == get_classes('isaid')
assert list(CIHPDataset.CLASSES) == get_classes('cihp')

with pytest.raises(ValueError):
get_classes('unsupported')
@@ -75,6 +77,7 @@ def test_palette():
assert PotsdamDataset.PALETTE == get_palette('potsdam')
assert COCOStuffDataset.PALETTE == get_palette('cocostuff')
assert iSAIDDataset.PALETTE == get_palette('isaid')
assert CIHPDataset.PALETTE == get_palette('cihp')

with pytest.raises(ValueError):
get_palette('unsupported')
@@ -753,6 +756,25 @@ def test_isaid():
assert len(isaid_info) == 1


def test_cihp():
test_dataset = CIHPDataset(
pipeline=[],
img_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_cihp_dataset/images'),
ann_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_cihp_dataset/annotations'))
assert len(test_dataset) == 1
cihp_info = test_dataset.load_annotations(
img_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_cihp_dataset/images'),
img_suffix='.jpg',
ann_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_cihp_dataset/annotations'),
seg_map_suffix='.png',
split=None)
assert len(cihp_info) == 1


@patch('mmseg.datasets.CustomDataset.load_annotations', MagicMock)
@patch('mmseg.datasets.CustomDataset.__getitem__',
MagicMock(side_effect=lambda idx: idx))