Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Fix] Fix reduce_zero_label in evaluation #2504

Merged
merged 9 commits into from
Jan 30, 2023
24 changes: 13 additions & 11 deletions mmseg/datasets/custom.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,8 +66,8 @@ class CustomDataset(Dataset):
The palette of segmentation map. If None is given, and
self.PALETTE is None, random palette will be generated.
Default: None
gt_seg_map_loader_cfg (dict, optional): build LoadAnnotations to
load gt for evaluation, load from disk by default. Default: None.
gt_seg_map_loader_cfg (dict): build LoadAnnotations to load gt for
evaluation, load from disk by default. Default: ``dict()``.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
Expand All @@ -90,7 +90,7 @@ def __init__(self,
reduce_zero_label=False,
classes=None,
palette=None,
gt_seg_map_loader_cfg=None,
gt_seg_map_loader_cfg=dict(),
file_client_args=dict(backend='disk')):
self.pipeline = Compose(pipeline)
self.img_dir = img_dir
Expand All @@ -106,8 +106,7 @@ def __init__(self,
self.CLASSES, self.PALETTE = self.get_classes_and_palette(
classes, palette)
self.gt_seg_map_loader = LoadAnnotations(
) if gt_seg_map_loader_cfg is None else LoadAnnotations(
**gt_seg_map_loader_cfg)
reduce_zero_label=reduce_zero_label, **gt_seg_map_loader_cfg)

self.file_client_args = file_client_args
self.file_client = mmcv.FileClient.infer_client(self.file_client_args)
Expand Down Expand Up @@ -303,13 +302,16 @@ def pre_eval(self, preds, indices):
seg_map,
len(self.CLASSES),
self.ignore_index,
# as the labels has been converted when dataset initialized
# in `get_palette_for_custom_classes ` this `label_map`
# should be `dict()`, see
# as the label map has already been applied and zero label
# has already been reduced by get_gt_seg_map_by_idx() i.e.
# LoadAnnotations.__call__(), these operations should not
# be duplicated. See the following issues/PRs:
# https://github.com/open-mmlab/mmsegmentation/issues/1415
# for more ditails
# https://github.com/open-mmlab/mmsegmentation/pull/1417
# https://github.com/open-mmlab/mmsegmentation/pull/2504
# for more details
label_map=dict(),
reduce_zero_label=self.reduce_zero_label))
reduce_zero_label=False))

return pre_eval_results

Expand Down Expand Up @@ -427,7 +429,7 @@ def evaluate(self,
self.ignore_index,
metric,
label_map=dict(),
reduce_zero_label=self.reduce_zero_label)
reduce_zero_label=False)
# test a list of pre_eval_results
else:
ret_metrics = pre_eval_to_metrics(results, metric)
Expand Down
63 changes: 63 additions & 0 deletions tests/test_data/test_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -365,6 +365,69 @@ def test_custom_dataset():
assert not np.isnan(eval_results['mRecall'])


def test_custom_dataset_pre_eval():
    """Check ``pre_eval`` with ``reduce_zero_label`` and a reduced class list.

    The ground-truth map uses labels 0 to 4: label 0 is the zero label to be
    reduced, and labels 1-4 correspond to the classes "A", "B", "C" and "D".
    Class "C" is removed when instantiating the dataset, so ``pre_eval`` must
    reduce the zero label first and apply ``label_map`` second for the
    metrics to come out right.
    """

    # Materialize a one-sample dataset (image + annotation) on disk.
    image = np.random.rand(10, 10)
    gt = np.zeros_like(image)
    gt[2:4, 2:4] = 1
    gt[2:4, 6:8] = 2
    gt[6:8, 2:4] = 3
    gt[6:8, 6:8] = 4

    tmp_dir = tempfile.TemporaryDirectory()
    img_path = osp.join(tmp_dir.name, 'img', '00000.jpg')
    ann_path = osp.join(tmp_dir.name, 'ann', '00000.png')

    import mmcv
    mmcv.imwrite(image, img_path)
    mmcv.imwrite(gt, ann_path)

    class ZeroLabelDataset(CustomDataset):
        # Four semantic classes; the GT additionally contains the zero label.
        CLASSES = ['A', 'B', 'C', 'D']
        PALETTE = [(0, 0, 0)] * 4  # placeholder palette

    dataset = ZeroLabelDataset(
        [],
        classes=['A', 'B', 'D'],  # drop class "C" from the original set
        reduce_zero_label=True,  # zero label must be reduced during loading
        data_root=osp.join(osp.dirname(__file__), tmp_dir.name),
        img_dir='img/',
        ann_dir='ann/',
        img_suffix='.jpg',
        seg_map_suffix='.png')
    assert len(dataset) == 1

    # Build the prediction a perfect network would emit over the three
    # remaining classes. After zero-label reduction the GT labels become
    # 0..3; the label map then sends 'A'->0, 'B'->1, 'C'->ignored, 'D'->2.
    prediction = np.zeros([10, 10], dtype=np.int64)
    prediction[2:4, 2:4] = 0  # 'A': 1 reduced to 0, mapped to 0
    prediction[2:4, 6:8] = 1  # 'B': 2 reduced to 1, mapped to 1
    prediction[6:8, 2:4] = 0  # 'C': 3 reduced to 2, mapped to -1 (ignored)
    prediction[6:8, 6:8] = 2  # 'D': 4 reduced to 3, mapped to 2

    results = dataset.pre_eval([prediction], [0])
    from mmseg.core.evaluation.metrics import pre_eval_to_metrics
    eval_results = pre_eval_to_metrics(results, ['mIoU', 'mDice', 'mFscore'])

    # A perfect prediction must score exactly 1.0 on every metric.
    for metric in ('IoU', 'aAcc', 'Acc', 'Dice', 'Fscore', 'Precision',
                   'Recall'):
        assert (eval_results[metric] == 1.0).all()

    tmp_dir.cleanup()


@pytest.mark.parametrize('separate_eval', [True, False])
def test_eval_concat_custom_dataset(separate_eval):
img_norm_cfg = dict(
Expand Down