Cityscapes semantic segmentation evaluator
Summary: semantic segmentation evaluation from Cityscapes scripts

Reviewed By: ppwwyyxx

Differential Revision: D19453083

fbshipit-source-id: bb51e743e29d5fcc54d845e0c55c6ae8baae60f7
alexander-kirillov authored and facebook-github-bot committed May 1, 2020
1 parent 62cf3a2 commit e7aa570
Showing 6 changed files with 111 additions and 18 deletions.
4 changes: 2 additions & 2 deletions detectron2/data/datasets/builtin.py
@@ -183,15 +183,15 @@ def register_all_cityscapes(root):
             ),
         )
         MetadataCatalog.get(inst_key).set(
-            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes", **meta
+            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
         )
 
         sem_key = key.format(task="sem_seg")
         DatasetCatalog.register(
             sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)
         )
         MetadataCatalog.get(sem_key).set(
-            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="sem_seg", **meta
+            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_sem_seg", **meta
         )


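Editor's note (not part of the commit): the evaluator_type string set above is the hook the training scripts further down use to choose an evaluator class. A minimal sketch of checking it, assuming the builtin "cityscapes_fine_{task}_{split}" dataset names registered in this file, and assuming the builtin registration has already run (it normally happens when detectron2.data.datasets is imported):

    from detectron2.data import MetadataCatalog
    import detectron2.data.datasets  # noqa: importing triggers builtin dataset registration

    meta = MetadataCatalog.get("cityscapes_fine_sem_seg_val")
    assert meta.evaluator_type == "cityscapes_sem_seg"
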
2 changes: 1 addition & 1 deletion detectron2/evaluation/__init__.py
@@ -1,5 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-from .cityscapes_evaluation import CityscapesEvaluator
+from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator
 from .coco_evaluation import COCOEvaluator
 from .rotated_coco_evaluation import RotatedCOCOEvaluator
 from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset
87 changes: 81 additions & 6 deletions detectron2/evaluation/cityscapes_evaluation.py
@@ -1,6 +1,7 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
 import glob
 import logging
+import numpy as np
 import os
 import tempfile
 from collections import OrderedDict
@@ -16,12 +17,7 @@

 class CityscapesEvaluator(DatasetEvaluator):
     """
-    Evaluate instance segmentation results using cityscapes API.
-    Note:
-        * It does not work in multi-machine distributed training.
-        * It contains a synchronization, therefore has to be used on all ranks.
-        * Only the main process runs evaluation.
+    Base class for evaluation using cityscapes API.
     """
 
     def __init__(self, dataset_name):
@@ -47,6 +43,17 @@ def reset(self):
"Writing cityscapes results to temporary directory {} ...".format(self._temp_dir)
)


class CityscapesInstanceEvaluator(CityscapesEvaluator):
"""
Evaluate instance segmentation results using cityscapes API.
Note:
* It does not work in multi-machine distributed training.
* It contains a synchronization, therefore has to be used on all ranks.
* Only the main process runs evaluation.
"""

def process(self, inputs, outputs):
from cityscapesscripts.helpers.labels import name2label

@@ -110,3 +117,71 @@ def evaluate(self):
ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100}
self._working_dir.cleanup()
return ret


class CityscapesSemSegEvaluator(CityscapesEvaluator):
"""
Evaluate semantic segmentation results using cityscapes API.
Note:
* It does not work in multi-machine distributed training.
* It contains a synchronization, therefore has to be used on all ranks.
* Only the main process runs evaluation.
"""

def process(self, inputs, outputs):
from cityscapesscripts.helpers.labels import trainId2label

for input, output in zip(inputs, outputs):
file_name = input["file_name"]
basename = os.path.splitext(os.path.basename(file_name))[0]
pred_filename = os.path.join(self._temp_dir, basename + "_pred.png")

output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy()
pred = 255 * np.ones(output.shape, dtype=np.uint8)
for train_id, label in trainId2label.items():
if label.ignoreInEval:
continue
pred[output == train_id] = label.id
Image.fromarray(pred).save(pred_filename)

def evaluate(self):
comm.synchronize()
if comm.get_rank() > 0:
return
# Load the Cityscapes eval script *after* setting the required env var,
# since the script reads CITYSCAPES_DATASET into global variables at load time.
import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval

self._logger.info("Evaluating results under {} ...".format(self._temp_dir))

# set some global states in cityscapes evaluation API, before evaluating
cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
cityscapes_eval.args.predictionWalk = None
cityscapes_eval.args.JSONOutput = False
cityscapes_eval.args.colorized = False

# These lines are adopted from
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa
gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png"))
assert len(
groundTruthImgList
), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
cityscapes_eval.args.groundTruthSearch
)
predictionImgList = []
for gt in groundTruthImgList:
predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt))
results = cityscapes_eval.evaluateImgLists(
predictionImgList, groundTruthImgList, cityscapes_eval.args
)
ret = OrderedDict()
ret["sem_seg"] = {
"IoU": 100.0 * results["averageScoreClasses"],
"iIoU": 100.0 * results["averageScoreInstClasses"],
"IoU_sup": 100.0 * results["averageScoreCategories"],
"iIoU_sup": 100.0 * results["averageScoreInstCategories"],
}
self._working_dir.cleanup()
return ret
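
For orientation (editor's sketch, not part of the commit): process() remaps each predicted train ID back to a raw Cityscapes label ID and writes one *_pred.png per image into a temporary directory; evaluate() then runs the official Cityscapes script over those files on the main process only. Minimal usage, assuming a cfg and model built elsewhere and the builtin dataset name:

    from detectron2.data import build_detection_test_loader
    from detectron2.evaluation import CityscapesSemSegEvaluator, inference_on_dataset

    def eval_cityscapes_sem_seg(cfg, model, dataset_name="cityscapes_fine_sem_seg_val"):
        evaluator = CityscapesSemSegEvaluator(dataset_name)
        loader = build_detection_test_loader(cfg, dataset_name)
        # returns {"sem_seg": {"IoU": ..., "iIoU": ..., "IoU_sup": ..., "iIoU_sup": ...}}
        return inference_on_dataset(model, loader, evaluator)
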
12 changes: 9 additions & 3 deletions projects/PointRend/train_net.py
@@ -16,7 +16,8 @@
 from detectron2.data import MetadataCatalog
 from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
 from detectron2.evaluation import (
-    CityscapesEvaluator,
+    CityscapesInstanceEvaluator,
+    CityscapesSemSegEvaluator,
     COCOEvaluator,
     DatasetEvaluators,
     LVISEvaluator,
@@ -50,11 +51,16 @@ def build_evaluator(cls, cfg, dataset_name, output_folder=None):
             return LVISEvaluator(dataset_name, cfg, True, output_folder)
         if evaluator_type == "coco":
             return COCOEvaluator(dataset_name, cfg, True, output_folder)
-        if evaluator_type == "cityscapes":
+        if evaluator_type == "cityscapes_instance":
             assert (
                 torch.cuda.device_count() >= comm.get_rank()
             ), "CityscapesEvaluator currently do not work with multiple machines."
-            return CityscapesEvaluator(dataset_name)
+            return CityscapesInstanceEvaluator(dataset_name)
+        if evaluator_type == "cityscapes_sem_seg":
+            assert (
+                torch.cuda.device_count() >= comm.get_rank()
+            ), "CityscapesEvaluator currently do not work with multiple machines."
+            return CityscapesSemSegEvaluator(dataset_name)
         if len(evaluator_list) == 0:
             raise NotImplementedError(
                 "no Evaluator for the dataset {} with the type {}".format(
12 changes: 9 additions & 3 deletions tools/plain_train_net.py
@@ -35,7 +35,8 @@
 )
 from detectron2.engine import default_argument_parser, default_setup, launch
 from detectron2.evaluation import (
-    CityscapesEvaluator,
+    CityscapesInstanceEvaluator,
+    CityscapesSemSegEvaluator,
     COCOEvaluator,
     COCOPanopticEvaluator,
     DatasetEvaluators,
@@ -82,11 +83,16 @@ def get_evaluator(cfg, dataset_name, output_folder=None):
         evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
     if evaluator_type == "coco_panoptic_seg":
         evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
-    if evaluator_type == "cityscapes":
+    if evaluator_type == "cityscapes_instance":
         assert (
             torch.cuda.device_count() >= comm.get_rank()
         ), "CityscapesEvaluator currently do not work with multiple machines."
-        return CityscapesEvaluator(dataset_name)
+        return CityscapesInstanceEvaluator(dataset_name)
+    if evaluator_type == "cityscapes_sem_seg":
+        assert (
+            torch.cuda.device_count() >= comm.get_rank()
+        ), "CityscapesEvaluator currently do not work with multiple machines."
+        return CityscapesSemSegEvaluator(dataset_name)
     if evaluator_type == "pascal_voc":
         return PascalVOCDetectionEvaluator(dataset_name)
     if evaluator_type == "lvis":
12 changes: 9 additions & 3 deletions tools/train_net.py
@@ -27,7 +27,8 @@
 from detectron2.data import MetadataCatalog
 from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
 from detectron2.evaluation import (
-    CityscapesEvaluator,
+    CityscapesInstanceEvaluator,
+    CityscapesSemSegEvaluator,
     COCOEvaluator,
     COCOPanopticEvaluator,
     DatasetEvaluators,
@@ -74,11 +75,16 @@ def build_evaluator(cls, cfg, dataset_name, output_folder=None):
             evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
         if evaluator_type == "coco_panoptic_seg":
             evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
-        elif evaluator_type == "cityscapes":
+        if evaluator_type == "cityscapes_instance":
             assert (
                 torch.cuda.device_count() >= comm.get_rank()
             ), "CityscapesEvaluator currently do not work with multiple machines."
-            return CityscapesEvaluator(dataset_name)
+            return CityscapesInstanceEvaluator(dataset_name)
+        if evaluator_type == "cityscapes_sem_seg":
+            assert (
+                torch.cuda.device_count() >= comm.get_rank()
+            ), "CityscapesEvaluator currently do not work with multiple machines."
+            return CityscapesSemSegEvaluator(dataset_name)
         elif evaluator_type == "pascal_voc":
             return PascalVOCDetectionEvaluator(dataset_name)
         elif evaluator_type == "lvis":
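
The three entry points above (projects/PointRend/train_net.py, tools/plain_train_net.py, tools/train_net.py) repeat the same dispatch on evaluator_type. Distilled into one helper (editor's sketch, assuming the dataset was registered as in builtin.py and single-machine use as the asserts above require):

    from detectron2.data import MetadataCatalog
    from detectron2.evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator

    def cityscapes_evaluator_for(dataset_name):
        # evaluator_type was stored by register_all_cityscapes in builtin.py
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type == "cityscapes_instance":
            return CityscapesInstanceEvaluator(dataset_name)
        if evaluator_type == "cityscapes_sem_seg":
            return CityscapesSemSegEvaluator(dataset_name)
        raise NotImplementedError("no Cityscapes evaluator for type {}".format(evaluator_type))
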
