This repository has been archived by the owner on Jun 15, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 23
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
add AP calculation for bboxes / modify mask visualization
- Loading branch information
1 parent
0d5d992
commit 050a0d9
Showing
7 changed files
with
441 additions
and
7 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,2 +1,3 @@ | ||
__pycache* | ||
results | ||
result | ||
*.png |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,118 @@ | ||
import copy | ||
import numpy as np | ||
|
||
from chainer import reporter | ||
import chainer.training.extensions | ||
|
||
from utils import eval_detection_coco | ||
from chainercv.utils import apply_prediction_to_iterator | ||
|
||
|
||
class DetectionCOCOEvaluator(chainer.training.extensions.Evaluator):

    """An extension that evaluates a detection model by the COCO metric.

    This extension iterates over an iterator and evaluates the prediction
    results by average precisions (APs) and their mean
    (mean Average Precision, mAP).

    This extension reports the following values with keys.
    Please note that :obj:`'ap/<label_names[l]>'` is reported only if
    :obj:`label_names` is specified.

    * :obj:`'map'`: Mean of average precisions (mAP).
    * :obj:`'ap/<label_names[l]>'`: Average precision for class
      :obj:`label_names[l]`, where :math:`l` is the index of the class.
      For example, this evaluator reports :obj:`'ap/person'`,
      :obj:`'ap/bicycle'`, etc. with the default COCO label set.
      If there is no bounding box assigned to class :obj:`label_names[l]`
      in either ground truth or prediction, it reports :obj:`numpy.nan` as
      its average precision. In this case, mAP is computed without this
      class.

    Args:
        iterator (chainer.Iterator): An iterator. Each sample should be
            following tuple :obj:`img, bbox, label` or
            :obj:`img, bbox, label, difficult`.
            :obj:`img` is an image, :obj:`bbox` is coordinates of bounding
            boxes, :obj:`label` is labels of the bounding boxes and
            :obj:`difficult` is whether the bounding boxes are difficult or
            not. If :obj:`difficult` is returned, difficult ground truth
            will be ignored from evaluation.
        target (chainer.Link): A detection link. This link must have
            :meth:`predict` method that takes a list of images and returns
            :obj:`bboxes`, :obj:`labels` and :obj:`scores`.
        use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation
            metric (11-point interpolated AP) for calculating average
            precision. The default value is :obj:`False`.
        label_names (iterable of strings): An iterable of names of classes.
            If :obj:`None` (the default), the built-in 91-entry COCO label
            set (index 0 is ``'background'``) is used. Average precision
            for each class is reported with the key
            :obj:`'ap/<label_names[l]>'`.
    """

    trigger = 1, 'epoch'
    default_name = 'validation'
    priority = chainer.training.PRIORITY_WRITER

    # Default 91-entry COCO label set; index 0 is the background class.
    # Indices must line up with the label ids produced by the dataset.
    _coco_label_names = (
        'background',  # class zero
        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
        'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
        'street sign', 'stop sign', 'parking meter', 'bench', 'bird',
        'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
        'zebra', 'giraffe', 'hat', 'backpack', 'umbrella', 'shoe',
        'eye glasses', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
        'snowboard', 'sports ball', 'kite', 'baseball bat',
        'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
        'bottle', 'plate', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
        'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli',
        'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
        'potted plant', 'bed', 'mirror', 'dining table', 'window', 'desk',
        'toilet', 'door', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
        'cell phone', 'microwave', 'oven', 'toaster', 'sink',
        'refrigerator', 'blender', 'book', 'clock', 'vase', 'scissors',
        'teddy bear', 'hair drier', 'toothbrush')

    def __init__(
            self, iterator, target, use_07_metric=False, label_names=None):
        super(DetectionCOCOEvaluator, self).__init__(
            iterator, target)
        self.use_07_metric = use_07_metric
        # Bug fix: the original accepted ``label_names`` but always
        # overwrote it with the hard-coded COCO list. Honor the caller's
        # value when one is given; fall back to the COCO set otherwise.
        if label_names is None:
            label_names = self._coco_label_names
        self.label_names = list(label_names)

    def evaluate(self):
        """Run the target on the validation iterator and report AP/mAP.

        Returns:
            dict: An observation mapping ``'<default_name>/map'`` and
            per-class ``'<default_name>/ap/<label>'`` keys to floats
            (``numpy.nan`` for classes absent from both ground truth and
            predictions).
        """
        iterator = self._iterators['main']
        target = self._targets['main']

        # Reset a resettable iterator in place; otherwise shallow-copy so
        # the original iterator's position is not consumed by evaluation.
        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_prediction_to_iterator(
            target.predict, it)
        # delete unused iterators explicitly (frees the input images)
        del in_values

        # predict() yields (bboxes, masks?, labels, scores, extra?); only
        # the bbox results are evaluated here.
        pred_bboxes, _, pred_labels, pred_scores, _ = out_values

        if len(rest_values) == 3:
            gt_bboxes, gt_labels, gt_difficults = rest_values
        elif len(rest_values) == 2:
            gt_bboxes, gt_labels = rest_values
            gt_difficults = None
        elif len(rest_values) == 4:
            gt_bboxes, gt_labels, _, _ = rest_values
            gt_difficults = None
        else:
            # Bug fix: previously an unexpected tuple length left
            # gt_bboxes/gt_labels/gt_difficults undefined, raising a
            # confusing NameError below. Fail fast with a clear message.
            raise ValueError(
                'iterator must yield 2, 3 or 4 ground-truth values per '
                'sample, got {}'.format(len(rest_values)))

        result = eval_detection_coco.eval_detection_coco(
            pred_bboxes, pred_labels, pred_scores,
            gt_bboxes, gt_labels, gt_difficults,
            use_07_metric=self.use_07_metric)

        report = {'map': result['map']}

        if self.label_names is not None:
            for l, label_name in enumerate(self.label_names):
                try:
                    report['ap/{:s}'.format(label_name)] = result['ap'][l]
                except IndexError:
                    # More labels than evaluated classes: report NaN.
                    report['ap/{:s}'.format(label_name)] = np.nan
        # Removed leftover debug code (``if True: print(report)``); values
        # are reported through the chainer reporter below instead.

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
Oops, something went wrong.