Commit 050a0d9
(This repository was archived by the owner on Jun 15, 2022 and is now read-only.)

add AP calculation for bboxes / modify mask visualization
hirotomusiker committed Apr 10, 2018
1 parent 0d5d992 commit 050a0d9
Showing 7 changed files with 441 additions and 7 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -1,2 +1,3 @@
 __pycache*
-results
+result
+*.png
3 changes: 2 additions & 1 deletion README.md
@@ -24,7 +24,8 @@ $ pip install cupy
 
 
 ## TODOs
-- [ ] Precision Evaluator
+- [x] Precision Evaluator (bbox, VOC metric)
+- [ ] Precision Evaluator (bbox and mask, COCO metric)
 - [ ] Feature Pyramid Network
 - [ ] Pose Estimation
 
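The "VOC metric" ticked off above refers to VOC-style average precision. As a minimal illustrative sketch of the two AP variants that a `use_07_metric` flag conventionally selects (this is not the repository's own implementation; `prec` and `rec` are precision and recall arrays over detections sorted by descending score):

import numpy as np

def voc_ap(prec, rec, use_07_metric=False):
    """VOC-style AP from precision/recall arrays (illustrative sketch)."""
    prec, rec = np.asarray(prec), np.asarray(rec)
    if use_07_metric:
        # VOC2007: mean of the max precision at 11 recall levels 0.0 ... 1.0.
        ap = 0.0
        for t in np.arange(0.0, 1.1, 0.1):
            p = np.max(prec[rec >= t]) if np.any(rec >= t) else 0.0
            ap += p / 11.0
        return ap
    # Later variant: exact area under the monotone precision envelope.
    mrec = np.concatenate(([0.0], rec, [1.0]))
    mpre = np.concatenate(([0.0], prec, [0.0]))
    mpre = np.maximum.accumulate(mpre[::-1])[::-1]
    i = np.where(mrec[1:] != mrec[:-1])[0]
    return float(np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]))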
4 changes: 2 additions & 2 deletions demo.py
@@ -4,7 +4,7 @@
 
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', type=int, default=-1)
+    parser.add_argument('--gpu', type=int, default=0)
     parser.add_argument('--modelfile')
     parser.add_argument('--image', type=str)
     parser.add_argument('--roi_size', '-r', type=int, default=7, help='ROI size for mask head input')
@@ -49,7 +49,7 @@ def main():
     )
     vis_bbox(
         img, roi, roi, label=label, score=score, mask=mask, label_names=coco_label_names, contour=args.contour, labeldisplay=True)
-    #plot.show()
+    plot.show()
     filename = "output.png"
     plot.savefig(filename)

5 changes: 4 additions & 1 deletion train.py
@@ -14,6 +14,7 @@
 from mask_rcnn_resnet import MaskRCNNResNet
 from coco_dataset import COCODataset
 from mask_rcnn_train_chain import MaskRCNNTrainChain
+from utils.detection_coco_evaluator import DetectionCOCOEvaluator
 import logging
 import traceback
 from utils.updater import SubDivisionUpdater
@@ -136,7 +137,8 @@ def main():
     plot_interval = 160, 'iteration'
     print_interval = 40, 'iteration'
 
-    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu), trigger=(args.validation, 'iteration'))
+    #trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu), trigger=(args.validation, 'iteration'))
+    trainer.extend(DetectionCOCOEvaluator(test_iter, model.mask_rcnn), trigger=(args.validation, 'iteration'))  # COCO AP Evaluator with VOC metric
     trainer.extend(chainer.training.extensions.observe_lr(),
                    trigger=log_interval)
     trainer.extend(extensions.LogReport(trigger=log_interval))
@@ -150,6 +152,7 @@
         'main/rpn_loc_loss',
         'main/rpn_cls_loss',
         'validation/main/loss',
+        'validation/main/map',
     ]), trigger=print_interval)
     trainer.extend(extensions.ProgressBar(update_interval=1000))
     #trainer.extend(extensions.dump_graph('main/loss'))
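For orientation, a minimal sketch of how an Evaluator subclass such as the new DetectionCOCOEvaluator (added below) plugs into a Chainer trainer. The names `updater`, `test_iter`, `model`, and `args`, and the stop trigger, stand in for objects train.py builds itself, so this is an assumption-laden outline rather than the actual script:

import chainer
from chainer.training import extensions

# An Evaluator subclass is an ordinary trainer extension: each time its
# trigger fires, evaluate() runs over the held-out iterator and its
# reported values land under the 'validation/' prefix.
trainer = chainer.training.Trainer(updater, (200000, 'iteration'), out='result')
trainer.extend(DetectionCOCOEvaluator(test_iter, model.mask_rcnn),
               trigger=(args.validation, 'iteration'))
# Keys reported by the evaluator (e.g. 'validation/main/map') can then be
# consumed by the reporting extensions:
trainer.extend(extensions.LogReport(trigger=(40, 'iteration')))
trainer.extend(extensions.PrintReport(
    ['iteration', 'main/loss', 'validation/main/map']),
    trigger=(40, 'iteration'))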
118 changes: 118 additions & 0 deletions utils/detection_coco_evaluator.py
@@ -0,0 +1,118 @@
+import copy
+import numpy as np
+
+from chainer import reporter
+import chainer.training.extensions
+
+from utils import eval_detection_coco
+from chainercv.utils import apply_prediction_to_iterator
+
+
+class DetectionCOCOEvaluator(chainer.training.extensions.Evaluator):
+
+    """An extension that evaluates a detection model on COCO data with the
+    PASCAL VOC metric.
+    This extension iterates over an iterator and evaluates the prediction
+    results by average precisions (APs) and their mean
+    (mean Average Precision, mAP).
+    This extension reports the following values with keys.
+    Note that :obj:`'ap/<label_names[l]>'` is reported only if
+    :obj:`label_names` is specified.
+    * :obj:`'map'`: Mean of average precisions (mAP).
+    * :obj:`'ap/<label_names[l]>'`: Average precision for class \
+        :obj:`label_names[l]`, where :math:`l` is the index of the class. \
+        For example, this evaluator reports :obj:`'ap/aeroplane'`, \
+        :obj:`'ap/bicycle'`, etc. if :obj:`label_names` is \
+        :obj:`~chainercv.datasets.voc_bbox_label_names`. \
+        If there is no bounding box assigned to class :obj:`label_names[l]` \
+        in either the ground truth or the predictions, it reports \
+        :obj:`numpy.nan` as its average precision. \
+        In this case, mAP is computed without this class.
+    Args:
+        iterator (chainer.Iterator): An iterator. Each sample should be
+            a tuple :obj:`img, bbox, label` or
+            :obj:`img, bbox, label, difficult`.
+            :obj:`img` is an image, :obj:`bbox` is the coordinates of the
+            bounding boxes, :obj:`label` is the labels of the bounding boxes
+            and :obj:`difficult` indicates whether the bounding boxes are
+            difficult or not. If :obj:`difficult` is returned, difficult
+            ground truth is ignored during evaluation.
+        target (chainer.Link): A detection link. This link must have a
+            :meth:`predict` method that takes a list of images and returns
+            :obj:`bboxes`, :obj:`labels` and :obj:`scores`.
+        use_07_metric (bool): Whether to use the PASCAL VOC 2007 evaluation
+            metric for calculating average precision. The default value is
+            :obj:`False`.
+        label_names (iterable of strings): An iterable of names of classes.
+            If this value is specified, the average precision for each class
+            is also reported with the key :obj:`'ap/<label_names[l]>'`.
+    """
+
+    trigger = 1, 'epoch'
+    default_name = 'validation'
+    priority = chainer.training.PRIORITY_WRITER
+
+    def __init__(
+            self, iterator, target, use_07_metric=False, label_names=None):
+        super(DetectionCOCOEvaluator, self).__init__(
+            iterator, target)
+        self.use_07_metric = use_07_metric
+        # The COCO category names are hard-coded below (class zero is
+        # 'background'); the `label_names` argument is currently unused.
+        self.label_names = [
+            'background',
+            'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+            'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
+            'street sign', 'stop sign', 'parking meter', 'bench', 'bird',
+            'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
+            'zebra', 'giraffe', 'hat', 'backpack', 'umbrella', 'shoe',
+            'eye glasses', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
+            'snowboard', 'sports ball', 'kite', 'baseball bat',
+            'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
+            'bottle', 'plate', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+            'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli',
+            'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+            'potted plant', 'bed', 'mirror', 'dining table', 'window',
+            'desk', 'toilet', 'door', 'tv', 'laptop', 'mouse', 'remote',
+            'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
+            'sink', 'refrigerator', 'blender', 'book', 'clock', 'vase',
+            'scissors', 'teddy bear', 'hair drier', 'toothbrush']
+
+    def evaluate(self):
+        iterator = self._iterators['main']
+        target = self._targets['main']
+
+        if hasattr(iterator, 'reset'):
+            iterator.reset()
+            it = iterator
+        else:
+            it = copy.copy(iterator)
+
+        in_values, out_values, rest_values = apply_prediction_to_iterator(
+            target.predict, it)
+        # delete the unused input images explicitly to free memory
+        del in_values
+
+        # predict() returns five values; only bboxes, labels and scores
+        # are needed for bbox AP.
+        pred_bboxes, _, pred_labels, pred_scores, _ = out_values
+
+        # The ground-truth tuples may or may not carry `difficult` flags.
+        if len(rest_values) == 3:
+            gt_bboxes, gt_labels, gt_difficults = rest_values
+        elif len(rest_values) == 2:
+            gt_bboxes, gt_labels = rest_values
+            gt_difficults = None
+        elif len(rest_values) == 4:
+            gt_bboxes, gt_labels, _, _ = rest_values
+            gt_difficults = None
+
+        result = eval_detection_coco.eval_detection_coco(
+            pred_bboxes, pred_labels, pred_scores,
+            gt_bboxes, gt_labels, gt_difficults,
+            use_07_metric=self.use_07_metric)
+
+        report = {'map': result['map']}
+
+        if self.label_names is not None:
+            for l, label_name in enumerate(self.label_names):
+                try:
+                    report['ap/{:s}'.format(label_name)] = result['ap'][l]
+                except IndexError:
+                    report['ap/{:s}'.format(label_name)] = np.nan
+        print(report)  # debug output of the full per-class report
+
+        observation = {}
+        with reporter.report_scope(observation):
+            reporter.report(report, target)
+        return observation
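A usage sketch for the scoring entry point called above, assuming it follows the chainercv convention of per-image float32 bbox arrays in (y_min, x_min, y_max, x_max) order; the toy boxes and the class index are invented for illustration, and the result keys are taken from evaluate() above:

import numpy as np
from utils import eval_detection_coco

# One image: a single 'person' prediction against a single ground-truth box.
pred_bboxes = [np.array([[10., 10., 50., 50.]], dtype=np.float32)]
pred_labels = [np.array([1], dtype=np.int32)]   # 1 = 'person' in the list above
pred_scores = [np.array([0.9], dtype=np.float32)]
gt_bboxes = [np.array([[12., 8., 48., 52.]], dtype=np.float32)]
gt_labels = [np.array([1], dtype=np.int32)]

result = eval_detection_coco.eval_detection_coco(
    pred_bboxes, pred_labels, pred_scores,
    gt_bboxes, gt_labels, None,          # no 'difficult' flags
    use_07_metric=False)
print(result['map'], result['ap'][1])    # mAP and the per-class AP for class 1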
(The remaining 2 changed files did not load in this view.)
