Skip to content
This repository has been archived by the owner on Jul 2, 2021. It is now read-only.

add arg --dataset in examples demo #877

Merged
merged 12 commits into from
May 29, 2019
4 changes: 2 additions & 2 deletions examples/deeplab/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,9 @@ So we evaluated the pretrained graph using `eval_semantic_segmentation` in Chain
Although we could generate a graph that accepts the biggest image as input, it resulted in 40.13% with the official evaluation code.

## Demo
This demo downloads Cityscapes pretrained model automatically if a pretrained model path is not given.
This demo downloads a pretrained model automatically if a pretrained model path is not given.
```
$ python demo.py [--gpu <gpu>] [--pretrained-model <model_path>] [--input-size <size>] <image>.jpg
$ python demo.py [--dataset cityscapes|ade20k|voc] [--gpu <gpu>] [--pretrained-model <model_path>] [--min-input-size <size>] <image>.jpg
```


Expand Down
28 changes: 25 additions & 3 deletions examples/deeplab/demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,10 @@

import chainer

from chainercv.datasets import ade20k_semantic_segmentation_label_colors
from chainercv.datasets import ade20k_semantic_segmentation_label_names
from chainercv.datasets import cityscapes_semantic_segmentation_label_colors
from chainercv.datasets import cityscapes_semantic_segmentation_label_names
from chainercv.datasets import voc_semantic_segmentation_label_colors
from chainercv.datasets import voc_semantic_segmentation_label_names
from chainercv.links import DeepLabV3plusXception65
Expand All @@ -15,11 +19,30 @@
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model', default='cityscapes')
parser.add_argument('--pretrained-model')
parser.add_argument('--min-input-size', type=int, default=None)
parser.add_argument(
'--dataset', choices=('cityscapes', 'ade20k', 'voc'),
default='cityscapes')
parser.add_argument('image')
args = parser.parse_args()

if args.dataset == 'cityscapes':
if args.pretrained_model is None:
args.pretrained_model = 'cityscapes'
label_names = cityscapes_semantic_segmentation_label_names
colors = cityscapes_semantic_segmentation_label_colors
elif args.dataset == 'ade20k':
if args.pretrained_model is None:
args.pretrained_model = 'ade20k'
label_names = ade20k_semantic_segmentation_label_names
colors = ade20k_semantic_segmentation_label_colors
elif args.dataset == 'voc':
if args.pretrained_model is None:
args.pretrained_model = 'voc'
label_names = voc_semantic_segmentation_label_names
colors = voc_semantic_segmentation_label_colors

model = DeepLabV3plusXception65(
pretrained_model=args.pretrained_model,
min_input_size=args.min_input_size)
Expand All @@ -38,8 +61,7 @@ def main():
ax2 = fig.add_subplot(1, 2, 2)
# Do not overlay the label image on the color image
vis_semantic_segmentation(
None, label, voc_semantic_segmentation_label_names,
voc_semantic_segmentation_label_colors, ax=ax2)
None, label, label_names, colors, ax=ax2)
plt.show()


Expand Down
13 changes: 10 additions & 3 deletions examples/faster_rcnn/demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,19 @@
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model', default='voc07')
parser.add_argument('--pretrained-model')
parser.add_argument(
'--dataset', choices=('voc',), default='voc')
parser.add_argument('image')
args = parser.parse_args()

if args.dataset == 'voc':
if args.pretrained_model is None:
args.pretrained_model = 'voc07'
label_names = voc_bbox_label_names

model = FasterRCNNVGG16(
n_fg_class=len(voc_bbox_label_names),
n_fg_class=len(label_names),
pretrained_model=args.pretrained_model)

if args.gpu >= 0:
Expand All @@ -29,7 +36,7 @@ def main():
bbox, label, score = bboxes[0], labels[0], scores[0]

vis_bbox(
img, bbox, label, score, label_names=voc_bbox_label_names)
img, bbox, label, score, label_names=label_names)
plt.show()


Expand Down
2 changes: 1 addition & 1 deletion examples/fcis/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
Segment objects in a given image. This demo downloads an SBD pretrained model automatically if a pretrained model path is not given.

```bash
python demo.py [--gpu <gpu>] [--pretrained-model <model_path>] [--dataset <sbd, coco>] <image.jpg>
python demo.py [--dataset sbd|coco] [--gpu <gpu>] [--pretrained-model <model_path>] <image.jpg>
```

## Evaluation
Expand Down
36 changes: 20 additions & 16 deletions examples/fpn/demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,30 +22,35 @@ def main():
'mask_rcnn_fpn_resnet50', 'mask_rcnn_fpn_resnet101'),
default='faster_rcnn_fpn_resnet50')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model', default='coco')
parser.add_argument('--pretrained-model')
parser.add_argument(
'--dataset', choices=('coco',), default='coco')
parser.add_argument('image')
args = parser.parse_args()

if args.model == 'faster_rcnn_fpn_resnet50':
mode = 'bbox'
model = FasterRCNNFPNResNet50(
n_fg_class=len(coco_bbox_label_names),
pretrained_model=args.pretrained_model)
cls = FasterRCNNFPNResNet50
elif args.model == 'faster_rcnn_fpn_resnet101':
mode = 'bbox'
model = FasterRCNNFPNResNet101(
n_fg_class=len(coco_bbox_label_names),
pretrained_model=args.pretrained_model)
cls = FasterRCNNFPNResNet101
elif args.model == 'mask_rcnn_fpn_resnet50':
mode = 'instance_segmentation'
model = MaskRCNNFPNResNet50(
n_fg_class=len(coco_instance_segmentation_label_names),
pretrained_model=args.pretrained_model)
cls = MaskRCNNFPNResNet50
elif args.model == 'mask_rcnn_fpn_resnet101':
mode = 'instance_segmentation'
model = MaskRCNNFPNResNet101(
n_fg_class=len(coco_instance_segmentation_label_names),
pretrained_model=args.pretrained_model)
cls = MaskRCNNFPNResNet101

if args.dataset == 'coco':
if args.pretrained_model is None:
args.pretrained_model = 'coco'
if mode == 'bbox':
label_names = coco_bbox_label_names
elif mode == 'instance_segmentation':
label_names = coco_instance_segmentation_label_names

model = cls(n_fg_class=len(label_names),
pretrained_model=args.pretrained_model)

if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
Expand All @@ -60,15 +65,14 @@ def main():
score = scores[0]

vis_bbox(
img, bbox, label, score, label_names=coco_bbox_label_names)
img, bbox, label, score, label_names=label_names)
elif mode == 'instance_segmentation':
masks, labels, scores = model.predict([img])
mask = masks[0]
label = labels[0]
score = scores[0]
vis_instance_segmentation(
img, mask, label, score,
label_names=coco_instance_segmentation_label_names)
img, mask, label, score, label_names=label_names)
plt.show()


Expand Down
2 changes: 1 addition & 1 deletion examples/pspnet/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
## Demo
This demo downloads a pretrained model automatically if a pretrained model path is not given.
```
$ python demo.py [--gpu <gpu>] [--pretrained-model <model_path>] [--input-size <size>] <image>.jpg
$ python demo.py [--dataset cityscapes|ade20k] [--gpu <gpu>] [--pretrained-model <model_path>] [--input-size <size>] <image>.jpg
```

## Weight Conversion
Expand Down
21 changes: 17 additions & 4 deletions examples/pspnet/demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@
import matplotlib.pyplot as plt

import chainer
from chainercv.datasets import ade20k_semantic_segmentation_label_colors
from chainercv.datasets import ade20k_semantic_segmentation_label_names
from chainercv.datasets import cityscapes_semantic_segmentation_label_colors
from chainercv.datasets import cityscapes_semantic_segmentation_label_names
from chainercv.experimental.links import PSPNetResNet101
Expand All @@ -16,15 +18,26 @@ def main():
parser.add_argument('--gpu', '-g', type=int, default=-1)
parser.add_argument('--pretrained-model')
parser.add_argument('--input-size', type=int, default=713)
parser.add_argument(
'--dataset', choices=('cityscapes', 'ade20k'), default='cityscapes')
parser.add_argument('image')
args = parser.parse_args()

label_names = cityscapes_semantic_segmentation_label_names
colors = cityscapes_semantic_segmentation_label_colors
n_class = len(label_names)
if args.dataset == 'cityscapes':
if args.pretrained_model is None:
args.pretrained_model = 'cityscapes'
label_names = cityscapes_semantic_segmentation_label_names
colors = cityscapes_semantic_segmentation_label_colors
elif args.dataset == 'ade20k':
if args.pretrained_model is None:
args.pretrained_model = 'ade20k'
label_names = ade20k_semantic_segmentation_label_names
colors = ade20k_semantic_segmentation_label_colors

input_size = (args.input_size, args.input_size)
model = PSPNetResNet101(n_class, args.pretrained_model, input_size)
model = PSPNetResNet101(
n_class=len(label_names),
pretrained_model=args.pretrained_model, input_size=input_size)

if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
Expand Down
14 changes: 10 additions & 4 deletions examples/segnet/demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,12 +16,19 @@ def main():

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model', default='camvid')
parser.add_argument('--pretrained-model')
parser.add_argument('--dataset', choices=('camvid',), default='camvid')
parser.add_argument('image')
args = parser.parse_args()

if args.dataset == 'camvid':
if args.pretrained_model is None:
args.pretrained_model = 'camvid'
label_names = camvid_label_names
colors = camvid_label_colors

model = SegNetBasic(
n_class=len(camvid_label_names),
n_class=len(label_names),
pretrained_model=args.pretrained_model)

if args.gpu >= 0:
Expand All @@ -37,8 +44,7 @@ def main():
vis_image(img, ax=ax1)
ax2 = fig.add_subplot(1, 2, 2)
# Do not overlay the label image on the color image
vis_semantic_segmentation(
None, label, camvid_label_names, camvid_label_colors, ax=ax2)
vis_semantic_segmentation(None, label, label_names, colors, ax=ax2)
plt.show()


Expand Down
22 changes: 14 additions & 8 deletions examples/ssd/demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,18 +15,24 @@ def main():
parser.add_argument(
'--model', choices=('ssd300', 'ssd512'), default='ssd300')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model', default='voc0712')
parser.add_argument('--pretrained-model')
parser.add_argument(
'--dataset', choices=('voc',), default='voc')
parser.add_argument('image')
args = parser.parse_args()

if args.model == 'ssd300':
model = SSD300(
n_fg_class=len(voc_bbox_label_names),
pretrained_model=args.pretrained_model)
cls = SSD300
elif args.model == 'ssd512':
model = SSD512(
n_fg_class=len(voc_bbox_label_names),
pretrained_model=args.pretrained_model)
cls = SSD512

if args.dataset == 'voc':
if args.pretrained_model is None:
args.pretrained_model = 'voc0712'
label_names = voc_bbox_label_names

model = cls(n_fg_class=len(label_names),
pretrained_model=args.pretrained_model)

if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
Expand All @@ -37,7 +43,7 @@ def main():
bbox, label, score = bboxes[0], labels[0], scores[0]

vis_bbox(
img, bbox, label, score, label_names=voc_bbox_label_names)
img, bbox, label, score, label_names=label_names)
plt.show()


Expand Down
26 changes: 15 additions & 11 deletions examples/yolo/demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,22 +17,26 @@ def main():
'--model', choices=('yolo_v2', 'yolo_v2_tiny', 'yolo_v3'),
default='yolo_v2')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model', default='voc0712')
parser.add_argument('--pretrained-model')
parser.add_argument(
'--dataset', choices=('voc',), default='voc')
parser.add_argument('image')
args = parser.parse_args()

if args.model == 'yolo_v2':
model = YOLOv2(
n_fg_class=len(voc_bbox_label_names),
pretrained_model=args.pretrained_model)
cls = YOLOv2
elif args.model == 'yolo_v2_tiny':
model = YOLOv2Tiny(
n_fg_class=len(voc_bbox_label_names),
pretrained_model=args.pretrained_model)
cls = YOLOv2Tiny
elif args.model == 'yolo_v3':
model = YOLOv3(
n_fg_class=len(voc_bbox_label_names),
pretrained_model=args.pretrained_model)
cls = YOLOv3

if args.dataset == 'voc':
if args.pretrained_model is None:
args.pretrained_model = 'voc0712'
label_names = voc_bbox_label_names

model = cls(n_fg_class=len(label_names),
pretrained_model=args.pretrained_model)

if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
Expand All @@ -43,7 +47,7 @@ def main():
bbox, label, score = bboxes[0], labels[0], scores[0]

vis_bbox(
img, bbox, label, score, label_names=voc_bbox_label_names)
img, bbox, label, score, label_names=label_names)
plt.show()


Expand Down