
Commit 9f21b47
Merge pull request #772 from Hakuyume/misc-fix
misc fixes
yuyu2172 committed Feb 8, 2019
2 parents 1fc9ba8 + ba2ce6b commit 9f21b47
Showing 15 changed files with 104 additions and 72 deletions.
chainercv/datasets/ade20k/ade20k_utils.py (12 changes: 8 additions & 4 deletions)
@@ -1,6 +1,7 @@
 # The values used here are copied from CSAILVision/sceneparsing:
 # https://github.com/CSAILVision/sceneparsing
 
+import filelock
 import os
 
 from chainer.dataset import download
@@ -9,10 +10,13 @@
 
 def get_ade20k(root, url):
     data_root = download.get_dataset_directory(root)
-    if os.path.exists(os.path.join(data_root, 'ADEChallengeData2016')):
-        return data_root
-    cache_path = utils.cached_download(url)
-    utils.extractall(cache_path, data_root, os.path.splitext(url)[1])
+    # To support ChainerMN, target directory should be locked
+    # before extracting ADE20K.
+    with filelock.FileLock(os.path.join(data_root, 'lock')):
+        if os.path.exists(os.path.join(data_root, 'ADEChallengeData2016')):
+            return data_root
+        cache_path = utils.cached_download(url)
+        utils.extractall(cache_path, data_root, os.path.splitext(url)[1])
     return data_root


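The locking pattern above is what makes the download idempotent under ChainerMN: every worker process blocks on the same lock file, the first one extracts the archive, and the rest then hit the early-return branch. A minimal self-contained sketch of the idiom (fetch_once, marker_dir and extract are illustrative names, not ChainerCV API):

    import os

    import filelock


    def fetch_once(data_root, marker_dir, extract):
        # All ranks block on the same lock file; only the first to
        # acquire it runs the non-atomic download-and-extract step.
        os.makedirs(data_root, exist_ok=True)
        with filelock.FileLock(os.path.join(data_root, 'lock')):
            if not os.path.exists(os.path.join(data_root, marker_dir)):
                extract(data_root)
        return data_root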
chainercv/datasets/camvid/camvid_dataset.py (22 changes: 13 additions & 9 deletions)
@@ -2,6 +2,7 @@
 import os
 import shutil
 
+import filelock
 import numpy as np
 
 from chainer.dataset import download
@@ -49,15 +50,18 @@
 
 def get_camvid():
     data_root = download.get_dataset_directory(root)
-    download_file_path = utils.cached_download(url)
-    if len(glob.glob(os.path.join(data_root, '*'))) != 9:
-        utils.extractall(
-            download_file_path, data_root, os.path.splitext(url)[1])
-    data_dir = os.path.join(data_root, 'SegNet-Tutorial-master/CamVid')
-    if os.path.exists(data_dir):
-        for fn in glob.glob(os.path.join(data_dir, '*')):
-            shutil.move(fn, os.path.join(data_root, os.path.basename(fn)))
-        shutil.rmtree(os.path.dirname(data_dir))
+    # To support ChainerMN, target directory should be locked
+    # before extracting CamVid.
+    with filelock.FileLock(os.path.join(data_root, 'lock')):
+        download_file_path = utils.cached_download(url)
+        if len(glob.glob(os.path.join(data_root, '*'))) != 10:
+            utils.extractall(
+                download_file_path, data_root, os.path.splitext(url)[1])
+        data_dir = os.path.join(data_root, 'SegNet-Tutorial-master/CamVid')
+        if os.path.exists(data_dir):
+            for fn in glob.glob(os.path.join(data_dir, '*')):
+                shutil.move(fn, os.path.join(data_root, os.path.basename(fn)))
+            shutil.rmtree(os.path.dirname(data_dir))
     return data_root


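Note the expected entry count in data_root changes from 9 to 10, presumably because the newly created 'lock' file sits in the same directory and is itself matched by glob.glob(os.path.join(data_root, '*')).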
@@ -99,7 +99,7 @@ def __call__(self, imgs, bboxes, labels, scales):
         n = bboxes.shape[0]
         if n != 1:
             raise ValueError('Currently only batch size 1 is supported.')
-        scales = cuda.to_cpu(scales)
+        scales = cuda.to_cpu(scales).tolist()
 
         _, _, H, W = imgs.shape
         img_size = (H, W)
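The added .tolist() matters because cuda.to_cpu returns an ndarray, and indexing an ndarray yields numpy scalars rather than native Python numbers. A quick illustration in plain numpy:

    import numpy as np

    scales = np.array([1.5], dtype=np.float32)
    # Indexing an ndarray yields a numpy scalar, not a Python float.
    assert isinstance(scales[0], np.float32)
    # .tolist() converts every element to a native Python float, which
    # downstream code expecting plain numbers can rely on.
    assert all(isinstance(s, float) for s in scales.tolist())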
chainercv/utils/image/read_image.py (3 changes: 1 addition & 2 deletions)
@@ -68,11 +68,10 @@ def _read_image_pil(path, dtype, color, alpha):
             img = f.convert('RGB')
         else:
             img = f.convert('L')
-        img = np.asarray(img, dtype=dtype)
+        img = np.array(img, dtype=dtype)
         if img.shape[-1] == 4:
             img = _handle_four_channel_image(
                 img, alpha).astype(dtype, copy=False)
-        img.flags.writeable = True
     finally:
         if hasattr(f, 'close'):
             f.close()
chainercv/utils/image/read_label.py (3 changes: 1 addition & 2 deletions)
@@ -24,8 +24,7 @@ def read_label(path, dtype=np.int32):
     f = Image.open(path)
     try:
         img = f.convert('P')
-        img = np.asarray(img, dtype=dtype)
-        img.flags.writeable = True
+        img = np.array(img, dtype=dtype)
     finally:
         if hasattr(f, 'close'):
             f.close()
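Both image-reading changes replace the np.asarray plus writeable-flag combination with a single np.array call. np.asarray avoids a copy where it can, so it may return a read-only array backed by the PIL image, and newer numpy versions warn about or refuse flipping writeable back on for arrays that do not own their data; np.array always copies, so the result is writeable by construction. A sketch:

    import numpy as np
    from PIL import Image

    img = Image.new('RGB', (4, 4))
    # np.asarray may hand back a read-only view of the PIL buffer.
    a = np.asarray(img)
    # np.array makes a fresh copy that owns its data, so no flag
    # manipulation is needed afterwards.
    b = np.array(img)
    assert b.flags.writeable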
chainercv/utils/link.py (2 changes: 1 addition & 1 deletion)
@@ -50,7 +50,7 @@ def prepare_pretrained_model(param, pretrained_model, models, default={}):
             if key not in overwritable \
                     and not param[key] == model_param[key]:
                 raise ValueError(
-                    '{} must be {:d}'.format(key, model_param[key]))
+                    '{} must be {}'.format(key, model_param[key]))
 
     path = download_model(model['url'])
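The {:d} format spec only accepts integers, so whenever the mismatched parameter was not an int (a tuple of channel means, say), building the error message raised a TypeError before the intended ValueError could surface. A small illustration (parameter name and value made up):

    value = (123.15, 115.90, 103.06)  # hypothetical non-integer parameter
    try:
        '{} must be {:d}'.format('mean', value)
    except TypeError:
        pass  # '{:d}' rejects anything that is not an integer
    print('{} must be {}'.format('mean', value))  # works for any value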
examples/classification/train_imagenet_multi.py (27 changes: 18 additions & 9 deletions)
@@ -1,6 +1,7 @@
 from __future__ import division
 import argparse
 import multiprocessing
+import numpy as np
 
 import chainer
 from chainer.datasets import TransformDataset
@@ -122,17 +123,25 @@ def main():
         if isinstance(l, Bottleneck):
             l.conv3.bn.gamma.data[:] = 0
 
+    train_data = DirectoryParsingLabelDataset(args.train)
+    val_data = DirectoryParsingLabelDataset(args.val)
+    train_data = TransformDataset(
+        train_data, TrainTransform(extractor.mean))
+    val_data = TransformDataset(val_data, ValTransform(extractor.mean))
+    print('finished loading dataset')
+
     if comm.rank == 0:
-        train_data = DirectoryParsingLabelDataset(args.train)
-        val_data = DirectoryParsingLabelDataset(args.val)
-        train_data = TransformDataset(
-            train_data, TrainTransform(extractor.mean))
-        val_data = TransformDataset(val_data, ValTransform(extractor.mean))
-        print('finished loading dataset')
+        train_indices = np.arange(len(train_data))
+        val_indices = np.arange(len(val_data))
     else:
-        train_data, val_data = None, None
-    train_data = chainermn.scatter_dataset(train_data, comm, shuffle=True)
-    val_data = chainermn.scatter_dataset(val_data, comm, shuffle=True)
+        train_indices = None
+        val_indices = None
+
+    train_indices = chainermn.scatter_dataset(
+        train_indices, comm, shuffle=True)
+    val_indices = chainermn.scatter_dataset(val_indices, comm, shuffle=True)
+    train_data = train_data.slice[train_indices]
+    val_data = val_data.slice[val_indices]
     train_iter = chainer.iterators.MultiprocessIterator(
         train_data, args.batchsize, n_processes=args.loaderjob)
     val_iter = iterators.MultiprocessIterator(
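The restructuring stops shipping the dataset itself over MPI: previously rank 0 built the datasets and chainermn.scatter_dataset pickled and scattered the actual data objects, whereas now every rank builds its (lazily loading) dataset locally and only an integer index array is scattered, after which each rank takes its shard via the sliceable-dataset .slice accessor. A condensed sketch of the idiom, assuming dataset supports .slice and comm is a ChainerMN communicator (scatter_by_index is a name invented here):

    import chainermn
    import numpy as np


    def scatter_by_index(dataset, comm):
        # Only the cheap index array travels over MPI; scatter_dataset
        # shuffles it on rank 0 and hands each rank its partition.
        if comm.rank == 0:
            indices = np.arange(len(dataset))
        else:
            indices = None
        indices = chainermn.scatter_dataset(indices, comm, shuffle=True)
        return dataset.slice[indices]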
examples/pspnet/train_multi.py (38 changes: 22 additions & 16 deletions)
@@ -219,28 +219,34 @@ def main():
     else:
         n_iter = args.iteration
 
-    if comm.rank == 0:
-        if args.dataset == 'ade20k':
-            train = ADE20KSemanticSegmentationDataset(
-                data_dir=args.data_dir, split='train')
+    if args.dataset == 'ade20k':
+        train = ADE20KSemanticSegmentationDataset(
+            data_dir=args.data_dir, split='train')
+        if comm.rank == 0:
             val = ADE20KSemanticSegmentationDataset(
                 data_dir=args.data_dir, split='val')
-            label_names = ade20k_semantic_segmentation_label_names
-        elif args.dataset == 'cityscapes':
-            train = CityscapesSemanticSegmentationDataset(
-                args.data_dir,
-                label_resolution='fine', split='train')
+        label_names = ade20k_semantic_segmentation_label_names
+    elif args.dataset == 'cityscapes':
+        train = CityscapesSemanticSegmentationDataset(
+            args.data_dir,
+            label_resolution='fine', split='train')
+        if comm.rank == 0:
             val = CityscapesSemanticSegmentationDataset(
                 args.data_dir,
                 label_resolution='fine', split='val')
-            label_names = cityscapes_semantic_segmentation_label_names
-        train = TransformDataset(
-            train,
-            ('img', 'label'),
-            Transform(model.mean, dataset_cfg['input_size']))
+        label_names = cityscapes_semantic_segmentation_label_names
+    train = TransformDataset(
+        train,
+        ('img', 'label'),
+        Transform(model.mean, dataset_cfg['input_size']))
+
+    if comm.rank == 0:
+        indices = np.arange(len(train))
     else:
-        train, val = None, None
-    train = chainermn.scatter_dataset(train, comm, shuffle=True)
+        indices = None
+    indices = chainermn.scatter_dataset(indices, comm, shuffle=True)
+    train = train.slice[indices]
 
     train_iter = chainer.iterators.MultiprocessIterator(
         train, batch_size=args.batch_size, n_processes=2)
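Same scatter-indices idiom as in train_imagenet_multi.py above: train is now built on every rank and only the index array is scattered, while val is constructed on rank 0 alone, evidently the only rank that needs it for evaluation.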
examples/semantic_segmentation/eval_semantic_segmentation.py (7 changes: 6 additions & 1 deletion)
@@ -67,9 +67,14 @@ def main():
     parser.add_argument('--input-size', type=int, default=None)
     args = parser.parse_args()
 
+    if args.input_size is None:
+        input_size = None
+    else:
+        input_size = (args.input_size, args.input_size)
+
     dataset, label_names, model = get_dataset_and_model(
         args.dataset, args.model, args.pretrained_model,
-        (args.input_size, args.input_size))
+        input_size)
 
     if args.gpu >= 0:
         chainer.cuda.get_device_from_id(args.gpu).use()
@@ -38,9 +38,14 @@ def main():
     comm = chainermn.create_communicator()
     device = comm.intra_rank
 
+    if args.input_size is None:
+        input_size = None
+    else:
+        input_size = (args.input_size, args.input_size)
+
     dataset, label_names, model = get_dataset_and_model(
         args.dataset, args.model, args.pretrained_model,
-        (args.input_size, args.input_size))
+        input_size)
     assert len(dataset) % comm.size == 0, \
         "The size of the dataset should be a multiple "\
         "of the number of GPUs"
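Both evaluation scripts (the second hunk is from the multi-GPU variant) gain the same guard. The bug: with --input-size left at its default, the old code forwarded the tuple (None, None), which is not None, so any downstream `input_size is None` check took the wrong branch. In miniature:

    args_input_size = None                    # --input-size omitted
    old = (args_input_size, args_input_size)
    assert old is not None                    # (None, None) defeats `is None`
    new = None if args_input_size is None \
        else (args_input_size, args_input_size)
    assert new is None                        # the fix forwards a real None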
examples_tests/fpn_tests/test_demo.sh (2 changes: 1 addition & 1 deletion)
@@ -1,4 +1,4 @@
-cd examples/ssd
+cd examples/fpn
 curl -L https://cloud.githubusercontent.com/assets/2062128/26187667/9cb236da-3bd5-11e7-8bcf-7dbd4302e2dc.jpg \
   -o sample.jpg

@@ -1,8 +1,8 @@
 cd examples/semantic_segmentation
 
-sed -e 's/CamVidDataset(split='\''test'\'')/CamVidDataset(split='\''test'\'').slice[:20]/' -i eval_semantic_segmentation_multi.py
+sed -e 's/CamVidDataset(split='\''test'\'')/CamVidDataset(split='\''test'\'').slice[:20]/' -i eval_semantic_segmentation.py
 $MPIEXEC $PYTHON eval_semantic_segmentation_multi.py --dataset camvid --model segnet
 
 sed -e 's/label_resolution='\''fine'\'')/label_resolution='\''fine'\'').slice[:20]/' \
-    -i eval_semantic_segmentation_multi.py
+    -i eval_semantic_segmentation.py
 $MPIEXEC $PYTHON eval_semantic_segmentation_multi.py --dataset cityscapes --model pspnet_resnet101
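This CI script patches a .slice[:20] onto the dataset constructors so the smoke test stays fast. The sed targets change because the patched constructor calls evidently live in eval_semantic_segmentation.py, whose get_dataset_and_model both evaluation scripts share, so that file must be edited even though eval_semantic_segmentation_multi.py is what actually runs.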
@@ -53,22 +53,12 @@ def test_predict_gpu(self):
         assert_is_semantic_segmentation_link(self.link, self.n_class)
 
 
-def _create_paramters():
-    params = testing.product({
-        'model': [PSPNetResNet50],
-        'pretrained_model': ['imagenet', 'cityscapes', 'ade20k'],
-        'n_class': [None, 5],
-    })
-    params += testing.product({
-        'model': [PSPNetResNet101],
-        'pretrained_model': ['imagenet', 'cityscapes', 'ade20k'],
-        'n_class': [None, 5, 19],
-    })
-    return params
-
-
-@testing.parameterize(*_create_paramters())
-class TestPSPNetPretrained(unittest.TestCase):
+@testing.parameterize(*testing.product({
+    'model': [PSPNetResNet50, PSPNetResNet101],
+    'pretrained_model': ['cityscapes', 'ade20k', 'imagenet'],
+    'n_class': [None, 19, 150],
+}))
+class TestPSPNetResNetPretrained(unittest.TestCase):
 
     @attr.slow
     def test_pretrained(self):
@@ -77,17 +67,18 @@ def test_pretrained(self):
             'pretrained_model': self.pretrained_model,
         }
 
+        valid = True
         if self.pretrained_model == 'cityscapes':
             valid = self.n_class in {None, 19}
         elif self.pretrained_model == 'ade20k':
             valid = self.n_class in {None, 150}
         elif self.pretrained_model == 'imagenet':
-            valid = self.n_class in {5, 19}
+            valid = self.n_class is not None
 
         if valid:
             self.model(**kwargs)
         else:
             with self.assertRaises(ValueError):
-                PSPNetResNet101(**kwargs)
+                self.model(**kwargs)


testing.run_module(__name__, __file__)
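For reference, chainer.testing.product expands a dict of lists into the full cross product of parameter dicts, so the rewritten decorator generates 2 * 3 * 3 = 18 configurations and the valid logic in test_pretrained decides per configuration whether construction should succeed or raise ValueError. A tiny standalone example:

    from chainer import testing

    params = testing.product({
        'model': ['PSPNetResNet50', 'PSPNetResNet101'],
        'n_class': [None, 19, 150],
    })
    assert len(params) == 6  # one parameter dict per combination
    assert {'model': 'PSPNetResNet50', 'n_class': 19} in params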
@@ -1,11 +1,12 @@
+import numpy as np
 import unittest
 
+import chainer
 from chainer import testing
 from chainer.testing import attr
 
 from chainercv.links import FasterRCNNFPNResNet101
 from chainercv.links import FasterRCNNFPNResNet50
-from chainercv.utils import assert_is_detection_link
 
 
 @testing.parameterize(*testing.product({
@@ -17,15 +18,24 @@ class TestFasterRCNNFPNResNet(unittest.TestCase):
     def setUp(self):
         self.link = self.model(n_fg_class=self.n_fg_class)
 
+    def _check_call(self):
+        imgs = [
+            np.random.uniform(-1, 1, size=(3, 48, 48)).astype(np.float32),
+            np.random.uniform(-1, 1, size=(3, 32, 64)).astype(np.float32),
+        ]
+        x, _ = self.link.prepare(imgs)
+        with chainer.using_config('train', False):
+            self.link(self.link.xp.array(x))
+
     @attr.slow
     def test_call_cpu(self):
-        assert_is_detection_link(self.link, self.n_fg_class)
+        self._check_call()
 
     @attr.gpu
     @attr.slow
     def test_call_gpu(self):
         self.link.to_gpu()
-        assert_is_detection_link(self.link, self.n_fg_class)
+        self._check_call()
 
 
 @testing.parameterize(*testing.product({
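The rewritten tests trade the heavyweight assert_is_detection_link check, which exercises the full predict pipeline, for a plain smoke test: two small dummy images go through the link's own prepare step and a single forward pass under chainer.using_config('train', False).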
tests/links_tests/model_tests/resnet_tests/test_resnet.py (2 changes: 1 addition & 1 deletion)
@@ -81,7 +81,7 @@ def test_pretrained(self):
         }
 
         if self.pretrained_model == 'imagenet':
-            valid = self.n_class in {None, 1000} and self.arch == 'he'
+            valid = self.n_class in {None, 1000}
 
         if valid:
             self.model(**kwargs)
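The dropped `self.arch == 'he'` clause means the test now accepts imagenet-pretrained weights for either ResNet arch variant, presumably because converted weights became available for the 'fb' architecture as well.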
