Commit
make another test class
Hakuyume committed Apr 11, 2018
1 parent cfe96ad commit f3ee52e
Showing 1 changed file with 68 additions and 7 deletions.
75 changes: 68 additions & 7 deletions in tests/links_tests/model_tests/ssd_tests/test_multibox_loss.py
@@ -48,15 +48,15 @@ def setUp(self):
             size=self.gt_mb_labels.shape) > 0.1] = 0

     def _check_forward(
-            self, mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, k, comm=None):
+            self, mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, k):
         if self.variable:
             mb_locs = chainer.Variable(mb_locs)
             mb_confs = chainer.Variable(mb_confs)
             gt_mb_locs = chainer.Variable(gt_mb_locs)
             gt_mb_labels = chainer.Variable(gt_mb_labels)

         loc_loss, conf_loss = multibox_loss(
-            mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, k, comm)
+            mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, k)

         self.assertIsInstance(loc_loss, chainer.Variable)
         self.assertEqual(loc_loss.shape, ())
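The hunk above leaves TestMultiboxLoss exercising multibox_loss without a communicator. As a reference for that single-node call, here is a minimal sketch, assuming the chainercv import path this test file already uses; the shapes and constants are copied from the test's setUp and are not part of the diff:

import numpy as np
from chainercv.links.model.ssd import multibox_loss

# Shapes mirror the test: (batchsize, n_bbox, 4) box offsets and
# (batchsize, n_bbox, n_class) class scores, with integer labels.
mb_locs = np.random.uniform(-10, 10, size=(5, 10, 4)).astype(np.float32)
mb_confs = np.random.uniform(-50, 50, size=(5, 10, 3)).astype(np.float32)
gt_mb_locs = np.random.uniform(-10, 10, size=(5, 10, 4)).astype(np.float32)
gt_mb_labels = np.random.randint(3, size=(5, 10)).astype(np.int32)

# k is the hard negative mining coefficient; both losses come back as
# scalar chainer.Variable objects, which is what the assertions check.
loc_loss, conf_loss = multibox_loss(
    mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, 3)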
@@ -129,20 +129,81 @@ def test_forward_gpu(self):
             cuda.to_gpu(self.gt_mb_locs), cuda.to_gpu(self.gt_mb_labels),
             self.k)

-    @unittest.skipIf(not _chainermn_available, 'ChainerMN is not installed')
+
+@unittest.skipIf(not _chainermn_available, 'ChainerMN is not installed')
+class TestMultiNodeMultiboxLoss(unittest.TestCase):
+
+    k = 3
+    batchsize = 5
+    n_bbox = 10
+    n_class = 3
+
+    def setUp(self):
+        self.comm = create_communicator('naive')
+        batchsize = self.comm.size * self.batchsize
+
+        np.random.seed(0)
+        self.mb_locs = np.random.uniform(
+            -10, 10, size=(batchsize, self.n_bbox, 4)) \
+            .astype(np.float32)
+        self.mb_confs = np.random.uniform(
+            -50, 50, size=(batchsize, self.n_bbox, self.n_class)) \
+            .astype(np.float32)
+        self.gt_mb_locs = np.random.uniform(
+            -10, 10, size=(batchsize, self.n_bbox, 4)) \
+            .astype(np.float32)
+        self.gt_mb_labels = np.random.randint(
+            self.n_class, size=(batchsize, self.n_bbox)) \
+            .astype(np.int32)
+
+        self.mb_locs_local = self.comm.mpi_comm.scatter(
+            self.mb_locs.reshape(
+                (self.comm.size, self.batchsize, self.n_bbox, 4)))
+        self.mb_confs_local = self.comm.mpi_comm.scatter(
+            self.mb_confs.reshape(
+                (self.comm.size, self.batchsize, self.n_bbox, self.n_class)))
+        self.gt_mb_locs_local = self.comm.mpi_comm.scatter(
+            self.gt_mb_locs.reshape(
+                (self.comm.size, self.batchsize, self.n_bbox, 4)))
+        self.gt_mb_labels_local = self.comm.mpi_comm.scatter(
+            self.gt_mb_labels.reshape(
+                (self.comm.size, self.batchsize, self.n_bbox)))
+
+    def _check_forward(
+            self, mb_locs_local, mb_confs_local,
+            gt_mb_locs_local, gt_mb_labels_local, k):
+
+        loc_loss, conf_loss = multibox_loss(
+            self.mb_locs, self.mb_confs, self.gt_mb_locs, self.gt_mb_labels, k)
+
+        loc_loss_local, conf_loss_local = multibox_loss(
+            mb_locs_local, mb_confs_local,
+            gt_mb_locs_local, gt_mb_labels_local, k, self.comm)
+
+        loc_loss_local = cuda.to_cpu(loc_loss_local.array)
+        conf_loss_local = cuda.to_cpu(conf_loss_local.array)
+
+        from mpi4py import MPI
+        self.comm.mpi_comm.Allreduce(MPI.IN_PLACE, loc_loss_local)
+        self.comm.mpi_comm.Allreduce(MPI.IN_PLACE, conf_loss_local)
+
+        np.testing.assert_almost_equal(
+            loc_loss_local, loc_loss.array, decimal=2)
+        np.testing.assert_almost_equal(
+            conf_loss_local, conf_loss.array, decimal=2)

     def test_multi_node_forward_cpu(self):
         self._check_forward(
             self.mb_locs, self.mb_confs,
             self.gt_mb_locs, self.gt_mb_labels,
-            self.k, create_communicator('naive'))
+            self.k)

-    @unittest.skipIf(not _chainermn_available, 'ChainerMN is not installed')
     @attr.gpu
-    def test_multi_node_forward_gpu(self):
+    def test_multi_node__forward_gpu(self):
         self._check_forward(
             cuda.to_gpu(self.mb_locs), cuda.to_gpu(self.mb_confs),
             cuda.to_gpu(self.gt_mb_locs), cuda.to_gpu(self.gt_mb_labels),
-            self.k, create_communicator('naive'))
+            self.k)


 testing.run_module(__name__, __file__)
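The new test class checks that the communicator-aware loss agrees with a single-process run: setUp scatters the global batch across workers, each worker computes its loss with comm, the scalar results are summed with an in-place Allreduce, and the total must match the loss over the whole batch. Below is a minimal numpy sketch of that invariant; scatter and toy_loss are hypothetical stand-ins for comm.mpi_comm.scatter and multibox_loss, and no MPI is involved:

import numpy as np

def scatter(batch, n_workers):
    # Stand-in for comm.mpi_comm.scatter: split the leading axis
    # into one equal chunk per worker, as setUp does via reshape.
    return np.split(batch, n_workers)

def toy_loss(x, n_total):
    # Stand-in for multibox_loss: a sum normalized by the *global*
    # element count, so per-worker losses add up to the global loss.
    return x.sum() / n_total

n_workers, batchsize = 4, 5
global_batch = np.random.uniform(-10, 10, size=(n_workers * batchsize, 3))

# Summing the local losses plays the role of the MPI.IN_PLACE
# Allreduce in _check_forward above.
local_losses = [toy_loss(chunk, global_batch.size)
                for chunk in scatter(global_batch, n_workers)]
np.testing.assert_almost_equal(
    sum(local_losses), toy_loss(global_batch, global_batch.size))

With the 'naive' communicator created in setUp, a plain test run has comm.size == 1, so each *_local array equals its global counterpart and the check degenerates to comparing multibox_loss with itself. Exercising a real multi-worker scatter would mean launching the module under MPI, for example mpiexec -n 2 python tests/links_tests/model_tests/ssd_tests/test_multibox_loss.py.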
