Commit: Apply nose-to-pytest shift to latest master

kuenishi committed Dec 18, 2017
1 parent 6f3641a commit 157a7a5
Showing 1 changed file with 81 additions and 84 deletions.
165 changes: 81 additions & 84 deletions tests/optimizer_tests/test_multi_node_optimizer.py
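Note: the pytest markers used below (`pytest.mark.gpu`, `pytest.mark.nccl`) are custom markers and must be registered somewhere in the test suite. A minimal sketch of such a registration, assuming a hypothetical conftest.py that is not part of this commit:

# conftest.py -- hypothetical sketch, not part of this commit
def pytest_configure(config):
    # Register the custom markers so pytest does not warn about them
    # (and rejects typos under --strict-markers).
    config.addinivalue_line(
        'markers', 'gpu: test requires a CUDA-capable GPU')
    config.addinivalue_line(
        'markers', 'nccl: test requires NCCL')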
@@ -19,35 +19,43 @@ def __init__(self):

class TestMultiNodeOptimizer(unittest.TestCase):

    def setup_target(self):
        self.target.a.W.data[:] = self.comm.rank
        self.target.b.W.data[:] = self.comm.rank + 1
        self.target.c.W.data[:] = self.comm.rank + 2
        self.target.a.W.grad[:] = 0
        self.target.b.W.grad[:] = 0
        self.target.c.W.grad[:] = 0

    def setup_optimizer(self):
        self.actual_optimizer = chainer.GradientMethod()
        self.actual_optimizer.create_update_rule = mock.MagicMock

    def setup_cpu(self):
        self.comm = chainermn.create_communicator('naive')
        self.target = ExampleModel()
        self.setup_target()
        self.setup_optimizer()
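Note: assigning the `mock.MagicMock` class (not an instance) to `create_update_rule` makes the optimizer hand every parameter a fresh `MagicMock` as its update rule. This is what the (elided) body of `check_update` relies on when it asserts that each parameter's `update_rule.update` was invoked exactly once, without running a real optimizer step.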

    def setup_gpu(self, device=None):
        self.comm = chainermn.create_communicator('naive')
        device = self.comm.intra_rank
        chainer.cuda.get_device(device).use()
        self.target = ExampleModel()
        self.target.to_gpu()
        self.setup_target()
        self.setup_optimizer()

    def setup_nccl(self, device=None):
        self.comm = chainermn.create_communicator('hierarchical')
        device = self.comm.intra_rank
        chainer.cuda.get_device(device).use()
        self.target = ExampleModel()
        self.target.to_gpu()
        self.setup_target()
        self.setup_optimizer()
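(The 'naive' communicator is a pure-MPI implementation that works with or without CUDA, while 'hierarchical' relies on NCCL for intra-node communication; that, presumably, is why the NCCL-marked tests get their own setup helper.)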

    def check_update(self):
        self.optimizer = chainermn.create_multi_node_optimizer(
            self.actual_optimizer, self.comm)
        self.optimizer.setup(self.target)
@@ -74,34 +82,19 @@ def test_update_with_cpu(self):
        chainer.testing.assert_allclose(self.optimizer.target.c.W.grad,
                                        (base + 2) * np.ones((5, 4)))
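The asserted values follow from allreduce averaging: each rank sets a parameter's gradient to rank + k, so the averaged gradient is (size - 1) / 2 + k, i.e. the `base + k` seen above. A minimal standalone sketch of that arithmetic (hypothetical helper, assuming the multi-node optimizer averages gradients across ranks):

import numpy as np

def expected_grad(size, k, shape):
    # Mean of {rank + k : rank = 0 .. size - 1} is (size - 1) / 2 + k.
    base = (size - 1.0) / 2
    return (base + k) * np.ones(shape)

# With 4 ranks and k = 2 the averaged gradient is 3.5 everywhere.
assert np.allclose(expected_grad(4, 2, (5, 4)),
                   np.mean([r + 2 for r in range(4)]) * np.ones((5, 4)))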

    def test_update_with_cpu(self):
        self.setup_cpu()
        self.check_update()

    @pytest.mark.gpu
    def test_update_with_gpu(self):
        self.setup_gpu()
        self.check_update()

    @pytest.mark.nccl
    def test_update_with_nccl(self):
        self.setup_nccl()
        self.check_update()
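These tests only exercise multi-node behavior when launched under MPI with more than one process; a typical invocation would be something like `mpiexec -n 2 python -m pytest -m 'not gpu and not nccl' tests/optimizer_tests/test_multi_node_optimizer.py` (hypothetical command line; the exact marker expression and process count depend on the local setup).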


class DynamicExampleModel(chainer.Chain):
@@ -115,41 +108,41 @@ def __init__(self):

class TestMultiNodeOptimizerWithDynamicModel(unittest.TestCase):

    def setup_target(self):
        self.target.a.W.data[:] = self.comm.rank
        self.target.b.W.data[:] = self.comm.rank + 1
        self.target.a.W.grad[:] = 0
        self.target.b.W.grad[:] = 0

    def setup_optimizer(self):
        self.actual_optimizer = chainer.GradientMethod()
        self.actual_optimizer.create_update_rule = mock.MagicMock

    def setup_cpu(self):
        self.comm = chainermn.create_communicator('naive')
        self.target = DynamicExampleModel()
        self.setup_target()
        self.setup_optimizer()

    def setup_gpu(self, device=None):
        self.comm = chainermn.create_communicator('naive')
        device = self.comm.intra_rank
        chainer.cuda.get_device(device).use()
        self.target = DynamicExampleModel()
        self.target.to_gpu()
        self.setup_target()
        self.setup_optimizer()

    def setup_nccl(self, device=None):
        self.comm = chainermn.create_communicator('hierarchical')
        device = self.comm.intra_rank
        chainer.cuda.get_device(device).use()
        self.target = DynamicExampleModel()
        self.target.to_gpu()
        self.setup_target()
        self.setup_optimizer()

    def check_update(self):
        self.optimizer.setup(self.target)
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 0)
@@ -179,7 +172,21 @@ def test_update_with_cpu(self):
        chainer.testing.assert_allclose(self.optimizer.target.c.W.grad,
                                        (base + 2) * np.ones((4, 4)))
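Note that `check_update` here starts by calling `self.optimizer.setup(self.target)` a second time: each test adds a link `c` after the first update, and re-running setup is what registers the new parameter with the optimizer (and, presumably, re-broadcasts it from rank 0). The elided part of `check_update` also verifies that the added weight really is consistent across ranks; that check, as it appeared in the pre-commit test bodies, looks like this:

# Gather the CPU copy of c.W from every rank and confirm that all
# ranks hold identical values after the re-broadcast.
send_buf = chainer.cuda.to_cpu(self.optimizer.target.c.W.data)
recv_buf = self.comm.mpi_comm.allgather(send_buf)
for i in range(1, self.comm.size):
    chainer.testing.assert_allclose(recv_buf[0], recv_buf[i])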

    def test_update_with_cpu(self):
        self.setup_cpu()
        self.optimizer = chainermn.create_multi_node_optimizer(
            self.actual_optimizer, self.comm)
        self.optimizer.setup(self.target)
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 0)

        with self.target.init_scope():
            self.target.c = chainer.links.Linear(4, 4)
        if self.comm.rank == 0:
            self.target.c.W.data[:] = self.comm.rank + 2
        self.check_update()

    @pytest.mark.gpu
    def test_update_with_gpu(self):
        self.setup_gpu()
        self.optimizer = chainermn.create_multi_node_optimizer(
@@ -194,31 +201,21 @@ def test_update_with_gpu(self):
            self.target.c = c
        if self.comm.rank == 0:
            self.target.c.W.data[:] = self.comm.rank + 2
        self.check_update()

    @pytest.mark.nccl
    def test_update_with_nccl(self):
        self.setup_nccl()
        self.optimizer = chainermn.create_multi_node_optimizer(
            self.actual_optimizer, self.comm)
        self.optimizer.setup(self.target)
        self.optimizer.update()
        self.assertEqual(self.actual_optimizer.t, 0)

        with self.target.init_scope():
            c = chainer.links.Linear(4, 4)
            c.to_gpu()
            self.target.c = c
        if self.comm.rank == 0:
            self.target.c.W.data[:] = self.comm.rank + 2
        self.check_update()
