Replace get_device (#231)
* use get_device_from_id
* replace all get_device
shu65 authored and kuenishi committed May 7, 2018
1 parent 0330ffb commit f45b5e3
Showing 17 changed files with 20 additions and 20 deletions.
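
Background (not part of the commit itself): Chainer deprecated passing an integer device ID to chainer.cuda.get_device in favor of the more explicit chainer.cuda.get_device_from_id, and this commit switches every call site to the new name. A minimal sketch of the per-process GPU selection pattern shared by the changed files, assuming chainer with CUDA support and chainermn are installed and the process is launched under MPI:

    # Sketch only, not taken from the diff.
    import chainer
    import chainermn

    comm = chainermn.create_communicator('hierarchical')
    device = comm.intra_rank  # GPU index within the local node

    # Deprecated spelling removed by this commit:
    #   chainer.cuda.get_device(device).use()
    # Replacement used throughout the changed files:
    chainer.cuda.get_device_from_id(device).use()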
docs/source/tutorial/step1_communicators_optimizers.rst: 4 changes (2 additions & 2 deletions)
@@ -25,12 +25,12 @@ Therefore, it is often convenient to use the ``intra_rank``-th GPU.

The following line of code is found in the original MNIST example::

- chainer.cuda.get_device(args.gpu).use()
+ chainer.cuda.get_device_from_id(args.gpu).use()

which we modify as follows::

device = comm.intra_rank
- chainer.cuda.get_device(device).use()
+ chainer.cuda.get_device_from_id(device).use()


Creating a Multi-Node Optimizer
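For orientation (not part of the diff): in the examples touched below, the device chosen this way is then used to move the model to the GPU and a standard Chainer optimizer is wrapped with ChainerMN's multi-node optimizer, as the next tutorial section describes. A rough, self-contained sketch of that surrounding pattern, using a simple linear classifier as a stand-in for the example's MLP:

    # Sketch assuming chainer, chainermn, and CuPy are available; details differ per example.
    import chainer
    import chainer.links as L
    import chainermn

    comm = chainermn.create_communicator('hierarchical')
    device = comm.intra_rank

    model = L.Classifier(L.Linear(784, 10))  # stand-in for the MNIST example's MLP
    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()  # make this rank's GPU current
        model.to_gpu()

    # Wrap a standard optimizer so gradients are aggregated across ranks.
    optimizer = chainermn.create_multi_node_optimizer(chainer.optimizers.Adam(), comm)
    optimizer.setup(model)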
examples/imagenet/train_imagenet.py: 2 changes (1 addition & 1 deletion)
@@ -149,7 +149,7 @@ def main():
print('Load model from', args.initmodel)
chainer.serializers.load_npz(args.initmodel, model)

- chainer.cuda.get_device(device).use()  # Make the GPU current
+ chainer.cuda.get_device_from_id(device).use()  # Make the GPU current
model.to_gpu()

# Split and distribute the dataset. Only worker 0 loads the whole dataset.
examples/mnist/train_mnist.py: 2 changes (1 addition & 1 deletion)
@@ -74,7 +74,7 @@ def main():

model = L.Classifier(MLP(args.unit, 10))
if device >= 0:
- chainer.cuda.get_device(device).use()
+ chainer.cuda.get_device_from_id(device).use()
model.to_gpu()

# Create a multi node optimizer from a standard Chainer optimizer.
examples/mnist/train_mnist_checkpoint.py: 2 changes (1 addition & 1 deletion)
@@ -77,7 +77,7 @@ def main():

model = L.Classifier(MLP(args.unit, 10))
if device >= 0:
- chainer.cuda.get_device(device).use()
+ chainer.cuda.get_device_from_id(device).use()
model.to_gpu()

# Create a multi node optimizer from a standard Chainer optimizer.
examples/mnist/train_mnist_dual_parallel.py: 2 changes (1 addition & 1 deletion)
@@ -110,7 +110,7 @@ def main():
model = MLP1(model_comm, args.unit, 10)

if device >= 0:
- chainer.cuda.get_device(device).use()
+ chainer.cuda.get_device_from_id(device).use()
model.to_gpu()

optimizer = chainermn.create_multi_node_optimizer(
examples/mnist/train_mnist_model_parallel.py: 2 changes (1 addition & 1 deletion)
@@ -103,7 +103,7 @@ def main():
model = MLP1(comm, args.unit, 10)

if device >= 0:
- chainer.cuda.get_device(device).use()
+ chainer.cuda.get_device_from_id(device).use()
model.to_gpu()

optimizer = chainer.optimizers.Adam()
examples/seq2seq/seq2seq.py: 2 changes (1 addition & 1 deletion)
@@ -427,7 +427,7 @@ def main():
model = Seq2seq(3, len(source_ids), len(target_ids), args.unit)

if dev >= 0:
- chainer.cuda.get_device(dev).use()
+ chainer.cuda.get_device_from_id(dev).use()
model.to_gpu(dev)

# determine the stop trigger
examples/seq2seq/seq2seq_mp1.py: 2 changes (1 addition & 1 deletion)
@@ -455,7 +455,7 @@ def main():
comm, n_lstm_layers, len(source_ids), len(target_ids), args.unit)

if dev >= 0:
- chainer.cuda.get_device(dev).use()
+ chainer.cuda.get_device_from_id(dev).use()
model.to_gpu(dev)

# determine the stop trigger
@@ -117,7 +117,7 @@ def create_communicator(param, use_gpu):
communicator = param.communicator_class(mpi_comm)

if use_gpu:
- chainer.cuda.get_device(communicator.intra_rank).use()
+ chainer.cuda.get_device_from_id(communicator.intra_rank).use()

return communicator

tests/chainermn_tests/datasets_tests/test_mnist.py: 2 changes (1 addition & 1 deletion)
@@ -37,7 +37,7 @@ def check_mnist(gpu, display_log=True):
comm = chainermn.create_communicator('naive')
if gpu:
device = comm.intra_rank
- chainer.cuda.get_device(device).use()
+ chainer.cuda.get_device_from_id(device).use()
else:
device = -1

@@ -44,7 +44,7 @@ def test_allreduce_persistent_cpu(self):
def test_allreduce_persistent_gpu(self):
comm = chainermn.create_communicator('hierarchical')
device = comm.intra_rank
- chainer.cuda.get_device(device).use()
+ chainer.cuda.get_device_from_id(device).use()

model = ExampleModel()
model.to_gpu()
@@ -17,7 +17,7 @@ def setup(self, gpu):
if gpu:
self.communicator = chainermn.create_communicator('hierarchical')
self.device = self.communicator.intra_rank
- chainer.cuda.get_device(self.device).use()
+ chainer.cuda.get_device_from_id(self.device).use()
else:
self.communicator = chainermn.create_communicator('naive')
self.device = -1
@@ -19,7 +19,7 @@ def setup(self, gpu):
if self.gpu:
self.communicator = chainermn.create_communicator('hierarchical')
device = self.communicator.intra_rank
- chainer.cuda.get_device(device).use()
+ chainer.cuda.get_device_from_id(device).use()
else:
self.communicator = chainermn.create_communicator('naive')
device = -1
@@ -205,7 +205,7 @@ def __init__(self, comm, size, rank_parent):
def create_communicator(gpu):
if gpu:
communicator = chainermn.create_communicator('hierarchical')
- chainer.cuda.get_device(communicator.intra_rank).use()
+ chainer.cuda.get_device_from_id(communicator.intra_rank).use()
else:
communicator = chainermn.create_communicator('naive')

tests/chainermn_tests/links_tests/test_n_step_rnn.py: 2 changes (1 addition & 1 deletion)
@@ -43,7 +43,7 @@ class TestNStepRNN(unittest.TestCase):
def setup(self, gpu):
if gpu:
self.communicator = chainermn.create_communicator('hierarchical')
- chainer.cuda.get_device(self.communicator.intra_rank).use()
+ chainer.cuda.get_device_from_id(self.communicator.intra_rank).use()
else:
self.communicator = chainermn.create_communicator('naive')

@@ -26,7 +26,7 @@ def setup_gpu(self, device=None):
pytest.skip('This test requires NCCL version >= 2.0')
self.comm = chainermn.create_communicator('pure_nccl')
device = self.comm.intra_rank
- chainer.cuda.get_device(device).use()
+ chainer.cuda.get_device_from_id(device).use()
self.target = ExampleModel()
self.target.to_gpu()
self.target.a.W.data[:] = self.comm.rank
@@ -103,7 +103,7 @@ def setup_gpu(self, device=None):
pytest.skip('This test requires NCCL version >= 2.0')
self.comm = chainermn.create_communicator('pure_nccl')
device = self.comm.intra_rank
- chainer.cuda.get_device(device).use()
+ chainer.cuda.get_device_from_id(device).use()
self.target = DynamicExampleModel()
self.target.to_gpu()
self.target.a.W.data[:] = self.comm.rank
@@ -34,7 +34,7 @@ def setup_cpu(self):
def setup_gpu(self, device=None):
self.comm = chainermn.create_communicator('hierarchical')
device = self.comm.intra_rank
- chainer.cuda.get_device(device).use()
+ chainer.cuda.get_device_from_id(device).use()
self.target = ExampleModel()
self.target.to_gpu()
self.target.a.W.data[:] = self.comm.rank
@@ -128,7 +128,7 @@ def setup_cpu(self):
def setup_gpu(self, device=None):
self.comm = chainermn.create_communicator('hierarchical')
device = self.comm.intra_rank
- chainer.cuda.get_device(device).use()
+ chainer.cuda.get_device_from_id(device).use()
self.target = DynamicExampleModel()
self.target.to_gpu()
self.target.a.W.data[:] = self.comm.rank
