train_mnist_data_parallel.py
#!/usr/bin/env python
from __future__ import print_function
import argparse

import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions

import train_mnist


def main():
    # This script is almost identical to train_mnist.py. The only difference
    # is that this script uses data-parallel computation on two GPUs.
    # See train_mnist.py for more details.
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--batchsize', '-b', type=int, default=400,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu0', '-g', type=int, default=0,
                        help='First GPU ID')
    parser.add_argument('--gpu1', '-G', type=int, default=1,
                        help='Second GPU ID')
    parser.add_argument('--out', '-o', default='result_parallel',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit', '-u', type=int, default=1000,
                        help='Number of hidden units')
    args = parser.parse_args()

    print('GPU: {}, {}'.format(args.gpu0, args.gpu1))
    print('# unit: {}'.format(args.unit))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    model = L.Classifier(train_mnist.MLP(784, args.unit, 10))
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = chainer.datasets.get_mnist()
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
                                                 repeat=False, shuffle=False)
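    # With repeat=False and shuffle=False, the test iterator makes exactly
    # one pass over the test set in a fixed order, which is what the
    # Evaluator extension below expects.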

    # ParallelUpdater implements data-parallel gradient computation on
    # multiple GPUs. It accepts a "devices" argument that specifies which
    # GPUs to use.
    updater = training.ParallelUpdater(
        train_iter,
        optimizer,
        # The device named 'main' is used as the "master", while the others
        # are used as slaves. Names other than 'main' are arbitrary.
        devices={'main': args.gpu0, 'second': args.gpu1},
    )
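    # A rough sketch of what happens on each iteration (per Chainer's
    # documented ParallelUpdater behavior, not code from this example): the
    # optimizer's model lives on the 'main' device and a copy is kept on each
    # other device; every minibatch is split across the devices (400 images
    # -> 200 per GPU with the defaults above); each copy runs
    # forward/backward on its share; gradients are accumulated onto 'main';
    # the optimizer updates the 'main' parameters, which are then copied back
    # to the other devices.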

    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu0))
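    # The Evaluator runs the model over the test set (on gpu0, once per epoch
    # by default) and reports its metrics under the 'validation/' prefix;
    # that is where the 'validation/main/...' keys printed below come from.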
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar())

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()


if __name__ == '__main__':
    main()
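
# Example invocation (assumes two visible CUDA devices; adjust the IDs to
# match your machine):
#
#   python train_mnist_data_parallel.py --gpu0 0 --gpu1 1
#
# To resume training, pass a snapshot written by extensions.snapshot(); by
# default that extension names files 'snapshot_iter_<iteration>' under the
# output directory, so substitute the actual iteration number for <N>:
#
#   python train_mnist_data_parallel.py --resume result_parallel/snapshot_iter_<N>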