#!/usr/bin/env python
"""Fully-connected neural network example using the MNIST dataset.

This code is a custom loop version of train_mnist.py. That is, we train
models without using the Trainer class in chainer and instead write a
training loop that manually computes the loss of minibatches and
applies an optimizer to update the model.

"""
from __future__ import print_function

import argparse

import chainer
from chainer.dataset import convert
import chainer.links as L
from chainer import serializers

import train_mnist


def main():
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--batchsize', '-b', type=int, default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit', '-u', type=int, default=1000,
                        help='Number of units')
    args = parser.parse_args()
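
    # Note: --out and --resume are accepted for command-line parity with
    # train_mnist.py, but this custom loop does not apply them; see the
    # resume sketch near the end of main() for one way --resume could work.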

    print('GPU: {}'.format(args.gpu))
    print('# unit: {}'.format(args.unit))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    model = L.Classifier(train_mnist.MLP(args.unit, 10))
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU
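
    # L.Classifier wraps the MLP so that calling model(x, t) computes the
    # softmax cross-entropy loss and the accuracy, caching them in
    # model.loss and model.accuracy; the loop below reads both attributes.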

    # Setup an optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    # Load the MNIST dataset
    train, test = chainer.datasets.get_mnist()

    train_count = len(train)
    test_count = len(test)

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
                                                 repeat=False, shuffle=False)
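
    # The training iterator repeats and reshuffles the dataset indefinitely,
    # tracking progress via train_iter.epoch and train_iter.is_new_epoch;
    # the test iterator makes a single deterministic pass per evaluation
    # (repeat=False, shuffle=False) and is reset after each one.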

    sum_accuracy = 0
    sum_loss = 0
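
    # This is the part the Trainer class would otherwise do for us:
    # optimizer.update(lossfun, *args) clears the gradients, computes
    # loss = lossfun(*args), backpropagates, and applies an Adam step.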
    while train_iter.epoch < args.epoch:
        batch = train_iter.next()
        x_array, t_array = convert.concat_examples(batch, args.gpu)
        x = chainer.Variable(x_array)
        t = chainer.Variable(t_array)
        optimizer.update(model, x, t)
        sum_loss += float(model.loss.data) * len(t.data)
        sum_accuracy += float(model.accuracy.data) * len(t.data)

        if train_iter.is_new_epoch:
            print('epoch: ', train_iter.epoch)
            print('train mean loss: {}, accuracy: {}'.format(
                sum_loss / train_count, sum_accuracy / train_count))
            # evaluation
            sum_accuracy = 0
            sum_loss = 0
            for batch in test_iter:
                x_array, t_array = convert.concat_examples(batch, args.gpu)
                x = chainer.Variable(x_array)
                t = chainer.Variable(t_array)
                loss = model(x, t)
                sum_loss += float(loss.data) * len(t.data)
                sum_accuracy += float(model.accuracy.data) * len(t.data)

            test_iter.reset()
            print('test mean loss: {}, accuracy: {}'.format(
                sum_loss / test_count, sum_accuracy / test_count))
            sum_accuracy = 0
            sum_loss = 0

    # Save the model and the optimizer
    print('save the model')
    serializers.save_npz('mlp.model', model)
    print('save the optimizer')
    serializers.save_npz('mlp.state', optimizer)
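
    # A minimal resume sketch (not executed by this script): loading the
    # snapshots saved above before the training loop would continue a
    # previous run, e.g.
    #
    #   if args.resume:
    #       serializers.load_npz('mlp.model', model)
    #       serializers.load_npz('mlp.state', optimizer)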


if __name__ == '__main__':
    main()