2_mnist.py
import torch
import torch.nn as nn
import numpy as np
import argparse

from torchvision import datasets, transforms

from exprnn import ExpRNN, get_parameters, orthogonal_step
from initialization import henaff_init, cayley_init
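
# Command-line options for the experiment: sequential vs. permuted MNIST,
# ExpRNN vs. an LSTM baseline, and the choice of orthogonal initializer.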
parser = argparse.ArgumentParser(description='Exponential Layer MNIST Task')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--hidden_size', type=int, default=170)
parser.add_argument('--epochs', type=int, default=70)
parser.add_argument('--lr', type=float, default=7e-4)
parser.add_argument('--lr_orth', type=float, default=7e-5)
parser.add_argument("--permute", action="store_true")
parser.add_argument("-m", "--mode",
                    choices=["exprnn", "lstm"],
                    default="exprnn",
                    type=str)
parser.add_argument("--init",
                    choices=["cayley", "henaff"],
                    default="cayley",
                    type=str)

args = parser.parse_args()
# Fix seed across experiments
# Same seed as that used in "Orthogonal Recurrent Neural Networks with Scaled Cayley Transform"
# https://github.com/SpartinStuff/scoRNN/blob/master/scoRNN_copying.py#L79
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(5544)
np.random.seed(5544)
n_classes = 10
batch_size = args.batch_size
hidden_size = args.hidden_size
epochs = args.epochs
device = torch.device('cuda')
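
# Pick the initializer for the skew-symmetric matrix that parametrises the
# orthogonal recurrent kernel (both come from initialization.py).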
if args.init == "cayley":
    init = cayley_init
elif args.init == "henaff":
    init = henaff_init
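
# Sequential (p)MNIST model: each 28x28 image is flattened to 784 pixels and
# fed to the recurrent cell one scalar at a time; the final hidden state is
# mapped to the 10 class logits by a linear layer.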
class Model(torch.jit.ScriptModule):
    __constants__ = ["permute", "permutation"]

    def __init__(self, hidden_size, permute):
        super(Model, self).__init__()
        self.permute = permute
        # Fixed random pixel permutation for the permuted variant; registered
        # as a buffer so it is stored in the state dict and moved with .to()
        rng = np.random.RandomState(92916)
        self.register_buffer("permutation", torch.LongTensor(rng.permutation(784)))
        if args.mode == "lstm":
            self.rnn = nn.LSTMCell(1, hidden_size)
        else:
            self.rnn = ExpRNN(1, hidden_size, skew_initializer=init)
        self.lin = nn.Linear(hidden_size, n_classes)
        self.loss_func = nn.CrossEntropyLoss()

    @torch.jit.script_method
    def forward(self, inputs):
        if self.permute:
            inputs = inputs[:, self.permutation]
        state = self.rnn.default_hidden(inputs[:, 0, ...])
        # Unroll the recurrence over the 784 pixels, one scalar per step
        for input in torch.unbind(inputs, dim=1):
            _, state = self.rnn(input.unsqueeze(dim=1), state)
        return self.lin(state)

    def loss(self, logits, y):
        return self.loss_func(logits, y)

    def correct(self, logits, y):
        return torch.eq(torch.argmax(logits, dim=1), y).float().sum()
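
# Training driver: RMSprop on the ordinary parameters and, for ExpRNN, a
# second RMSprop on the skew-symmetric log of the recurrent kernel.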
def main():
    # Load data
    kwargs = {'num_workers': 1, 'pin_memory': True}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./mnist', train=True, download=True, transform=transforms.ToTensor()),
        batch_size=batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./mnist', train=False, transform=transforms.ToTensor()),
        batch_size=batch_size, shuffle=True, **kwargs)

    # Model and optimizers
    model = Model(hidden_size, args.permute).to(device)
    model.train()

    if args.mode == "lstm":
        optim = torch.optim.RMSprop(model.parameters(), lr=args.lr)
        optim_orth = None
    else:
        # ExpRNN: split the parameters so that the skew-symmetric log of the
        # orthogonal kernel gets its own optimizer with a smaller step size
        non_orth_params, log_orth_params = get_parameters(model)
        optim = torch.optim.RMSprop(non_orth_params, lr=args.lr)
        optim_orth = torch.optim.RMSprop(log_orth_params, lr=args.lr_orth)
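
    # Main training loop: evaluate on the test set after every epoch and keep
    # track of the best test accuracy seen so far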
    best_test_acc = 0.
    for epoch in range(epochs):
        processed = 0
        for batch_idx, (batch_x, batch_y) in enumerate(train_loader):
            batch_x, batch_y = batch_x.to(device).view(-1, 784), batch_y.to(device)

            logits = model(batch_x)
            loss = model.loss(logits, batch_y)

            # Zeroing out the optim_orth is not really necessary, but we do it for consistency
            if optim_orth:
                optim_orth.zero_grad()
            optim.zero_grad()

            loss.backward()

            # Update of the orthogonal kernel through its skew-symmetric
            # parametrisation (see orthogonal_step in exprnn.py); optim.step()
            # then updates the remaining parameters as usual
            if optim_orth:
                model.apply(orthogonal_step(optim_orth))
            optim.step()

            with torch.no_grad():
                correct = model.correct(logits, batch_y)

            processed += len(batch_x)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {:.2f}%\tBest: {:.2f}%'.format(
                epoch, processed, len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item(), 100 * correct / len(batch_x), best_test_acc))

        # Re-orthogonalise the recurrent kernel before evaluating; the LSTM
        # baseline has no orthogonal kernel, so this only applies to ExpRNN
        if args.mode != "lstm":
            model.rnn.recurrent_kernel.orthogonalise()
        model.eval()
        with torch.no_grad():
            test_loss = 0.
            correct = 0.
            for batch_x, batch_y in test_loader:
                batch_x, batch_y = batch_x.to(device).view(-1, 784), batch_y.to(device)
                logits = model(batch_x)
                test_loss += model.loss(logits, batch_y).float()
                correct += model.correct(logits, batch_y).float()

        test_loss /= len(test_loader)
        test_acc = 100 * correct / len(test_loader.dataset)
        best_test_acc = max(test_acc, best_test_acc)

        print("\n")
        print(args)
        print("Test set: Average loss: {:.4f}, Accuracy: {:.2f}%, Best Accuracy: {:.2f}%\n"
              .format(test_loss, test_acc, best_test_acc))

        model.train()

if __name__ == "__main__":
    main()
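
# Example invocations, assuming a CUDA device is available and that exprnn.py
# and initialization.py from this repository sit next to this script:
#
#   python 2_mnist.py                      # ExpRNN, Cayley init, sequential MNIST
#   python 2_mnist.py --permute            # permuted MNIST
#   python 2_mnist.py --init henaff        # Henaff initialization
#   python 2_mnist.py --mode lstm          # LSTM baseline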