-
Notifications
You must be signed in to change notification settings - Fork 3
/
train_schools.py
89 lines (80 loc) · 2.41 KB
/
train_schools.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import numpy as np
import torch
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
import torch.nn.functional as F
import pdb
import os
import math
import random
import time
import load
# from this: http://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
def asMinutes(s):
    """Render a duration *s* (in seconds) as a human-readable 'Mm Ss' string."""
    m, rem = divmod(s, 60)
    return '%dm %ds' % (m, rem)
def timeSince(since, percent):
    """Report elapsed wall-clock time since *since* plus an estimate of the
    time remaining, given *percent* — the fraction of work already done."""
    elapsed = time.time() - since
    projected_total = elapsed / percent
    remaining = projected_total - elapsed
    return '%s (- %s)' % (asMinutes(elapsed), asMinutes(remaining))
def save_checkpoint(state, filename='checkpoint.pth.tar'):
    """Serialize a training-state object (e.g. a dict of model/optimizer
    state) to *filename* via torch.save."""
    torch.save(state, filename)
class the_linear(nn.Module):
    """A single bias-free linear layer mapping d input features to one scalar.

    The prediction for each row is a pure weighted sum of its features.
    """

    def __init__(self, d):
        super(the_linear, self).__init__()
        # One output unit, no bias term.
        self.lin = nn.Linear(d, 1, bias=False)

    def forward(self, X):
        # X: (n, d) -> predictions: (n, 1)
        return self.lin(X)
# X - (n,d), this is the concatenation of the matrices
def train(model, n_epochs, X, y, print_every=1):
    """Fit *model* to (X, y) with full-batch L-BFGS on MSE loss.

    Args:
        model: an nn.Module mapping X -> predictions shaped like y.
        n_epochs: number of outer L-BFGS steps.
        X: input tensor, shape (n, d).
        y: target tensor — presumably shape (n, 1); TODO confirm vs. loader.
        print_every: report the training loss every this many epochs.

    Returns:
        The same *model* object, trained in place.
    """
    random.seed(0)
    optimizer = optim.LBFGS(model.parameters())
    loss_func = nn.MSELoss()
    for i in range(1, n_epochs + 1):
        def closure():
            # L-BFGS may re-evaluate the objective several times per step.
            optimizer.zero_grad()
            loss = loss_func(model(X), y)
            loss.backward()
            return loss
        optimizer.step(closure)
        # Fix: print_every was previously ignored and the loss was printed
        # on every epoch; gate the report (default 1 keeps old behavior).
        if i % print_every == 0:
            with torch.no_grad():
                loss = loss_func(model(X), y)
            print('train loss:', loss.item())
    # EVAL MODEL HERE!
    return model
def all_of_it(sim, frac):
    """Fit the bias-free linear model on the school data for simulation *sim*
    and save the learned weight vector to a compressed .npz file.

    Args:
        sim: simulation index, forwarded to load.get_data and embedded in the
             output filename.
        frac: truthy -> save under the '..._frac' filename variant.
    """
    use_cuda = torch.cuda.is_available()
    CUDA_GPU = 0
    # get data
    print('sim: ' + str(sim))
    print('frac: ' + str(frac))
    X, y, _, _, _, _, _ = load.get_data(sim, 0)
    (n, d) = X.shape
    # Fix: torch.autograd.Variable is deprecated (no-op since torch 0.4);
    # plain tensors carry autograd now.
    X_t = torch.FloatTensor(X)
    y_t = torch.FloatTensor(y)
    if use_cuda:
        X_t = X_t.cuda(CUDA_GPU)
        y_t = y_t.cuda(CUDA_GPU)
    n_epochs = 100
    # model
    model = the_linear(d)
    if use_cuda:
        model = model.cuda(CUDA_GPU)
    model = train(model, n_epochs, X_t, y_t)
    # The only parameter of the_linear is the (1, d) weight matrix.
    weights = list(model.parameters())[0].detach().cpu().numpy()
    # Build the output name once instead of duplicating the savez call.
    fname = 'school_weights_linear_mse_sim' + str(sim) + '_max1'
    if frac:
        fname += '_frac'
    np.savez_compressed(fname, w=weights)
if __name__ == '__main__':
    # Train on simulation 5 and save under the fractional filename variant.
    all_of_it(5, 1)