forked from eminorhan/recurrent-memory
-
Notifications
You must be signed in to change notification settings - Fork 0
/
models.py
103 lines (72 loc) · 3.3 KB
/
models.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 13 16:57:28 2017 by emin
"""
from LeInit import LeInit
import torch
from torch import nn
class RNN(nn.Module):
    """Single-layer ReLU RNN unrolled over time with a linear readout.

    Args:
        input_size: feature dimension of each timestep.
        hidden_size: number of recurrent units.
        output_size: dimension of the per-step readout.
        out_nlin: output nonlinearity, 'linear' (identity) or 'sigmoid'.

    Raises:
        ValueError: if ``out_nlin`` is not one of the supported names.
    """
    def __init__(self, input_size=100, hidden_size=200, output_size=1, out_nlin='linear'):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.rnn = nn.RNNCell(input_size, hidden_size, nonlinearity='relu')
        self.i2o = nn.Linear(hidden_size, output_size)
        if out_nlin == 'linear':
            self.non_linearity = nn.Identity()
        elif out_nlin == 'sigmoid':
            self.non_linearity = nn.Sigmoid()
        else:
            # Fail fast: the original left self.non_linearity unset and
            # crashed later in forward() with AttributeError.
            raise ValueError("out_nlin must be 'linear' or 'sigmoid', got %r" % (out_nlin,))

    def forward(self, input):
        """Run the RNN over ``input`` of shape (batch, time, input_size).

        Returns:
            outputs: (batch, time, output_size) tensor of readouts.
            hiddens: list of per-step hidden states, each (batch, hidden_size).
        """
        # Create the initial hidden state on the same device/dtype as the
        # input; the bare torch.zeros(...) default broke on CUDA inputs.
        hidden = torch.zeros(input.shape[0], self.hidden_size,
                             device=input.device, dtype=input.dtype)
        outputs = []
        hiddens = []
        for t in range(input.shape[1]):
            hidden = self.rnn(input[:, t, :], hidden)
            hiddens.append(hidden)
            outputs.append(self.non_linearity(self.i2o(hidden)))
        outputs = torch.stack(outputs, 1)
        return outputs, hiddens
class GRU(nn.Module):
    """Single-layer GRU unrolled over time with a linear readout.

    Args:
        input_size: feature dimension of each timestep.
        hidden_size: number of recurrent units.
        out_nlin: output nonlinearity, 'linear' (identity) or 'sigmoid'.
        output_size: dimension of the per-step readout.

    Raises:
        ValueError: if ``out_nlin`` is not one of the supported names.
    """
    def __init__(self, input_size=100, hidden_size=200, out_nlin='linear', output_size=1):
        super(GRU, self).__init__()
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.rnn = nn.GRUCell(input_size, hidden_size)
        self.i2o = nn.Linear(hidden_size, output_size)
        if out_nlin == 'linear':
            self.non_linearity = nn.Identity()
        elif out_nlin == 'sigmoid':
            self.non_linearity = nn.Sigmoid()
        else:
            # Fail fast: the original left self.non_linearity unset and
            # crashed later in forward() with AttributeError.
            raise ValueError("out_nlin must be 'linear' or 'sigmoid', got %r" % (out_nlin,))

    def forward(self, input):
        """Run the GRU over ``input`` of shape (batch, time, input_size).

        Returns:
            outputs: (batch, time, output_size) tensor of readouts.
            hiddens: list of per-step hidden states, each (batch, hidden_size).
        """
        # Create the initial hidden state on the same device/dtype as the
        # input; the bare torch.zeros(...) default broke on CUDA inputs.
        hidden = torch.zeros(input.shape[0], self.hidden_size,
                             device=input.device, dtype=input.dtype)
        outputs = []
        hiddens = []
        for t in range(input.shape[1]):
            hidden = self.rnn(input[:, t, :], hidden)
            hiddens.append(hidden)
            outputs.append(self.non_linearity(self.i2o(hidden)))
        outputs = torch.stack(outputs, 1)
        return outputs, hiddens
def OrthoInitRecurrent(input_var, batch_size=1, n_in=100, n_out=1, n_hid=200, init_val=0.9, out_nlin='linear'):
    """Build a ReLU RNN with orthogonally initialized recurrent weights.

    Input-to-hidden and readout weights get Xavier-normal init (gain 0.95);
    the hidden-to-hidden matrix gets an orthogonal init scaled by ``init_val``.
    ``input_var`` and ``batch_size`` are accepted for signature compatibility
    with the other builders but are not used here.
    """
    net = RNN(input_size=n_in, hidden_size=n_hid, output_size=n_out, out_nlin=out_nlin)
    for weight in (net.rnn.weight_ih, net.i2o.weight):
        nn.init.xavier_normal_(weight, gain=0.95)
    nn.init.orthogonal_(net.rnn.weight_hh, gain=init_val)
    return net
def LeInitRecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1,
                    n_hid=200, diag_val=0.9, offdiag_val=0.01,
                    out_nlin='linear'):
    """Build a ReLU RNN whose recurrent weights are sampled from LeInit.

    The hidden-to-hidden matrix is drawn from a LeInit distribution with the
    given diagonal/off-diagonal values; input and readout weights get
    Xavier-normal init (gain 0.95). ``input_var``, ``mask_var`` and
    ``batch_size`` are accepted for signature compatibility but unused.
    """
    net = RNN(input_size=n_in, hidden_size=n_hid, output_size=n_out, out_nlin=out_nlin)
    nn.init.xavier_normal_(net.rnn.weight_ih, gain=0.95)
    sampler = LeInit(diag_val=diag_val, offdiag_val=offdiag_val)
    net.rnn.weight_hh.data = sampler.sample(net.rnn.weight_hh.data.shape)
    nn.init.xavier_normal_(net.i2o.weight, gain=0.95)
    return net
def GRURecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1, n_hid=200, diag_val=0.9, offdiag_val=0.01,
                 out_nlin='linear'):
    """Build a GRU model with small Xavier-normal initialization (gain 0.05).

    ``input_var``, ``mask_var``, ``batch_size``, ``diag_val`` and
    ``offdiag_val`` are accepted for signature compatibility with the other
    builders but are not used here.
    """
    # BUG FIX: the original called GRU(..., mask_var=mask_var), but
    # GRU.__init__ has no mask_var parameter, so every call raised TypeError;
    # it also dropped out_nlin. Forward only the arguments GRU supports.
    model = GRU(input_size=n_in, hidden_size=n_hid, output_size=n_out, out_nlin=out_nlin)
    nn.init.xavier_normal_(model.rnn.weight_hh, gain=0.05)
    nn.init.xavier_normal_(model.rnn.weight_ih, gain=0.05)
    nn.init.xavier_normal_(model.i2o.weight, gain=0.05)
    return model