rnns.py
import torch
import torch.nn as nn

# Default device (the forward passes below create their initial states
# directly on the input tensor's device).
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size,
                          num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, 10)  # 10 = num_classes
        # Use nn.Linear(hidden_size * 28, 10) instead when the outputs of
        # all 28 timesteps are fed to the classifier.

    def forward(self, t):
        # Initial hidden state: (num_layers, batch, hidden_size), created
        # on the same device as the input.
        h_0 = torch.zeros(self.num_layers, t.size(0),
                          self.hidden_size, device=t.device)
        # Alternative: use the outputs of all timesteps (requires the wider
        # nn.Linear noted above):
        # out, _ = self.rnn(t, h_0)
        # out = out.reshape(out.size(0), -1)
        # out = self.fc(out)
        # Here only the last timestep's features are used, since they carry
        # information from all the previous timesteps.
        out, _ = self.rnn(t, h_0)
        out = self.fc(out[:, -1, :])
        return out
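
# Shape sketch (not from the original file), assuming MNIST-style input
# where each 28x28 image is read as 28 timesteps of 28 features:
#   t: (batch, 28, 28) -> rnn out: (batch, 28, hidden_size)
#   -> last timestep: (batch, hidden_size) -> fc logits: (batch, 10)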

class GRU(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size,
                          num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, 10)

    def forward(self, t):
        # GRUs carry a single hidden state (no cell state).
        h_0 = torch.zeros(self.num_layers, t.size(0),
                          self.hidden_size, device=t.device)
        out, _ = self.gru(t, h_0)
        out = self.fc(out[:, -1, :])
        return out

class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, 10)

    def forward(self, t):
        # LSTMs take a (hidden state, cell state) tuple.
        h_0 = torch.zeros(self.num_layers, t.size(0),
                          self.hidden_size, device=t.device)
        c_0 = torch.zeros(self.num_layers, t.size(0),
                          self.hidden_size, device=t.device)
        out, _ = self.lstm(t, (h_0, c_0))
        out = self.fc(out[:, -1, :])
        return out
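
# Note (not in the original file): PyTorch zero-initializes the hidden and
# cell states when none are passed, so `out, _ = self.lstm(t)` would behave
# identically here.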

class BLSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True,
                            bidirectional=True)
        # Bidirectional outputs concatenate the forward and backward hidden
        # states, hence hidden_size * 2 input features.
        self.fc = nn.Linear(hidden_size * 2, 10)

    def forward(self, t):
        # A bidirectional network keeps num_layers * 2 sets of states,
        # one per direction.
        h_0 = torch.zeros(self.num_layers * 2, t.size(0),
                          self.hidden_size, device=t.device)
        c_0 = torch.zeros(self.num_layers * 2, t.size(0),
                          self.hidden_size, device=t.device)
        out, _ = self.lstm(t, (h_0, c_0))
        out = self.fc(out[:, -1, :])
        return out
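
# Caveat (not in the original file): for the bidirectional model,
# out[:, -1, :] holds the forward direction's summary of the whole sequence
# but only the backward direction's view of the final timestep; combining
# the last forward step with the first backward step is a common alternative.

# A minimal smoke test, assuming MNIST-style inputs (28 timesteps of 28
# features) and the 10 classes hard-coded above. The hyperparameters are
# illustrative, not from the original file.
if __name__ == "__main__":
    x = torch.randn(4, 28, 28)  # (batch, seq_len, input_size)
    for Model in (RNN, GRU, LSTM, BLSTM):
        model = Model(input_size=28, hidden_size=128, num_layers=2)
        print(Model.__name__, model(x).shape)  # -> torch.Size([4, 10])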