model.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 3 11:43:03 2022
@author: mobeets
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import PackedSequence, pad_packed_sequence


class ValueRNN(nn.Module):
    def __init__(self, input_size=4, output_size=1, hidden_size=15,
                 num_layers=1, gamma=0.9):
        super().__init__()
        self.gamma = gamma  # discount factor; stored on the model but not used in forward()
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                           num_layers=num_layers)
        # value readout: fixed (unlearned) sum over hidden units, with a
        # trailing singleton dim so the output is (T, B, 1)
        self.value = lambda x: torch.sum(x, 2)[:, :, None]
        # self.value = nn.Linear(in_features=hidden_size, out_features=output_size, bias=True)

    def forward(self, x):
        # x: (T, B, input_size) tensor, or a PackedSequence of the same
        x, (hx, cx) = self.rnn(x)
        if isinstance(x, PackedSequence):
            x, output_lengths = pad_packed_sequence(x, batch_first=False)
        x = F.relu(x)
        return self.value(x), (hx, cx)

    def freeze_weights(self):
        for p in self.parameters():
            p.requires_grad = False

    def unfreeze_weights(self):
        for p in self.parameters():
            p.requires_grad = True

    def n_parameters(self):
        return sum(p.numel() for p in self.parameters())

    def get_features(self, name):
        # returns a forward hook that stores a module's output in self.features[name]
        def hook(mdl, input, output):
            self.features[name] = output
        return hook

    def save_weights_to_path(self, path):
        torch.save(self.state_dict(), path)

    def load_weights_from_path(self, path):
        self.load_state_dict(torch.load(path))

    def prepare_to_gather_activity(self):
        # (re)install a forward hook on the LSTM; after each forward pass,
        # self.features['hidden'] holds the LSTM's raw output, i.e. (output, (h_n, c_n))
        if hasattr(self, 'handle'):
            self.handle.remove()
        self.features = {}
        self.hook = self.get_features('hidden')
        self.handle = self.rnn.register_forward_hook(self.hook)
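

if __name__ == '__main__':
    # Minimal usage sketch (hypothetical inputs; not part of the original repo):
    # build a ValueRNN, run a packed batch of two sequences through it, and
    # read out the hidden activity captured by the forward hook.
    from torch.nn.utils.rnn import pack_padded_sequence

    model = ValueRNN(input_size=4, hidden_size=15)

    # batch of 2 sequences (lengths 5 and 3), time-major: (T, B, input_size)
    x = torch.randn(5, 2, 4)
    packed = pack_padded_sequence(x, lengths=torch.tensor([5, 3]),
                                  batch_first=False, enforce_sorted=True)

    model.prepare_to_gather_activity()  # install the LSTM forward hook
    values, (hx, cx) = model(packed)    # values: (T, B, 1) after unpacking
    print(values.shape)                 # torch.Size([5, 2, 1])

    # the hook stored the LSTM's raw output: (PackedSequence, (h_n, c_n))
    rnn_output, (h_n, c_n) = model.features['hidden']

    # sketch of how gamma might enter a TD(0) target (an assumption here:
    # rewards r have shape (T, B, 1), aligned with the predicted values)
    r = torch.zeros(5, 2, 1)
    V = values
    td_error = r[1:] + model.gamma * V[1:] - V[:-1]  # shape (T-1, B, 1)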