import math
from contextlib import contextmanager

import torch
import torch.nn as nn


# Initializes a Linear layer's weights from N(0, 1/sqrt(fan_in)),
# a common heuristic that keeps early activations well-scaled.
def normal_weights(layer):
    if isinstance(layer, nn.Linear):
        std = 1.0 / math.sqrt(layer.in_features)
        layer.weight.data.normal_(0, std)


# A Dueling DQN: one stream estimates the state value V(s), a second
# estimates the per-action advantages A(s, a) (Wang et al., 2016).
class QNetwork(nn.Module):
    def __init__(self, state_size, action_size, seed=1337):
        super(QNetwork, self).__init__()
        self.seed = torch.manual_seed(seed)

        # Hidden-layer widths; the output heads (1 unit for V(s),
        # action_size units for A(s, a)) are appended below.
        state_val_net_layer_dims = [state_size, 128, 32]
        advantage_net_layer_dims = [state_size, 128, 32]

        # V(s)
        self.state_val_net = nn.Sequential(
            *self.gen_linear_layers(state_val_net_layer_dims),
            nn.Linear(state_val_net_layer_dims[-1], 1),
        )
        # A(s, a)
        self.advantage_net = nn.Sequential(
            *self.gen_linear_layers(advantage_net_layer_dims),
            nn.Linear(advantage_net_layer_dims[-1], action_size),
        )
        self.apply(normal_weights)

    # Builds a stack of Linear -> BatchNorm1d -> ReLU blocks connecting
    # consecutive entries of layer_dims.
    def gen_linear_layers(self, layer_dims):
        return [
            nn.Sequential(
                nn.Linear(layer_dims[i], layer_dims[i + 1]),
                nn.BatchNorm1d(layer_dims[i + 1]),
                nn.ReLU(),
            )
            for i in range(len(layer_dims) - 1)
        ]

    def forward(self, state):
        state_vals = self.state_val_net(state)    # shape: (batch, 1)
        advantages = self.advantage_net(state)    # shape: (batch, action_size)
        # Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a'). The mean is taken
        # per state over the action dimension (not over the whole batch),
        # which keeps the V/A decomposition identifiable.
        return state_vals + advantages - advantages.mean(dim=1, keepdim=True)

    # Use this to pick actions when interacting with the environment:
    # adding V(s) shifts all Q-values for a state equally, so it never
    # changes the action ranking.
    def get_advantages(self, state):
        return self.advantage_net(state)

    # Context manager for inference: switches to eval mode (so BatchNorm
    # uses its running statistics) and disables gradient tracking, then
    # restores train mode on exit.
    @contextmanager
    def eval_no_grad(self):
        with torch.no_grad():
            try:
                self.eval()
                yield
            finally:
                self.train()
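

# A minimal usage sketch, assuming an 8-dimensional state and 4 actions
# (illustrative values, not part of the original module):
if __name__ == "__main__":
    net = QNetwork(state_size=8, action_size=4)

    # Training-style forward pass: BatchNorm1d in train mode needs a
    # batch of more than one state.
    states = torch.randn(32, 8)
    q_values = net(states)  # shape: (32, 4)

    # Acting in the environment: eval mode + no grad, advantages only.
    with net.eval_no_grad():
        state = torch.randn(1, 8)
        action = net.get_advantages(state).argmax(dim=1).item()

    print(q_values.shape, action)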