models.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

class MLP(nn.Module):
    """Feed-forward policy with a residual second layer and a scalar value head."""

    def __init__(self, args, num_inputs):
        super(MLP, self).__init__()
        self.args = args
        self.affine1 = nn.Linear(num_inputs, args.hid_size)
        self.affine2 = nn.Linear(args.hid_size, args.hid_size)
        self.continuous = args.continuous
        if self.continuous:
            # Continuous control: predict a per-dimension action mean and learn a
            # state-independent log standard deviation.
            self.action_mean = nn.Linear(args.hid_size, args.dim_actions)
            self.action_log_std = nn.Parameter(torch.zeros(1, args.dim_actions))
        else:
            # Discrete control: one linear head per action type.
            self.heads = nn.ModuleList([nn.Linear(args.hid_size, o)
                                        for o in args.naction_heads])
        self.value_head = nn.Linear(args.hid_size, 1)
        self.tanh = nn.Tanh()

    def forward(self, x, info={}):
        x = self.tanh(self.affine1(x))
        # Residual connection around the second affine layer.
        h = self.tanh(self.affine2(x) + x)
        v = self.value_head(h)
        if self.continuous:
            action_mean = self.action_mean(h)
            action_log_std = self.action_log_std.expand_as(action_mean)
            action_std = torch.exp(action_log_std)
            return (action_mean, action_log_std, action_std), v
        else:
            return [F.log_softmax(head(h), dim=-1) for head in self.heads], v
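
# Usage sketch for MLP. ``args`` is whatever namespace the training script
# builds; only the fields read above matter, and the concrete values below are
# assumptions chosen for illustration.
#
#     from argparse import Namespace
#     args = Namespace(hid_size=64, continuous=False, naction_heads=[5])
#     policy = MLP(args, num_inputs=10)
#     log_probs, value = policy(torch.randn(4, 10))
#     # log_probs[0] has shape (4, 5); value has shape (4, 1)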

class Random(nn.Module):
    """Baseline policy that ignores the observation and acts uniformly at random."""

    def __init__(self, args, num_inputs):
        super(Random, self).__init__()
        self.naction_heads = args.naction_heads
        # Just so that PyTorch is happy: optimizers need a non-empty parameter list.
        self.parameter = nn.Parameter(torch.randn(3))

    def forward(self, x, info={}):
        sizes = x.size()[:-1]
        v = Variable(torch.rand(sizes + (1,)), requires_grad=True)
        out = []
        for o in self.naction_heads:
            var = Variable(torch.randn(sizes + (o,)), requires_grad=True)
            out.append(F.log_softmax(var, dim=-1))
        return out, v
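
# Shape sketch for Random, under the same assumed ``args`` layout as the MLP
# example above:
#
#     args = Namespace(naction_heads=[5, 3])
#     baseline = Random(args, num_inputs=10)
#     out, v = baseline(torch.randn(4, 10))
#     # len(out) == 2; out[0] is (4, 5), out[1] is (4, 3); v is (4, 1)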

class RNN(MLP):
    """Recurrent variant of MLP; uses either a plain recurrent update or an LSTM cell."""

    def __init__(self, args, num_inputs):
        super(RNN, self).__init__(args, num_inputs)
        self.nagents = self.args.nagents
        self.hid_size = self.args.hid_size
        if self.args.rnn_type == 'LSTM':
            # The LSTM cell replaces the second affine layer of the MLP.
            del self.affine2
            self.lstm_unit = nn.LSTMCell(self.hid_size, self.hid_size)

    def forward(self, x, info={}):
        x, prev_hid = x
        encoded_x = self.affine1(x)
        if self.args.rnn_type == 'LSTM':
            batch_size = encoded_x.size(0)
            encoded_x = encoded_x.view(batch_size * self.nagents, self.hid_size)
            next_hid, cell_state = self.lstm_unit(encoded_x, prev_hid)
            ret = (next_hid.clone(), cell_state.clone())
            next_hid = next_hid.view(batch_size, self.nagents, self.hid_size)
        else:
            next_hid = torch.tanh(self.affine2(prev_hid) + encoded_x)
            ret = next_hid
        v = self.value_head(next_hid)
        if self.continuous:
            action_mean = self.action_mean(next_hid)
            action_log_std = self.action_log_std.expand_as(action_mean)
            action_std = torch.exp(action_log_std)
            return (action_mean, action_log_std, action_std), v, ret
        else:
            return [F.log_softmax(head(next_hid), dim=-1) for head in self.heads], v, ret

    def init_hidden(self, batch_size):
        # dim 0 = num of layers * num of directions
        return tuple((torch.zeros(batch_size * self.nagents, self.hid_size, requires_grad=True),
                      torch.zeros(batch_size * self.nagents, self.hid_size, requires_grad=True)))
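
# Minimal smoke-test sketch for the recurrent policy. The argparse fields
# (hid_size, nagents, rnn_type, ...) and their values here are assumptions
# chosen only to make the example run; the real training script defines them.
if __name__ == '__main__':
    from argparse import Namespace

    args = Namespace(hid_size=64, continuous=False, naction_heads=[5],
                     nagents=3, rnn_type='LSTM')
    model = RNN(args, num_inputs=10)

    batch_size = 2
    prev_hid = model.init_hidden(batch_size)           # (h, c), each (6, 64)
    obs = torch.randn(batch_size, args.nagents, 10)    # one observation per agent
    log_probs, value, next_hid = model((obs, prev_hid))
    print(log_probs[0].shape, value.shape)              # (2, 3, 5) and (2, 3, 1)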