# coding: utf-8

# In[10]:

import os

# The backend must be chosen before Keras is imported, so set the
# environment variable first.
os.environ["KERAS_BACKEND"] = "tensorflow"

import copy
import random
from collections import namedtuple, deque

import numpy as np
from keras import layers, models, optimizers
from keras import backend as K
# In[1]:

class ReplayBuffer:
    """Fixed-size buffer to store experience tuples."""

    def __init__(self, buffer_size, batch_size):
        """Initialize a ReplayBuffer object.

        Params
        ======
            buffer_size: maximum size of buffer
            batch_size: size of each training batch
        """
        self.memory = deque(maxlen=buffer_size)  # internal memory (deque)
        self.batch_size = batch_size
        self.experience = namedtuple(
            "Experience",
            field_names=["state", "action", "reward", "next_state", "done"])

    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        e = self.experience(state, action, reward, next_state, done)
        self.memory.append(e)

    def sample(self, batch_size=None):
        """Randomly sample a batch of experiences from memory."""
        return random.sample(self.memory, k=batch_size or self.batch_size)

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
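
# A minimal usage sketch, not part of the original file: fill the buffer
# with dummy transitions and draw one batch. The state/action sizes and the
# helper name `_demo_replay_buffer` are illustrative assumptions.
def _demo_replay_buffer():
    buffer = ReplayBuffer(buffer_size=100000, batch_size=64)
    for _ in range(128):
        # Dummy 4-dim states and 2-dim actions stand in for real transitions.
        buffer.add(np.zeros(4), np.zeros(2), 0.0, np.zeros(4), False)
    batch = buffer.sample()          # list of 64 Experience namedtuples
    assert len(batch) == 64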
# In[8]:

class Actor:
    """Actor (Policy) Model."""

    def __init__(self, state_size, action_size, action_low, action_high):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            action_low (array): Min value of each action dimension
            action_high (array): Max value of each action dimension
        """
        self.state_size = state_size
        self.action_size = action_size
        self.action_low = action_low
        self.action_high = action_high
        self.action_range = self.action_high - self.action_low

        # Initialize any other variables here

        self.build_model()

    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size,), name='states')

        # Add hidden layers; try different layer sizes, activations,
        # batch normalization, regularizers, etc.
        net = layers.Dense(units=32, activation='relu')(states)
        net = layers.Dense(units=128, activation='relu')(net)
        net = layers.Dense(units=32, activation='relu')(net)
        normalized = layers.BatchNormalization()(net)
        dropout = layers.Dropout(rate=0.3)(normalized)

        # Add final output layer with sigmoid activation
        raw_actions = layers.Dense(units=self.action_size, activation='sigmoid',
                                   name='raw_actions')(dropout)

        # Scale [0, 1] output for each action dimension to proper range
        actions = layers.Lambda(lambda x: (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients.
        # Minimizing -dQ/da * a pushes each action in the direction that
        # increases Q, i.e. gradient ascent on the critic's value estimate.
        action_gradients = layers.Input(shape=(self.action_size,))
        loss = K.mean(-action_gradients * actions)

        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam(lr=0.01)
        updates_op = optimizer.get_updates(
            params=self.model.trainable_weights, loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients, K.learning_phase()],
            outputs=[],
            updates=updates_op)
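
# A minimal usage sketch, not part of the original file: one policy-gradient
# step. `states` is a (batch, state_size) array and `action_gradients` is
# dQ/da of shape (batch, action_size), normally fetched from a critic. The
# helper name `_demo_actor_step` is an illustrative assumption.
def _demo_actor_step(actor, states, action_gradients):
    actor.train_fn([states, action_gradients, 1])  # 1 = training phase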
# In[9]:

class Critic:
    """Critic (Value) Model."""

    def __init__(self, state_size, action_size):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
        """
        self.state_size = state_size
        self.action_size = action_size

        # Initialize any other variables here

        self.build_model()

    def build_model(self):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # Define input layers
        states = layers.Input(shape=(self.state_size,), name='states')
        actions = layers.Input(shape=(self.action_size,), name='actions')

        # Add hidden layer(s) for state pathway
        net_states = layers.Dense(units=32, activation='relu')(states)
        net_states = layers.Dense(units=128, activation='relu')(net_states)
        normalized_states = layers.BatchNormalization()(net_states)
        dropout_states = layers.Dropout(rate=0.3)(normalized_states)

        # Add hidden layer(s) for action pathway
        net_actions = layers.Dense(units=32, activation='relu')(actions)
        net_actions = layers.Dense(units=128, activation='relu')(net_actions)
        normalized_actions = layers.BatchNormalization()(net_actions)
        dropout_actions = layers.Dropout(rate=0.3)(normalized_actions)

        # Combine state and action pathways
        net = layers.Add()([dropout_states, dropout_actions])
        net = layers.Activation('relu')(net)

        # Add final output layer to produce action values (Q values)
        Q_values = layers.Dense(units=1, name='q_values')(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with built-in loss function
        optimizer = optimizers.Adam(lr=0.03)
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q values w.r.t. actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used by actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
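
# A minimal usage sketch, not part of the original file: querying the
# critic's action gradients for a batch. The trailing 0 is the value of
# K.learning_phase() (inference mode); the helper name is an illustrative
# assumption.
def _demo_action_gradients(critic, states, actions):
    # K.function returns a list of outputs; the single entry here has
    # shape (batch, action_size).
    return critic.get_action_gradients([states, actions, 0])[0]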
# In[ ]:

class OUNoise:
    """Ornstein-Uhlenbeck process."""

    def __init__(self, size, mu, theta, sigma):
        """Initialize parameters and noise process."""
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.reset()

    def reset(self):
        """Reset the internal state (= noise) to mean (mu)."""
        self.state = copy.copy(self.mu)

    def sample(self):
        """Update internal state and return it as a noise sample."""
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
        self.state = x + dx
        return self.state
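
# A minimal sketch, not part of the original file, of how these pieces fit
# together in one DDPG-style update. The sizes, hyperparameters (gamma,
# noise parameters), dummy transitions, and the helper name are illustrative
# assumptions; a real agent would also use target networks and soft updates.
def _demo_ddpg_update():
    state_size, action_size = 4, 2
    actor = Actor(state_size, action_size,
                  action_low=np.array([-1.0, -1.0]),
                  action_high=np.array([1.0, 1.0]))
    critic = Critic(state_size, action_size)
    noise = OUNoise(action_size, mu=0.0, theta=0.15, sigma=0.2)
    buffer = ReplayBuffer(buffer_size=100000, batch_size=64)

    # Act: deterministic policy output plus exploration noise.
    state = np.zeros((1, state_size))
    action = actor.model.predict(state)[0] + noise.sample()

    # Fill the buffer with dummy transitions so a batch can be drawn.
    for _ in range(64):
        buffer.add(np.zeros(state_size), action, 0.0,
                   np.zeros(state_size), False)
    experiences = buffer.sample()
    states = np.vstack([e.state for e in experiences])
    actions = np.array([e.action for e in experiences]).reshape(-1, action_size)
    rewards = np.array([e.reward for e in experiences]).reshape(-1, 1)
    dones = np.array([e.done for e in experiences]).astype(np.uint8).reshape(-1, 1)
    next_states = np.vstack([e.next_state for e in experiences])

    # Critic update: regress Q(s, a) toward r + gamma * Q(s', a').
    gamma = 0.99
    next_actions = actor.model.predict_on_batch(next_states)
    Q_next = critic.model.predict_on_batch([next_states, next_actions])
    Q_targets = rewards + gamma * Q_next * (1 - dones)
    critic.model.train_on_batch([states, actions], Q_targets)

    # Actor update: ascend the critic's action gradients.
    action_gradients = np.reshape(
        critic.get_action_gradients([states, actions, 0])[0],
        (-1, action_size))
    actor.train_fn([states, action_gradients, 1])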