# her.py
import random
from collections import deque

import numpy as np
import torch

"""
Hindsight Experience Replay (HER).
Works only for DQN without PER (prioritized experience replay).
"""
class HER:
    def __init__(self, env, agent):
        self.agent = agent
        self.env = env
        # The agent always sees the state and the goal concatenated together.
        self.concat = lambda s, g: np.concatenate([s, g], axis=-1)
        if not hasattr(self.agent, 'step'):
            raise ValueError(
                "Only off-policy algorithms can be used. "
                "Algorithms to use: DQN (rainbow.py), DDPG, TD3, SAC.")
    def train(self, n_traj, t_max, n_epochs, max_score, render_freq=None,
              test_freq=1, save_models=False, checkpoint_name=None,
              strategy=None, n_strategy=10):
        # Select the goal-relabelling strategy; default to random-with-bias.
        if strategy == 'final':
            strategy = self.final_state_strategy
        elif strategy == 'random':
            strategy = self.random_strategy
        elif strategy == 'next':
            strategy = self.next_strategy
        else:
            strategy = self.random_bias_strategy
        scores = []
        scores_window = deque(maxlen=100)
        test_scores = []
        test_scores_window = deque(maxlen=100)
        for i_episode in range(1, n_traj + 1):
            render = ((render_freq is not None)
                      and (i_episode % render_freq == 0))
            score = self.train_episode(t_max, n_epochs, n_strategy, strategy,
                                       render=render)
            scores_window.append(score)
            scores.append(score)
            avg_score = np.mean(scores_window)
            if i_episode % test_freq == 0:
                t_score = self.test(t_max, render=False)
                test_scores.append(t_score)
                test_scores_window.append(t_score)
                print("Avg. test score: ", np.mean(test_scores_window))
            print("\rEpisode %d, avg. score %.2f" % (i_episode, avg_score))
            if avg_score >= max_score:
                print("Solved! Episode %d" % i_episode)
                if save_models:
                    fname = "checkpoints/{}.pth".format(checkpoint_name)
                    torch.save(self.agent.online_net.state_dict(), fname)
                break
        self.env.close()
        return scores
    def train_episode(self, t_max, n_epochs, n_strategy, strategy,
                      render=False):
        """
        strategy(episode, state, n): returns a list of substitute goals
        """
        concat = self.concat
        episode = []
        state, goal = self.env.reset()
        score = 0
        # Roll out one episode, storing every transition.
        for t in range(t_max):
            action = self.agent.act(concat(state, goal))
            if render:
                self.env.render()
            (next_state, next_goal), \
                reward, done, info = self.env.step(action)
            episode.append((state, goal, action,
                            reward, next_state, done, info))
            state = next_state
            goal = next_goal
            score += reward
            if done:
                break
        # ============================= HER =============================
        # Store each transition with its original goal, then again with
        # substitute goals sampled by the strategy, recomputing the reward
        # as if those goals had been the target all along.
        for state, goal, action, reward, next_state, done, info in episode:
            self.agent.append_to_buffer(concat(state, goal), action, reward,
                                        concat(next_state, goal), done)
            goals = strategy(episode, next_state, n_strategy)
            for g in goals:
                r = self.env.compute_reward(next_state, g, info)
                self.agent.append_to_buffer(concat(state, g), action, r,
                                            concat(next_state, g), done)
        if len(self.agent.replay_buffer) >= self.agent.batch_size:
            for e in range(n_epochs):
                exp = self.agent.replay_buffer.sample()
                self.agent.learn(exp)
        return score
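
    # Example of a custom strategy following the strategy(episode, state, n)
    # protocol documented above (a sketch, not part of the original file):
    # combine the 'final' and 'random' ideas in a single relabelling.
    #
    #     def final_plus_random_strategy(self, episode, state, n):
    #         states = list(zip(*episode))[0]
    #         return [states[-1]] + random.choices(states, k=n - 1)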
    def test(self, t_max, render=True, num_of_episodes=1):
        concat = self.concat
        score_avg = 0
        for i in range(num_of_episodes):
            state, goal = self.env.reset()
            score = 0
            for j in range(t_max):
                action = self.agent.act(concat(state, goal), test=True)
                if render:
                    self.env.render()
                (state, goal), reward, done, _ = self.env.step(action)
                score += reward
                if done:
                    break
            score_avg += score
        self.env.close()
        return score_avg / num_of_episodes
    # DIFFERENT GOAL-RELABELLING STRATEGIES:
    def final_state_strategy(self, episode, state, n):
        # Take the final state of the episode as the goal. Computed per call:
        # the original cached this once via hasattr(self, 'cache'), which went
        # stale and reused the first episode's final state forever after.
        return [list(zip(*episode))[0][-1]]

    def random_bias_strategy(self, episode, state, n):
        # n states sampled uniformly from the episode, plus the current
        # next state (biasing the sample towards achieved outcomes).
        return random.choices(list(zip(*episode))[0], k=n) + [state]

    def random_strategy(self, episode, state, n):
        # n states sampled uniformly from the episode.
        return random.choices(list(zip(*episode))[0], k=n)

    def next_strategy(self, episode, state, n):
        # Use the state actually reached at this step as the goal.
        return [state]
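
# Usage sketch. `BitFlipEnv` and `DQNAgent` are placeholder names for a
# goal-based env and an off-policy agent (e.g. the DQN in rainbow.py) that
# exposes act/append_to_buffer/learn/step and a replay_buffer; adjust the
# names and hyperparameters to your own code.
#
#     env = BitFlipEnv(n_bits=16)
#     agent = DQNAgent(state_size=2 * 16, action_size=16)  # state + goal
#     her = HER(env, agent)
#     scores = her.train(n_traj=2000, t_max=16, n_epochs=40, max_score=-5,
#                        strategy='final', save_models=True,
#                        checkpoint_name='her_dqn')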