TransitionManager.py

import numpy as np
import torch
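
# A replay memory in the style used by DQN agents. Frames are stored once as
# uint8 to save RAM, stacked into hist_len-channel observations on demand, and
# copied in bulk into preallocated (optionally GPU-resident) float tensors so
# that sample() amortizes the host-to-device transfer across buffer_size
# transitions.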
class TransitionManager(object):
    def __init__(self, args):
        super(TransitionManager, self).__init__()
        args.hist_space = 1
        # hist_idx holds the frame offsets that make up one stacked state,
        # e.g. [0, 1, 2, 3] for hist_len=4 with hist_space=1.
        args.hist_idx = [(i+1)*args.hist_space-1 for i in range(args.hist_len)]
        args.recent_mem_size = args.hist_space*args.hist_len
        self.args = args
        self.n_states, self.insert_idx = 0, 0
        # Long-term replay memory, kept as compact NumPy arrays on the CPU.
        self.states = np.zeros((self.args.replay_memory, self.args.im_size, self.args.im_size), dtype=np.uint8)
        self.dones = np.zeros(self.args.replay_memory, dtype=bool)  # np.bool was removed in NumPy 1.24
        self.actions = np.zeros(self.args.replay_memory, dtype=np.uint8)
        self.rewards = np.zeros(self.args.replay_memory, dtype=np.float32)
        # Preallocated staging tensors that sample() hands out slices of.
        self.buffer_idx = None
        self.state_buffer = torch.FloatTensor(self.args.buffer_size, self.args.n_input_channels, self.args.im_size, self.args.im_size).zero_()
        self.action_buffer = torch.LongTensor(self.args.buffer_size).zero_()
        self.reward_buffer = torch.FloatTensor(self.args.buffer_size).zero_()
        self.next_state_buffer = torch.FloatTensor(self.args.buffer_size, self.args.n_input_channels, self.args.im_size, self.args.im_size).zero_()
        self.done_buffer = torch.FloatTensor(self.args.buffer_size).zero_()
        if self.args.gpu >= 0:
            # Move the staging tensors to the GPU once, up front.
            self.state_buffer = self.state_buffer.cuda(self.args.gpu)
            self.next_state_buffer = self.next_state_buffer.cuda(self.args.gpu)
            self.reward_buffer = self.reward_buffer.cuda(self.args.gpu)
            self.action_buffer = self.action_buffer.cuda(self.args.gpu)
            self.done_buffer = self.done_buffer.cuda(self.args.gpu)
        # Short-term history of the most recent frames, used to build the
        # current observation for action selection.
        self.recent_states = []
        self.recent_dones = []
        self.recent_actions = []

    def reset_recent(self):
        # Drop the short-term history at the start of a new episode.
        self.recent_states = []
        self.recent_dones = []
        self.recent_actions = []

    def add_recent_state(self, state, done):
        # Quantize the [0, 1] float frame to uint8, matching the replay store.
        state = np.asarray(state*255, dtype=np.uint8)
        if len(self.recent_states) == 0:
            # Pad with blank "done" frames so the very first observations
            # still have recent_mem_size entries to stack from.
            self.recent_states = [np.zeros_like(state) for _ in range(self.args.recent_mem_size)]
            self.recent_dones = [True for _ in range(self.args.recent_mem_size)]
        self.recent_states.append(state)
        self.recent_dones.append(done)
        if len(self.recent_states) > self.args.recent_mem_size:
            self.recent_states.pop(0)
            self.recent_dones.pop(0)

    def get_recent(self):
        # Current stacked observation, rescaled back to [0, 1] floats.
        return self.concat_frames(0, True).astype(np.float32) / 255
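
    # Typical per-step flow (illustrative, inferred from the methods above):
    # call reset_recent() at episode start, add_recent_state(frame, done)
    # after every environment step, and feed get_recent() to the policy
    # network when selecting the next action.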

    def concat_frames(self, idx, use_recent=False):
        states = self.recent_states if use_recent else self.states
        dones = self.recent_dones if use_recent else self.dones
        full_state = np.zeros((self.args.hist_len, self.args.im_size, self.args.im_size), dtype=np.uint8)
        # Scan from the newest gap backwards for an episode boundary; if one
        # is found, blank every frame at or before it so a stacked state
        # never mixes frames from two different episodes.
        start, done = 0, False
        for i in range(self.args.hist_len-1, 0, -1):
            for j in range(idx+self.args.hist_idx[i-1], idx+self.args.hist_idx[i]):
                if dones[j]:
                    start = i
                    done = True
                    break
            if done:
                break
        for i in range(start, self.args.hist_len):
            full_state[i] = states[idx+self.args.hist_idx[i]]
        return full_state
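
    # Worked example (illustrative; assumes hist_len=4, so hist_idx=[0,1,2,3]):
    # concat_frames(10) normally stacks frames 10..13 into channels 0..3. If
    # dones[11] is True (that episode ended at frame 11), the scan finds the
    # boundary at i=2, so only frames 12 and 13 are copied into channels 2
    # and 3 while channels 0 and 1 stay zero.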

    def add(self, state, action, reward, done):
        # Write one transition into the circular replay memory, overwriting
        # the oldest entry once replay_memory is full.
        if self.n_states < self.args.replay_memory:
            self.n_states += 1
        self.states[self.insert_idx] = np.asarray(state*255, dtype=np.uint8)
        self.actions[self.insert_idx] = action
        self.rewards[self.insert_idx] = reward
        self.dones[self.insert_idx] = done
        self.insert_idx = (self.insert_idx+1) % self.args.replay_memory

    def add_recent_action(self, action):
        if len(self.recent_actions) == 0:
            # Pad with no-op actions, mirroring add_recent_state's padding.
            self.recent_actions = [0 for _ in range(self.args.recent_mem_size)]
        self.recent_actions.append(action)
        if len(self.recent_actions) > self.args.recent_mem_size:
            self.recent_actions.pop(0)

    def sample(self, batch_size):
        # Refill the staging tensors on first use or once they are exhausted,
        # then hand out contiguous slices; this batches the CPU-to-GPU copies.
        if self.buffer_idx is None or self.buffer_idx+batch_size > self.args.buffer_size:
            self.fill_buffer()
        idx = self.buffer_idx
        self.buffer_idx += batch_size
        return (self.state_buffer[idx:self.buffer_idx], self.action_buffer[idx:self.buffer_idx],
                self.reward_buffer[idx:self.buffer_idx], self.next_state_buffer[idx:self.buffer_idx],
                self.done_buffer[idx:self.buffer_idx])

    def sample_one(self):
        # Rejection-sample an index whose last stacked frame is non-terminal,
        # so the transition has a valid successor state.
        while True:
            idx = np.random.randint(0, self.n_states - self.args.recent_mem_size)
            if not self.dones[idx+self.args.hist_idx[-1]]:
                break
        return self.get(idx)

    def get(self, idx):
        # The action and reward belong to the newest frame of the stack; the
        # done flag is read from the frame after it, and the next state is
        # the same window shifted forward by one frame.
        action_reward_idx = idx + self.args.hist_idx[-1]
        return (self.concat_frames(idx), self.actions[action_reward_idx],
                self.rewards[action_reward_idx], self.concat_frames(idx+1),
                self.dones[action_reward_idx+1])

    def fill_buffer(self):
        self.buffer_idx = 0
        # Draw buffer_size transitions one at a time and copy them into the
        # preallocated (possibly GPU-resident) staging tensors as floats.
        for i in range(self.args.buffer_size):
            state, action, reward, next_state, done = self.sample_one()
            self.state_buffer[i] = torch.from_numpy(state.astype(np.float32) / 255)
            self.action_buffer[i] = int(action)
            self.reward_buffer[i] = float(reward)
            self.next_state_buffer[i] = torch.from_numpy(next_state.astype(np.float32) / 255)
            self.done_buffer[i] = float(done)
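

# --- Minimal usage sketch (not part of the original file) ---
# The `args` namespace here is hypothetical: the field names simply mirror
# what the class reads (hist_len, im_size, replay_memory, buffer_size,
# n_input_channels, gpu); a real run would build them with argparse. Note
# that n_input_channels must equal hist_len for the staging-tensor shapes to
# match what concat_frames produces.
if __name__ == "__main__":
    from types import SimpleNamespace

    args = SimpleNamespace(hist_len=4, im_size=84, replay_memory=10000,
                           buffer_size=512, n_input_channels=4, gpu=-1)
    tm = TransitionManager(args)

    # Fill the replay memory with random frames so there is data to sample.
    for step in range(1000):
        frame = np.random.rand(args.im_size, args.im_size).astype(np.float32)
        tm.add(frame, action=np.random.randint(4), reward=0.0,
               done=(step % 200 == 199))

    states, actions, rewards, next_states, dones = tm.sample(batch_size=32)
    print(states.shape)  # torch.Size([32, 4, 84, 84])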