Improve collector #125

Merged · 20 commits · Jul 12, 2020
Changes from 13 commits
24 changes: 16 additions & 8 deletions test/base/env.py
@@ -4,10 +4,11 @@


 class MyTestEnv(gym.Env):
-    def __init__(self, size, sleep=0, dict_state=False):
+    def __init__(self, size, sleep=0, dict_state=False, ma_rew=0):
         self.size = size
         self.sleep = sleep
         self.dict_state = dict_state
+        self.ma_rew = ma_rew
         self.action_space = Discrete(2)
         self.reset()

@@ -16,6 +17,12 @@ def reset(self, state=0):
         self.index = state
         return {'index': self.index} if self.dict_state else self.index
 
+    def _get_reward(self, x):
+        x = int(x)
+        if self.ma_rew > 0:
+            return [x] * self.ma_rew
+        return x
+
     def step(self, action):
         if self.done:
             raise ValueError('step after done !!!')
@@ -24,21 +31,22 @@ def step(self, action):
         if self.index == self.size:
             self.done = True
             if self.dict_state:
-                return {'index': self.index}, 0, True, {}
+                return {'index': self.index}, self._get_reward(0), True, {}
             else:
-                return self.index, 0, True, {}
+                return self.index, self._get_reward(0), True, {}
         if action == 0:
             self.index = max(self.index - 1, 0)
             if self.dict_state:
-                return {'index': self.index}, 0, False, {'key': 1, 'env': self}
+                return {'index': self.index}, self._get_reward(0), False, \
+                    {'key': 1, 'env': self}
             else:
-                return self.index, 0, False, {}
+                return self.index, self._get_reward(0), False, {}
         elif action == 1:
             self.index += 1
             self.done = self.index == self.size
             if self.dict_state:
-                return {'index': self.index}, int(self.done), self.done, \
-                    {'key': 1, 'env': self}
+                return {'index': self.index}, self._get_reward(self.done), \
+                    self.done, {'key': 1, 'env': self}
             else:
-                return self.index, int(self.done), self.done, \
+                return self.index, self._get_reward(self.done), self.done, \
                     {'key': 1, 'env': self}
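
With the new `ma_rew` argument, `MyTestEnv` returns a list of `ma_rew` identical per-agent rewards instead of a scalar, which is what the new multi-agent collector test relies on. A minimal sketch of the difference in behaviour (not part of the diff; it assumes the script is run from test/base so that env.py is importable):

    from env import MyTestEnv  # test/base/env.py

    scalar_env = MyTestEnv(size=3)              # default ma_rew=0: scalar reward
    scalar_env.reset()
    obs, rew, done, info = scalar_env.step(1)
    print(rew)        # 0 (an int) until the terminal step, then 1

    ma_env = MyTestEnv(size=3, ma_rew=4)        # multi-agent style reward
    ma_env.reset()
    obs, rew, done, info = ma_env.step(1)
    print(rew)        # [0, 0, 0, 0] -- one identical entry per "agent"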
39 changes: 35 additions & 4 deletions test/base/test_collector.py
@@ -27,16 +27,16 @@ def learn(self):

 def preprocess_fn(**kwargs):
     # modify info before adding into the buffer
-    if kwargs.get('info', None) is not None:
+    # if info is not provided from env, it will be a `Batch()`.
+    if not kwargs.get('info', Batch()).is_empty():
         n = len(kwargs['obs'])
         info = kwargs['info']
         for i in range(n):
             info[i].update(rew=kwargs['rew'][i])
         return {'info': info}
-        # or
-        # return Batch(info=info)
+        # or: return Batch(info=info)
     else:
-        return {}
+        return Batch()
 
 
 class Logger(object):
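
The condition in preprocess_fn changed because the Collector now passes an empty Batch() (rather than None) when the environment supplies no info, and the fallback return value changed to Batch() to match. A rough illustration of why the old `is not None` check no longer detects the "no info" case (assumes only tianshou's Batch, which this PR already uses):

    from tianshou.data import Batch

    info = Batch()               # what the collector passes when the env gives no info
    print(info is not None)      # True  -- the old check would enter the branch anyway
    print(info.is_empty())       # True  -- the new check correctly detects "no info"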
@@ -119,6 +119,37 @@ def test_collector_with_dict_state():
     print(batch['obs_next']['index'])
 
 
+def test_collector_with_ma():
+    def reward_metric(x):
+        return x.sum()
+    env = MyTestEnv(size=5, sleep=0, ma_rew=4)
+    policy = MyPolicy()
+    c0 = Collector(policy, env, ReplayBuffer(size=100),
+                   preprocess_fn, reward_metric=reward_metric)
+    c0.collect(n_step=3)
+    c0.collect(n_episode=3)
+    env_fns = [lambda x=i: MyTestEnv(size=x, sleep=0, ma_rew=4)
+               for i in [2, 3, 4, 5]]
+    envs = VectorEnv(env_fns)
+    c1 = Collector(policy, envs, ReplayBuffer(size=100),
+                   preprocess_fn, reward_metric=reward_metric)
+    c1.collect(n_step=10)
+    c1.collect(n_episode=[2, 1, 1, 2])
+    batch = c1.sample(10)
+    print(batch)
+    c0.buffer.update(c1.buffer)
+    assert np.allclose(c0.buffer[:len(c0.buffer)].obs, [
+        0., 1., 2., 3., 4., 0., 1., 2., 3., 4., 0., 1., 2., 3., 4., 0., 1.,
+        0., 1., 2., 0., 1., 0., 1., 2., 3., 0., 1., 2., 3., 4., 0., 1., 0.,
+        1., 2., 0., 1., 0., 1., 2., 3., 0., 1., 2., 3., 4.])
+    c2 = Collector(policy, envs, ReplayBuffer(size=100, stack_num=4),
+                   preprocess_fn, reward_metric=reward_metric)
+    c2.collect(n_episode=[0, 0, 0, 10])
+    batch = c2.sample(10)
+    print(batch['obs_next'])
+
+
 if __name__ == '__main__':
     test_collector()
     test_collector_with_dict_state()
+    test_collector_with_ma()
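
The new `reward_metric` argument passed to the Collector above is used to reduce the per-step vector reward to a single scalar for the collector's statistics; the test simply sums it. A small sketch of that reduction (the helper below is illustrative, not part of the PR; any scalar reduction such as mean or a single agent's entry would fit the same interface):

    import numpy as np

    def reward_metric(x):
        # x is one step's vector reward, e.g. np.array([1, 1, 1, 1]) when ma_rew=4
        return np.sum(x)

    print(reward_metric(np.array([1, 1, 1, 1])))  # 4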