"""
Example of running PyTorch implementation of DDPG on HalfCheetah.
"""
import copy
from gym.envs.mujoco import HalfCheetahEnv
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.exploration_strategies.base import (
    PolicyWrappedWithExplorationStrategy
)
from rlkit.exploration_strategies.ou_strategy import OUStrategy
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.networks import FlattenMlp, TanhMlpPolicy
from rlkit.torch.ddpg.ddpg import DDPGTrainer
import rlkit.torch.pytorch_util as ptu
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm


def experiment(variant):
    eval_env = NormalizedBoxEnv(HalfCheetahEnv())
    expl_env = NormalizedBoxEnv(HalfCheetahEnv())
    # Or for a specific version:
    # import gym
    # env = NormalizedBoxEnv(gym.make('HalfCheetah-v1'))
    obs_dim = eval_env.observation_space.low.size
    action_dim = eval_env.action_space.low.size
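    # The Q-function maps a concatenated (observation, action) vector to a
    # single Q-value; FlattenMlp flattens its inputs before the MLP layers.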
    qf = FlattenMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    policy = TanhMlpPolicy(
        input_size=obs_dim,
        output_size=action_dim,
        **variant['policy_kwargs']
    )
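    # Target networks start as exact copies of the live networks and are
    # moved towards them with soft (Polyak) updates during training.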
    target_qf = copy.deepcopy(qf)
    target_policy = copy.deepcopy(policy)
    eval_path_collector = MdpPathCollector(eval_env, policy)
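    # For exploration, perturb the deterministic policy with time-correlated
    # Ornstein-Uhlenbeck noise.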
    exploration_policy = PolicyWrappedWithExplorationStrategy(
        exploration_strategy=OUStrategy(action_space=expl_env.action_space),
        policy=policy,
    )
    expl_path_collector = MdpPathCollector(expl_env, exploration_policy)
    replay_buffer = EnvReplayBuffer(variant['replay_buffer_size'], expl_env)
    trainer = DDPGTrainer(
        qf=qf,
        target_qf=target_qf,
        policy=policy,
        target_policy=target_policy,
        **variant['trainer_kwargs']
    )
    algorithm = TorchBatchRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
        replay_buffer=replay_buffer,
        **variant['algorithm_kwargs']
    )
    algorithm.to(ptu.device)
    algorithm.train()
if __name__ == "__main__":
    # noinspection PyTypeChecker
    variant = dict(
        algorithm_kwargs=dict(
            num_epochs=1000,
            num_eval_steps_per_epoch=1000,
            num_trains_per_train_loop=1000,
            num_expl_steps_per_train_loop=1000,
            # Collect this many environment steps before any training updates.
            min_num_steps_before_training=10000,
            max_path_length=1000,
            batch_size=128,
        ),
        trainer_kwargs=dict(
            use_soft_update=True,
            tau=1e-2,  # soft target-network update rate
            discount=0.99,
            qf_learning_rate=1e-3,
            policy_learning_rate=1e-4,
        ),
        qf_kwargs=dict(
            hidden_sizes=[400, 300],
        ),
        policy_kwargs=dict(
            hidden_sizes=[400, 300],
        ),
        replay_buffer_size=int(1E6),
    )
    # ptu.set_gpu_mode(True)  # optionally set the GPU (default=False)
    setup_logger('name-of-experiment', variant=variant)
    experiment(variant)