Make FiniteDifferenceHvp pickleable #745

Merged 2 commits on Jun 25, 2019

2 changes: 1 addition & 1 deletion examples/tf/cluster_demo.py
@@ -39,7 +39,7 @@ def run_task(v):
         baseline=baseline,
         max_path_length=100,
         discount=0.99,
-        step_size=v['step_size'],
+        max_kl_step=v['step_size'],
     )

     runner.setup(algo=algo, env=env)
2 changes: 1 addition & 1 deletion examples/tf/cluster_gym_mujoco_demo.py
@@ -40,7 +40,7 @@ def run_task(vv):
         baseline=baseline,
         max_path_length=100,
         discount=0.99,
-        step_size=vv['step_size'],
+        max_kl_step=vv['step_size'],
     )

     runner.setup(algo=algo, env=env)
2 changes: 1 addition & 1 deletion examples/tf/trpo_swimmer.py
@@ -22,7 +22,7 @@ def run_task(*_):
         baseline=baseline,
         max_path_length=500,
         discount=0.99,
-        step_size=0.01)
+        max_kl_step=0.01)

     runner.setup(algo, env)
     runner.train(n_epochs=40, batch_size=4000)
Binary file added itr_0.pkl
Binary file not shown.
6 changes: 6 additions & 0 deletions src/garage/tf/optimizers/conjugate_gradient_optimizer.py
@@ -133,6 +133,12 @@ def eval(x):

        return eval

+    def __getstate__(self):
+        """Object.__getstate__."""
+        new_dict = self.__dict__.copy()
+        del new_dict['opt_fun']
+        return new_dict
+

class ConjugateGradientOptimizer(Serializable):
    """Performs constrained optimization via line search.
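The new __getstate__ above is the standard pickle hook for dropping state that cannot be serialized: it copies __dict__ and removes 'opt_fun', which presumably caches compiled TensorFlow functions built by update_opt() and is therefore tied to the graph/session. Below is a minimal, self-contained sketch of the same pattern using a toy CachedSolver class (not garage code; the lambda cache stands in for 'opt_fun'):

import pickle


class CachedSolver:
    """Toy stand-in for an object that caches an unpicklable callable."""

    def __init__(self, base_eps=1e-8):
        self.base_eps = base_eps
        # Lambdas (like compiled TF functions) cannot be pickled.
        self._cache = lambda x: x * base_eps

    def __getstate__(self):
        # Same pattern as the diff above: copy __dict__ and drop the
        # unpicklable entry before pickling.
        state = self.__dict__.copy()
        del state['_cache']
        return state

    def __setstate__(self, state):
        # Rebuild the cached callable when unpickling.
        self.__dict__.update(state)
        self._cache = lambda x: x * self.base_eps


solver = pickle.loads(pickle.dumps(CachedSolver(base_eps=1e-5)))
assert solver.base_eps == 1e-5
assert solver._cache(2.0) == 2e-5

Note that the PR only adds __getstate__; presumably the optimizer rebuilds opt_fun the next time update_opt() runs after unpickling, so no __setstate__ counterpart is needed there.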
2 changes: 2 additions & 0 deletions tests/garage/tf/algos/test_trpo_with_model.py
@@ -6,6 +6,7 @@

 from garage.envs import normalize
 from garage.experiment import LocalRunner
+from garage.experiment import snapshotter
 from garage.np.baselines import LinearFeatureBaseline
 from garage.tf.algos import TRPO
 from garage.tf.envs import TfEnv
@@ -35,6 +36,7 @@ def test_trpo_lstm_cartpole(self):
                 optimizer_args=dict(
                     hvp_approach=FiniteDifferenceHvp(base_eps=1e-5)))

+            snapshotter.snapshot_dir = './'
             runner.setup(algo, env)
             last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
             assert last_avg_ret > 80
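The test change points snapshotter.snapshot_dir at the working directory so that per-iteration snapshots (which include the optimizer) actually get pickled during the run; that is presumably also how the stray itr_0.pkl binary ended up committed in this PR. As a rough, generic illustration of what such iteration snapshotting amounts to once every component pickles cleanly (this sketch is not garage's snapshotter):

import os
import pickle
import tempfile

# Stand-in for the training state a snapshotter would persist each epoch.
training_state = {'itr': 0, 'last_avg_ret': 85.0}

snapshot_dir = tempfile.mkdtemp()
path = os.path.join(snapshot_dir, 'itr_0.pkl')
with open(path, 'wb') as f:
    pickle.dump(training_state, f)

with open(path, 'rb') as f:
    assert pickle.load(f)['itr'] == 0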