
Commit

Merge pull request #125 from kiudee/124_fast_resume
Implement fast resume functionality
kiudee committed Jun 26, 2021
2 parents bd89f3d + 103efe1 commit 8b24eb7
Showing 5 changed files with 91 additions and 16 deletions.
2 changes: 2 additions & 0 deletions HISTORY.rst
@@ -4,6 +4,8 @@ History

0.7.3 (2021-06-26)
------------------
* Add ``--fast-resume`` switch to the tuner, which allows instant resume
functionality from disk (new default).
* Fix the match parser producing incorrect results when concurrency > 1 is
used for playing matches.
* Fix the server for distributed tuning trying to compute the current optimum
11 changes: 11 additions & 0 deletions docs/parameters.myst
@@ -322,6 +322,17 @@ fitting process:
- `--resume / --no-resume`
- Let the optimizer resume if it finds points it can use.
[default: True]
* -
- `--fast-resume / --no-fast-resume`
- If set, resume the tuning process with the model in the file specified by
`--model-path`.
Note that a full reinitialization will be performed if the parameter
ranges have been changed.
[default: True]
* -
- `--model-path PATH`
- The current optimizer will be saved to this file for fast resuming.
[default: model.pkl]
* -
- `-v --verbose`
- Turn on debug output. `-vv` turns on the debug flag for cutechess-cli.
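
The `--fast-resume` entry above hinges on one check: the saved optimizer can only be reused if its search space still matches the parameter ranges in the current configuration. A minimal sketch of that comparison, assuming the scikit-optimize `Space`/`Real` classes that the tuner's optimizer builds on and a made-up parameter name ("cpuct" is hypothetical; the real tuner builds the space from the tuning configuration file):

# Sketch only: illustrates why changed ranges force a reinitialization.
from skopt.space import Real, Space

saved_space = Space([Real(0.0, 1.0, name="cpuct")])      # space stored alongside model.pkl
unchanged_space = Space([Real(0.0, 1.0, name="cpuct")])  # same ranges  -> fast resume
widened_space = Space([Real(0.0, 2.0, name="cpuct")])    # changed ranges -> reinitialize

print(saved_space == unchanged_space)  # True
print(saved_space == widened_space)    # False
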
17 changes: 16 additions & 1 deletion poetry.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions pyproject.toml
@@ -27,6 +27,7 @@ scikit-optimize = "^0.8"
emcee = "^3.0.2"
atomicwrites = "^1.4.0"
scikit-learn = ">=0.22,<0.24"
dill = "^0.3.4"
joblib = {version = "^0.16.0", optional = true}
psycopg2 = {version = "^2.8.5", optional = true}
sqlalchemy = {version = "^1.3.18", optional = true}
76 changes: 61 additions & 15 deletions tune/cli.py
@@ -7,6 +7,7 @@
from datetime import datetime

import click
import dill
import matplotlib.pyplot as plt
import numpy as np
from atomicwrites import AtomicWriter
@@ -226,6 +227,22 @@ def run_server(verbose, logfile, command, experiment_file, dbconfig):
help="Let the optimizer resume, if it finds points it can use.",
show_default=True,
)
@click.option(
"--fast-resume/--no-fast-resume",
default=True,
help="If set, resume the tuning process with the model in the file specified by"
" the --model-path. "
"Note, that a full reinitialization will be performed, if the parameter"
"ranges have been changed.",
show_default=True,
)
@click.option(
"--model-path",
default="model.pkl",
help="The current optimizer will be saved for fast resuming to this file.",
type=click.Path(exists=False),
show_default=True,
)
@click.option("--verbose", "-v", count=True, default=0, help="Turn on debug output.")
@click.option(
"--warp-inputs/--no-warp-inputs",
@@ -252,6 +269,8 @@ def local( # noqa: C901
random_seed=0,
result_every=1,
resume=True,
fast_resume=True,
model_path="model.pkl",
verbose=0,
warp_inputs=True,
):
@@ -278,6 +297,7 @@ def local( # noqa: C901
ss = np.random.SeedSequence(settings.get("random_seed", random_seed))
# 2. Create kernel
# 3. Create optimizer

random_state = np.random.RandomState(np.random.MT19937(ss.spawn(1)[0]))
gp_kwargs = dict(
# TODO: Due to a bug in scikit-learn 0.23.2, we set normalize_y=False:
@@ -336,21 +356,43 @@ def local( # noqa: C901
X = X_reduced
y = y_reduced
noise = noise_reduced

iteration = len(X)
root_logger.info(
f"Importing {iteration} existing datapoints. This could take a while..."
)
opt.tell(
X,
y,
noise_vector=noise,
gp_burnin=settings.get("gp_initial_burnin", gp_initial_burnin),
gp_samples=settings.get("gp_initial_samples", gp_initial_samples),
n_samples=settings.get("n_samples", 1),
progress=True,
)
root_logger.info("Importing finished.")
reinitialize = True
if fast_resume:
path = pathlib.Path(model_path)
if path.exists():
with open(model_path, mode="rb") as model_file:
old_opt = dill.load(model_file)
root_logger.info(
f"Resuming from existing optimizer in {model_path}."
)
if opt.space == old_opt.space:
old_opt.acq_func = opt.acq_func
old_opt.acq_func_kwargs = opt.acq_func_kwargs
opt = old_opt
reinitialize = False
else:
root_logger.info(
"Parameter ranges have been changed and the "
"existing optimizer instance is no longer "
"valid. Reinitializing now."
)

if reinitialize:
root_logger.info(
f"Importing {iteration} existing datapoints. "
f"This could take a while..."
)
opt.tell(
X,
y,
noise_vector=noise,
gp_burnin=settings.get("gp_initial_burnin", gp_initial_burnin),
gp_samples=settings.get("gp_initial_samples", gp_initial_samples),
n_samples=settings.get("n_samples", 1),
progress=True,
)
root_logger.info("Importing finished.")

# 4. Main optimization loop:
while True:
@@ -457,7 +499,9 @@ def local( # noqa: C901
root_logger.info(f"Experiment finished ({difference}s elapsed).")

score, error_variance = parse_experiment_result(out_exp, **settings)
root_logger.info("Got Elo: {} +- {}".format(-score * 100, np.sqrt(error_variance) * 100))
root_logger.info(
"Got Elo: {} +- {}".format(-score * 100, np.sqrt(error_variance) * 100)
)
root_logger.info("Updating model")
while True:
try:
@@ -511,6 +555,8 @@ def local( # noqa: C901

with AtomicWriter(data_path, mode="wb", overwrite=True).open() as f:
np.savez_compressed(f, np.array(X), np.array(y), np.array(noise))
with AtomicWriter(model_path, mode="wb", overwrite=True).open() as f:
dill.dump(opt, f)


if __name__ == "__main__":
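
The persistence side of the change is the pair of `dill.dump`/`dill.load` calls in the diff above, with the write wrapped in `AtomicWriter` so an interrupted run cannot leave a truncated `model.pkl` behind. A self-contained sketch of that round trip, with a plain dict standing in for the bayes-skopt optimizer that the tuner actually serializes (the save/load pattern is the same):

# Sketch only: the dict is a stand-in for the real optimizer object.
import dill
from atomicwrites import AtomicWriter

model_path = "model.pkl"
optimizer_state = {"iteration": 42, "theta": [0.5, 1.5]}

# Atomic write: the target file is replaced only after the dump completes.
with AtomicWriter(model_path, mode="wb", overwrite=True).open() as f:
    dill.dump(optimizer_state, f)

# Fast resume: load the previously saved state back from disk.
with open(model_path, mode="rb") as f:
    restored = dill.load(f)

assert restored == optimizer_state
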

