Commit 65f17c7

Reformat using black

kiudee committed Feb 1, 2020
1 parent 7c0152e
Showing 4 changed files with 89 additions and 89 deletions.
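
This commit is a pure formatting pass, so a change set like it would typically be produced by running black over the four touched files. The snippet below is a sketch under assumptions: neither the black version nor the line-length setting is recorded in this commit, and the joined call sites suggest a limit above black's default of 88.

    # Assumed invocation; version and line length are guesses, not repository config.
    pip install "black==19.10b0"      # a release that was current around Feb 2020
    black --line-length 110 tests/test_tune.py tune/cli.py \
        tune/db_workers/tuning_client.py tune/db_workers/tuning_server.py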
4 changes: 2 additions & 2 deletions tests/test_tune.py
@@ -30,6 +30,6 @@ def test_command_line_interface():
runner = CliRunner()
result = runner.invoke(cli.cli)
assert result.exit_code == 0
help_result = runner.invoke(cli.cli, ['--help'])
help_result = runner.invoke(cli.cli, ["--help"])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
assert "--help Show this message and exit." in help_result.output
37 changes: 14 additions & 23 deletions tune/cli.py
@@ -12,9 +12,9 @@ def cli():


@cli.command()
@click.option('--verbose', '-v', is_flag=True, default=False, help='Turn on debug output.')
@click.option('--logfile', default=None, help='Path to where the log is saved to.')
@click.argument('dbconfig')
@click.option("--verbose", "-v", is_flag=True, default=False, help="Turn on debug output.")
@click.option("--logfile", default=None, help="Path to where the log is saved to.")
@click.argument("dbconfig")
def run_client(verbose, logfile, dbconfig):
""" Run the client to generate games for distributed tuning.
@@ -24,21 +24,18 @@ def run_client(verbose, logfile, dbconfig):
"""
log_level = logging.DEBUG if verbose else logging.INFO
logging.basicConfig(
level=log_level,
filename=logfile,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
level=log_level, filename=logfile, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
tc = TuningClient(dbconfig_path=dbconfig)
tc.run()


@cli.command()
@click.option('--verbose', '-v', is_flag=True, default=False, help='Turn on debug output.')
@click.option('--logfile', default=None, help='Path to where the log is saved to.')
@click.argument('command')
@click.argument('experiment_file')
@click.argument('dbconfig')
@click.option("--verbose", "-v", is_flag=True, default=False, help="Turn on debug output.")
@click.option("--logfile", default=None, help="Path to where the log is saved to.")
@click.argument("command")
@click.argument("experiment_file")
@click.argument("dbconfig")
def run_server(verbose, logfile, command, experiment_file, dbconfig):
"""Run the tuning server for a given EXPERIMENT_FILE (json).
@@ -52,20 +49,14 @@ def run_server(verbose, logfile, command, experiment_file, dbconfig):
"""
log_level = logging.DEBUG if verbose else logging.INFO
logging.basicConfig(
level=log_level,
filename=logfile,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
level=log_level, filename=logfile, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
tc = TuningServer(
experiment_path=experiment_file,
dbconfig_path=dbconfig
)
if command == 'run':
tc = TuningServer(experiment_path=experiment_file, dbconfig_path=dbconfig)
if command == "run":
tc.run()
elif command == 'deactivate':
elif command == "deactivate":
tc.deactivate()
elif command == 'reactivate':
elif command == "reactivate":
tc.reactivate()
else:
raise ValueError(f"Command {command} is not recognized. Terminating...")
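
The two commands above define the distributed-tuning entry points: run_client generates games against a database described by DBCONFIG, while run_server manages an EXPERIMENT_FILE and accepts the sub-commands run, deactivate and reactivate. Assuming the click group is exposed as a `tune` console script (the entry point is not part of this diff), invocations would look roughly like:

    # Hypothetical usage; the `tune` script name is an assumption, and depending on
    # the installed click version the commands may be spelled run_client/run_server.
    tune run-server --logfile server.log run experiment.json dbconfig.json
    tune run-client --verbose --logfile client.log dbconfig.json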
35 changes: 19 additions & 16 deletions tune/db_workers/tuning_client.py
@@ -91,13 +91,13 @@ def parse_experiment(self, results):
return MatchResult(wins=w, losses=l, draws=d)

def run_benchmark(self):
path = os.path.join(os.path.curdir, 'lc0')
path = os.path.join(os.path.curdir, "lc0")
out = subprocess.run([path, "benchmark"], capture_output=True)
s = out.stdout.decode("utf-8")
result = float(re.findall(r"([0-9]+\.[0-9]+)\snodes per second", s)[0])
self.lc0_benchmark = result

path = os.path.join(os.path.curdir, 'sf')
path = os.path.join(os.path.curdir, "sf")
out = subprocess.run([path, "bench"], capture_output=True)
# Stockfish outputs results as stderr:
s = out.stderr.decode("utf-8")
@@ -112,20 +112,19 @@ def adjust_time_control(self, time_control, lc0_nodes, sf_nodes):
tc_lc0 = [x * lc0_ratio for x in tc_lc0]
tc_sf = [x * sf_ratio for x in tc_sf]
# TODO: support non-increment time-control
return TimeControl(engine1=f"{tc_lc0[0]}+{tc_lc0[1]}",
engine2=f"{tc_sf[0]}+{tc_sf[1]}")
return TimeControl(engine1=f"{tc_lc0[0]}+{tc_lc0[1]}", engine2=f"{tc_sf[0]}+{tc_sf[1]}")

@staticmethod
def set_working_directories(engine_config):
path = os.getcwd()
engine_config[0]['workingDirectory'] = path
engine_config[1]['workingDirectory'] = path
if os.name == 'nt': # Windows needs .exe files to work correctly
engine_config[0]['command'] = 'lc0.exe'
engine_config[1]['command'] = 'sf.exe'
engine_config[0]["workingDirectory"] = path
engine_config[1]["workingDirectory"] = path
if os.name == "nt": # Windows needs .exe files to work correctly
engine_config[0]["command"] = "lc0.exe"
engine_config[1]["command"] = "sf.exe"
else:
engine_config[0]['command'] = './lc0'
engine_config[1]['command'] = './sf'
engine_config[0]["command"] = "./lc0"
engine_config[1]["command"] = "./sf"

def run(self):
while True:
@@ -168,15 +167,19 @@ def run(self):
sleep(2)
# b) Adjust time control:
if self.lc0_benchmark is None:
self.logger.info('Running initial nodes/second benchmark to calibrate time controls...')
self.logger.info("Running initial nodes/second benchmark to calibrate time controls...")
self.run_benchmark()
self.logger.info(f"Benchmark complete. Results: lc0: {self.lc0_benchmark} nps, sf: {self.sf_benchmark} nps")
self.logger.info(
f"Benchmark complete. Results: lc0: {self.lc0_benchmark} nps, sf: {self.sf_benchmark} nps"
)
else:
self.logger.debug(f"Initial benchmark results: lc0: {self.lc0_benchmark} nps, sf: {self.sf_benchmark} nps")
self.logger.debug(
f"Initial benchmark results: lc0: {self.lc0_benchmark} nps, sf: {self.sf_benchmark} nps"
)
time_control = self.adjust_time_control(
TimeControl(engine1=config["time_control"][0], engine2=config["time_control"][1]),
float(job['lc0_nodes']),
float(job['sf_nodes'])
float(job["lc0_nodes"]),
float(job["sf_nodes"]),
)
self.logger.debug(f"Adjusted time control from {config['time_control']} to {time_control}")

102 changes: 54 additions & 48 deletions tune/db_workers/tuning_server.py
@@ -55,43 +55,42 @@ def __init__(self, experiment_path, dbconfig_path, **kwargs):
self.logger.debug(f"self.experiment = \n{self.experiment}")
else:
raise ValueError("No experiment config file found at provided path")
self.rng = np.random.RandomState(self.experiment.get('random_seed', 123))
self.rng = np.random.RandomState(self.experiment.get("random_seed", 123))
self.setup_tuner()

try:
os.makedirs('experiments')
os.makedirs("experiments")
except FileExistsError:
pass
# TODO: in principle after deleting all jobs from the database,
# this could be problematic:
self.pos = None
self.chain = None
if 'tune_id' in self.experiment:
if "tune_id" in self.experiment:
self.resume_tuning()

def write_experiment_file(self):
with open(self.experiment_path, "w") as experiment_file:
experiment_file.write(json.dumps(self.experiment, indent=2))

def save_state(self):
path = os.path.join(
"experiments",
f"data_tuneid_{self.experiment['tune_id']}.npz")
path = os.path.join("experiments", f"data_tuneid_{self.experiment['tune_id']}.npz")
np.savez_compressed(path, np.array(self.opt.gp.pos_), np.array(self.opt.gp.chain_))

def resume_tuning(self):
path = os.path.join("experiments", f"data_tuneid_{self.experiment['tune_id']}.npz")
if os.path.exists(path):
data = np.load(path)
self.opt.gp.pos_ = data['arr_0']
self.opt.gp.chain_ = data['arr_1']
self.opt.gp.pos_ = data["arr_0"]
self.opt.gp.chain_ = data["arr_1"]

def parse_dimensions(self, param_dict):
def floatify(s):
try:
return float(s)
except ValueError:
return s

dimensions = []
for s in param_dict.values():
prior_str = re.findall(r"(\w+)\(", s)[0]
@@ -127,30 +126,29 @@ def parse_priors(self, priors):
return result

def setup_tuner(self):
self.tunecfg = self.experiment['tuner']
self.dimensions = self.parse_dimensions(self.tunecfg['parameters'])
self.tunecfg = self.experiment["tuner"]
self.dimensions = self.parse_dimensions(self.tunecfg["parameters"])
self.space = normalize_dimensions(self.dimensions)
self.priors = self.parse_priors(self.tunecfg['priors'])

self.kernel = (
ConstantKernel(constant_value=self.tunecfg.get("variance_value", 0.1 ** 2),
constant_value_bounds=tuple(self.tunecfg.get("variance_bounds", (0.01 ** 2, 0.5 ** 2))))
* Matern(
length_scale=self.tunecfg.get("length_scale_value", 0.3),
length_scale_bounds=tuple(self.tunecfg.get("length_scale_bounds", (0.2, 0.8))),
nu=2.5
)
self.priors = self.parse_priors(self.tunecfg["priors"])

self.kernel = ConstantKernel(
constant_value=self.tunecfg.get("variance_value", 0.1 ** 2),
constant_value_bounds=tuple(self.tunecfg.get("variance_bounds", (0.01 ** 2, 0.5 ** 2))),
) * Matern(
length_scale=self.tunecfg.get("length_scale_value", 0.3),
length_scale_bounds=tuple(self.tunecfg.get("length_scale_bounds", (0.2, 0.8))),
nu=2.5,
)
self.opt = Optimizer(
dimensions=self.dimensions,
n_points=self.tunecfg.get('n_points', 1000),
n_initial_points=self.tunecfg.get('n_initial_points', 5 * len(self.dimensions)),
n_points=self.tunecfg.get("n_points", 1000),
n_initial_points=self.tunecfg.get("n_initial_points", 5 * len(self.dimensions)),
gp_kernel=self.kernel,
gp_kwargs=dict(normalize_y=True),
gp_priors=self.priors,
acq_func=self.tunecfg.get('acq_func', 'ts'),
acq_func_kwargs=self.tunecfg.get('acq_func_kwargs', None), # TODO: Check if this works for all parameters
random_state=self.rng.randint(0, np.iinfo(np.int32).max)
acq_func=self.tunecfg.get("acq_func", "ts"),
acq_func_kwargs=self.tunecfg.get("acq_func_kwargs", None), # TODO: Check if this works for all parameters
random_state=self.rng.randint(0, np.iinfo(np.int32).max),
)

def query_data(self, cursor, tune_id, include_active=False):
@@ -195,20 +193,22 @@ def query_data(self, cursor, tune_id, include_active=False):

@staticmethod
def change_engine_config(engine_config, params):
init_strings = InitStrings(engine_config[0]['initStrings']) # TODO: allow tuning of different index
init_strings = InitStrings(engine_config[0]["initStrings"]) # TODO: allow tuning of different index
for k, v in params.items():
init_strings[k] = v

def insert_jobs(self, conn, cursor, new_x):
# 2. First set all active jobs to inactive:
try:
cursor.execute("""
cursor.execute(
"""
update tuning_jobs set active=false where tune_id=%(tune_id)s;
""", {"tune_id": self.experiment["tune_id"]})
""",
{"tune_id": self.experiment["tune_id"]},
)

# 3. Insert new jobs:
job_dict = {"engine": self.experiment["engine"],
"cutechess": self.experiment["cutechess"]}
job_dict = {"engine": self.experiment["engine"], "cutechess": self.experiment["cutechess"]}
timestamp = datetime.utcnow().replace(tzinfo=pytz.utc)
for i, tc in enumerate(self.experiment["time_controls"]):
job_dict["time_control"] = tc
@@ -222,17 +222,20 @@ def insert_jobs(self, conn, cursor, new_x):
%(lc0_nodes)s, %(sf_nodes)s, %(new_x)s)
returning job_id;
"""
cursor.execute(query, {
"timestamp": timestamp,
"config": job_json,
"active": True,
"tune_id": self.experiment["tune_id"],
"job_weight": self.experiment.get("job_weight", 1.0),
"minimum_version": self.experiment.get("minimum_version", 1),
"lc0_nodes": self.experiment["lc0_nodes"],
"sf_nodes": self.experiment["sf_nodes"],
"new_x": new_x
})
cursor.execute(
query,
{
"timestamp": timestamp,
"config": job_json,
"active": True,
"tune_id": self.experiment["tune_id"],
"job_weight": self.experiment.get("job_weight", 1.0),
"minimum_version": self.experiment.get("minimum_version", 1),
"lc0_nodes": self.experiment["lc0_nodes"],
"sf_nodes": self.experiment["sf_nodes"],
"new_x": new_x,
},
)
job_id = cursor.fetchone()[0]

query = """
@@ -241,7 +244,9 @@ def insert_jobs(self, conn, cursor, new_x):
values
(%(job_id)s, %(tune_id)s, %(time_control)s, 0, 0, 0);
"""
cursor.execute(query, {"job_id": job_id, "tune_id": self.experiment["tune_id"], "time_control": str(tc)})
cursor.execute(
query, {"job_id": job_id, "tune_id": self.experiment["tune_id"], "time_control": str(tc)}
)
conn.commit()
except BaseException:
conn.rollback()
@@ -255,7 +260,8 @@ def run(self):
with conn.cursor(cursor_factory=DictCursor) as curs:
if "tune_id" not in self.experiment:
# This appears to be a new tune, create entry in tunes database:
curs.execute("""
curs.execute(
"""
insert into tunes (description) VALUES (%(desc)s) returning tune_id;
""",
{"desc": self.experiment.get("description", "This job does not have a description")},
@@ -268,7 +274,7 @@ def run(self):
# 2. Check if minimum sample size and minimum wait time are reached, then query data and update model:
with psycopg2.connect(**self.connect_params) as conn:
with conn.cursor(cursor_factory=DictCursor) as curs:
X, y, samplesize_reached = self.query_data(curs, self.experiment['tune_id'], include_active=True)
X, y, samplesize_reached = self.query_data(curs, self.experiment["tune_id"], include_active=True)
self.logger.debug(f"Queried the database for data and got (last 5):\n{X[-5:]}\n{y[-5:]}")
if len(X) == 0:
self.logger.info("There are no datapoints yet, start first job")
@@ -294,10 +300,10 @@ def run(self):
y.tolist(),
fit=True,
replace=True,
n_samples=self.tunecfg['n_samples'],
gp_samples=self.tunecfg['gp_samples'],
gp_burnin=self.tunecfg['gp_burnin'],
progress=False
n_samples=self.tunecfg["n_samples"],
gp_samples=self.tunecfg["gp_samples"],
gp_burnin=self.tunecfg["gp_burnin"],
progress=False,
)
later = datetime.now()
difference = (later - now).total_seconds()
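
Since the intent is formatting only, a local check along these lines would confirm that black is satisfied and that the change set matches the summary above (flags are assumed, as noted earlier):

    # Assumed flags; black --check exits non-zero if anything would still be reformatted.
    black --check --line-length 110 tests/ tune/
    git diff --stat 7c0152e 65f17c7   # expect: 4 files changed, 89 insertions(+), 89 deletions(-)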
