Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ENH support passing options through config files #325

Merged
merged 24 commits into from May 6, 2022
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
51 changes: 46 additions & 5 deletions benchopt/cli/main.py
@@ -1,3 +1,4 @@
import yaml
import click
import warnings
from pathlib import Path
Expand All @@ -20,6 +21,36 @@
)


def _get_run_args(cli_kwargs, config_file_kwargs):
    """Merge CLI options with options read from a YAML config file.

    Parameters
    ----------
    cli_kwargs : dict
        Options parsed by click, keyed by variable name (e.g.
        ``solver_names``).
    config_file_kwargs : dict
        Options loaded from the YAML config file, keyed by the option
        flag name without the leading dashes (e.g. ``solver``).

    Returns
    -------
    list
        The option values, in the fixed positional order expected by
        ``run``.

    Raises
    ------
    ValueError
        If the config file contains a key with no matching CLI option.
    """
    options = {}
    for key, value in config_file_kwargs.items():
        opt = '--' + key
        if opt not in OPT_TO_VAR:
            # Fail with an explicit message instead of the opaque
            # KeyError a typo in the YAML file would otherwise produce.
            valid = ', '.join(o.lstrip('-') for o in OPT_TO_VAR)
            raise ValueError(
                f"Invalid config file option '{key}'. "
                f"Valid options are: {valid}."
            )
        options[OPT_TO_VAR[opt]] = value
    # CLI options take precedence over the config file.
    # NOTE(review): click fills in defaults for options the user did not
    # pass, so those defaults also override config-file values here —
    # confirm this precedence is intended.
    options.update(cli_kwargs)
    return_names = [
        "benchmark",
        "solver_names",
        "forced_solvers",
        "dataset_names",
        "objective_filters",
        "max_runs",
        "n_repetitions",
        "timeout",
        "plot",
        "html",
        "pdb",
        "do_profile",
        "env_name",
        "old_objective_filters",
    ]
    return [options[name] for name in return_names]


# Map each CLI flag to the click variable name it populates.  Shared by
# the ``@click.option`` declarations (which look up their variable name
# here) and by ``_get_run_args``, which prepends ``--`` to each YAML
# config-file key and translates it through this mapping.
OPT_TO_VAR = {
    '--objective-filter': 'objective_filters',
    '--solver': 'solver_names',
    '--dataset': 'dataset_names',
    '--n-repetitions': 'n_repetitions',
}


@main.command(
help="Run a benchmark with benchopt.",
epilog="To (re-)install the required solvers and datasets "
Expand All @@ -28,14 +59,14 @@
)
@click.argument('benchmark', type=click.Path(exists=True),
shell_complete=complete_benchmarks)
@click.option('--objective-filter', '-o', 'objective_filters',
@click.option('--objective-filter', '-o', OPT_TO_VAR['--objective-filter'],
metavar='<objective_filter>', multiple=True, type=str,
help="Filter the objective based on its parametrized name. This "
"can be used to only include one set of parameters.")
@click.option('--old_objective-filter', '-p', 'old_objective_filters',
multiple=True, type=str,
help="Deprecated alias for --objective_filters/-o.")
@click.option('--solver', '-s', 'solver_names',
@click.option('--solver', '-s', OPT_TO_VAR['--solver'],
TomDLT marked this conversation as resolved.
Show resolved Hide resolved
metavar="<solver_name>", multiple=True, type=str,
help="Include <solver_name> in the benchmark. By default, all "
"solvers are included. When `-s` is used, only listed solvers"
Expand Down Expand Up @@ -65,6 +96,8 @@
@click.option('--timeout',
metavar="<int>", default=100, show_default=True, type=int,
help='Timeout a solver when run for more than <timeout> seconds')
@click.option('--file', 'config_file', default=None,
help="YAML configuration file")
@click.option('--plot/--no-plot', default=True,
help="Whether or not to plot the results. Default is True.")
@click.option('--html/--no-html', default=True,
Expand Down Expand Up @@ -94,10 +127,18 @@
help="Run the benchmark in the conda environment "
"named <env_name>. To install the required solvers and "
"datasets, see the command `benchopt install`.")
def run(benchmark, solver_names, forced_solvers, dataset_names,
def run(**kwargs):
config_file = kwargs.pop("config_file")
if config_file is not None:
with open(config_file, "r") as f:
config = yaml.safe_load(f)
else:
config = {}
(
benchmark, solver_names, forced_solvers, dataset_names,
objective_filters, max_runs, n_repetitions, timeout,
plot=True, html=True, pdb=False, do_profile=False,
env_name='False', old_objective_filters=None):
plot, html, pdb, do_profile, env_name, old_objective_filters
) = _get_run_args(kwargs, config)
if len(old_objective_filters):
warnings.warn(
'Using the -p option is deprecated, use -o instead',
Expand Down
1 change: 1 addition & 0 deletions setup.cfg
Expand Up @@ -37,6 +37,7 @@ install_requires =
mako
psutil
plotly>=4.12
pyyaml
line-profiler
project_urls =
Documentation = https://benchopt.github.io/
Expand Down