feat: Add method to get the best configuration directly from Tuner, add com… #767
@@ -14,7 +14,7 @@
 import time
 from collections import OrderedDict
 from pathlib import Path
-from typing import Callable, Dict, List, Optional, Set, Tuple, Any
+from typing import Callable, Dict, List, Optional, Set, Tuple, Any, Union

 import dill as dill

@@ -44,6 +44,7 @@
     experiment_path,
     name_from_base,
     dump_json_with_numpy,
+    metric_name_mode,
 )

 logger = logging.getLogger(__name__)
@@ -683,3 +684,30 @@ def _default_callback():
         :return: Default callback to store results
         """
         return StoreResultsCallback()
+
+    def best_config(
+        self, metric: Optional[Union[str, int]] = 0
+    ) -> Tuple[int, Dict[str, Any]]:
+        """
+        :param metric: Indicates which metric to use; can be the index or the name of the metric.
+            Defaults to 0, the first metric defined in the Scheduler.
+        :return: the best configuration found while tuning for the given metric and the associated trial-id
+        """
+        metric_name, metric_mode = metric_name_mode(
+            metric_names=self.scheduler.metric_names(),
+            metric_mode=self.scheduler.metric_mode(),
+            metric=metric,
+        )
+        trial_id, best_metric = print_best_metric_found(
+            self.tuning_status, metric_names=[metric_name], mode=metric_mode
+        )
+        config = self.trial_backend._trial_dict[trial_id].config
+
+        logger.info(
+            f"If you want to retrain the best configuration found, you can run:\n"
+            f"```tuner.trial_backend.start_trial(config={config})``` to start training from scratch\n"
+            f"or\n"
+            f"```tuner.trial_backend.start_trial(config={config}, checkpoint_trial_id={trial_id})``` to start from "
+            f"the last checkpoint (your script should have stored a checkpoint)"
+        )
+        return trial_id, config

Comment on lines +710 to +711

Reviewer: Here or in the FAQ entry, would it make sense to explain when you would use one versus the other?

Author: It is not really "versus": you can only restart from a checkpoint if your script supports checkpointing, which may not be the case. I do not think it would make sense to explain checkpointing there, as it has its own set of FAQ items.
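For readers of this diff, here is a minimal usage sketch of the new method. It assumes `tuner` is a `Tuner` whose `run()` has completed; the metric name `validation_accuracy` is a hypothetical placeholder.

```python
# Sketch only: assumes a finished tuning run, i.e. `tuner.run()` has returned.
# best_config defaults to metric index 0, the first metric of the scheduler.
trial_id, config = tuner.best_config()

# The metric can also be selected by name or index; "validation_accuracy"
# is a hypothetical name, substitute one of your scheduler's metrics.
trial_id, config = tuner.best_config(metric="validation_accuracy")

# Retrain the best configuration from scratch, as the log message suggests:
tuner.trial_backend.start_trial(config=config)

# Or resume from the best trial's last checkpoint, provided the training
# script stores checkpoints:
tuner.trial_backend.start_trial(config=config, checkpoint_trial_id=trial_id)
```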
@@ -0,0 +1,26 @@
+import pytest
+
+from syne_tune.util import metric_name_mode
+
+metric_names = ["m1", "m2", "m3"]
+
+
+@pytest.mark.parametrize(
+    "metric_mode, query_metric, expected_metric, expected_mode",
+    [
+        ("max", "m2", "m2", "max"),
+        ("min", "m2", "m2", "min"),
+        (["max", "min", "max"], "m2", "m2", "min"),
+        (["max", "min", "max"], "m3", "m3", "max"),
+        ("max", 1, "m2", "max"),
+        ("min", 1, "m2", "min"),
+        (["max", "min", "max"], 1, "m2", "min"),
+        (["max", "min", "max"], 2, "m3", "max"),
+    ],
+)
+def test_metric_name_mode(metric_mode, query_metric, expected_metric, expected_mode):
+    metric_name, metric_mode = metric_name_mode(
+        metric_names=metric_names, metric_mode=metric_mode, metric=query_metric
+    )
+    assert metric_name == expected_metric
+    assert metric_mode == expected_mode
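As a quick illustration of what these cases assert, here is the same helper called standalone (values lifted from the parametrization above):

```python
from syne_tune.util import metric_name_mode

# With a per-metric list of modes, selecting metric index 1 resolves to
# the name "m2" together with the mode attached to that metric, "min".
name, mode = metric_name_mode(
    metric_names=["m1", "m2", "m3"],
    metric_mode=["max", "min", "max"],
    metric=1,
)
assert (name, mode) == ("m2", "min")

# A single string mode applies to whichever metric is selected.
name, mode = metric_name_mode(
    metric_names=["m1", "m2", "m3"],
    metric_mode="max",
    metric="m2",
)
assert (name, mode) == ("m2", "max")
```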
Reviewer: Maybe plot again, and hopefully show improvement? Or consider splitting this out into a separate retraining example? Otherwise it feels a bit random: why train again and then do nothing with it afterwards?
Author: One use-case could be to run with a larger budget. I do not have a use-case personally, but I know some people ask for this, so there should probably be an example showing how it can be done.
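A possible shape for such an example, sketched here under loud assumptions: that the config space exposes an `epochs` entry the training script honors, and that `tuner` is a completed `Tuner` as above.

```python
# Hypothetical sketch: retrain the best configuration with a larger budget.
trial_id, config = tuner.best_config()

# Assumes the training script reads an "epochs" entry from its config;
# enlarge the budget by an arbitrary factor for the retraining run.
retrain_config = dict(config, epochs=10 * config["epochs"])

# Start a fresh trial, resuming from the best trial's checkpoint if the
# script stored one (omit checkpoint_trial_id to train from scratch).
tuner.trial_backend.start_trial(
    config=retrain_config, checkpoint_trial_id=trial_id
)
```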