Merge pull request #903 from bouthilx/hotfix/pbt_cp_dir
Fix PBT issues with working dir and promotion of max fidelity trials
bouthilx committed Aug 2, 2022
2 parents d017d1b + ec910bd commit ba4ce25
Showing 45 changed files with 1,110 additions and 508 deletions.

docs/src/user/algorithms.rst: 6 changes (2 additions, 4 deletions)
@@ -310,8 +310,7 @@ Population Based Training (PBT)

.. warning::

PBT is broken in current version v0.2.4. We are working on a fix to be released in v0.2.5,
ETA July 2022.
PBT was broken in version v0.2.4. Make sure to use the latest release.

Population based training is an evolutionary algorithm that evolve trials
from low fidelity levels to high fidelity levels (ex: number of epochs), reusing
@@ -376,8 +375,7 @@ Population Based Bandits (PB2)

.. warning::

PBT is broken in current version v0.2.4. We are working on a fix to be released in v0.2.5,
ETA July 2022.
PB2 was broken in version v0.2.4. Make sure to use the latest release.

Population Based Bandits is a variant of Population Based Training using probabilistic model to
guide

setup.py: 3 changes (2 additions, 1 deletion)
@@ -53,13 +53,14 @@
],
"bohb": [
"hpbandster",
"ConfigSpace",
"ConfigSpace==0.5.0",
"sspace @ git+https://github.com/Epistimio/sample-space.git@master#egg=sspace",
],
"pb2": ["GPy"],
"nevergrad": ["nevergrad>=0.4.3.post10", "fcmaes", "pymoo"],
"hebo": [
"numpy",
"pymoo==0.5.0",
"hebo @ git+https://github.com/huawei-noah/HEBO.git@v0.3.2#egg=hebo&subdirectory=HEBO",
],
}

src/orion/algo/asha.py: 6 changes (2 additions, 4 deletions)
@@ -7,7 +7,7 @@
import copy
import logging
from collections import defaultdict
from typing import Any, Sequence
from typing import Sequence

import numpy
import numpy as np
@@ -211,9 +211,7 @@ def sample(self, num: int) -> list[Trial]:
def suggest(self, num: int) -> list[Trial]:
return super().suggest(num)

def create_bracket(
self, i: Any, budgets: list[BudgetTuple], iteration: int
) -> ASHABracket:
def create_bracket(self, budgets: list[BudgetTuple], iteration: int) -> ASHABracket:
return ASHABracket(self, budgets, iteration)


src/orion/algo/axoptimizer.py: 34 changes (19 additions, 15 deletions)
@@ -6,20 +6,20 @@
import copy
from typing import List, Optional

try:
from ax.service.ax_client import AxClient
from ax.service.utils.instantiation import ObjectiveProperties

has_Ax = True
except ImportError:
AxClient = None
ObjectiveProperties = None
has_Ax = False

from orion.algo.base import BaseAlgorithm
from orion.algo.space import Space
from orion.algo.space import Fidelity, Space
from orion.core.utils import format_trials
from orion.core.utils.flatten import flatten
from orion.core.utils.module_import import ImportOptional
from orion.core.worker.transformer import TransformedDimension

with ImportOptional("Ax") as import_optional:
from ax.service.ax_client import AxClient
from ax.service.utils.instantiation import ObjectiveProperties

if import_optional.failed:
AxClient = None # noqa: F811
ObjectiveProperties = None # noqa: F811


class AxOptimizer(BaseAlgorithm):
@@ -35,7 +35,7 @@ class AxOptimizer(BaseAlgorithm):
generator and for BoTorch-powered models. For the latter models, the
trials generated from the same optimization setup with the same seed,
will be mostly similar, but the exact parameter values may still vary
and trials latter in the optimizations will diverge more and more. This
and trials latter in the optimizations will diverge more and more. This
is because a degree of randomness is essential for high performance of
the Bayesian optimization models and is not controlled by the seed.
@@ -75,6 +75,8 @@ def __init__(
extra_objectives: Optional[List[str]] = None,
constraints: Optional[List[str]] = None,
):
import_optional.ensure()

extra_objectives = set(extra_objectives if extra_objectives else [])
constraints = constraints if constraints else []

@@ -202,9 +204,11 @@ def suggest(self, num):
if self.fidelity_index is not None:
# Convert 0-dim arrays into python numbers so their type can
# be validated by Ax
parameters[self.fidelity_index] = float(
self.space[self.fidelity_index].high
)
fidelity_dim = self.space[self.fidelity_index]
while isinstance(fidelity_dim, TransformedDimension):
fidelity_dim = fidelity_dim.original_dimension
assert isinstance(fidelity_dim, Fidelity)
parameters[self.fidelity_index] = float(fidelity_dim.high)

new_trial = format_trials.dict_to_trial(parameters, self.space)

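The suggest() change above stops reading `.high` straight off `self.space[self.fidelity_index]`, because a transformed space can wrap the fidelity dimension in one or more layers. A minimal sketch of that unwrapping step in isolation (the helper name `resolve_fidelity` is invented for illustration; only the loop itself comes from the diff):

from orion.algo.space import Fidelity
from orion.core.worker.transformer import TransformedDimension


def resolve_fidelity(space, fidelity_index):
    """Unwrap transformed dimensions until the real Fidelity dimension is found."""
    dim = space[fidelity_index]
    while isinstance(dim, TransformedDimension):
        # Transformed spaces wrap the original dimension; peel the layers off.
        dim = dim.original_dimension
    assert isinstance(dim, Fidelity)
    return dim

With such a helper, the new assignment would reduce to `parameters[self.fidelity_index] = float(resolve_fidelity(self.space, self.fidelity_index).high)`, which is what the inlined loop does both here and in `hebo_algo.py` below.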

src/orion/algo/bohb.py: 21 changes (11 additions, 10 deletions)
@@ -8,21 +8,21 @@

import numpy as np

try:
from orion.algo.base import BaseAlgorithm
from orion.algo.parallel_strategy import strategy_factory
from orion.algo.space import Fidelity
from orion.core.utils.format_trials import dict_to_trial
from orion.core.utils.module_import import ImportOptional

with ImportOptional("BOHB") as import_optional:
from hpbandster.optimizers.config_generators.bohb import BOHB as CG_BOHB
from hpbandster.optimizers.iterations import SuccessiveHalving
from sspace.convert import convert_space, reverse, transform

has_HpBandSter = True
except ImportError:
CG_BOHB = None
SuccessiveHalving = None
has_HpBandSter = False
if import_optional.failed:
CG_BOHB = None # noqa: F811
SuccessiveHalving = None # noqa: F811

from orion.algo.base import BaseAlgorithm
from orion.algo.parallel_strategy import strategy_factory
from orion.algo.space import Fidelity
from orion.core.utils.format_trials import dict_to_trial

SPACE_ERROR = """
BOHB cannot be used if space does not contain a fidelity dimension.
@@ -110,6 +110,7 @@ def __init__(
min_bandwidth=1e-3,
parallel_strategy=None,
): # pylint: disable=too-many-arguments
import_optional.ensure()

if parallel_strategy is None:
parallel_strategy = {
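
Here, as in `axoptimizer.py`, the hand-rolled try/except import guard becomes a `with ImportOptional(...)` block plus an `import_optional.ensure()` call in `__init__`. The sketch below is a simplified stand-in for what such a context manager could look like, not the actual `orion.core.utils.module_import.ImportOptional` implementation:

class ImportOptional:
    """Simplified stand-in: swallow ImportError for an optional backend.

    Records whether the import failed so the module can assign fallbacks
    (e.g. ``CG_BOHB = None``), while ``ensure()`` raises a clear error only
    when the algorithm is actually instantiated.
    """

    def __init__(self, package):
        self.package = package
        self.import_error = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if isinstance(exc_value, ImportError):
            self.import_error = exc_value
            return True  # suppress the error; module import still succeeds
        return False

    @property
    def failed(self):
        return self.import_error is not None

    def ensure(self):
        if self.failed:
            raise ImportError(
                f"The {self.package} optional dependencies are not installed."
            ) from self.import_error

The pattern moves the failure from import time to construction time: importing `orion.algo.bohb` always works, and a missing `hpbandster` only surfaces when someone actually configures the BOHB algorithm.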

src/orion/algo/dehb/dehb.py: 12 changes (6 additions, 6 deletions)
@@ -17,21 +17,19 @@
from orion.algo.dehb.brackets import SHBracketManager
from orion.algo.space import Fidelity, Space
from orion.core.utils import format_trials
from orion.core.utils.module_import import ImportOptional
from orion.core.worker.trial import Trial

try:
with ImportOptional("DEHB") as import_optional:
from dehb.optimizers import DEHB as DEHBImpl
from sspace.convert import convert_space
from sspace.convert import transform as to_orion

IMPORT_ERROR = None
except ImportError as exc:
if import_optional.failed:

class DEHBImpl:
class DEHBImpl: # noqa: F811
pass

IMPORT_ERROR = exc


logger = logging.getLogger(__name__)

@@ -265,6 +263,8 @@ def __init__(
min_clip: int | None = None,
max_clip: int | None = None,
):
import_optional.ensure()

# Sanity Check
if mutation_strategy not in MUTATION_STRATEGIES:
raise UnsupportedConfiguration(

src/orion/algo/evolution_es.py: 21 changes (14 additions, 7 deletions)
@@ -10,7 +10,7 @@
import copy
import importlib
import logging
from typing import Callable, ClassVar, Sequence
from typing import Callable, ClassVar, Sequence, TypeVar

import numpy as np

@@ -89,7 +89,10 @@ def compute_budgets(
return budgets_eves


class EvolutionES(Hyperband):
BracketT = TypeVar("BracketT", bound="BracketEVES")


class EvolutionES(Hyperband[BracketT]):
"""EvolutionES formulates hyperparameter optimization as an evolution.
For more information on the algorithm,
@@ -137,14 +140,15 @@ def __init__(
mutate: str | dict | None = None,
max_retries: int = 1000,
):
self.mutate = mutate

super().__init__(space, seed=seed, repetitions=repetitions)
pair = nums_population // 2
mutate_ratio = 0.3
self.nums_population = nums_population
self.nums_comp_pairs = pair
self.max_retries = max_retries
self.mutate_ratio = mutate_ratio
self.mutate = mutate
self.nums_mutate_gene = (
int((len(self.space.values()) - 1) * mutate_ratio)
if int((len(self.space.values()) - 1) * mutate_ratio) > 0
@@ -168,11 +172,14 @@ def __init__(
pair,
)

self.brackets: list[BracketEVES] = [
BracketEVES(self, bracket_budgets, 1) for bracket_budgets in self.budgets
]
self.brackets: list[BracketT] = self.create_brackets()
self.seed_rng(seed)

def create_bracket(
self, bracket_budgets: list[BudgetTuple], iteration: int
) -> BracketT:
return BracketEVES(self, bracket_budgets, iteration)

@property
def state_dict(self) -> dict:
"""Return a state dict that can be used to reset the state of the algorithm."""
@@ -189,7 +196,7 @@ def set_state(self, state_dict: dict) -> None:
self.hurdles = state_dict["hurdles"]
super().set_state(state_dict)

def _get_bracket(self, trial: Trial) -> BracketEVES:
def _get_bracket(self, trial: Trial) -> BracketT:
"""Get the bracket of a trial during observe"""
return self.brackets[-1]

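The bracket changes in this file (together with the `create_bracket` cleanup in `asha.py` above) follow a small factory-method pattern: the base class owns the loop over budget schedules and the subclass only decides which bracket class to build. A condensed sketch of that shape, using invented stand-in classes rather than the real `Hyperband`/`BracketEVES`:

from __future__ import annotations

from typing import Generic, TypeVar

BracketT = TypeVar("BracketT", bound="Bracket")


class Bracket:
    """Minimal bracket: remembers its owner, budgets and iteration number."""

    def __init__(self, owner, budgets, iteration):
        self.owner = owner
        self.budgets = budgets
        self.iteration = iteration


class FidelityAlgo(Generic[BracketT]):
    """Stand-in for the Hyperband base: builds brackets through one hook."""

    def __init__(self, budgets):
        self.budgets = budgets
        self.brackets: list[BracketT] = self.create_brackets()

    def create_bracket(self, bracket_budgets, iteration) -> BracketT:
        raise NotImplementedError

    def create_brackets(self) -> list[BracketT]:
        # One bracket per budget schedule, always built via the subclass hook.
        return [self.create_bracket(budgets, 1) for budgets in self.budgets]


class EvesBracket(Bracket):
    pass


class EvolutionLike(FidelityAlgo[EvesBracket]):
    def create_bracket(self, bracket_budgets, iteration) -> EvesBracket:
        return EvesBracket(self, bracket_budgets, iteration)

That is why the hard-coded `BracketEVES(...)` list comprehension disappears in favour of `self.create_brackets()`: the brackets built are the same, but construction presumably goes through the same hook the other Hyperband-based algorithms use.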

src/orion/algo/hebo/hebo_algo.py: 59 changes (18 additions, 41 deletions)
@@ -6,7 +6,6 @@
"""
from __future__ import annotations

import contextlib
import copy
import typing
import warnings
@@ -19,26 +18,24 @@
from typing_extensions import Literal, TypedDict # type: ignore

from orion.algo.base import BaseAlgorithm
from orion.algo.hebo.random_state import RandomState
from orion.algo.space import Dimension, Fidelity, Space
from orion.core.utils.format_trials import dict_to_trial
from orion.core.utils.module_import import ImportOptional
from orion.core.utils.random_state import RandomState, control_randomness
from orion.core.worker.transformer import TransformedDimension
from orion.core.worker.trial import Trial

_HEBO_REQUIRED_ERROR = None
try:
with ImportOptional("HEBO") as import_optional:
import hebo
from hebo.acquisitions.acq import MACE, Acquisition
from hebo.design_space import DesignSpace
from hebo.design_space.param import Parameter
from torch.quasirandom import SobolEngine

except ImportError as err:
MACE = object
_HEBO_REQUIRED_ERROR = ImportError(
"The HEBO package is not installed. Install it with `pip install orion[hebo]`"
)
if import_optional.failed:
MACE = object # noqa: F811

if typing.TYPE_CHECKING and _HEBO_REQUIRED_ERROR:
if typing.TYPE_CHECKING and import_optional.failed:
Acquisition = object # noqa
DesignSpace = object # noqa
Parameter = object # noqa
@@ -137,8 +134,7 @@ def __init__(
seed: int | None = None,
parameters: Parameters | dict | None = None,
):
if _HEBO_REQUIRED_ERROR:
raise _HEBO_REQUIRED_ERROR
import_optional.ensure()

super().__init__(space)
if isinstance(parameters, dict):
@@ -167,7 +163,7 @@ def __init__(

self.hebo_space: DesignSpace = orion_space_to_hebo_space(self.space)

with self._control_randomness():
with control_randomness(self):
self.model = hebo.optimizers.hebo.HEBO(
space=self.hebo_space,
model_name=self.parameters.model_name,
@@ -238,7 +234,7 @@ def suggest(self, num: int) -> list[Trial]:
A list of trials representing values suggested by the algorithm.
"""
trials: list[Trial] = []
with self._control_randomness():
with control_randomness(self):
v: pd.DataFrame = self.model.suggest(n_suggestions=num)
point_dicts: dict[int, dict] = v.to_dict(orient="index") # type: ignore

@@ -280,7 +276,7 @@ def observe(self, trials: list[Trial]) -> None:

x_df = pd.DataFrame(new_xs)
y_array = np.array(new_ys).reshape([-1, 1])
with self._control_randomness():
with control_randomness(self):
self.model.observe(X=x_df, y=y_array)

def _hebo_params_to_orion_params(self, hebo_params: dict) -> dict:
@@ -330,12 +326,12 @@ def _orion_params_to_hebo_params(self, orion_params: dict) -> dict:
params = {}
for name, value in orion_params.items():
orion_dim: Dimension = self.space[name]
hebo_dim: Parameter = self.hebo_space.paras[name]

if orion_dim.type == "fidelity":
continue
from hebo.design_space.categorical_param import CategoricalPara

hebo_dim: Parameter = self.hebo_space.paras[name]

if isinstance(hebo_dim, CategoricalPara):
if (
value not in hebo_dim.categories
@@ -353,33 +349,14 @@ def _params_to_trial(self, orion_params: dict) -> Trial:
# Need to convert the {name: value} of point_dict into this format for Orion's Trial.
# Add the max value for the Fidelity dimensions, if any.
if self.fidelity_index is not None:
fidelity_dim: Fidelity = self.space[self.fidelity_index]
orion_params[self.fidelity_index] = fidelity_dim.high
fidelity_dim = self.space[self.fidelity_index]
while isinstance(fidelity_dim, TransformedDimension):
fidelity_dim = fidelity_dim.original_dimension
assert isinstance(fidelity_dim, Fidelity)
orion_params[self.fidelity_index] = float(fidelity_dim.high)
trial: Trial = dict_to_trial(orion_params, space=self.space)
return trial

@contextlib.contextmanager
def _control_randomness(self):
"""Seeds the randomness inside the indented block of code using `self.random_state`.
NOTE: This only has an effect if `seed_rng` was called previously, i.e. if
`self.random_state` is not None.
"""
if self.random_state is None:
yield
return

# Save the initial random state.
initial_rng_state = RandomState.current()
# Set the random state.
self.random_state.set()
yield
# Update the random state stored on `self`, so that the changes inside the block are
# reflected in the RandomState object.
self.random_state = RandomState.current()
# Reset the initial state.
initial_rng_state.set()


def orion_space_to_hebo_space(space: Space) -> DesignSpace:
"""Get the HEBO-equivalent space for the `Space` `space`.
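
The `_control_randomness` method removed above is replaced by a shared `control_randomness` helper imported from `orion.core.utils.random_state`, matching the import change at the top of this file where `RandomState` now comes from that module instead of `orion.algo.hebo.random_state`. Assuming the shared helper simply generalizes the deleted method to take the algorithm as an argument, a minimal sketch would be:

import contextlib

from orion.core.utils.random_state import RandomState


@contextlib.contextmanager
def control_randomness(algo):
    """Seed global RNGs from ``algo.random_state`` inside the block (sketch).

    Mirrors the deleted ``_control_randomness`` method; it only has an effect
    if ``seed_rng`` was called previously, i.e. if ``algo.random_state`` is set.
    """
    if algo.random_state is None:
        yield
        return

    # Save the global random state, then install the algorithm's own state.
    initial_rng_state = RandomState.current()
    algo.random_state.set()
    yield
    # Keep whatever randomness the block consumed, then restore the globals.
    algo.random_state = RandomState.current()
    initial_rng_state.set()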
(Diffs for the remaining changed files are not shown.)

