diff --git a/Makefile b/Makefile index 42159410..6060d35e 100644 --- a/Makefile +++ b/Makefile @@ -36,7 +36,7 @@ test-search_space: done test-pytest: - python -m pytest --durations=10 -x -p no:warnings tests/; \ + python -m pytest --durations=10 -x -p no:warnings tests/ src/hyperactive/; \ test-timings: cd tests/_local_test_timings; \ diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/scripts/_generator.py b/scripts/_generator.py new file mode 100644 index 00000000..dc8c329d --- /dev/null +++ b/scripts/_generator.py @@ -0,0 +1,63 @@ +import os +from pathlib import Path + +# List of algorithm names and corresponding class names +algo_info = [ + ("downhill_simplex", "DownhillSimplexOptimizer"), + ("simulated_annealing", "SimulatedAnnealingOptimizer"), + ("direct_algorithm", "DirectAlgorithm"), + ("lipschitz_optimization", "LipschitzOptimizer"), + ("pattern_search", "PatternSearch"), + ("random_restart_hill_climbing", "RandomRestartHillClimbingOptimizer"), + ("random_search", "RandomSearchOptimizer"), + ("powells_method", "PowellsMethod"), + ("differential_evolution", "DifferentialEvolutionOptimizer"), + ("evolution_strategy", "EvolutionStrategyOptimizer"), + ("genetic_algorithm", "GeneticAlgorithmOptimizer"), + ("parallel_tempering", "ParallelTemperingOptimizer"), + ("particle_swarm_optimization", "ParticleSwarmOptimizer"), + ("spiral_optimization", "SpiralOptimization"), + ("bayesian_optimization", "BayesianOptimizer"), + ("forest_optimizer", "ForestOptimizer"), + ("tree_structured_parzen_estimators", "TreeStructuredParzenEstimators"), +] + +BASE_DIR = Path("generated_opt_algos") + + +# Template for the Python class file +def create_class_file_content(class_name: str) -> str: + return f'''from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class {class_name}(_BaseGFOadapter): + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. 
One of the concrete GFO classes + """ + from gradient_free_optimizers import {class_name} + + return {class_name} +''' + + +# Main generation loop +for name, class_name in algo_info: + algo_folder = BASE_DIR / name + algo_folder.mkdir(parents=True, exist_ok=True) + + init_file = algo_folder / "__init__.py" + class_file = algo_folder / f"_{name}.py" + + # Create __init__.py (empty) + init_file.touch(exist_ok=True) + + # Write the optimizer class file + class_file.write_text(create_class_file_content(class_name)) + +print(f"Generated {len(algo_info)} folders in {BASE_DIR.resolve()}") diff --git a/src/hyperactive/base/_optimizer.py b/src/hyperactive/base/_optimizer.py index f1dc0fa9..8a3cca54 100644 --- a/src/hyperactive/base/_optimizer.py +++ b/src/hyperactive/base/_optimizer.py @@ -1,4 +1,5 @@ """Base class for optimizer.""" + # copyright: hyperactive developers, MIT License (see LICENSE file) from skbase.base import BaseObject diff --git a/src/hyperactive/opt/__init__.py b/src/hyperactive/opt/__init__.py index 8014b642..45ba40a4 100644 --- a/src/hyperactive/opt/__init__.py +++ b/src/hyperactive/opt/__init__.py @@ -1,14 +1,54 @@ """Individual optimization algorithms.""" + # copyright: hyperactive developers, MIT License (see LICENSE file) from hyperactive.opt.gridsearch import GridSearchSk -from hyperactive.opt.hillclimbing import HillClimbing -from hyperactive.opt.hillclimbing_repulsing import HillClimbingRepulsing -from hyperactive.opt.hillclimbing_stochastic import HillClimbingStochastic +from .gfo import ( + HillClimbing, + StochasticHillClimbing, + RepulsingHillClimbing, + SimulatedAnnealing, + DownhillSimplexOptimizer, + RandomSearch, + GridSearch, + RandomRestartHillClimbing, + PowellsMethod, + PatternSearch, + LipschitzOptimizer, + DirectAlgorithm, + ParallelTempering, + ParticleSwarmOptimizer, + SpiralOptimization, + GeneticAlgorithm, + EvolutionStrategy, + DifferentialEvolution, + BayesianOptimizer, + TreeStructuredParzenEstimators, + ForestOptimizer, +) + __all__ = [ "GridSearchSk", "HillClimbing", - "HillClimbingRepulsing", - "HillClimbingStochastic", + "RepulsingHillClimbing", + "StochasticHillClimbing", + "SimulatedAnnealing", + "DownhillSimplexOptimizer", + "RandomSearch", + "GridSearch", + "RandomRestartHillClimbing", + "PowellsMethod", + "PatternSearch", + "LipschitzOptimizer", + "DirectAlgorithm", + "ParallelTempering", + "ParticleSwarmOptimizer", + "SpiralOptimization", + "GeneticAlgorithm", + "EvolutionStrategy", + "DifferentialEvolution", + "BayesianOptimizer", + "TreeStructuredParzenEstimators", + "ForestOptimizer", ] diff --git a/src/hyperactive/opt/_adapters/_gfo.py b/src/hyperactive/opt/_adapters/_gfo.py index aca57da3..c2bae3d9 100644 --- a/src/hyperactive/opt/_adapters/_gfo.py +++ b/src/hyperactive/opt/_adapters/_gfo.py @@ -1,4 +1,5 @@ """Adapter for gfo package.""" + # copyright: hyperactive developers, MIT License (see LICENSE file) from hyperactive.base import BaseOptimizer @@ -40,9 +41,7 @@ def _get_gfo_class(self): class The GFO class to use. One of the concrete GFO classes """ - raise NotImplementedError( - "This method should be implemented in a subclass." - ) + raise NotImplementedError("This method should be implemented in a subclass.") def get_search_config(self): """Get the search configuration. 
@@ -55,8 +54,63 @@ def get_search_config(self): search_config = super().get_search_config() search_config["initialize"] = self._initialize del search_config["verbose"] + + search_config = self._handle_gfo_defaults(search_config) + + search_config["search_space"] = self._to_dict_np(search_config["search_space"]) + + return search_config + + def _handle_gfo_defaults(self, search_config): + """Handle default values for GFO search configuration. + + Temporary measure until GFO handles defaults gracefully. + + Parameters + ---------- + search_config : dict with str keys + The search configuration dictionary to handle defaults for. + + Returns + ------- + search_config : dict with str keys + The search configuration dictionary with defaults handled. + """ + if "sampling" in search_config and search_config["sampling"] is None: + search_config["sampling"] = {"random": 1000000} + + if "tree_para" in search_config and search_config["tree_para"] is None: + search_config["tree_para"] = {"n_estimators": 100} + return search_config + def _to_dict_np(self, search_space): + """Coerce the search space to a format suitable for gfo optimizers. + + gfo expects dicts of numpy arrays, not lists. + This method coerces lists or tuples in the search space to numpy arrays. + + Parameters + ---------- + search_space : dict with str keys and iterable values + The search space to coerce. + + Returns + ------- + dict with str keys and 1D numpy arrays as values + The coerced search space. + """ + import numpy as np + + def coerce_to_numpy(arr): + """Coerce a list or tuple to a numpy array.""" + if not isinstance(arr, np.ndarray): + return np.array(arr) + return arr + + coerced_search_space = {k: coerce_to_numpy(v) for k, v in search_space.items()} + return coerced_search_space + def _run(self, experiment, **search_config): """Run the optimization search process. 
Parameters @@ -75,15 +129,15 @@ def _run(self, experiment, **search_config): max_time = search_config.pop("max_time", None) gfo_cls = self._get_gfo_class() - hcopt = gfo_cls(**search_config) + gfopt = gfo_cls(**search_config) with StdoutMute(active=not self.verbose): - hcopt.search( + gfopt.search( objective_function=experiment.score, n_iter=n_iter, max_time=max_time, ) - best_params = hcopt.best_para + best_params = gfopt.best_para return best_params @classmethod @@ -143,5 +197,12 @@ def get_test_params(cls, parameter_set="default"): }, "n_iter": 100, } - - return [params_sklearn, params_ackley] + params_ackley_list = { + "experiment": ackley_exp, + "search_space": { + "x0": list(np.linspace(-5, 5, 10)), + "x1": list(np.linspace(-5, 5, 10)), + }, + "n_iter": 100, + } + return [params_sklearn, params_ackley, params_ackley_list] diff --git a/src/hyperactive/opt/gfo/__init__.py b/src/hyperactive/opt/gfo/__init__.py new file mode 100644 index 00000000..f1c430b7 --- /dev/null +++ b/src/hyperactive/opt/gfo/__init__.py @@ -0,0 +1,50 @@ +"""Individual optimization algorithms.""" + +# copyright: hyperactive developers, MIT License (see LICENSE file) + +from ._hillclimbing import HillClimbing +from ._stochastic_hillclimbing import StochasticHillClimbing +from ._repulsing_hillclimbing import RepulsingHillClimbing +from ._simulated_annealing import SimulatedAnnealing +from ._downhill_simplex import DownhillSimplexOptimizer +from ._random_search import RandomSearch +from ._grid_search import GridSearch +from ._random_restart_hill_climbing import RandomRestartHillClimbing +from ._powells_method import PowellsMethod +from ._pattern_search import PatternSearch +from ._lipschitz_optimization import LipschitzOptimizer +from ._direct_algorithm import DirectAlgorithm +from ._parallel_tempering import ParallelTempering +from ._particle_swarm_optimization import ParticleSwarmOptimizer +from ._spiral_optimization import SpiralOptimization +from ._genetic_algorithm import GeneticAlgorithm +from ._evolution_strategy import EvolutionStrategy +from ._differential_evolution import DifferentialEvolution +from ._bayesian_optimization import BayesianOptimizer +from ._tree_structured_parzen_estimators import TreeStructuredParzenEstimators +from ._forest_optimizer import ForestOptimizer + + +__all__ = [ + "HillClimbing", + "RepulsingHillClimbing", + "StochasticHillClimbing", + "SimulatedAnnealing", + "DownhillSimplexOptimizer", + "RandomSearch", + "GridSearch", + "RandomRestartHillClimbing", + "PowellsMethod", + "PatternSearch", + "LipschitzOptimizer", + "DirectAlgorithm", + "ParallelTempering", + "ParticleSwarmOptimizer", + "SpiralOptimization", + "GeneticAlgorithm", + "EvolutionStrategy", + "DifferentialEvolution", + "BayesianOptimizer", + "TreeStructuredParzenEstimators", + "ForestOptimizer", +] diff --git a/src/hyperactive/opt/gfo/_bayesian_optimization.py b/src/hyperactive/opt/gfo/_bayesian_optimization.py new file mode 100644 index 00000000..0d4f6b13 --- /dev/null +++ b/src/hyperactive/opt/gfo/_bayesian_optimization.py @@ -0,0 +1,156 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class BayesianOptimizer(_BaseGFOadapter): + """Bayesian optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. 
A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the search process. + warm_start_smbo + The warm start for SMBO. + max_sample_size : int + The maximum number of points to sample. + sampling : dict + The sampling method to use. + replacement : bool + Whether to sample with replacement. + gpr : dict + The Gaussian Process Regressor to use. + xi : float + The exploration-exploitation trade-off parameter. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of BayesianOptimizer with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the bayesianOptimizer optimizer: + >>> from hyperactive.opt import BayesianOptimizer + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = BayesianOptimizer(experiment=sklearn_exp, **config) + + 3. running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Bayesian Optimization", + "info:local_vs_global": "global", + "info:explore_vs_exploit": "exploit", + "info:compute": "high", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + warm_start_smbo=None, + max_sample_size=10000000, + sampling=None, + replacement=True, + xi=0.03, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + + self.warm_start_smbo = warm_start_smbo + self.max_sample_size = max_sample_size + self.sampling = sampling + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.replacement = replacement + self.xi = xi + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import BayesianOptimizer + + return BayesianOptimizer + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. 
+ """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "xi": 0.33, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_differential_evolution.py b/src/hyperactive/opt/gfo/_differential_evolution.py new file mode 100644 index 00000000..287b4fc6 --- /dev/null +++ b/src/hyperactive/opt/gfo/_differential_evolution.py @@ -0,0 +1,147 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class DifferentialEvolution(_BaseGFOadapter): + """Differential evolution optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the the search process. + population : int + The number of individuals in the population. + mutation_rate : float + The mutation rate. + crossover_rate : float + The crossover rate. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of DifferentialEvolution with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the differentialEvolution optimizer: + >>> from hyperactive.opt import DifferentialEvolution + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = DifferentialEvolution(experiment=sklearn_exp, **config) + + 3. 
running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Differential Evolution", + "info:local_vs_global": "global", + "info:explore_vs_exploit": "explore", + "info:compute": "middle", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + population=10, + mutation_rate=0.9, + crossover_rate=0.9, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.population = population + self.mutation_rate = mutation_rate + self.crossover_rate = crossover_rate + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import DifferentialEvolutionOptimizer + + return DifferentialEvolutionOptimizer + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. + """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "population": 8, + "mutation_rate": 0.8, + "crossover_rate": 0.7, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_direct_algorithm.py b/src/hyperactive/opt/gfo/_direct_algorithm.py new file mode 100644 index 00000000..9abf95de --- /dev/null +++ b/src/hyperactive/opt/gfo/_direct_algorithm.py @@ -0,0 +1,150 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class DirectAlgorithm(_BaseGFOadapter): + """Direct optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the the search process. + warm_start_smbo + The warm start for SMBO. + max_sample_size : int + The maximum number of points to sample. + sampling : dict + The sampling method to use. + replacement : bool + Whether to sample with replacement. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of DirectAlgorithm with a scikit-learn experiment: + + 1. 
defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the directAlgorithm optimizer: + >>> from hyperactive.opt import DirectAlgorithm + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = DirectAlgorithm(experiment=sklearn_exp, **config) + + 3. running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "DIRECT Algorithm", + "info:local_vs_global": "global", + "info:explore_vs_exploit": "mixed", + "info:compute": "high", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + warm_start_smbo=None, + max_sample_size: int = 10000000, + sampling=None, + replacement=True, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.warm_start_smbo = warm_start_smbo + self.max_sample_size = max_sample_size + self.sampling = sampling + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.replacement = replacement + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import DirectAlgorithm + + return DirectAlgorithm + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. + """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "replacement": True, + "max_sample_size": 1000, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_downhill_simplex.py b/src/hyperactive/opt/gfo/_downhill_simplex.py new file mode 100644 index 00000000..145a38ef --- /dev/null +++ b/src/hyperactive/opt/gfo/_downhill_simplex.py @@ -0,0 +1,152 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class DownhillSimplexOptimizer(_BaseGFOadapter): + """Downhill simplex optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. 
+ rand_rest_p : float + The probability of a random iteration during the the search process. + alpha : float + The reflection parameter of the simplex algorithm. + gamma : float + The expansion parameter of the simplex algorithm. + beta : float + The contraction parameter of the simplex algorithm. + sigma : float + The shrinking parameter of the simplex algorithm. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of DownhillSimplexOptimizer with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the downhillSimplexOptimizer optimizer: + >>> from hyperactive.opt import DownhillSimplexOptimizer + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = DownhillSimplexOptimizer(experiment=sklearn_exp, **config) + + 3. running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Downhill Simplex", + "info:local_vs_global": "local", + "info:explore_vs_exploit": "exploit", + "info:compute": "low", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + alpha=1, + gamma=2, + beta=0.5, + sigma=0.5, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.alpha = alpha + self.gamma = gamma + self.beta = beta + self.sigma = sigma + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import DownhillSimplexOptimizer + + return DownhillSimplexOptimizer + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. 
+ """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "alpha": 0.33, + "beta": 0.33, + "gamma": 0.33, + "sigma": 0.33, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_evolution_strategy.py b/src/hyperactive/opt/gfo/_evolution_strategy.py new file mode 100644 index 00000000..2df50c8c --- /dev/null +++ b/src/hyperactive/opt/gfo/_evolution_strategy.py @@ -0,0 +1,159 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class EvolutionStrategy(_BaseGFOadapter): + """Evolution strategy optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the the search process. + population : int + The number of individuals in the population. + offspring : int + The number of offspring to generate in each generation. + replace_parents : bool + If True, the parents are replaced with the offspring in the next + generation. If False, the parents are kept in the next generation and the + offspring are added to the population. + mutation_rate : float + The mutation rate for the mutation operator. + crossover_rate : float + The crossover rate for the crossover operator. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of EvolutionStrategy with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the evolutionStrategy optimizer: + >>> from hyperactive.opt import EvolutionStrategy + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = EvolutionStrategy(experiment=sklearn_exp, **config) + + 3. 
running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Evolution Strategy", + "info:local_vs_global": "global", + "info:explore_vs_exploit": "explore", + "info:compute": "middle", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + population=10, + offspring=20, + replace_parents=False, + mutation_rate=0.7, + crossover_rate=0.3, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.population = population + self.offspring = offspring + self.replace_parents = replace_parents + self.mutation_rate = mutation_rate + self.crossover_rate = crossover_rate + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import EvolutionStrategyOptimizer + + return EvolutionStrategyOptimizer + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. + """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "population": 15, + "offspring": 10, + "replace_parents": True, + "mutation_rate": 1, + "crossover_rate": 2, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_forest_optimizer.py b/src/hyperactive/opt/gfo/_forest_optimizer.py new file mode 100644 index 00000000..5e971643 --- /dev/null +++ b/src/hyperactive/opt/gfo/_forest_optimizer.py @@ -0,0 +1,163 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class ForestOptimizer(_BaseGFOadapter): + """Forest optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the the search process. + warm_start_smbo + The warm start for SMBO. + max_sample_size : int + The maximum number of points to sample. + sampling : dict + The sampling method to use. + replacement : bool + Whether to sample with replacement. + tree_regressor : str + The tree regressor model to use. + tree_para : dict + The model specific parameters for the tree regressor. + xi : float + The xi parameter for the tree regressor. + n_iter : int, default=100 + The number of iterations to run the optimizer. 
+ verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of ForestOptimizer with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the forestOptimizer optimizer: + >>> from hyperactive.opt import ForestOptimizer + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = ForestOptimizer(experiment=sklearn_exp, **config) + + 3. running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Forest Optimizer", + "info:local_vs_global": "global", + "info:explore_vs_exploit": "explore", + "info:compute": "middle", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + warm_start_smbo=None, + max_sample_size=10000000, + sampling=None, + replacement=True, + tree_regressor="extra_tree", + tree_para=None, + xi=0.03, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.warm_start_smbo = warm_start_smbo + self.max_sample_size = max_sample_size + self.sampling = sampling + self.replacement = replacement + self.tree_regressor = tree_regressor + self.tree_para = tree_para + self.xi = xi + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import ForestOptimizer + + return ForestOptimizer + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. + """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "replacement": True, + "tree_para": {"n_estimators": 50}, + "xi": 0.33, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_genetic_algorithm.py b/src/hyperactive/opt/gfo/_genetic_algorithm.py new file mode 100644 index 00000000..aaf281dd --- /dev/null +++ b/src/hyperactive/opt/gfo/_genetic_algorithm.py @@ -0,0 +1,162 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class GeneticAlgorithm(_BaseGFOadapter): + """Genetic algorithm optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. 
+ initialize : dict[str, int] + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the search process. + population : int + The number of individuals in the population. + offspring : int + The number of offspring to generate in each generation. + crossover : str + The crossover operator to use. + n_parents : int + The number of parents to select for crossover. + mutation_rate : float + The mutation rate. + crossover_rate : float + The crossover rate. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of GeneticAlgorithm with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the geneticAlgorithm optimizer: + >>> from hyperactive.opt import GeneticAlgorithm + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = GeneticAlgorithm(experiment=sklearn_exp, **config) + + 3. running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Genetic Algorithm", + "info:local_vs_global": "global", + "info:explore_vs_exploit": "explore", + "info:compute": "high", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + population=10, + offspring=10, + crossover="discrete-recombination", + n_parents=2, + mutation_rate=0.5, + crossover_rate=0.5, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.population = population + self.offspring = offspring + self.crossover = crossover + self.n_parents = n_parents + self.mutation_rate = mutation_rate + self.crossover_rate = crossover_rate + + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import GeneticAlgorithmOptimizer + + return GeneticAlgorithmOptimizer + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. 
+ + Returns + ------- + dict with str keys + The test parameters dictionary. + """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "population": 15, + "offspring": 10, + "n_parents": 3, + "mutation_rate": 0.01, + "crossover_rate": 0.02, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_grid_search.py b/src/hyperactive/opt/gfo/_grid_search.py new file mode 100644 index 00000000..191c869c --- /dev/null +++ b/src/hyperactive/opt/gfo/_grid_search.py @@ -0,0 +1,142 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class GridSearch(_BaseGFOadapter): + """Grid search optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the the search process. + step_size : int + The step-size for the grid search. + direction : "diagonal" or "orthogonal" + The direction of the grid search. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of GridSearch with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the gridSearch optimizer: + >>> from hyperactive.opt import GridSearch + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = GridSearch(experiment=sklearn_exp, **config) + + 3. 
running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Grid Search", + "info:local_vs_global": "global", + "info:explore_vs_exploit": "explore", + "info:compute": "high", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + step_size=1, + direction="diagonal", + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.step_size = step_size + self.direction = direction + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import GridSearchOptimizer + + return GridSearchOptimizer + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. + """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "step_size": 3, + "direction": "orthogonal", + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/hillclimbing/_hillclimbing.py b/src/hyperactive/opt/gfo/_hillclimbing.py similarity index 94% rename from src/hyperactive/opt/hillclimbing/_hillclimbing.py rename to src/hyperactive/opt/gfo/_hillclimbing.py index a6eabc81..9537793e 100644 --- a/src/hyperactive/opt/hillclimbing/_hillclimbing.py +++ b/src/hyperactive/opt/gfo/_hillclimbing.py @@ -1,4 +1,5 @@ """Hill climbing optimizer from gfo.""" + # copyright: hyperactive developers, MIT License (see LICENSE file) from hyperactive.opt._adapters._gfo import _BaseGFOadapter @@ -60,15 +61,15 @@ class HillClimbing(_BaseGFOadapter): 2. setting up the hill climbing optimizer: >>> from hyperactive.opt import HillClimbing >>> import numpy as np - >>> - >>> hillclimbing_config = { + >>> + >>> config = { ... "search_space": { - ... "C": np.array([0.01, 0.1, 1, 10]), - ... "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]), + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], ... }, ... "n_iter": 100, ... } - >>> hillclimbing = HillClimbing(experiment=sklearn_exp, **hillclimbing_config) + >>> hillclimbing = HillClimbing(experiment=sklearn_exp, **config) 3. running the hill climbing search: >>> best_params = hillclimbing.run() diff --git a/src/hyperactive/opt/gfo/_lipschitz_optimization.py b/src/hyperactive/opt/gfo/_lipschitz_optimization.py new file mode 100644 index 00000000..d5ddfe56 --- /dev/null +++ b/src/hyperactive/opt/gfo/_lipschitz_optimization.py @@ -0,0 +1,150 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class LipschitzOptimizer(_BaseGFOadapter): + """Lipschitz optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. 
A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the the search process. + warm_start_smbo + The warm start for SMBO. + max_sample_size : int + The maximum number of points to sample. + sampling : dict + The sampling method to use. + replacement : bool + Whether to sample with replacement. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of LipschitzOptimizer with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the lipschitzOptimizer optimizer: + >>> from hyperactive.opt import LipschitzOptimizer + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = LipschitzOptimizer(experiment=sklearn_exp, **config) + + 3. running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Lipschitz Optimization", + "info:local_vs_global": "global", + "info:explore_vs_exploit": "mixed", + "info:compute": "high", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + warm_start_smbo=None, + max_sample_size=10000000, + sampling=None, + replacement=True, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.warm_start_smbo = warm_start_smbo + self.max_sample_size = max_sample_size + self.sampling = sampling + self.replacement = replacement + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import LipschitzOptimizer + + return LipschitzOptimizer + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. 
+ """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "max_sample_size": 1000, + "replacement": True, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_parallel_tempering.py b/src/hyperactive/opt/gfo/_parallel_tempering.py new file mode 100644 index 00000000..9b8208ae --- /dev/null +++ b/src/hyperactive/opt/gfo/_parallel_tempering.py @@ -0,0 +1,145 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class ParallelTempering(_BaseGFOadapter): + """Parallel tempering optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the the search process. + epsilon : float + The step-size for the climbing. + distribution : str + The type of distribution to sample from. + n_neighbours : int + The number of neighbours to sample and evaluate before moving to the best + of those neighbours. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of ParallelTempering with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the parallelTempering optimizer: + >>> from hyperactive.opt import ParallelTempering + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = ParallelTempering(experiment=sklearn_exp, **config) + + 3. 
+    >>> best_params = optimizer.run()
+
+    Best parameters can also be accessed via:
+    >>> best_params = optimizer.best_params_
+    """
+
+    _tags = {
+        "info:name": "Parallel Tempering",
+        "info:local_vs_global": "global",
+        "info:explore_vs_exploit": "explore",
+        "info:compute": "high",
+    }
+
+    def __init__(
+        self,
+        search_space=None,
+        initialize=None,
+        constraints=None,
+        random_state=None,
+        rand_rest_p=0.1,
+        population: int = 5,
+        n_iter_swap: int = 5,
+        n_iter=100,
+        verbose=False,
+        experiment=None,
+    ):
+        self.random_state = random_state
+        self.rand_rest_p = rand_rest_p
+        self.population = population
+        self.n_iter_swap = n_iter_swap
+        self.search_space = search_space
+        self.initialize = initialize
+        self.constraints = constraints
+        self.n_iter = n_iter
+        self.experiment = experiment
+        self.verbose = verbose
+
+        super().__init__()
+
+    def _get_gfo_class(self):
+        """Get the GFO class to use.
+
+        Returns
+        -------
+        class
+            The GFO class to use. One of the concrete GFO classes
+        """
+        from gradient_free_optimizers import ParallelTemperingOptimizer
+
+        return ParallelTemperingOptimizer
+
+    @classmethod
+    def get_test_params(cls, parameter_set="default"):
+        """Get the test parameters for the optimizer.
+
+        Returns
+        -------
+        dict with str keys
+            The test parameters dictionary.
+        """
+        import numpy as np
+
+        params = super().get_test_params()
+        experiment = params[0]["experiment"]
+        more_params = {
+            "experiment": experiment,
+            "population": 10,
+            "n_iter_swap": 3,
+            "search_space": {
+                "C": [0.01, 0.1, 1, 10],
+                "gamma": [0.0001, 0.01, 0.1, 1, 10],
+            },
+            "n_iter": 100,
+        }
+        params.append(more_params)
+        return params
diff --git a/src/hyperactive/opt/gfo/_particle_swarm_optimization.py b/src/hyperactive/opt/gfo/_particle_swarm_optimization.py
new file mode 100644
index 00000000..a24ac2f0
--- /dev/null
+++ b/src/hyperactive/opt/gfo/_particle_swarm_optimization.py
@@ -0,0 +1,157 @@
+from hyperactive.opt._adapters._gfo import _BaseGFOadapter
+
+
+class ParticleSwarmOptimizer(_BaseGFOadapter):
+    """Particle swarm optimizer.
+
+    Parameters
+    ----------
+    search_space : dict[str, list]
+        The search space to explore. A dictionary with parameter
+        names as keys and a numpy array as values.
+    initialize : dict[str, int]
+        The method to generate initial positions. A dictionary with
+        the following key literals and the corresponding value type:
+        {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]}
+    constraints : list[callable]
+        A list of constraints, where each constraint is a callable.
+        The callable returns `True` or `False` dependent on the input parameters.
+    random_state : None, int
+        If None, create a new random state. If int, create a new random state
+        seeded with the value.
+    rand_rest_p : float
+        The probability of a random iteration during the search process.
+    population : int
+        The number of particles in the swarm.
+    inertia : float
+        The inertia of the swarm.
+    cognitive_weight : float
+        A factor of the movement towards the personal best position of the individual optimizers in the population.
+    social_weight : float
+        A factor of the movement towards the global best position of the individual optimizers in the population.
+    temp_weight : float
+        The temperature weight of the swarm.
+    n_iter : int, default=100
+        The number of iterations to run the optimizer.
+    verbose : bool, default=False
+        If True, print the progress of the optimization process.
+    experiment : BaseExperiment, optional
+        The experiment to optimize parameters for.
+ Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of ParticleSwarmOptimizer with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the particleSwarmOptimizer optimizer: + >>> from hyperactive.opt import ParticleSwarmOptimizer + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = ParticleSwarmOptimizer(experiment=sklearn_exp, **config) + + 3. running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Particle Swarm Optimization", + "info:local_vs_global": "global", + "info:explore_vs_exploit": "explore", + "info:compute": "middle", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + population=10, + inertia=0.5, + cognitive_weight=0.5, + social_weight=0.5, + temp_weight=0.2, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.population = population + self.inertia = inertia + self.cognitive_weight = cognitive_weight + self.social_weight = social_weight + self.temp_weight = temp_weight + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import ParticleSwarmOptimizer + + return ParticleSwarmOptimizer + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. + """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "population": 15, + "inertia": 0.9, + "cognitive_weight": 0.9, + "social_weight": 0.9, + "temp_weight": 0.9, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_pattern_search.py b/src/hyperactive/opt/gfo/_pattern_search.py new file mode 100644 index 00000000..ac8d6d27 --- /dev/null +++ b/src/hyperactive/opt/gfo/_pattern_search.py @@ -0,0 +1,147 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class PatternSearch(_BaseGFOadapter): + """Pattern search optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. 
A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the the search process. + n_positions : int + Number of positions that the pattern consists of. + pattern_size : float + The initial size of the patterns in percentage of the size of the search space in the corresponding dimension. + reduction : float + The factor that reduces the size of the pattern if no better position is found. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of PatternSearch with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the patternSearch optimizer: + >>> from hyperactive.opt import PatternSearch + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = PatternSearch(experiment=sklearn_exp, **config) + + 3. running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Pattern Search", + "info:local_vs_global": "local", + "info:explore_vs_exploit": "explore", + "info:compute": "middle", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + n_positions=4, + pattern_size=0.25, + reduction=0.9, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.n_positions = n_positions + self.pattern_size = pattern_size + self.reduction = reduction + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import PatternSearch + + return PatternSearch + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. 
+ """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "n_positions": 3, + "pattern_size": 0.5, + "reduction": 0.999, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_powells_method.py b/src/hyperactive/opt/gfo/_powells_method.py new file mode 100644 index 00000000..00db4a54 --- /dev/null +++ b/src/hyperactive/opt/gfo/_powells_method.py @@ -0,0 +1,142 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class PowellsMethod(_BaseGFOadapter): + """Powell's method optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the search process. + epsilon : float + The step-size for the climbing. + distribution : str + The type of distribution to sample from. + n_neighbours : int + The number of neighbours to sample and evaluate before moving to the best + of those neighbours. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of PowellsMethod with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the powellsMethod optimizer: + >>> from hyperactive.opt import PowellsMethod + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = PowellsMethod(experiment=sklearn_exp, **config) + + 3. 
running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Powell’s Method", + "info:local_vs_global": "local", + "info:explore_vs_exploit": "exploit", + "info:compute": "low", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + iters_p_dim=10, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.iters_p_dim = iters_p_dim + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import PowellsMethod + + return PowellsMethod + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. + """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "iters_p_dim": 3, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_random_restart_hill_climbing.py b/src/hyperactive/opt/gfo/_random_restart_hill_climbing.py new file mode 100644 index 00000000..8de5bafe --- /dev/null +++ b/src/hyperactive/opt/gfo/_random_restart_hill_climbing.py @@ -0,0 +1,143 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class RandomRestartHillClimbing(_BaseGFOadapter): + """Random restart hill climbing optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the the search process. + epsilon : float + The step-size for the climbing. + distribution : str + The type of distribution to sample from. + n_neighbours : int + The number of neighbours to sample and evaluate before moving to the best + of those neighbours. + n_iter_restart : int + The number of iterations after which to restart at a random position. + + Examples + -------- + Basic usage of RandomRestartHillClimbing with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. 
setting up the randomRestartHillClimbing optimizer: + >>> from hyperactive.opt import RandomRestartHillClimbing + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = RandomRestartHillClimbing(experiment=sklearn_exp, **config) + + 3. running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Random Restart Hill Climbing", + "info:local_vs_global": "local", + "info:explore_vs_exploit": "mixed", + "info:compute": "middle", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + epsilon=0.01, + distribution="normal", + n_neighbours=10, + n_iter_restart=0.5, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.epsilon = epsilon + self.distribution = distribution + self.n_neighbours = n_neighbours + self.n_iter_restart = n_iter_restart + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import RandomRestartHillClimbingOptimizer + + return RandomRestartHillClimbingOptimizer + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. + """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "n_iter_restart": 2, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_random_search.py b/src/hyperactive/opt/gfo/_random_search.py new file mode 100644 index 00000000..06a25887 --- /dev/null +++ b/src/hyperactive/opt/gfo/_random_search.py @@ -0,0 +1,129 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class RandomSearch(_BaseGFOadapter): + """Random search optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + Optional, can be passed later via ``set_params``. + initialize : dict[str, int], default={"grid": 4, "random": 2, "vertices": 4} + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable], default=[] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int, default=None + If None, create a new random state. If int, create a new random state + seeded with the value. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. 
+ experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of RandomSearch with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the randomSearch optimizer: + >>> from hyperactive.opt import RandomSearch + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = RandomSearch(experiment=sklearn_exp, **config) + + 3. running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Random Search", + "info:local_vs_global": "global", + "info:explore_vs_exploit": "explore", + "info:compute": "low", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import RandomSearchOptimizer + + return RandomSearchOptimizer + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. + """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/hillclimbing_repulsing/_hillclimbing_repulsing.py b/src/hyperactive/opt/gfo/_repulsing_hillclimbing.py similarity index 91% rename from src/hyperactive/opt/hillclimbing_repulsing/_hillclimbing_repulsing.py rename to src/hyperactive/opt/gfo/_repulsing_hillclimbing.py index 7b50c62a..e76ba5d7 100644 --- a/src/hyperactive/opt/hillclimbing_repulsing/_hillclimbing_repulsing.py +++ b/src/hyperactive/opt/gfo/_repulsing_hillclimbing.py @@ -1,10 +1,11 @@ """Hill climbing optimizer from gfo.""" + # copyright: hyperactive developers, MIT License (see LICENSE file) from hyperactive.opt._adapters._gfo import _BaseGFOadapter -class HillClimbingRepulsing(_BaseGFOadapter): +class RepulsingHillClimbing(_BaseGFOadapter): """Repulsing hill climbing optimizer. Parameters @@ -60,17 +61,17 @@ class HillClimbingRepulsing(_BaseGFOadapter): ... ) 2. setting up the hill climbing optimizer: - >>> from hyperactive.opt import HillClimbingRepulsing + >>> from hyperactive.opt import RepulsingHillClimbing >>> import numpy as np - >>> - >>> hc_config = { + >>> + >>> config = { ... "search_space": { - ... "C": np.array([0.01, 0.1, 1, 10]), - ... 
"gamma": np.array([0.0001, 0.01, 0.1, 1, 10]), + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], ... }, ... "n_iter": 100, ... } - >>> hillclimbing = HillClimbingRepulsing(experiment=sklearn_exp, **hc_config) + >>> hillclimbing = RepulsingHillClimbing(experiment=sklearn_exp, **config) 3. running the hill climbing search: >>> best_params = hillclimbing.run() @@ -145,8 +146,8 @@ def get_test_params(cls, parameter_set="default"): "experiment": experiment, "repulsion_factor": 7, "search_space": { - "C": np.array([0.01, 0.1, 1, 10]), - "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]), + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], }, "n_iter": 100, } diff --git a/src/hyperactive/opt/gfo/_simulated_annealing.py b/src/hyperactive/opt/gfo/_simulated_annealing.py new file mode 100644 index 00000000..6a169238 --- /dev/null +++ b/src/hyperactive/opt/gfo/_simulated_annealing.py @@ -0,0 +1,155 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class SimulatedAnnealing(_BaseGFOadapter): + """Simulated annealing optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + initialize : dict[str, int] + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float + The probability of a random iteration during the the search process. + epsilon : float + The step-size for the climbing. + distribution : str + The type of distribution to sample from. + n_neighbours : int + The number of neighbours to sample and evaluate before moving to the best + of those neighbours. + annealing_rate : float + The rate at which the temperature is annealed. + start_temp : float + The initial temperature. + n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of SimulatedAnnealing with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the simulatedAnnealing optimizer: + >>> from hyperactive.opt import SimulatedAnnealing + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = SimulatedAnnealing(experiment=sklearn_exp, **config) + + 3. 
running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Simulated Annealing", + "info:local_vs_global": "global", + "info:explore_vs_exploit": "explore", + "info:compute": "middle", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + epsilon=0.01, + distribution="normal", + n_neighbours=10, + annealing_rate=0.97, + start_temp=1, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.epsilon = epsilon + self.distribution = distribution + self.n_neighbours = n_neighbours + self.annealing_rate = annealing_rate + self.start_temp = start_temp + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import SimulatedAnnealingOptimizer + + return SimulatedAnnealingOptimizer + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. + """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "start_temp": 0.33, + "annealing_rate": 1.01, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/gfo/_spiral_optimization.py b/src/hyperactive/opt/gfo/_spiral_optimization.py new file mode 100644 index 00000000..e7fc457c --- /dev/null +++ b/src/hyperactive/opt/gfo/_spiral_optimization.py @@ -0,0 +1,144 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class SpiralOptimization(_BaseGFOadapter): + """Spiral optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + Optional, can be passed later via ``set_params``. + initialize : dict[str, int], default={"grid": 4, "random": 2, "vertices": 4} + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable], default=[] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int, default=None + If None, create a new random state. If int, create a new random state + seeded with the value. + rand_rest_p : float, default=0.1 + The probability of a random iteration during the the search process. + population : int + The number of particles in the swarm. + decay_rate : float + This parameter is a factor, that influences the radius of the particles during their spiral movement. + Lower values accelerates the convergence of the particles to the best known position, while values above 1 eventually lead to a movement where the particles spiral away from each other. 
+ n_iter : int, default=100 + The number of iterations to run the optimizer. + verbose : bool, default=False + If True, print the progress of the optimization process. + experiment : BaseExperiment, optional + The experiment to optimize parameters for. + Optional, can be passed later via ``set_params``. + + Examples + -------- + Basic usage of SpiralOptimization with a scikit-learn experiment: + + 1. defining the experiment to optimize: + >>> from hyperactive.experiment.integrations import SklearnCvExperiment + >>> from sklearn.datasets import load_iris + >>> from sklearn.svm import SVC + >>> + >>> X, y = load_iris(return_X_y=True) + >>> + >>> sklearn_exp = SklearnCvExperiment( + ... estimator=SVC(), + ... X=X, + ... y=y, + ... ) + + 2. setting up the spiralOptimization optimizer: + >>> from hyperactive.opt import SpiralOptimization + >>> import numpy as np + >>> + >>> config = { + ... "search_space": { + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], + ... }, + ... "n_iter": 100, + ... } + >>> optimizer = SpiralOptimization(experiment=sklearn_exp, **config) + + 3. running the optimization: + >>> best_params = optimizer.run() + + Best parameters can also be accessed via: + >>> best_params = optimizer.best_params_ + """ + + _tags = { + "info:name": "Spiral Optimization", + "info:local_vs_global": "mixed", + "info:explore_vs_exploit": "explore", + "info:compute": "middle", + } + + def __init__( + self, + search_space=None, + initialize=None, + constraints=None, + random_state=None, + rand_rest_p=0.1, + population: int = 10, + decay_rate: float = 0.99, + n_iter=100, + verbose=False, + experiment=None, + ): + self.random_state = random_state + self.rand_rest_p = rand_rest_p + self.population = population + self.decay_rate = decay_rate + self.search_space = search_space + self.initialize = initialize + self.constraints = constraints + self.n_iter = n_iter + self.experiment = experiment + self.verbose = verbose + + super().__init__() + + def _get_gfo_class(self): + """Get the GFO class to use. + + Returns + ------- + class + The GFO class to use. One of the concrete GFO classes + """ + from gradient_free_optimizers import SpiralOptimization + + return SpiralOptimization + + @classmethod + def get_test_params(cls, parameter_set="default"): + """Get the test parameters for the optimizer. + + Returns + ------- + dict with str keys + The test parameters dictionary. 
+ """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "population": 20, + "decay_rate": 0.9999, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/hillclimbing_stochastic/_hillclimbing_stochastic.py b/src/hyperactive/opt/gfo/_stochastic_hillclimbing.py similarity index 91% rename from src/hyperactive/opt/hillclimbing_stochastic/_hillclimbing_stochastic.py rename to src/hyperactive/opt/gfo/_stochastic_hillclimbing.py index c44ad9bd..7b8f7e40 100644 --- a/src/hyperactive/opt/hillclimbing_stochastic/_hillclimbing_stochastic.py +++ b/src/hyperactive/opt/gfo/_stochastic_hillclimbing.py @@ -1,10 +1,11 @@ """Hill climbing optimizer from gfo.""" + # copyright: hyperactive developers, MIT License (see LICENSE file) from hyperactive.opt._adapters._gfo import _BaseGFOadapter -class HillClimbingStochastic(_BaseGFOadapter): +class StochasticHillClimbing(_BaseGFOadapter): """Stochastic hill climbing optimizer. Parameters @@ -60,17 +61,17 @@ class HillClimbingStochastic(_BaseGFOadapter): ... ) 2. setting up the hill climbing optimizer: - >>> from hyperactive.opt import HillClimbingStochastic + >>> from hyperactive.opt import StochasticHillClimbing >>> import numpy as np - >>> - >>> hc_config = { + >>> + >>> config = { ... "search_space": { - ... "C": np.array([0.01, 0.1, 1, 10]), - ... "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]), + ... "C": [0.01, 0.1, 1, 10], + ... "gamma": [0.0001, 0.01, 0.1, 1, 10], ... }, ... "n_iter": 100, ... } - >>> hillclimbing = HillClimbingStochastic(experiment=sklearn_exp, **hc_config) + >>> hillclimbing = StochasticHillClimbing(experiment=sklearn_exp, **config) 3. running the hill climbing search: >>> best_params = hillclimbing.run() @@ -145,8 +146,8 @@ def get_test_params(cls, parameter_set="default"): "experiment": experiment, "p_accept": 0.33, "search_space": { - "C": np.array([0.01, 0.1, 1, 10]), - "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]), + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], }, "n_iter": 100, } diff --git a/src/hyperactive/opt/gfo/_tree_structured_parzen_estimators.py b/src/hyperactive/opt/gfo/_tree_structured_parzen_estimators.py new file mode 100644 index 00000000..765307b2 --- /dev/null +++ b/src/hyperactive/opt/gfo/_tree_structured_parzen_estimators.py @@ -0,0 +1,156 @@ +from hyperactive.opt._adapters._gfo import _BaseGFOadapter + + +class TreeStructuredParzenEstimators(_BaseGFOadapter): + """Tree structured parzen estimators optimizer. + + Parameters + ---------- + search_space : dict[str, list] + The search space to explore. A dictionary with parameter + names as keys and a numpy array as values. + Optional, can be passed later via ``set_params``. + initialize : dict[str, int], default={"grid": 4, "random": 2, "vertices": 4} + The method to generate initial positions. A dictionary with + the following key literals and the corresponding value type: + {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} + constraints : list[callable], default=[] + A list of constraints, where each constraint is a callable. + The callable returns `True` or `False` dependend on the input parameters. + random_state : None, int, default=None + If None, create a new random state. If int, create a new random state + seeded with the value. 
+    rand_rest_p : float, default=0.1
+        The probability of a random iteration during the search process.
+    warm_start_smbo : optional, default=None
+        The warm start data for SMBO.
+    max_sample_size : int
+        The maximum number of points to sample.
+    sampling : dict
+        The sampling method to use.
+    replacement : bool
+        Whether to sample with replacement.
+    gamma_tpe : float
+        The parameter for the Tree Structured Parzen Estimators.
+    n_iter : int, default=100
+        The number of iterations to run the optimizer.
+    verbose : bool, default=False
+        If True, print the progress of the optimization process.
+    experiment : BaseExperiment, optional
+        The experiment to optimize parameters for.
+        Optional, can be passed later via ``set_params``.
+
+    Examples
+    --------
+    Basic usage of TreeStructuredParzenEstimators with a scikit-learn experiment:
+
+    1. defining the experiment to optimize:
+    >>> from hyperactive.experiment.integrations import SklearnCvExperiment
+    >>> from sklearn.datasets import load_iris
+    >>> from sklearn.svm import SVC
+    >>>
+    >>> X, y = load_iris(return_X_y=True)
+    >>>
+    >>> sklearn_exp = SklearnCvExperiment(
+    ...     estimator=SVC(),
+    ...     X=X,
+    ...     y=y,
+    ... )
+
+    2. setting up the TreeStructuredParzenEstimators optimizer:
+    >>> from hyperactive.opt import TreeStructuredParzenEstimators
+    >>> import numpy as np
+    >>>
+    >>> config = {
+    ...     "search_space": {
+    ...         "C": [0.01, 0.1, 1, 10],
+    ...         "gamma": [0.0001, 0.01, 0.1, 1, 10],
+    ...     },
+    ...     "n_iter": 100,
+    ... }
+    >>> optimizer = TreeStructuredParzenEstimators(experiment=sklearn_exp, **config)
+
+    3. running the optimization:
+    >>> best_params = optimizer.run()
+
+    Best parameters can also be accessed via:
+    >>> best_params = optimizer.best_params_
+    """
+
+    _tags = {
+        "info:name": "Tree Structured Parzen Estimators",
+        "info:local_vs_global": "mixed",  # "local", "mixed", "global"
+        "info:explore_vs_exploit": "mixed",  # "explore", "exploit", "mixed"
+        "info:compute": "high",  # "low", "middle", "high"
+    }
+
+    def __init__(
+        self,
+        search_space=None,
+        initialize=None,
+        constraints=None,
+        random_state=None,
+        rand_rest_p=0.1,
+        warm_start_smbo=None,
+        max_sample_size=10000000,
+        sampling=None,
+        replacement=True,
+        gamma_tpe=0.2,
+        n_iter=100,
+        verbose=False,
+        experiment=None,
+    ):
+        self.random_state = random_state
+        self.rand_rest_p = rand_rest_p
+        self.warm_start_smbo = warm_start_smbo
+        self.max_sample_size = max_sample_size
+        self.sampling = sampling
+        self.replacement = replacement
+        self.gamma_tpe = gamma_tpe
+        self.search_space = search_space
+        self.initialize = initialize
+        self.constraints = constraints
+        self.n_iter = n_iter
+        self.experiment = experiment
+        self.verbose = verbose
+
+        super().__init__()
+
+    def _get_gfo_class(self):
+        """Get the GFO class to use.
+
+        Returns
+        -------
+        class
+            The GFO class to use. One of the concrete GFO classes
+        """
+        from gradient_free_optimizers import TreeStructuredParzenEstimators
+
+        return TreeStructuredParzenEstimators
+
+    @classmethod
+    def get_test_params(cls, parameter_set="default"):
+        """Get the test parameters for the optimizer.
+
+        Returns
+        -------
+        dict with str keys
+            The test parameters dictionary.
+ """ + import numpy as np + + params = super().get_test_params() + experiment = params[0]["experiment"] + more_params = { + "experiment": experiment, + "max_sample_size": 100, + "replacement": True, + "gamma_tpe": 0.01, + "search_space": { + "C": [0.01, 0.1, 1, 10], + "gamma": [0.0001, 0.01, 0.1, 1, 10], + }, + "n_iter": 100, + } + params.append(more_params) + return params diff --git a/src/hyperactive/opt/hillclimbing/__init__.py b/src/hyperactive/opt/hillclimbing/__init__.py deleted file mode 100644 index 4acd2fbf..00000000 --- a/src/hyperactive/opt/hillclimbing/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Hill climbing optimizer.""" -# copyright: hyperactive developers, MIT License (see LICENSE file) - -from hyperactive.opt.hillclimbing._hillclimbing import HillClimbing - -__all__ = ["HillClimbing"] diff --git a/src/hyperactive/opt/hillclimbing_repulsing/__init__.py b/src/hyperactive/opt/hillclimbing_repulsing/__init__.py deleted file mode 100644 index f6bd0b16..00000000 --- a/src/hyperactive/opt/hillclimbing_repulsing/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Hill climbing optimizer.""" -# copyright: hyperactive developers, MIT License (see LICENSE file) - -from hyperactive.opt.hillclimbing_repulsing._hillclimbing_repulsing import ( - HillClimbingRepulsing, -) - -__all__ = ["HillClimbingRepulsing"] diff --git a/src/hyperactive/opt/hillclimbing_stochastic/__init__.py b/src/hyperactive/opt/hillclimbing_stochastic/__init__.py deleted file mode 100644 index f7d1e78b..00000000 --- a/src/hyperactive/opt/hillclimbing_stochastic/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Hill climbing optimizer.""" -# copyright: hyperactive developers, MIT License (see LICENSE file) - -from hyperactive.opt.hillclimbing_stochastic._hillclimbing_stochastic import ( - HillClimbingStochastic, -) - -__all__ = ["HillClimbingStochastic"] diff --git a/src/hyperactive/tests/test_all_objects.py b/src/hyperactive/tests/test_all_objects.py index 84257db0..b5b4955f 100644 --- a/src/hyperactive/tests/test_all_objects.py +++ b/src/hyperactive/tests/test_all_objects.py @@ -4,6 +4,7 @@ import shutil from skbase.testing import BaseFixtureGenerator as _BaseFixtureGenerator +from skbase.testing import QuickTester as _QuickTester from skbase.testing import TestAllObjects as _TestAllObjects from hyperactive._registry import all_objects @@ -154,7 +155,7 @@ class ExperimentFixtureGenerator(BaseFixtureGenerator): object_type_filter = "experiment" -class TestAllExperiments(ExperimentFixtureGenerator): +class TestAllExperiments(ExperimentFixtureGenerator, _QuickTester): """Module level tests for all experiment classes.""" def test_paramnames(self, object_class): @@ -165,9 +166,9 @@ def test_paramnames(self, object_class): for inst, obj_param in zip(inst_params, obj_params): obj_inst = object_class(**inst) paramnames = obj_inst.paramnames() - assert set(obj_param.keys()) <= set(paramnames), ( - f"Parameter names do not match: {paramnames} != {obj_param}" - ) + assert set(obj_param.keys()) <= set( + paramnames + ), f"Parameter names do not match: {paramnames} != {obj_param}" def test_score_function(self, object_class): """Test that substituting into score works as intended.""" @@ -204,16 +205,14 @@ class OptimizerFixtureGenerator(BaseFixtureGenerator): object_type_filter = "optimizer" -class TestAllOptimizers(OptimizerFixtureGenerator): +class TestAllOptimizers(OptimizerFixtureGenerator, _QuickTester): """Module level tests for all optimizer classes.""" def test_opt_run(self, object_instance): """Test that run returns the 
expected result.""" paramnames = object_instance.get_params().keys() if "experiment" not in paramnames: - raise ValueError( - "Optimizer must have an 'experiment' parameter." - ) + raise ValueError("Optimizer must have an 'experiment' parameter.") # check that experiment occurs last in __init__ signature if not object_instance.__init__.__code__.co_varnames[-1] == "experiment": raise ValueError( @@ -255,6 +254,7 @@ def test_gfo_integration(self, object_instance): Runs the optimizer on the sklearn tuning experiment. """ from hyperactive.opt._adapters._gfo import _BaseGFOadapter + if not isinstance(object_instance, _BaseGFOadapter): return None @@ -282,8 +282,8 @@ def test_gfo_integration(self, object_instance): _config = { "search_space": { - "C": np.array([0.01, 0.1, 1, 10]), - "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]), + "C": np.array([0.01, 0.1, 1, 10]), + "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]), }, "n_iter": 100, "experiment": sklearn_exp, diff --git a/src/hyperactive/tests/test_class_register.py b/src/hyperactive/tests/test_class_register.py new file mode 100644 index 00000000..dfbd1f1f --- /dev/null +++ b/src/hyperactive/tests/test_class_register.py @@ -0,0 +1,94 @@ +# copyright: skpro developers, BSD-3-Clause License (see LICENSE file) +"""Registry and dispatcher for test classes. + +Module does not contain tests, only test utilities. +""" + +__author__ = ["fkiraly"] + +from inspect import isclass + + +def get_test_class_registry(): + """Return test class registry. + + Wrapped in a function to avoid circular imports. + + Returns + ------- + testclass_dict : dict + test class registry + keys are scitypes, values are test classes TestAll[Scitype] + """ + from hyperactive.tests.test_all_objects import ( + TestAllExperiments, + TestAllObjects, + TestAllOptimizers, + ) + + testclass_dict = dict() + # every object in sktime inherits from BaseObject + # "object" tests are run for all objects + testclass_dict["object"] = TestAllObjects + # more specific base classes + # these inherit either from BaseEstimator or BaseObject, + # so also imply estimator and object tests, or only object tests + testclass_dict["experiment"] = TestAllExperiments + testclass_dict["optimizer"] = TestAllOptimizers + + return testclass_dict + + +def get_test_classes_for_obj(obj): + """Get all test classes relevant for an object or estimator. 
+ + Parameters + ---------- + obj : object or estimator, descendant of sktime BaseObject or BaseEstimator + object or estimator for which to get test classes + + Returns + ------- + test_classes : list of test classes + list of test classes relevant for obj + these are references to the actual classes, not strings + if obj was not a descendant of BaseObject or BaseEstimator, returns empty list + """ + from skbase.base import BaseObject + + def is_object(obj): + """Return whether obj is an estimator class or estimator object.""" + if isclass(obj): + return issubclass(obj, BaseObject) + else: + return isinstance(obj, BaseObject) + + # warning: BaseEstimator does not inherit from BaseObject, + # therefore we need to check both + if not is_object(obj): + return [] + + testclass_dict = get_test_class_registry() + + # we always need to run "object" tests + test_clss = [testclass_dict["object"]] + + try: + if isclass(obj): + obj_scitypes = obj.get_class_tag("object_type") + elif hasattr(obj, "get_tag"): + obj_scitypes = obj.get_tag("object_type") + else: + obj_scitypes = [] + except Exception: + obj_scitypes = [] + + if isinstance(obj_scitypes, str): + # if obj_scitypes is a string, convert to list + obj_scitypes = [obj_scitypes] + + for obj_scitype in obj_scitypes: + if obj_scitype in testclass_dict: + test_clss += [testclass_dict[obj_scitype]] + + return test_clss diff --git a/src/hyperactive/utils/__init__.py b/src/hyperactive/utils/__init__.py new file mode 100644 index 00000000..c9c88720 --- /dev/null +++ b/src/hyperactive/utils/__init__.py @@ -0,0 +1,7 @@ +"""Utility functionality.""" + +from hyperactive.utils.estimator_checks import check_estimator + +__all__ = [ + "check_estimator", +] diff --git a/src/hyperactive/utils/estimator_checks.py b/src/hyperactive/utils/estimator_checks.py new file mode 100644 index 00000000..1bc9f793 --- /dev/null +++ b/src/hyperactive/utils/estimator_checks.py @@ -0,0 +1,139 @@ +"""Estimator checker for extension.""" + +__author__ = ["fkiraly"] +__all__ = ["check_estimator"] + +from skbase.utils.dependencies import _check_soft_dependencies + + +def check_estimator( + estimator, + raise_exceptions=False, + tests_to_run=None, + fixtures_to_run=None, + verbose=True, + tests_to_exclude=None, + fixtures_to_exclude=None, +): + """Run all tests on one single estimator. + + Tests that are run on estimator: + + * all tests in test_all_estimators + * all interface compatibility tests from the module of estimator's scitype + + Parameters + ---------- + estimator : estimator class or estimator instance + raise_exceptions : bool, optional, default=False + whether to return exceptions/failures in the results dict, or raise them + + * if False: returns exceptions in returned `results` dict + * if True: raises exceptions as they occur + + tests_to_run : str or list of str, optional. Default = run all tests. + Names (test/function name string) of tests to run. + sub-sets tests that are run to the tests given here. + fixtures_to_run : str or list of str, optional. Default = run all tests. + pytest test-fixture combination codes, which test-fixture combinations to run. + sub-sets tests and fixtures to run to the list given here. + If both tests_to_run and fixtures_to_run are provided, runs the *union*, + i.e., all test-fixture combinations for tests in tests_to_run, + plus all test-fixture combinations in fixtures_to_run. + verbose : str, optional, default=True. + whether to print out informative summary of tests run. 
+    tests_to_exclude : str or list of str, names of tests to exclude. default = None
+        removes tests that should not be run, after subsetting via tests_to_run.
+    fixtures_to_exclude : str or list of str, fixtures to exclude. default = None
+        removes test-fixture combinations that should not be run.
+        This is done after subsetting via fixtures_to_run.
+
+    Returns
+    -------
+    results : dict of results of the tests in self
+        keys are test/fixture strings, identical as in pytest, e.g., test[fixture]
+        entries are the string "PASSED" if the test passed,
+        or the exception raised if the test did not pass
+        returned only if all tests pass, or raise_exceptions=False
+
+    Raises
+    ------
+    if raise_exceptions=True,
+    raises any exception produced by the tests directly
+
+    Examples
+    --------
+    >>> from hyperactive.opt import HillClimbing
+    >>> from hyperactive.utils import check_estimator
+
+    Running all tests for the HillClimbing class,
+    this uses all instances from get_test_params and compatible scenarios
+
+    >>> results = check_estimator(HillClimbing)
+    All tests PASSED!
+
+    Running all tests for a specific HillClimbing instance,
+    this uses the instance that is passed and compatible scenarios
+
+    >>> specific_hill_climbing = HillClimbing.create_test_instance()
+    >>> results = check_estimator(specific_hill_climbing)
+    All tests PASSED!
+
+    Running a specific test (all fixtures) for HillClimbing
+
+    >>> results = check_estimator(HillClimbing, tests_to_run="test_clone")
+    All tests PASSED!
+
+    {'test_clone[HillClimbing-0]': 'PASSED',
+    'test_clone[HillClimbing-1]': 'PASSED'}
+
+    Running one specific test-fixture-combination for HillClimbing
+
+    >>> check_estimator(
+    ...     HillClimbing, fixtures_to_run="test_clone[HillClimbing-1]"
+    ... )
+    All tests PASSED!
+    {'test_clone[HillClimbing-1]': 'PASSED'}
+    """
+    msg = (
+        "check_estimator is a testing utility for developers, and "
+        "requires pytest to be present "
+        "in the python environment, but pytest was not found. "
+        "pytest is a developer dependency and not included in the base "
+        "hyperactive installation. Please run: `pip install pytest` to "
+        "install the pytest package. "
+        "To install hyperactive with all developer dependencies, run:"
+        " `pip install hyperactive[dev]`"
+    )
+    _check_soft_dependencies("pytest", msg=msg)
+
+    from hyperactive.tests.test_class_register import get_test_classes_for_obj
+
+    test_clss_for_est = get_test_classes_for_obj(estimator)
+
+    results = {}
+
+    for test_cls in test_clss_for_est:
+        test_cls_results = test_cls().run_tests(
+            obj=estimator,
+            raise_exceptions=raise_exceptions,
+            tests_to_run=tests_to_run,
+            fixtures_to_run=fixtures_to_run,
+            tests_to_exclude=tests_to_exclude,
+            fixtures_to_exclude=fixtures_to_exclude,
+        )
+        results.update(test_cls_results)
+
+    failed_tests = [key for key in results.keys() if results[key] != "PASSED"]
+    if len(failed_tests) > 0:
+        msg = failed_tests
+        msg = ["FAILED: " + x for x in msg]
+        msg = "\n".join(msg)
+    else:
+        msg = "All tests PASSED!"
+
+    if verbose:
+        # printing is an intended feature, for console usage and interactive debugging
+        print(msg)  # noqa T001
+
+    return results
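
Note, not part of the patch itself: the snippet below is a minimal, illustrative sketch of how the pieces introduced above fit together, namely one of the new GFO-backed optimizers, the SklearnCvExperiment it tunes, and the new check_estimator utility. It assumes the interfaces exactly as defined in this diff (run(), best_params_, check_estimator) and that scikit-learn and pytest are installed; parameter values are arbitrary examples.

# Illustrative sketch only, not part of the patch.
# Assumes hyperactive with the changes above, plus scikit-learn
# (and pytest, which check_estimator requires).
from sklearn.datasets import load_iris
from sklearn.svm import SVC

from hyperactive.experiment.integrations import SklearnCvExperiment
from hyperactive.opt import ParticleSwarmOptimizer
from hyperactive.utils import check_estimator

# 1. wrap a cross-validated SVC evaluation as an experiment
X, y = load_iris(return_X_y=True)
experiment = SklearnCvExperiment(estimator=SVC(), X=X, y=y)

# 2. configure and run one of the newly added GFO-backed optimizers
optimizer = ParticleSwarmOptimizer(
    experiment=experiment,
    search_space={
        "C": [0.01, 0.1, 1, 10],
        "gamma": [0.0001, 0.01, 0.1, 1, 10],
    },
    population=10,
    n_iter=50,
)
best_params = optimizer.run()
print("best parameters:", best_params)        # returned by run()
print("same, as attribute:", optimizer.best_params_)

# 3. run the interface conformance tests shipped with this patch
results = check_estimator(ParticleSwarmOptimizer, raise_exceptions=False)

The same pattern applies to any of the other adapters added in this diff (SimulatedAnnealing, PatternSearch, TreeStructuredParzenEstimators, and so on), since they all share the _BaseGFOadapter interface.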