.github/workflows/test.yml (1 addition, 1 deletion)
@@ -99,7 +99,7 @@ jobs:
exclude:
- os: "windows-latest"
python-version: "3.13"

fail-fast: false

runs-on: ${{ matrix.os }}
src/hyperactive/experiment/integrations/__init__.py (8 additions, 1 deletion)
@@ -2,8 +2,15 @@
# copyright: hyperactive developers, MIT License (see LICENSE file)

from hyperactive.experiment.integrations.sklearn_cv import SklearnCvExperiment
from hyperactive.experiment.integrations.sktime_classification import (
SktimeClassificationExperiment,
)
from hyperactive.experiment.integrations.sktime_forecasting import (
SktimeForecastingExperiment,
)

__all__ = ["SklearnCvExperiment", "SktimeForecastingExperiment"]
__all__ = [
"SklearnCvExperiment",
"SktimeClassificationExperiment",
"SktimeForecastingExperiment",
]
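With the widened __all__, the new classification experiment is importable alongside the existing integrations. A minimal sketch of the resulting public imports, assuming only the package layout shown in this diff:

from hyperactive.experiment.integrations import (
    SklearnCvExperiment,
    SktimeClassificationExperiment,
    SktimeForecastingExperiment,
)
# all three experiment classes now resolve from the integrations package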
src/hyperactive/experiment/integrations/_skl_metrics.py (new file, 128 additions)
@@ -0,0 +1,128 @@
"""Integration utilities for sklearn metrics with Hyperactive."""

__all__ = ["_coerce_to_scorer", "_guess_sign_of_sklmetric"]


def _coerce_to_scorer(scoring, estimator):
"""Coerce scoring argument into a sklearn scorer.

Parameters
----------
scoring : str, callable, or None
The scoring strategy to use.
estimator : estimator object or str
The estimator to use for default scoring if scoring is None.

        If str, indicates the estimator type; should be one of {"classifier", "regressor"}.

Returns
-------
scorer : callable
A sklearn scorer callable.
        Follows the unified sklearn scorer interface.
"""
from sklearn.metrics import check_scoring

# check if scoring is a scorer by checking for "estimator" in signature
if scoring is None:
if isinstance(estimator, str):
if estimator == "classifier":
from sklearn.metrics import accuracy_score

scoring = accuracy_score
elif estimator == "regressor":
from sklearn.metrics import r2_score

scoring = r2_score
else:
return check_scoring(estimator)

# check using inspect.signature for "estimator" in signature
if callable(scoring):
from inspect import signature

if "estimator" in signature(scoring).parameters:
return scoring
else:
from sklearn.metrics import make_scorer

return make_scorer(scoring)
else:
# scoring is a string (scorer name)
return check_scoring(estimator, scoring=scoring)
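A hedged usage sketch of the three branches of _coerce_to_scorer above; the estimator and metric names are illustrative choices, not taken from the diff:

from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score

est = LogisticRegression()

# scoring=None with an estimator instance: delegates to check_scoring(estimator)
default_scorer = _coerce_to_scorer(None, est)

# a plain metric callable without an "estimator" parameter: wrapped via make_scorer
metric_scorer = _coerce_to_scorer(f1_score, est)

# a string scorer name: resolved via check_scoring(estimator, scoring="accuracy")
named_scorer = _coerce_to_scorer("accuracy", est)

# each result follows the unified scorer interface: scorer(estimator, X, y)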


def _guess_sign_of_sklmetric(scorer):
"""Guess the sign of a sklearn metric scorer.

Parameters
----------
scorer : callable
The sklearn metric scorer to guess the sign for.

Returns
-------
int
1 if higher scores are better, -1 if lower scores are better.
"""
HIGHER_IS_BETTER = {
# Classification
"accuracy_score": True,
"auc": True,
"average_precision_score": True,
"balanced_accuracy_score": True,
"brier_score_loss": False,
"class_likelihood_ratios": False,
"cohen_kappa_score": True,
"d2_log_loss_score": True,
"dcg_score": True,
"f1_score": True,
"fbeta_score": True,
"hamming_loss": False,
"hinge_loss": False,
"jaccard_score": True,
"log_loss": False,
"matthews_corrcoef": True,
"ndcg_score": True,
"precision_score": True,
"recall_score": True,
"roc_auc_score": True,
"top_k_accuracy_score": True,
"zero_one_loss": False,
# Regression
"d2_absolute_error_score": True,
"d2_pinball_score": True,
"d2_tweedie_score": True,
"explained_variance_score": True,
"max_error": False,
"mean_absolute_error": False,
"mean_absolute_percentage_error": False,
"mean_gamma_deviance": False,
"mean_pinball_loss": False,
"mean_poisson_deviance": False,
"mean_squared_error": False,
"mean_squared_log_error": False,
"mean_tweedie_deviance": False,
"median_absolute_error": False,
"r2_score": True,
"root_mean_squared_error": False,
"root_mean_squared_log_error": False,
}

    scorer_name = getattr(scorer, "__name__", "")  # empty default keeps the suffix checks safe

if hasattr(scorer, "greater_is_better"):
return 1 if scorer.greater_is_better else -1
elif scorer_name in HIGHER_IS_BETTER:
return 1 if HIGHER_IS_BETTER[scorer_name] else -1
elif scorer_name.endswith("_score"):
# If the scorer name ends with "_score", we assume higher is better
return 1
elif scorer_name.endswith("_loss") or scorer_name.endswith("_deviance"):
        # If the name ends with "_loss" or "_deviance", we assume lower is better
return -1
elif scorer_name.endswith("_error"):
return -1
else:
# If we cannot determine the sign, we assume lower is better
return -1
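A hedged sketch of how the sign guess resolves for a few metrics; the first three names come from sklearn.metrics and the lookup table above, while custom_overlap_score is a hypothetical function defined only to exercise the "_score" suffix fallback:

from sklearn.metrics import log_loss, mean_squared_error, r2_score

assert _guess_sign_of_sklmetric(r2_score) == 1             # listed: higher is better
assert _guess_sign_of_sklmetric(mean_squared_error) == -1  # listed: lower is better
assert _guess_sign_of_sklmetric(log_loss) == -1            # listed: lower is better


def custom_overlap_score(y_true, y_pred):
    """Hypothetical metric; its name triggers the "_score" suffix fallback."""
    return sum(t == p for t, p in zip(y_true, y_pred)) / len(y_true)


assert _guess_sign_of_sklmetric(custom_overlap_score) == 1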
src/hyperactive/experiment/integrations/sklearn_cv.py (5 additions, 93 deletions)
@@ -3,11 +3,14 @@
# copyright: hyperactive developers, MIT License (see LICENSE file)

from sklearn import clone
from sklearn.metrics import check_scoring
from sklearn.model_selection import cross_validate
from sklearn.utils.validation import _num_samples

from hyperactive.base import BaseExperiment
from hyperactive.experiment.integrations._skl_metrics import (
_coerce_to_scorer,
_guess_sign_of_sklmetric,
)


class SklearnCvExperiment(BaseExperiment):
@@ -97,22 +100,7 @@ def __init__(self, estimator, X, y, scoring=None, cv=None):
else:
self._cv = cv

# check if scoring is a scorer by checking for "estimator" in signature
if scoring is None:
self._scoring = check_scoring(self.estimator)
# check using inspect.signature for "estimator" in signature
elif callable(scoring):
from inspect import signature

if "estimator" in signature(scoring).parameters:
self._scoring = scoring
else:
from sklearn.metrics import make_scorer

self._scoring = make_scorer(scoring)
else:
# scoring is a string (scorer name)
self._scoring = check_scoring(self.estimator, scoring=scoring)
self._scoring = _coerce_to_scorer(scoring, self.estimator)
self.scorer_ = self._scoring

# Set the sign of the scoring function
@@ -281,79 +269,3 @@ def _get_score_params(self):
score_params_defaults,
]
return params


def _guess_sign_of_sklmetric(scorer):
"""Guess the sign of a sklearn metric scorer.

Parameters
----------
scorer : callable
The sklearn metric scorer to guess the sign for.

Returns
-------
int
1 if higher scores are better, -1 if lower scores are better.
"""
HIGHER_IS_BETTER = {
# Classification
"accuracy_score": True,
"auc": True,
"average_precision_score": True,
"balanced_accuracy_score": True,
"brier_score_loss": False,
"class_likelihood_ratios": False,
"cohen_kappa_score": True,
"d2_log_loss_score": True,
"dcg_score": True,
"f1_score": True,
"fbeta_score": True,
"hamming_loss": False,
"hinge_loss": False,
"jaccard_score": True,
"log_loss": False,
"matthews_corrcoef": True,
"ndcg_score": True,
"precision_score": True,
"recall_score": True,
"roc_auc_score": True,
"top_k_accuracy_score": True,
"zero_one_loss": False,
# Regression
"d2_absolute_error_score": True,
"d2_pinball_score": True,
"d2_tweedie_score": True,
"explained_variance_score": True,
"max_error": False,
"mean_absolute_error": False,
"mean_absolute_percentage_error": False,
"mean_gamma_deviance": False,
"mean_pinball_loss": False,
"mean_poisson_deviance": False,
"mean_squared_error": False,
"mean_squared_log_error": False,
"mean_tweedie_deviance": False,
"median_absolute_error": False,
"r2_score": True,
"root_mean_squared_error": False,
"root_mean_squared_log_error": False,
}

scorer_name = getattr(scorer, "__name__", None)

if hasattr(scorer, "greater_is_better"):
return 1 if scorer.greater_is_better else -1
elif scorer_name in HIGHER_IS_BETTER:
return 1 if HIGHER_IS_BETTER[scorer_name] else -1
elif scorer_name.endswith("_score"):
# If the scorer name ends with "_score", we assume higher is better
return 1
elif scorer_name.endswith("_loss") or scorer_name.endswith("_deviance"):
# If the scorer name ends with "_loss", we assume lower is better
return -1
elif scorer_name.endswith("_error"):
return -1
else:
# If we cannot determine the sign, we assume lower is better
return -1
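Taken together, the refactor moves the inline scoring checks and the sign guess out of SklearnCvExperiment into the shared helpers. A condensed, hedged sketch of the resulting flow; the estimator, data, and metric are illustrative, and only the lines visible in the hunks above are paraphrased:

from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier

from hyperactive.experiment.integrations._skl_metrics import (
    _coerce_to_scorer,
    _guess_sign_of_sklmetric,
)

X, y = load_iris(return_X_y=True)
est = DecisionTreeClassifier()

# what the __init__ hunk now does in a single call, replacing the removed inline checks
scorer = _coerce_to_scorer(accuracy_score, est)

# the sign helper answers "is higher better?" for the raw metric
sign = _guess_sign_of_sklmetric(accuracy_score)  # 1: accuracy_score is higher-is-better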