Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

MNT Use check_scalar in AdaBoostClassifier #21442

Merged
Merged
Show file tree
Hide file tree
Changes from 13 commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
22 changes: 21 additions & 1 deletion sklearn/ensemble/_weight_boosting.py
Expand Up @@ -25,6 +25,7 @@

from abc import ABCMeta, abstractmethod

import numbers
import numpy as np

import warnings
Expand All @@ -36,6 +37,7 @@

from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..utils import check_random_state, _safe_indexing
from ..utils import check_scalar
from ..utils.extmath import softmax
from ..utils.extmath import stable_cumsum
from ..metrics import accuracy_score, r2_score
Expand Down Expand Up @@ -478,9 +480,27 @@ def fit(self, X, y, sample_weight=None):
self : object
Fitted estimator.
"""
check_scalar(
self.n_estimators,
"n_estimators",
target_type=numbers.Integral,
min_val=1,
include_boundaries="left",
)

check_scalar(
self.learning_rate,
"learning_rate",
target_type=numbers.Real,
min_val=0,
include_boundaries="neither",
)

# Check that algorithm is supported
if self.algorithm not in ("SAMME", "SAMME.R"):
raise ValueError("algorithm %s is not supported" % self.algorithm)
raise ValueError(
    f"Algorithm must be 'SAMME' or 'SAMME.R'. Got {self.algorithm} instead."
)

# Fit
return super().fit(X, y, sample_weight)
Expand Down
18 changes: 18 additions & 0 deletions sklearn/ensemble/tests/test_weight_boosting.py
Expand Up @@ -549,6 +549,24 @@ def test_adaboostregressor_sample_weight():
assert score_no_outlier == pytest.approx(score_with_weight)


@pytest.mark.parametrize(
    "params, err_type, err_msg",
    [
        # n_estimators is validated with min_val=1, include_boundaries="left",
        # so check_scalar reports an inclusive bound (">= 1").
        ({"n_estimators": 0}, ValueError, "n_estimators == 0, must be >= 1"),
        # learning_rate is validated with min_val=0, include_boundaries="neither",
        # so check_scalar reports a strict bound ("> 0") — not ">= 1".
        ({"learning_rate": 0}, ValueError, "learning_rate == 0, must be > 0"),
        (
            {"algorithm": "unknown"},
            ValueError,
            "Algorithm must be 'SAMME' or 'SAMME.R'.",
        ),
    ],
)
def test_adaboost_classifier_params_validation(params, err_type, err_msg):
    """Check the parameters validation in `AdaBoostClassifier`.

    Each invalid constructor argument must make ``fit`` raise ``err_type``
    with a message matching the ``err_msg`` regex (``pytest.raises`` uses
    ``re.search``, so ``err_msg`` must appear in the actual message).
    """
    with pytest.raises(err_type, match=err_msg):
        AdaBoostClassifier(**params).fit(X, y_class)  # args are from toy sample


@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_adaboost_consistent_predict(algorithm):
# check that predict_proba and predict give consistent results
Expand Down