Skip to content

Commit

Permalink
Merge pull request #4844 from Alnusjaponica/remove-deprecated-arguments
Browse files Browse the repository at this point in the history
Remove arguments deprecated as of `LightGBM>=4.0`
  • Loading branch information
toshihikoyanase committed Aug 17, 2023
2 parents b4e1925 + 501b7e0 commit 2470936
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 75 deletions.
61 changes: 1 addition & 60 deletions optuna/integration/_lightgbm_tuner/optimize.py
Original file line number Diff line number Diff line change
Expand Up @@ -358,14 +358,11 @@ def __init__(
self,
params: dict[str, Any],
train_set: "lgb.Dataset",
callbacks: list[Callable[..., Any]],
callbacks: list[Callable[..., Any]] | None = None,
num_boost_round: int = 1000,
fobj: Callable[..., Any] | None = None,
feval: Callable[..., Any] | None = None,
feature_name: str = "auto",
categorical_feature: str = "auto",
early_stopping_rounds: int | None = None,
verbose_eval: bool | int | str | None = None,
time_budget: int | None = None,
sample_size: int | None = None,
study: optuna.study.Study | None = None,
Expand All @@ -379,26 +376,9 @@ def __init__(
_imports.check()

params = copy.deepcopy(params)
if fobj is not None:
if "objective" not in params:
params["objective"] = fobj
else:
warnings.warn(
"Objective function is specified by param['objective'] and therefore `fobj`"
" will be ignored.",
UserWarning,
)

# Handling alias metrics.
_handling_alias_metrics(params)

if early_stopping_rounds is not None:
callbacks.append(
lgb.early_stopping(
stopping_rounds=early_stopping_rounds, verbose=bool(verbose_eval)
)
)

args = [params, train_set]
kwargs: dict[str, Any] = dict(
num_boost_round=num_boost_round,
Expand Down Expand Up @@ -778,14 +758,9 @@ def __init__(
num_boost_round: int = 1000,
valid_sets: "VALID_SET_TYPE" | None = None,
valid_names: Any | None = None,
fobj: Callable[..., Any] | None = None,
feval: Callable[..., Any] | None = None,
feature_name: str = "auto",
categorical_feature: str = "auto",
early_stopping_rounds: int | None = None,
evals_result: dict[Any, Any] | None = None,
verbose_eval: bool | int | str | None = "warn",
learning_rates: list[float] | None = None,
keep_training_booster: bool = False,
callbacks: list[Callable[..., Any]] | None = None,
time_budget: int | None = None,
Expand All @@ -798,33 +773,14 @@ def __init__(
*,
optuna_seed: int | None = None,
) -> None:
if callbacks is None:
callbacks = []

if evals_result is not None:
callbacks.append(lgb.record_evaluation(evals_result))

if learning_rates is not None:
callbacks.append(lgb.reset_parameter(learning_rate=learning_rates))

if verbose_eval == "warn":
verbose_eval = False if callbacks else True
if verbose_eval is True:
callbacks.append(lgb.log_evaluation())
elif isinstance(verbose_eval, int):
callbacks.append(lgb.log_evaluation(verbose_eval))

super().__init__(
params,
train_set,
callbacks=callbacks,
num_boost_round=num_boost_round,
fobj=fobj,
feval=feval,
feature_name=feature_name,
categorical_feature=categorical_feature,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
time_budget=time_budget,
sample_size=sample_size,
study=study,
Expand Down Expand Up @@ -992,14 +948,10 @@ def __init__(
nfold: int = 5,
stratified: bool = True,
shuffle: bool = True,
fobj: Callable[..., Any] | None = None,
feval: Callable[..., Any] | None = None,
feature_name: str = "auto",
categorical_feature: str = "auto",
early_stopping_rounds: int | None = None,
fpreproc: Callable[..., Any] | None = None,
verbose_eval: bool | int | None = None,
show_stdv: bool = True,
seed: int = 0,
callbacks: list[Callable[..., Any]] | None = None,
time_budget: int | None = None,
Expand All @@ -1013,25 +965,14 @@ def __init__(
*,
optuna_seed: int | None = None,
) -> None:
if callbacks is None:
callbacks = []

if verbose_eval is True:
callbacks.append(lgb.log_evaluation(show_stdv=show_stdv))
elif isinstance(verbose_eval, int):
callbacks.append(lgb.log_evaluation(period=verbose_eval, show_stdv=show_stdv))

super().__init__(
params,
train_set,
callbacks=callbacks,
num_boost_round=num_boost_round,
fobj=fobj,
feval=feval,
feature_name=feature_name,
categorical_feature=categorical_feature,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
time_budget=time_budget,
sample_size=sample_size,
study=study,
Expand Down
39 changes: 24 additions & 15 deletions tests/integration_tests/lightgbm_tuner_tests/test_optimize.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@


with try_import():
from lightgbm import early_stopping
from lightgbm import log_evaluation
import sklearn.datasets
from sklearn.model_selection import KFold
Expand Down Expand Up @@ -269,8 +270,8 @@ def _get_tuner_object(
params,
train_set,
num_boost_round=5,
early_stopping_rounds=2,
valid_sets=dummy_dataset,
callbacks=[early_stopping(stopping_rounds=2)],
study=study,
**kwargs_options,
)
Expand All @@ -286,7 +287,12 @@ def test_no_eval_set_args(self) -> None:
params: dict[str, Any] = {}
train_set = lgb.Dataset(None)
with pytest.raises(ValueError) as excinfo:
lgb.LightGBMTuner(params, train_set, num_boost_round=5, early_stopping_rounds=2)
lgb.LightGBMTuner(
params,
train_set,
num_boost_round=5,
callbacks=[early_stopping(stopping_rounds=2)],
)

assert excinfo.type == ValueError
assert str(excinfo.value) == "`valid_sets` is required."
Expand All @@ -312,7 +318,7 @@ def test_inconsistent_study_direction(self, metric: str, study_direction: str) -
train_set,
valid_sets=[train_set, valid_set],
num_boost_round=5,
early_stopping_rounds=2,
callbacks=[early_stopping(stopping_rounds=2)],
study=study,
)

Expand All @@ -333,7 +339,7 @@ def test__parse_args_wrapper_args(self) -> None:
params,
train_set,
num_boost_round=12,
early_stopping_rounds=10,
callbacks=[early_stopping(stopping_rounds=10)],
valid_sets=val_set,
time_budget=600,
sample_size=1000,
Expand Down Expand Up @@ -438,9 +444,8 @@ def test_tune_num_leaves_negative_max_depth(self) -> None:
params,
train_dataset,
num_boost_round=3,
early_stopping_rounds=2,
valid_sets=valid_dataset,
callbacks=[log_evaluation(-1)],
callbacks=[early_stopping(stopping_rounds=2), log_evaluation(-1)],
)
runner.tune_num_leaves()
assert len(runner.study.trials) == 20
Expand Down Expand Up @@ -742,9 +747,8 @@ def test_tune_best_score_reproducibility(self) -> None:
params,
train,
valid_sets=valid,
early_stopping_rounds=3,
callbacks=[early_stopping(stopping_rounds=3), log_evaluation(-1)],
optuna_seed=10,
callbacks=[log_evaluation(-1)],
)
tuner_first_try.run()
best_score_first_try = tuner_first_try.best_score
Expand All @@ -753,9 +757,8 @@ def test_tune_best_score_reproducibility(self) -> None:
params,
train,
valid_sets=valid,
early_stopping_rounds=3,
callbacks=[early_stopping(stopping_rounds=3), log_evaluation(-1)],
optuna_seed=10,
callbacks=[log_evaluation(-1)],
)
tuner_second_try.run()
best_score_second_try = tuner_second_try.best_score
Expand All @@ -779,11 +782,13 @@ def _get_tunercv_object(
study: optuna.study.Study | None = None,
) -> LightGBMTunerCV:
# Required keyword arguments.
kwargs: dict[str, Any] = dict(num_boost_round=5, early_stopping_rounds=2, study=study)
kwargs: dict[str, Any] = dict(num_boost_round=5, study=study)
kwargs.update(kwargs_options)

train_set = train_set or mock.MagicMock(spec="lgb.Dataset")
runner = LightGBMTunerCV(params, train_set, **kwargs)
runner = LightGBMTunerCV(
params, train_set, callbacks=[early_stopping(stopping_rounds=2)], **kwargs
)
return runner

def test_deprecated_args(self) -> None:
Expand All @@ -808,7 +813,11 @@ def test_inconsistent_study_direction(self, metric: str, study_direction: str) -
study = optuna.create_study(direction=study_direction)
with pytest.raises(ValueError) as excinfo:
LightGBMTunerCV(
params, train_set, num_boost_round=5, early_stopping_rounds=2, study=study
params,
train_set,
num_boost_round=5,
callbacks=[early_stopping(stopping_rounds=2)],
study=study,
)

assert excinfo.type == ValueError
Expand Down Expand Up @@ -1067,7 +1076,7 @@ def test_tune_best_score_reproducibility(self) -> None:
tuner_first_try = lgb.LightGBMTunerCV(
params,
train,
early_stopping_rounds=3,
callbacks=[early_stopping(stopping_rounds=3)],
folds=KFold(n_splits=3),
optuna_seed=10,
)
Expand All @@ -1077,7 +1086,7 @@ def test_tune_best_score_reproducibility(self) -> None:
tuner_second_try = lgb.LightGBMTunerCV(
params,
train,
early_stopping_rounds=3,
callbacks=[early_stopping(stopping_rounds=3)],
folds=KFold(n_splits=3),
optuna_seed=10,
)
Expand Down

0 comments on commit 2470936

Please sign in to comment.