
[python-package] fix mypy errors related to eval result parsing in callbacks (#6096)
jameslamb committed Sep 13, 2023
1 parent 0b3d9da commit 163416d
Showing 1 changed file with 19 additions and 3 deletions.
22 changes: 19 additions & 3 deletions python-package/lightgbm/callback.py
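
The guards added below exist because `CallbackEnv.evaluation_result_list` is an Optional field: mypy rejects iterating over it, or taking its length, until a `None` check narrows the type away from `None`. A minimal sketch of that narrowing pattern, using a hypothetical `_summarize` helper that is not part of LightGBM:

    from typing import List, Optional

    def _summarize(results: Optional[List[str]]) -> None:
        # Without this guard, mypy flags the loop below because `results`
        # may still be None at that point.
        if results is None:
            raise RuntimeError("expected evaluation results, found None")
        for item in results:  # mypy has narrowed `results` to List[str] here
            print(item)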
@@ -131,6 +131,11 @@ def __init__(self, eval_result: _EvalResultDict) -> None:
         self.eval_result = eval_result
 
     def _init(self, env: CallbackEnv) -> None:
+        if env.evaluation_result_list is None:
+            raise RuntimeError(
+                "record_evaluation() callback enabled but no evaluation results found. This is probably a bug in LightGBM. "
+                "Please report it at https://github.com/microsoft/LightGBM/issues"
+            )
         self.eval_result.clear()
         for item in env.evaluation_result_list:
             if len(item) == 4:  # regular train
@@ -147,6 +152,11 @@ def _init(self, env: CallbackEnv) -> None:
     def __call__(self, env: CallbackEnv) -> None:
         if env.iteration == env.begin_iteration:
             self._init(env)
+        if env.evaluation_result_list is None:
+            raise RuntimeError(
+                "record_evaluation() callback enabled but no evaluation results found. This is probably a bug in LightGBM. "
+                "Please report it at https://github.com/microsoft/LightGBM/issues"
+            )
         for item in env.evaluation_result_list:
             if len(item) == 4:
                 data_name, eval_name, result = item[:3]
@@ -285,6 +295,10 @@ def _is_train_set(self, ds_name: str, eval_name: str, train_name: str) -> bool:
         return (ds_name == "cv_agg" and eval_name == "train") or ds_name == train_name
 
     def _init(self, env: CallbackEnv) -> None:
+        if env.evaluation_result_list is None or env.evaluation_result_list == []:
+            raise ValueError(
+                "For early stopping, at least one dataset and eval metric is required for evaluation"
+            )
         is_dart = any(env.params.get(alias, "") == 'dart' for alias in _ConfigAliases.get("boosting"))
         only_train_set = (
             len(env.evaluation_result_list) == 1
@@ -300,9 +314,6 @@ def _init(self, env: CallbackEnv) -> None:
         elif only_train_set:
             _log_warning('Only training set found, disabling early stopping.')
             return
-        if not env.evaluation_result_list:
-            raise ValueError('For early stopping, '
-                             'at least one dataset and eval metric is required for evaluation')
 
         if self.stopping_rounds <= 0:
             raise ValueError("stopping_rounds should be greater than zero.")
@@ -364,6 +375,11 @@ def __call__(self, env: CallbackEnv) -> None:
             self._init(env)
         if not self.enabled:
             return
+        if env.evaluation_result_list is None:
+            raise RuntimeError(
+                "early_stopping() callback enabled but no evaluation results found. This is probably a bug in LightGBM. "
+                "Please report it at https://github.com/microsoft/LightGBM/issues"
+            )
         # self.best_score_list is initialized to an empty list
         first_time_updating_best_score_list = (self.best_score_list == [])
         for i in range(len(env.evaluation_result_list)):
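
For context, a hedged usage sketch of the two callbacks this commit touches, with synthetic data and illustrative parameter values; when `valid_sets` is supplied, `env.evaluation_result_list` is populated and the new guards are never triggered:

    import numpy as np
    import lightgbm as lgb

    rng = np.random.default_rng(0)
    X, y = rng.random((500, 10)), rng.random(500)
    train_data = lgb.Dataset(X[:400], label=y[:400])
    valid_data = lgb.Dataset(X[400:], label=y[400:], reference=train_data)

    eval_result = {}  # filled in-place by record_evaluation()
    booster = lgb.train(
        {"objective": "regression", "metric": "l2", "verbosity": -1},
        train_data,
        num_boost_round=50,
        valid_sets=[valid_data],
        valid_names=["valid"],
        callbacks=[
            lgb.record_evaluation(eval_result),
            lgb.early_stopping(stopping_rounds=5),
        ],
    )
    print(eval_result["valid"]["l2"][:3])  # per-iteration l2 on the validation set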
