list all metrics in the case of wrong eval_metric (#361)
pplonski committed Mar 31, 2021
1 parent c461107 commit 8f6d7c1
Showing 1 changed file with 3 additions and 3 deletions.
supervised/base_automl.py (3 additions, 3 deletions)
@@ -1783,15 +1783,15 @@ def _validate_eval_metric(self):
         ]:
             raise ValueError(
                 f"Metric {self.eval_metric} is not allowed in ML task: {self._get_ml_task()}. \
-                    Use 'logloss'"
+                    Use 'logloss', 'auc', 'f1', or 'average_precision'"
             )
 
         elif (
             self._get_ml_task() == MULTICLASS_CLASSIFICATION
         ) and self.eval_metric not in ["logloss", "f1"]:
             raise ValueError(
                 f"Metric {self.eval_metric} is not allowed in ML task: {self._get_ml_task()}. \
-                    Use 'logloss'"
+                    Use 'logloss', or 'f1'"
             )
 
         elif self._get_ml_task() == REGRESSION and self.eval_metric not in [
@@ -1805,7 +1805,7 @@ def _validate_eval_metric(self):
         ]:
             raise ValueError(
                 f"Metric {self.eval_metric} is not allowed in ML task: {self._get_ml_task()}. \
-                    Use 'rmse'"
+                    Use 'rmse', 'mse', 'mae', 'r2', 'mape', 'spearman', or 'pearson'"
            )
 
     def _validate_validation_strategy(self):
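For context, a minimal sketch of how the improved message reaches the user, assuming the public AutoML entry point of mljar-supervised and toy data (the exact point where eval_metric is validated, constructor vs. fit, may vary by version):

import numpy as np
from supervised.automl import AutoML  # mljar-supervised

# Toy multiclass data; the metric check should fail before any real training.
X = np.random.rand(50, 4)
y = np.random.randint(0, 3, size=50)  # three classes -> multiclass classification

# 'accuracy' is not an allowed multiclass eval_metric in this version.
automl = AutoML(eval_metric="accuracy")

try:
    automl.fit(X, y)
except ValueError as err:
    # After this commit the error lists every allowed metric,
    # e.g. "... Use 'logloss', or 'f1'" instead of just "... Use 'logloss'".
    print(err)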