4 changes: 2 additions & 2 deletions autosklearn/evaluation/abstract_evaluator.py
@@ -213,9 +213,9 @@ def _loss(self, y_true, y_hat, all_scoring_functions=None):
             all_scoring_functions=all_scoring_functions)

         if hasattr(score, '__len__'):
-            err = {key: 1 - score[key] for key in score}
+            err = {key: self.metric._optimum - score[key] for key in score}
         else:
-            err = 1 - score
+            err = self.metric._optimum - score

         return err

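Before this change the evaluator hard-coded the best achievable score as 1; with the metric's new `_optimum` attribute it minimises `optimum - score` for any metric. A minimal sketch of the arithmetic, assuming a scorer built with the updated `make_scorer` below (the helper name is illustrative, not part of the PR):

def loss_from_score(optimum, score):
    # The evaluator minimises err = optimum - score, so a perfect score gives 0.
    return optimum - score

# accuracy (optimum=1): a score of 0.9 becomes a loss of 0.1
print(loss_from_score(1, 0.9))    # 0.1

# mean_squared_error (optimum=0, greater_is_better=False): the scorer returns
# -MSE, so the loss becomes 0 - (-MSE) = MSE
print(loss_from_score(0, -4.2))   # 4.2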
50 changes: 35 additions & 15 deletions autosklearn/metrics/__init__.py
@@ -11,10 +11,11 @@


 class Scorer(object, metaclass=ABCMeta):
-    def __init__(self, name, score_func, sign, kwargs):
+    def __init__(self, name, score_func, optimum, sign, kwargs):
         self.name = name
         self._kwargs = kwargs
         self._score_func = score_func
+        self._optimum = optimum
         self._sign = sign

     @abstractmethod
@@ -133,8 +134,8 @@ def __call__(self, y_true, y_pred, sample_weight=None):
         return self._sign * self._score_func(y_true, y_pred, **self._kwargs)


-def make_scorer(name, score_func, greater_is_better=True, needs_proba=False,
-                needs_threshold=False, **kwargs):
+def make_scorer(name, score_func, optimum=1, greater_is_better=True,
+                needs_proba=False, needs_threshold=False, **kwargs):
     """Make a scorer from a performance metric or loss function.

     Factory inspired by scikit-learn which wraps scikit-learn scoring functions
@@ -146,6 +147,10 @@ def make_scorer(name, score_func, greater_is_better=True, needs_proba=False,
         Score function (or loss function) with signature
         ``score_func(y, y_pred, **kwargs)``.

+    optimum : int or float, default=1
+        The best score achievable by the score function, i.e. maximum in case of
+        scorer function and minimum in case of loss function.
+
     greater_is_better : boolean, default=True
         Whether score_func is a score function (default), meaning high is good,
         or a loss function, meaning low is good. In the latter case, the
@@ -174,41 +179,56 @@ def make_scorer(name, score_func, greater_is_better=True, needs_proba=False,
         cls = _ThresholdScorer
     else:
         cls = _PredictScorer
-    return cls(name, score_func, sign, kwargs)
+    return cls(name, score_func, optimum, sign, kwargs)


 # Standard regression scores
-r2 = make_scorer('r2', sklearn.metrics.r2_score)
+r2 = make_scorer('r2',
+                 sklearn.metrics.r2_score)
 mean_squared_error = make_scorer('mean_squared_error',
                                  sklearn.metrics.mean_squared_error,
+                                 optimum=0,
                                  greater_is_better=False)
 mean_absolute_error = make_scorer('mean_absolute_error',
                                   sklearn.metrics.mean_absolute_error,
+                                  optimum=0,
                                   greater_is_better=False)
 median_absolute_error = make_scorer('median_absolute_error',
                                     sklearn.metrics.median_absolute_error,
+                                    optimum=0,
                                     greater_is_better=False)

 # Standard Classification Scores
-accuracy = make_scorer('accuracy', sklearn.metrics.accuracy_score)
+accuracy = make_scorer('accuracy',
+                       sklearn.metrics.accuracy_score)
 balanced_accuracy = make_scorer('balanced_accuracy',
                                 classification_metrics.balanced_accuracy)
-f1 = make_scorer('f1', sklearn.metrics.f1_score)
+f1 = make_scorer('f1',
+                 sklearn.metrics.f1_score)

 # Score functions that need decision values
-roc_auc = make_scorer('roc_auc', sklearn.metrics.roc_auc_score,
-                      greater_is_better=True, needs_threshold=True)
+roc_auc = make_scorer('roc_auc',
+                      sklearn.metrics.roc_auc_score,
+                      greater_is_better=True,
+                      needs_threshold=True)
 average_precision = make_scorer('average_precision',
                                 sklearn.metrics.average_precision_score,
                                 needs_threshold=True)
-precision = make_scorer('precision', sklearn.metrics.precision_score)
-recall = make_scorer('recall', sklearn.metrics.recall_score)
+precision = make_scorer('precision',
+                        sklearn.metrics.precision_score)
+recall = make_scorer('recall',
+                     sklearn.metrics.recall_score)

 # Score function for probabilistic classification
-log_loss = make_scorer('log_loss', sklearn.metrics.log_loss,
-                       greater_is_better=False, needs_proba=True)
-pac_score = make_scorer('pac_score', classification_metrics.pac_score,
-                        greater_is_better=True, needs_proba=True)
+log_loss = make_scorer('log_loss',
+                       sklearn.metrics.log_loss,
+                       optimum=0,
+                       greater_is_better=False,
+                       needs_proba=True)
+pac_score = make_scorer('pac_score',
+                        classification_metrics.pac_score,
+                        greater_is_better=True,
+                        needs_proba=True)
 # TODO what about mathews correlation coefficient etc?

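Because `make_scorer` now takes the metric's best achievable value, loss-style metrics whose optimum is 0 stay well defined inside the evaluator. A hedged sketch of a custom scorer using the new argument (the `rmse` helper is hypothetical, not part of this PR):

import numpy as np
import autosklearn.metrics


def rmse(y_true, y_pred):
    # Root mean squared error: 0 is the best achievable value.
    return np.sqrt(np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2))


rmse_scorer = autosklearn.metrics.make_scorer(
    name='rmse',
    score_func=rmse,
    optimum=0,                 # best score the metric can reach
    greater_is_better=False,   # it is a loss, so lower is better
)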
2 changes: 2 additions & 0 deletions example/example_metrics.py
@@ -55,6 +55,7 @@ def main():
     accuracy_scorer = autosklearn.metrics.make_scorer(
         name="accu",
         score_func=accuracy,
+        optimum=1,
         greater_is_better=True,
         needs_proba=False,
         needs_threshold=False,
@@ -77,6 +78,7 @@ def main():
     accuracy_scorer = autosklearn.metrics.make_scorer(
         name="accu_add",
         score_func=accuracy_wk,
+        optimum=1,
         greater_is_better=True,
         needs_proba=False,
         needs_threshold=False,
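With `optimum=1` these custom accuracy scorers keep the loss the evaluator minimised before this change (`1 - accuracy`), so the example's behaviour is unchanged; the argument only matters for metrics whose best value is not 1.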