Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added Additional Regression Objectives #100

Merged
merged 18 commits into from
Oct 21, 2019
Merged
Show file tree
Hide file tree
Changes from 11 commits
Commits
Show all changes
18 commits
Select commit Hold shift + click to select a range
bdb66a8
Added MSE, MaxError, and ExpVariance metrics
christopherbunn Sep 23, 2019
b223ac0
Added MAE, MSLE, and MedianAE objectives
christopherbunn Sep 24, 2019
96f64e6
Merge branch 'master' into addl-reg-metrics
christopherbunn Sep 24, 2019
2160ba3
Added new metrics to standard_metrics init
christopherbunn Sep 25, 2019
fe4f92f
Merge branch 'addl-reg-metrics' of github.com:FeatureLabs/evalml into…
christopherbunn Sep 25, 2019
2f12f65
Updated options in get_objectives and changed corresponding test
christopherbunn Sep 25, 2019
9784919
Merge branch 'master' of github.com:FeatureLabs/evalml into addl-reg-…
christopherbunn Sep 30, 2019
a7b0bf8
Alphabetized imports and fixed greater_is_better for ExpVariance
christopherbunn Sep 30, 2019
cd8c717
Merge branch 'master' into addl-reg-metrics
christopherbunn Oct 8, 2019
8e36432
Merge branch 'master' of github.com:FeatureLabs/evalml into addl-reg-…
christopherbunn Oct 10, 2019
7c45011
Added new metrics to API ref
christopherbunn Oct 10, 2019
7574398
Changed objective test to use ProblemType enums
christopherbunn Oct 11, 2019
8f85b76
Merge branch 'master' into addl-reg-metrics
christopherbunn Oct 11, 2019
40852de
Fixed lint errors
christopherbunn Oct 14, 2019
ee71a80
Merge branch 'master' into addl-reg-metrics
christopherbunn Oct 16, 2019
4899457
Merge branch 'master' into addl-reg-metrics
kmax12 Oct 17, 2019
4050500
Changed standard metric name to be consistent
christopherbunn Oct 18, 2019
5589f75
Merge branch 'master' of github.com:FeatureLabs/evalml into addl-reg-…
christopherbunn Oct 21, 2019
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 6 additions & 3 deletions docs/source/api_reference.rst
Original file line number Diff line number Diff line change
Expand Up @@ -130,6 +130,12 @@ Regression
:nosignatures:

R2
MAE
MSE
MSLE
MedianAE
MaxError
ExpVariance

.. currentmodule:: evalml.problem_types

Expand All @@ -155,6 +161,3 @@ Tuners
:nosignatures:

SKOptTuner



6 changes: 4 additions & 2 deletions evalml/objectives/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,10 @@
from .fraud_cost import FraudCost
from .lead_scoring import LeadScoring
from .standard_metrics import (
F1, F1Micro, F1Macro, F1Weighted, Precision, PrecisionMicro, PrecisionMacro, PrecisionWeighted, Recall, RecallMicro, RecallMacro, RecallWeighted,
AUC, AUCMicro, AUCMacro, AUCWeighted, LogLoss, MCC, R2
AUC, AUCMacro, AUCMicro, AUCWeighted, ExpVariance, F1, F1Macro, F1Micro,
F1Weighted, LogLoss, MAE, MaxError, MCC, MedianAE, MSE, MSLE, Precision,
PrecisionMacro, PrecisionMicro, PrecisionWeighted, R2, Recall, RecallMacro,
RecallMicro, RecallWeighted
)
from .objective_base import ObjectiveBase
from .utils import get_objective, get_objectives
Expand Down
72 changes: 72 additions & 0 deletions evalml/objectives/standard_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -239,6 +239,78 @@ def score(self, y_predicted, y_true):
return metrics.r2_score(y_true, y_predicted)


class MAE(ObjectiveBase):
    """Mean absolute error objective for regression problems.

    Lower values indicate better predictions, so greater_is_better is False.
    """

    name = "MAE"
    needs_fitting = False
    need_proba = False
    greater_is_better = False
    problem_types = [ProblemTypes.REGRESSION]

    def score(self, y_predicted, y_true):
        """Return the mean absolute error between predictions and true values."""
        error = metrics.mean_absolute_error(y_true, y_predicted)
        return error


class MSE(ObjectiveBase):
    """Mean squared error objective for regression problems.

    Lower values indicate better predictions, so greater_is_better is False.
    """

    name = "MSE"
    needs_fitting = False
    need_proba = False
    greater_is_better = False
    problem_types = [ProblemTypes.REGRESSION]

    def score(self, y_predicted, y_true):
        """Return the mean squared error between predictions and true values."""
        error = metrics.mean_squared_error(y_true, y_predicted)
        return error


class MSLE(ObjectiveBase):
    """Mean squared log error objective for regression problems.

    Lower values indicate better predictions, so greater_is_better is False.
    """

    name = "MSLE"
    needs_fitting = False
    need_proba = False
    greater_is_better = False
    problem_types = [ProblemTypes.REGRESSION]

    def score(self, y_predicted, y_true):
        """Return the mean squared log error between predictions and true values."""
        error = metrics.mean_squared_log_error(y_true, y_predicted)
        return error


class MedianAE(ObjectiveBase):
    """Median absolute error objective for regression problems.

    Lower values indicate better predictions, so greater_is_better is False.
    """
    needs_fitting = False
    greater_is_better = False
    need_proba = False
    # "MedianAE" (not "MedAE") so the display name matches the class name,
    # consistent with the other regression objectives (MAE, MSE, MaxError, ...).
    name = "MedianAE"
    problem_types = [ProblemTypes.REGRESSION]

    def score(self, y_predicted, y_true):
        """Return the median absolute error between predictions and true values."""
        return metrics.median_absolute_error(y_true, y_predicted)


class MaxError(ObjectiveBase):
    """Maximum residual error objective for regression problems.

    Lower values indicate better predictions, so greater_is_better is False.
    """

    name = "MaxError"
    needs_fitting = False
    need_proba = False
    greater_is_better = False
    problem_types = [ProblemTypes.REGRESSION]

    def score(self, y_predicted, y_true):
        """Return the largest absolute residual between predictions and true values."""
        error = metrics.max_error(y_true, y_predicted)
        return error


class ExpVariance(ObjectiveBase):
    """Explained variance objective for regression problems.

    Higher values indicate better predictions, so greater_is_better is True.
    """

    name = "ExpVariance"
    needs_fitting = False
    need_proba = False
    greater_is_better = True
    problem_types = [ProblemTypes.REGRESSION]

    def score(self, y_predicted, y_true):
        """Return the explained variance of the predictions against the true values."""
        score_value = metrics.explained_variance_score(y_true, y_predicted)
        return score_value


def _handle_predictions(y_true, y_pred):
if len(np.unique(y_true)) > 2:
classes = np.unique(y_true)
Expand Down
6 changes: 6 additions & 0 deletions evalml/objectives/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,12 @@
"log_loss": standard_metrics.LogLoss(),
"mcc": standard_metrics.MCC(),
"r2": standard_metrics.R2(),
"mae": standard_metrics.MAE(),
"mse": standard_metrics.MSE(),
"msle": standard_metrics.MSLE(),
"median_ae": standard_metrics.MedianAE(),
"max_error": standard_metrics.MaxError(),
"exp_var": standard_metrics.ExpVariance()
}


Expand Down
2 changes: 1 addition & 1 deletion evalml/tests/objective_tests/test_objectives.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ def test_get_objective():
def test_get_objectives_types():
    """Check get_objectives returns the expected objective count per problem type."""
    assert len(get_objectives('multiclass')) == 14
    assert len(get_objectives('binary')) == 6
    # 7 regression objectives: R2, MAE, MSE, MSLE, MedianAE, MaxError, ExpVariance.
    assert len(get_objectives('regression')) == 7


def test_binary_average(X_y):
Expand Down