Added Additional Regression Objectives #100

Merged 18 commits on Oct 21, 2019
Commits
bdb66a8 Added MSE, MaxError, and ExpVariance metrics (christopherbunn, Sep 23, 2019)
b223ac0 Added MAE, MSLE, and MedianAE objectives (christopherbunn, Sep 24, 2019)
96f64e6 Merge branch 'master' into addl-reg-metrics (christopherbunn, Sep 24, 2019)
2160ba3 Added new metrics to standard_metrics init (christopherbunn, Sep 25, 2019)
fe4f92f Merge branch 'addl-reg-metrics' of github.com:FeatureLabs/evalml into… (christopherbunn, Sep 25, 2019)
2f12f65 Updated options in get_objectives and changed corresponding test (christopherbunn, Sep 25, 2019)
9784919 Merge branch 'master' of github.com:FeatureLabs/evalml into addl-reg-… (christopherbunn, Sep 30, 2019)
a7b0bf8 Alphabetized imports and fixed greater_is_better for ExpVariance (christopherbunn, Sep 30, 2019)
cd8c717 Merge branch 'master' into addl-reg-metrics (christopherbunn, Oct 8, 2019)
8e36432 Merge branch 'master' of github.com:FeatureLabs/evalml into addl-reg-… (christopherbunn, Oct 10, 2019)
7c45011 Added new metrics to API ref (christopherbunn, Oct 10, 2019)
7574398 Changed objective test to use ProblemType enums (christopherbunn, Oct 11, 2019)
8f85b76 Merge branch 'master' into addl-reg-metrics (christopherbunn, Oct 11, 2019)
40852de Fixed lint errors (christopherbunn, Oct 14, 2019)
ee71a80 Merge branch 'master' into addl-reg-metrics (christopherbunn, Oct 16, 2019)
4899457 Merge branch 'master' into addl-reg-metrics (kmax12, Oct 17, 2019)
4050500 Changed standard metric name to be consistent (christopherbunn, Oct 18, 2019)
5589f75 Merge branch 'master' of github.com:FeatureLabs/evalml into addl-reg-… (christopherbunn, Oct 21, 2019)
docs/source/api_reference.rst (6 additions, 3 deletions)
@@ -130,6 +130,12 @@ Regression
    :nosignatures:

    R2
+   MAE
+   MSE
+   MSLE
+   MedianAE
+   MaxError
+   ExpVariance

 .. currentmodule:: evalml.problem_types

@@ -155,6 +161,3 @@ Tuners
    :nosignatures:

    SKOptTuner
-
-
-
evalml/__init__.py (7 additions, 7 deletions)
@@ -5,24 +5,24 @@
 # hack to prevent warnings from skopt
 # must import sklearn first
 import sklearn
-with warnings.catch_warnings():
-    warnings.simplefilter("ignore", DeprecationWarning)
-    import skopt

 import evalml.demos
+import evalml.model_types
 import evalml.objectives
+import evalml.pipelines
 # import evalml.models
 import evalml.preprocessing
 import evalml.problem_types
-import evalml.pipelines
-import evalml.model_types
 import evalml.utils

-from evalml.pipelines import list_model_types, save_pipeline, load_pipeline
 from evalml.models import AutoClassifier, AutoRegressor
+from evalml.pipelines import list_model_types, load_pipeline, save_pipeline

+with warnings.catch_warnings():
+    warnings.simplefilter("ignore", DeprecationWarning)
+    import skopt


 warnings.filterwarnings("ignore", category=DeprecationWarning)
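The reordering above alphabetizes the imports while preserving evalml's workaround for noisy skopt imports. A minimal sketch of that pattern, assuming scikit-learn and scikit-optimize are installed (simplified, not the file's exact code):

    import warnings

    import sklearn  # imported before skopt, per the "must import sklearn first" note

    with warnings.catch_warnings():
        # suppress DeprecationWarnings only while skopt is being imported
        warnings.simplefilter("ignore", DeprecationWarning)
        import skopt

    print(skopt.__version__)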
evalml/objectives/standard_metrics.py (72 additions, 0 deletions)
@@ -239,6 +239,78 @@ def score(self, y_predicted, y_true):
         return metrics.r2_score(y_true, y_predicted)


+class MAE(ObjectiveBase):
+    """Mean absolute error for regression"""
+    needs_fitting = False
+    greater_is_better = False
+    need_proba = False
+    name = "MAE"
+    problem_types = [ProblemTypes.REGRESSION]
+
+    def score(self, y_predicted, y_true):
+        return metrics.mean_absolute_error(y_true, y_predicted)
+
+
+class MSE(ObjectiveBase):
+    """Mean squared error for regression"""
+    needs_fitting = False
+    greater_is_better = False
+    need_proba = False
+    name = "MSE"
+    problem_types = [ProblemTypes.REGRESSION]
+
+    def score(self, y_predicted, y_true):
+        return metrics.mean_squared_error(y_true, y_predicted)
+
+
+class MSLE(ObjectiveBase):
+    """Mean squared log error for regression"""
+    needs_fitting = False
+    greater_is_better = False
+    need_proba = False
+    name = "MSLE"
+    problem_types = [ProblemTypes.REGRESSION]
+
+    def score(self, y_predicted, y_true):
+        return metrics.mean_squared_log_error(y_true, y_predicted)
+
+
+class MedianAE(ObjectiveBase):
+    """Median absolute error for regression"""
+    needs_fitting = False
+    greater_is_better = False
+    need_proba = False
+    name = "MedianAE"
+    problem_types = [ProblemTypes.REGRESSION]
+
+    def score(self, y_predicted, y_true):
+        return metrics.median_absolute_error(y_true, y_predicted)
+
+
+class MaxError(ObjectiveBase):
+    """Maximum residual error for regression"""
+    needs_fitting = False
+    greater_is_better = False
+    need_proba = False
+    name = "MaxError"
+    problem_types = [ProblemTypes.REGRESSION]
+
+    def score(self, y_predicted, y_true):
+        return metrics.max_error(y_true, y_predicted)
+
+
+class ExpVariance(ObjectiveBase):
+    """Explained variance score for regression"""
+    needs_fitting = False
+    greater_is_better = True
+    need_proba = False
+    name = "ExpVariance"
+    problem_types = [ProblemTypes.REGRESSION]
+
+    def score(self, y_predicted, y_true):
+        return metrics.explained_variance_score(y_true, y_predicted)
+
+
 def _handle_predictions(y_true, y_pred):
     if len(np.unique(y_true)) > 2:
         classes = np.unique(y_true)
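Each new objective's score() is a thin wrapper around the corresponding scikit-learn metric, with greater_is_better = False on the error metrics so a search knows to minimize them (ExpVariance is the exception, with greater_is_better = True). A quick sketch of what the wrapped calls return, using the small example arrays from scikit-learn's documentation (illustrative only, not part of the PR):

    import numpy as np
    from sklearn import metrics

    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = np.array([2.5, 0.0, 2.0, 8.0])

    print(metrics.mean_absolute_error(y_true, y_pred))       # MAE      -> 0.5
    print(metrics.mean_squared_error(y_true, y_pred))        # MSE      -> 0.375
    print(metrics.median_absolute_error(y_true, y_pred))     # MedianAE -> 0.5
    print(metrics.max_error(y_true, y_pred))                 # MaxError -> 1.0 (requires scikit-learn >= 0.21)
    print(metrics.explained_variance_score(y_true, y_pred))  # ExpVariance -> ~0.957

    # MSLE is omitted here: mean_squared_log_error raises a ValueError for
    # negative values such as the -0.5 in y_true above.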
evalml/objectives/utils.py (6 additions, 0 deletions)
@@ -23,6 +23,12 @@
     "log_loss": standard_metrics.LogLoss(),
     "mcc": standard_metrics.MCC(),
     "r2": standard_metrics.R2(),
+    "mae": standard_metrics.MAE(),
+    "mse": standard_metrics.MSE(),
+    "msle": standard_metrics.MSLE(),
+    "median_ae": standard_metrics.MedianAE(),
+    "max_error": standard_metrics.MaxError(),
+    "exp_var": standard_metrics.ExpVariance()
 }
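With these entries registered, the short names resolve to objective instances. A hedged sketch of the lookup, assuming get_objective is exposed from evalml.objectives as the test_get_objective test below suggests:

    from evalml.objectives import get_objective

    mse = get_objective("mse")
    print(mse.name)               # "MSE", per the class attribute above
    print(mse.greater_is_better)  # False, so a search minimizes this objective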
evalml/pipelines/__init__.py (1 addition, 1 deletion)
@@ -11,4 +11,4 @@
     list_model_types,
     load_pipeline,
     save_pipeline
-)
\ No newline at end of file
+)
evalml/tests/objective_tests/test_objectives.py (4 additions, 3 deletions)
@@ -8,6 +8,7 @@
     get_objectives
 )
 from evalml.pipelines import LogisticRegressionPipeline
+from evalml.problem_types import ProblemTypes


 def test_get_objective():
@@ -16,9 +17,9 @@ def test_get_objective():


 def test_get_objectives_types():
-    assert len(get_objectives('multiclass')) == 14
-    assert len(get_objectives('binary')) == 6
-    assert len(get_objectives('regression')) == 1
+    assert len(get_objectives(ProblemTypes.MULTICLASS)) == 14
+    assert len(get_objectives(ProblemTypes.BINARY)) == 6
+    assert len(get_objectives(ProblemTypes.REGRESSION)) == 7


 def test_binary_average(X_y):
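The updated regression count reflects the six new objectives plus the existing R2. A sketch mirroring the revised test, assuming get_objectives returns the instances from the OPTIONS dict and each carries the name attribute defined on its class:

    from evalml.objectives import get_objectives
    from evalml.problem_types import ProblemTypes

    regression_objectives = get_objectives(ProblemTypes.REGRESSION)
    assert len(regression_objectives) == 7
    print(sorted(o.name for o in regression_objectives))
    # expected to list the six new names plus R2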
evalml/utils/__init__.py (1 addition, 1 deletion)
@@ -1,3 +1,3 @@
 # flake8:noqa
-from .convert_time import convert_to_seconds
 from .logging_utils import Logger
+from .convert_time import convert_to_seconds