[MNT] Fix linting issues in performance_metrics module (#4320)
Fixes linting issues in the `performance_metrics` module.
Towards #4182.

- isort auto-fixes
- Fix pydocstyle issues (D100, D400, D103); see the command sketch below
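For orientation, a minimal sketch of how fixes like these are produced locally with the two tools the message names (illustrative invocations; the target path is assumed from the module name, and sktime's actual lint configuration may differ):

    isort sktime/performance_metrics/                                # re-sorts and groups imports in place
    pydocstyle --select=D100,D400,D103 sktime/performance_metrics/   # report only these three docstring checks

Here D100 flags a missing module docstring, D400 a docstring summary line that does not end in a period, and D103 a missing docstring in a public function; the hunks below correspond to these checks plus the isort reordering.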
SamiAlavi committed Mar 15, 2023
1 parent 2544272 commit 0119386
Showing 1 changed file with 36 additions and 29 deletions.
@@ -1,53 +1,55 @@
 #!/usr/bin/env python3 -u
 # -*- coding: utf-8 -*-
 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
+"""Tests for performance metrics forecasting."""
+
 __author__ = ["Tomasz Chodakowski", "Ryan Kuhns"]
 
-import pytest
 import numpy as np
 import pandas as pd
+import pytest
 from pandas.api.types import is_numeric_dtype
-from sktime.utils._testing.series import _make_series
 
 from sktime.performance_metrics.forecasting import (
-    MeanAbsoluteScaledError,
-    MedianAbsoluteScaledError,
-    MeanSquaredScaledError,
-    MedianSquaredScaledError,
+    GeometricMeanRelativeAbsoluteError,
+    GeometricMeanRelativeSquaredError,
     MeanAbsoluteError,
+    MeanAbsolutePercentageError,
+    MeanAbsoluteScaledError,
+    MeanAsymmetricError,
+    MeanRelativeAbsoluteError,
     MeanSquaredError,
+    MeanSquaredPercentageError,
+    MeanSquaredScaledError,
     MedianAbsoluteError,
-    MedianSquaredError,
-    MeanAbsolutePercentageError,
     MedianAbsolutePercentageError,
-    MeanSquaredPercentageError,
-    MedianSquaredPercentageError,
-    MeanRelativeAbsoluteError,
+    MedianAbsoluteScaledError,
     MedianRelativeAbsoluteError,
-    GeometricMeanRelativeAbsoluteError,
-    GeometricMeanRelativeSquaredError,
-    MeanAsymmetricError,
+    MedianSquaredError,
+    MedianSquaredPercentageError,
+    MedianSquaredScaledError,
     RelativeLoss,
-    mean_absolute_scaled_error,
-    median_absolute_scaled_error,
-    mean_squared_scaled_error,
-    median_squared_scaled_error,
+    geometric_mean_relative_absolute_error,
+    geometric_mean_relative_squared_error,
     mean_absolute_error,
+    mean_absolute_percentage_error,
+    mean_absolute_scaled_error,
+    mean_asymmetric_error,
+    mean_relative_absolute_error,
     mean_squared_error,
+    mean_squared_percentage_error,
+    mean_squared_scaled_error,
     median_absolute_error,
-    median_squared_error,
-    mean_absolute_percentage_error,
     median_absolute_percentage_error,
-    mean_squared_percentage_error,
-    median_squared_percentage_error,
-    mean_relative_absolute_error,
+    median_absolute_scaled_error,
     median_relative_absolute_error,
-    geometric_mean_relative_absolute_error,
-    geometric_mean_relative_squared_error,
-    mean_asymmetric_error,
+    median_squared_error,
+    median_squared_percentage_error,
+    median_squared_scaled_error,
     relative_loss,
 )
 from sktime.performance_metrics.tests._config import RANDOM_SEED
+from sktime.utils._testing.series import _make_series
 
 # For multiple comparisons of equality between functions and classes
 rng = np.random.default_rng(RANDOM_SEED)
@@ -369,7 +371,7 @@
 
 
 def _call_metrics(metric_func, metric_class, y_true, y_pred, y_train, y_pred_benchmark):
-    """Call function and class metrics and return results"""
+    """Call function and class metrics and return results."""
     class_attrs = metric_class.get_params()
     function_metric = metric_func(
         y_true,
@@ -390,8 +392,7 @@ def _call_metrics(metric_func, metric_class, y_true, y_pred, y_train, y_pred_benchmark):
 @pytest.mark.parametrize("metric_func_name", LOSS_RESULTS.keys())
 @pytest.mark.parametrize("n_test_case", [1, 2, 3])
 def test_univariate_loss_expected_zero(n_test_case, metric_func_name):
-    # Test cases where the expected loss is zero for perfect forecast.
-
+    """Test cases where the expected loss is zero for perfect forecast."""
     metric_class = LOSS_RESULTS[metric_func_name]["class"]
     metric_func = LOSS_RESULTS[metric_func_name]["func"]
 
@@ -426,6 +427,7 @@ def test_univariate_loss_expected_zero(n_test_case, metric_func_name):
 @pytest.mark.parametrize("metric_func_name", LOSS_RESULTS.keys())
 @pytest.mark.parametrize("n_test_case", [1, 2, 3])
 def test_univariate_loss_against_expected_value(n_test_case, metric_func_name):
+    """Test univariate loss against expected value."""
     metric_class = LOSS_RESULTS[metric_func_name]["class"]
     metric_func = LOSS_RESULTS[metric_func_name]["func"]
     true_loss = LOSS_RESULTS[metric_func_name][f"test_case_{n_test_case}"]
@@ -461,6 +463,7 @@ def test_univariate_loss_against_expected_value(n_test_case, metric_func_name):
 @pytest.mark.parametrize("metric_func_name", LOSS_RESULTS.keys())
 @pytest.mark.parametrize("random_state", RANDOM_STATES)
 def test_univariate_metric_function_class_equality(metric_func_name, random_state):
+    """Tests that loss function and class should return equal values."""
     metric_class = LOSS_RESULTS[metric_func_name]["class"]
     metric_func = LOSS_RESULTS[metric_func_name]["func"]
 
@@ -486,6 +489,7 @@ def test_univariate_metric_function_class_equality(metric_func_name, random_state):
 @pytest.mark.parametrize("random_state", RANDOM_STATES)
 @pytest.mark.parametrize("metric_func_name", LOSS_RESULTS.keys())
 def test_univariate_function_output_type(metric_func_name, random_state):
+    """Tests that loss function with univariate input should return scalar number."""
     metric_func = LOSS_RESULTS[metric_func_name]["func"]
     y = _make_series(n_timepoints=75, random_state=random_state)
     y_train, y_true = y.iloc[:50], y.iloc[50:]
@@ -505,6 +509,7 @@ def test_univariate_function_output_type(metric_func_name, random_state):
 
 @pytest.mark.parametrize("metric_func_name", LOSS_RESULTS.keys())
 def test_y_true_y_pred_inconsistent_n_outputs_raises_error(metric_func_name):
+    """Error should be raised when y_true and y_pred have different number of output."""
     metric_func = LOSS_RESULTS[metric_func_name]["func"]
     y = _make_series(n_timepoints=75, random_state=RANDOM_STATES[0])
     y_train, y_true = y.iloc[:50], y.iloc[50:]
@@ -523,6 +528,7 @@ def test_y_true_y_pred_inconsistent_n_outputs_raises_error(metric_func_name):
 
 @pytest.mark.parametrize("metric_func_name", LOSS_RESULTS.keys())
 def test_y_true_y_pred_inconsistent_n_timepoints_raises_error(metric_func_name):
+    """Error should be raised if input variables have inconsistent number of samples."""
     metric_func = LOSS_RESULTS[metric_func_name]["func"]
     y = _make_series(n_timepoints=75, random_state=RANDOM_STATES[0])
     y_train, y_true = y.iloc[:50], y.iloc[50:]
@@ -538,6 +544,7 @@ def test_y_true_y_pred_inconsistent_n_timepoints_raises_error(metric_func_name):
 
 @pytest.mark.parametrize("metric_func_name", LOSS_RESULTS.keys())
 def test_y_true_y_pred_inconsistent_n_variables_raises_error(metric_func_name):
+    """Error should be raised when y_true and y_pred have different number of output."""
     metric_func = LOSS_RESULTS[metric_func_name]["func"]
     y = _make_series(n_timepoints=75, random_state=RANDOM_STATES[0])
     y_train, y_true = y.iloc[:50], y.iloc[50:]
