Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[MNT] fix and sharpen soft dependency isolation logic for statsmodels and pmdarima #4443

Merged
merged 5 commits into from Apr 10, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
1 change: 1 addition & 0 deletions sktime/forecasting/ardl.py
Expand Up @@ -204,6 +204,7 @@ class ARDL(_StatsModelsAdapter):
"enforce_index_type": None, # index type that needs to be enforced in X/y
"capability:pred_int": False, # does forecaster implement proba forecasts?
"python_version": None, # PEP 440 python version specifier to limit versions
"python_dependencies": "statsmodels>=0.13.0",
}

def __init__(
Expand Down
25 changes: 12 additions & 13 deletions sktime/forecasting/base/tests/test_base.py
Expand Up @@ -14,17 +14,19 @@
from sktime.datatypes import check_is_mtype, convert
from sktime.datatypes._utilities import get_cutoff, get_window
from sktime.forecasting.arima import ARIMA
from sktime.forecasting.theta import ThetaForecaster
from sktime.forecasting.var import VAR
from sktime.utils._testing.hierarchical import _make_hierarchical
from sktime.utils._testing.panel import _make_panel
from sktime.utils._testing.series import _make_series
from sktime.utils.validation._dependencies import _check_soft_dependencies
from sktime.utils.validation._dependencies import _check_estimator_deps

PANEL_MTYPES = ["pd-multiindex", "nested_univ", "numpy3D"]
HIER_MTYPES = ["pd_multiindex_hier"]


@pytest.mark.skipif(
not _check_soft_dependencies("pmdarima", severity="none"),
not _check_estimator_deps(ARIMA, severity="none"),
reason="skip test if required soft dependency for ARIMA not available",
)
@pytest.mark.parametrize("mtype", PANEL_MTYPES)
Expand Down Expand Up @@ -73,7 +75,7 @@ def test_vectorization_series_to_panel(mtype):


@pytest.mark.skipif(
not _check_soft_dependencies("pmdarima", severity="none"),
not _check_estimator_deps(ARIMA, severity="none"),
reason="skip test if required soft dependency for ARIMA not available",
)
@pytest.mark.parametrize("mtype", HIER_MTYPES)
Expand Down Expand Up @@ -126,7 +128,7 @@ def test_vectorization_series_to_hier(mtype):


@pytest.mark.skipif(
not _check_soft_dependencies("pmdarima", severity="none"),
not _check_estimator_deps(ARIMA, severity="none"),
reason="skip test if required soft dependency for ARIMA not available",
)
@pytest.mark.parametrize("method", PROBA_DF_METHODS)
Expand Down Expand Up @@ -162,7 +164,7 @@ def test_vectorization_series_to_panel_proba(method, mtype):


@pytest.mark.skipif(
not _check_soft_dependencies("pmdarima", severity="none"),
not _check_estimator_deps(ARIMA, severity="none"),
reason="skip test if required soft dependency for ARIMA not available",
)
@pytest.mark.parametrize("method", PROBA_DF_METHODS)
Expand Down Expand Up @@ -198,7 +200,7 @@ def test_vectorization_series_to_hier_proba(method, mtype):


@pytest.mark.skipif(
not _check_soft_dependencies("pmdarima", severity="none"),
not _check_estimator_deps(ARIMA, severity="none"),
reason="skip test if required soft dependency for ARIMA not available",
)
@pytest.mark.parametrize("method", PROBA_DF_METHODS)
Expand All @@ -219,7 +221,7 @@ def test_vectorization_preserves_row_index_names(method):


@pytest.mark.skipif(
not _check_soft_dependencies("pmdarima", severity="none"),
not _check_estimator_deps(ARIMA, severity="none"),
reason="skip test if required soft dependency for ARIMA not available",
)
@pytest.mark.parametrize("mtype", HIER_MTYPES)
Expand Down Expand Up @@ -275,14 +277,12 @@ def test_vectorization_multivariate(mtype, exogeneous):


@pytest.mark.skipif(
not _check_soft_dependencies("statsmodels", severity="none"),
not _check_estimator_deps([ThetaForecaster, VAR], severity="none"),
reason="skip test if required soft dependency not available",
)
def test_dynamic_tags_reset_properly():
"""Test that dynamic tags are being reset properly."""
from sktime.forecasting.compose import MultiplexForecaster
from sktime.forecasting.theta import ThetaForecaster
from sktime.forecasting.var import VAR

# this forecaster will have the scitype:y tag set to "univariate"
f = MultiplexForecaster([("foo", ThetaForecaster()), ("var", VAR())])
Expand All @@ -295,14 +295,13 @@ def test_dynamic_tags_reset_properly():


@pytest.mark.skipif(
not _check_soft_dependencies("statsmodels", severity="none"),
not _check_estimator_deps(ThetaForecaster, severity="none"),
reason="skip test if required soft dependency not available",
)
def test_predict_residuals():
"""Test that predict_residuals has no side-effect."""
from sktime.forecasting.base import ForecastingHorizon
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.forecasting.theta import ThetaForecaster

y = _make_series(n_columns=1)
y_train, y_test = temporal_train_test_split(y)
Expand All @@ -318,7 +317,7 @@ def test_predict_residuals():


@pytest.mark.skipif(
not _check_soft_dependencies("statsmodels", severity="none"),
not _check_estimator_deps(ARIMA, severity="none"),
reason="skip test if required soft dependency not available",
)
@pytest.mark.parametrize("nullable_type", ["Int64", "Float64", "boolean"])
Expand Down
2 changes: 1 addition & 1 deletion sktime/forecasting/compose/_pipeline.py
Expand Up @@ -1571,7 +1571,7 @@ def get_test_params(cls, parameter_set="default"):
params1 = {"forecaster_X": fx, "forecaster_y": fy}

# example with probabilistic capability
if _check_soft_dependencies("statsmodels", severity="none"):
if _check_soft_dependencies("pmdarima", severity="none"):
from sktime.forecasting.arima import ARIMA

fy_proba = ARIMA()
Expand Down
4 changes: 2 additions & 2 deletions sktime/forecasting/model_selection/tests/test_tune.py
Expand Up @@ -35,7 +35,7 @@
from sktime.performance_metrics.forecasting.probabilistic import CRPS, PinballLoss
from sktime.transformations.series.detrend import Detrender
from sktime.utils._testing.hierarchical import _make_hierarchical
from sktime.utils.validation._dependencies import _check_soft_dependencies
from sktime.utils.validation._dependencies import _check_estimator_deps

TEST_METRICS = [MeanAbsolutePercentageError(symmetric=True), MeanSquaredError()]
TEST_METRICS_PROBA = [CRPS(), PinballLoss()]
Expand Down Expand Up @@ -171,7 +171,7 @@ def test_gscv_hierarchical(forecaster, param_grid, cv, scoring, error_score):


@pytest.mark.skipif(
not _check_soft_dependencies("statsmodels", severity="none"),
not _check_estimator_deps(ARIMA, severity="none"),
reason="skip test if required soft dependency for ARIMA not available",
)
@pytest.mark.parametrize("scoring", TEST_METRICS_PROBA)
Expand Down
14 changes: 7 additions & 7 deletions sktime/forecasting/tests/test_ardl.py
Expand Up @@ -8,11 +8,11 @@
from sktime.datasets import load_macroeconomic
from sktime.forecasting.ardl import ARDL
from sktime.forecasting.base import ForecastingHorizon
from sktime.utils.validation._dependencies import _check_soft_dependencies
from sktime.utils.validation._dependencies import _check_estimator_deps


@pytest.mark.skipif(
not _check_soft_dependencies("statsmodels", severity="none"),
not _check_estimator_deps(ARDL, severity="none"),
reason="skip test if required soft dependency not available",
)
def test_against_statsmodels():
Expand Down Expand Up @@ -41,7 +41,7 @@ def test_against_statsmodels():


@pytest.mark.skipif(
not _check_soft_dependencies("statsmodels", severity="none"),
not _check_estimator_deps(ARDL, severity="none"),
reason="skip test if required soft dependency not available",
)
def test_against_statsmodels_2():
Expand Down Expand Up @@ -73,7 +73,7 @@ def test_against_statsmodels_2():


@pytest.mark.skipif(
not _check_soft_dependencies("statsmodels", severity="none"),
not _check_estimator_deps(ARDL, severity="none"),
reason="skip test if required soft dependency not available",
)
def test_against_statsmodels_3():
Expand Down Expand Up @@ -101,7 +101,7 @@ def test_against_statsmodels_3():


@pytest.mark.skipif(
not _check_soft_dependencies("statsmodels", severity="none"),
not _check_estimator_deps(ARDL, severity="none"),
reason="skip test if required soft dependency not available",
)
def test_against_statsmodels_4():
Expand All @@ -128,7 +128,7 @@ def test_against_statsmodels_4():


@pytest.mark.skipif(
not _check_soft_dependencies("statsmodels", severity="none"),
not _check_estimator_deps(ARDL, severity="none"),
reason="skip test if required soft dependency not available",
)
def test_auto_ardl():
Expand Down Expand Up @@ -162,7 +162,7 @@ def test_auto_ardl():


@pytest.mark.skipif(
not _check_soft_dependencies("statsmodels", severity="none"),
not _check_estimator_deps(ARDL, severity="none"),
reason="skip test if required soft dependency not available",
)
def test_against_statsmodels_5():
Expand Down
Expand Up @@ -33,7 +33,7 @@
@pytest.mark.parametrize("flatten", flatten_list)
@pytest.mark.parametrize("no_levels", level_list)
def test_reconciler_fit_transform(method, flatten, no_levels):
"""Tests fit_trasnform and output of reconciler.
"""Tests fit_transform and output of reconciler.

Raises
------
Expand Down