[ENH] enable check_estimator and QuickTester.run_tests to work with skip marked `pytest` tests (issue sktime#2419) (sktime#6233)

#### Reference Issues/PRs: 
Fixes sktime#2419. 

#### What does this implement/fix? Explain your changes.
Ensures that `check_estimator` and `QuickTester.run_tests` work with tests
that are skipped via `pytest.skip`. Replaces the silent `return None` early
exits in the affected forecasting and splitter tests with explicit
`pytest.skip` calls, and adds handling for the `Skipped` exception in
`QuickTester.run_tests`, so skipped tests are reported as `SKIPPED: <reason>`
rather than treated as failures. The `check_estimator` smoke test now also
asserts that no checks fail and that fewer than 10% are skipped.
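
For illustration, a minimal sketch of how the new behaviour surfaces to a user
(assumptions: the `NaiveForecaster` example, the `check_estimator` import path,
and filtering on the `SKIPPED:` prefix are inferred from this diff, not taken
verbatim from the PR):

```python
from sktime.forecasting.naive import NaiveForecaster
from sktime.utils.estimator_checks import check_estimator

# Run the estimator test suite; the result is a dict mapping test/fixture
# names to outcomes instead of aborting on the first skip or failure.
results = check_estimator(NaiveForecaster, verbose=False)

# With this change, tests that call pytest.skip are recorded as
# "SKIPPED: <reason>" rather than breaking check_estimator.
skipped = {k: v for k, v in results.items() if str(v).startswith("SKIPPED")}
print(f"{len(skipped)} of {len(results)} checks skipped")
```
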
YelenaYY authored and geetu040 committed Jun 4, 2024
1 parent e0d0dd1 commit 2b8ee14
Showing 4 changed files with 41 additions and 27 deletions.
sktime/forecasting/tests/test_all_forecasters.py (30 changes: 12 additions & 18 deletions)
@@ -212,12 +212,10 @@ def test_predict_time_index(
     """
     index_type, fh_type, is_relative = index_fh_comb
     if fh_type == "timedelta":
-        return None
-        # todo: ensure check_estimator works with pytest.skip like below
-        # pytest.skip(
-        # "ForecastingHorizon with timedelta values "
-        # "is currently experimental and not supported everywhere"
-        # )
+        pytest.skip(
+            "ForecastingHorizon with timedelta values "
+            "is currently experimental and not supported everywhere"
+        )
     y_train = _make_series(
         n_columns=n_columns, index_type=index_type, n_timepoints=50
     )
@@ -268,12 +266,10 @@ def test_predict_time_index_with_X(
     """Check that predicted time index matches forecasting horizon."""
     index_type, fh_type, is_relative = index_fh_comb
     if fh_type == "timedelta":
-        return None
-        # todo: ensure check_estimator works with pytest.skip like below
-        # pytest.skip(
-        # "ForecastingHorizon with timedelta values "
-        # "is currently experimental and not supported everywhere"
-        # )
+        pytest.skip(
+            "ForecastingHorizon with timedelta values "
+            "is currently experimental and not supported everywhere"
+        )
     z, X = make_forecasting_problem(index_type=index_type, make_X=True)
 
     # Some estimators may not support all time index types and fh types, hence we
@@ -308,12 +304,10 @@ def test_predict_time_index_in_sample_full(
     """Check that predicted time index equals fh for full in-sample predictions."""
     index_type, fh_type, is_relative = index_fh_comb
     if fh_type == "timedelta":
-        return None
-        # todo: ensure check_estimator works with pytest.skip like below
-        # pytest.skip(
-        # "ForecastingHorizon with timedelta values "
-        # "is currently experimental and not supported everywhere"
-        # )
+        pytest.skip(
+            "ForecastingHorizon with timedelta values "
+            "is currently experimental and not supported everywhere"
+        )
     y_train = _make_series(n_columns=n_columns, index_type=index_type)
     cutoff = get_cutoff(y_train, return_index=True)
     steps = -np.arange(len(y_train))
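
A side note on why the change from `return None` to `pytest.skip` matters for
reporting: a bare `return` makes the parametrized case count as a pass, while
`pytest.skip` records it as skipped, which `check_estimator` can now surface.
A small standalone sketch (hypothetical test name, not part of this PR):

```python
import pytest


@pytest.mark.parametrize("fh_type", ["int", "timedelta"])
def test_fh_support(fh_type):
    # Previously the unsupported case silently returned and was counted as
    # passed; pytest.skip reports it explicitly as a skip instead.
    if fh_type == "timedelta":
        pytest.skip(
            "ForecastingHorizon with timedelta values "
            "is currently experimental and not supported everywhere"
        )
    assert fh_type == "int"
```
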
sktime/split/tests/test_temporaltraintest.py (10 changes: 4 additions & 6 deletions)
@@ -36,12 +36,10 @@ def _check_train_test_split_y(fh, split):
 def test_split_by_fh(index_type, fh_type, is_relative, values):
     """Test temporal_train_test_split."""
     if fh_type == "timedelta":
-        return None
-        # todo: ensure check_estimator works with pytest.skip like below
-        # pytest.skip(
-        # "ForecastingHorizon with timedelta values "
-        # "is currently experimental and not supported everywhere"
-        # )
+        pytest.skip(
+            "ForecastingHorizon with timedelta values "
+            "is currently experimental and not supported everywhere"
+        )
     y = _make_series(20, index_type=index_type)
     cutoff = get_cutoff(y.iloc[:10], return_index=True)
     fh = _make_fh(cutoff, values, fh_type, is_relative)
sktime/tests/test_all_estimators.py (3 changes: 3 additions & 0 deletions)
@@ -17,6 +17,7 @@
 import numpy as np
 import pandas as pd
 import pytest
+from _pytest.outcomes import Skipped
 
 from sktime.base import BaseEstimator, BaseObject, load
 from sktime.classification.deep_learning.base import BaseDeepClassifier
@@ -599,6 +600,8 @@ def _generate_estimator_instance_cls(test_name, **kwargs):
            try:
                test_fun(**deepcopy(args))
                results[key] = "PASSED"
+           except Skipped as err:
+               results[key] = f"SKIPPED: {err.msg}"
            except Exception as err:
                results[key] = err
            else:
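
The mechanism relied on above: `pytest.skip` raises `_pytest.outcomes.Skipped`,
so a runner that calls test functions directly, as `QuickTester.run_tests`
does, can catch it and record a skip instead of a failure. A minimal sketch of
that mechanism outside sktime (the example test and dict are made up):

```python
import pytest
from _pytest.outcomes import Skipped


def _example_test():
    pytest.skip("not applicable for this estimator")


results = {}
try:
    _example_test()
    results["_example_test"] = "PASSED"
except Skipped as err:
    # Skipped carries the skip reason in its .msg attribute.
    results["_example_test"] = f"SKIPPED: {err.msg}"
except Exception as err:  # noqa: BLE001
    results["_example_test"] = err

print(results)  # {'_example_test': 'SKIPPED: not applicable for this estimator'}
```
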
sktime/utils/tests/test_check_estimator.py (25 changes: 22 additions & 3 deletions)
@@ -14,14 +14,33 @@
 
 @pytest.mark.parametrize("estimator_class", EXAMPLE_CLASSES)
 def test_check_estimator_passed(estimator_class):
-    """Test that check_estimator returns only passed tests for examples we know pass."""
+    """Test that check_estimator returns only passed tests for examples we know pass.
+
+    Tests may be skipped if they are not applicable to the estimator,
+    in this case the test is marked as "SKIP", and we test
+    that less than 10% of tests are skipped.
+    """
     estimator_instance = estimator_class.create_test_instance()
 
     result_class = check_estimator(estimator_class, verbose=False)
-    assert all(x == "PASSED" for x in result_class.values())
+
+    # Check there are no failures.
+    assert not any(x == "FAILED" for x in result_class.values())
+
+    # Check less than 10% are skipped.
+    skip_ratio = sum(list(x[:4] == "SKIP" for x in result_class.values()))
+    skip_ratio = skip_ratio / len(result_class.values())
+    assert skip_ratio < 0.1
 
     result_instance = check_estimator(estimator_instance, verbose=False)
-    assert all(x == "PASSED" for x in result_instance.values())
+
+    # Check there are no failures.
+    assert not any(x == "FAILED" for x in result_instance.values())
+
+    # Check less than 10% are skipped.
+    skip_ratio = sum(list(x[:4] == "SKIP" for x in result_instance.values()))
+    skip_ratio = skip_ratio / len(result_instance.values())
+    assert skip_ratio < 0.1
 
 
 @pytest.mark.parametrize("estimator_class", EXAMPLE_CLASSES)
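
For clarity, the new assertions operate on the string values of the results
dict returned by `check_estimator`. A toy illustration with a hypothetical
results dict (keys, values, and the relaxed threshold below are made up for
the example; the real test requires a skip ratio below 0.1 over the full
suite):

```python
# Hypothetical results dict, shaped like check_estimator(..., verbose=False) output.
results = {
    "test_fit[NaiveForecaster-0]": "PASSED",
    "test_predict_time_index[NaiveForecaster-1]": (
        "SKIPPED: ForecastingHorizon with timedelta values "
        "is currently experimental and not supported everywhere"
    ),
}

# No outright failures.
assert not any(x == "FAILED" for x in results.values())

# Share of skipped checks, using the same prefix test as above.
skip_ratio = sum(str(x)[:4] == "SKIP" for x in results.values()) / len(results)
assert skip_ratio <= 0.5  # 0.5 in this toy dict
```
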
