Skip to content

Commit

Permalink
[MAINT] Pytest fixture to ignore internal deprecation warnings (#4043)
Browse files Browse the repository at this point in the history
* Ignore internal deprecation warnings

* More internal deprecations

* improve warnings and their tests

* fix typo (#4062)

---------

Co-authored-by: Remi Gau <remi_gau@hotmail.com>
  • Loading branch information
ymzayek and Remi-Gau committed Oct 17, 2023
1 parent 96755a4 commit 05ae8ce
Show file tree
Hide file tree
Showing 8 changed files with 41 additions and 40 deletions.
17 changes: 17 additions & 0 deletions nilearn/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,23 @@ def warnings_as_errors():
yield


@pytest.fixture(autouse=True)
def suppress_specific_warning():
    """Ignore a known set of internal deprecation warnings during tests.

    The filter is scoped with ``catch_warnings`` so it is active only for
    the duration of each test and restored afterwards.
    """
    with warnings.catch_warnings():
        # Single regex alternation of the internal deprecation messages.
        # NOTE: no trailing "|" — a trailing pipe would create an empty
        # alternative that matches *every* message, silencing ALL
        # DeprecationWarnings instead of only these three.
        messages = (
            "The `darkness` parameter will be deprecated.*|"
            "`legacy_format` will default to `False`.*|"
            "In release 0.13, this fetcher will return a dictionary.*"
        )
        warnings.filterwarnings(
            "ignore",
            message=messages,
            category=DeprecationWarning,
        )
        yield


# ------------------------ RNG ------------------------#


Expand Down
10 changes: 5 additions & 5 deletions nilearn/datasets/atlas.py
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ def fetch_atlas_difumo(
labels = pd.read_csv(files_[0])
labels = labels.rename(columns={c: c.lower() for c in labels.columns})
if legacy_format:
warnings.warn(_LEGACY_FORMAT_MSG)
warnings.warn(_LEGACY_FORMAT_MSG, DeprecationWarning)
labels = labels.to_records(index=False)

# README
Expand Down Expand Up @@ -355,7 +355,7 @@ def fetch_atlas_destrieux_2009(
params = dict(maps=files_[1], labels=pd.read_csv(files_[0], index_col=0))

if legacy_format:
warnings.warn(_LEGACY_FORMAT_MSG)
warnings.warn(_LEGACY_FORMAT_MSG, DeprecationWarning)
params["labels"] = params["labels"].to_records()

params["description"] = Path(files_[2]).read_text()
Expand Down Expand Up @@ -935,7 +935,7 @@ def fetch_coords_power_2011(legacy_format=True):
columns={c: c.lower() for c in params["rois"].columns}
)
if legacy_format:
warnings.warn(_LEGACY_FORMAT_MSG)
warnings.warn(_LEGACY_FORMAT_MSG, DeprecationWarning)
params["rois"] = params["rois"].to_records(index=False)
return Bunch(**params)

Expand Down Expand Up @@ -1539,7 +1539,7 @@ def fetch_coords_dosenbach_2010(ordered_regions=True, legacy_format=True):
)

if legacy_format:
warnings.warn(_LEGACY_FORMAT_MSG)
warnings.warn(_LEGACY_FORMAT_MSG, DeprecationWarning)
params["rois"] = params["rois"].to_records(index=False)

return Bunch(**params)
Expand Down Expand Up @@ -1623,7 +1623,7 @@ def fetch_coords_seitzman_2018(ordered_regions=True, legacy_format=True):
rois = rois.sort_values(by=["network", "y"])

if legacy_format:
warnings.warn(_LEGACY_FORMAT_MSG)
warnings.warn(_LEGACY_FORMAT_MSG, DeprecationWarning)
rois = rois.to_records()

params = dict(
Expand Down
4 changes: 2 additions & 2 deletions nilearn/datasets/func.py
Original file line number Diff line number Diff line change
Expand Up @@ -899,7 +899,7 @@ def _is_valid_path(path, index, verbose):
subjects_indices.append(subject_names.index(name))
csv_data = csv_data.iloc[subjects_indices]
if legacy_format:
warnings.warn(_LEGACY_FORMAT_MSG)
warnings.warn(_LEGACY_FORMAT_MSG, DeprecationWarning)
csv_data = csv_data.to_records(index=False)
return Bunch(ext_vars=csv_data, description=fdescr, **files)

Expand Down Expand Up @@ -1195,7 +1195,7 @@ def fetch_abide_pcp(
pheno = pheno[:n_subjects]

if legacy_format:
warnings.warn(_LEGACY_FORMAT_MSG)
warnings.warn(_LEGACY_FORMAT_MSG, DeprecationWarning)
pheno = pheno.to_records(index=False)

results = {
Expand Down
2 changes: 1 addition & 1 deletion nilearn/datasets/struct.py
Original file line number Diff line number Diff line change
Expand Up @@ -852,7 +852,7 @@ def fetch_oasis_vbm(
fdescr = _get_dataset_descr(dataset_name)

if legacy_format:
warnings.warn(_LEGACY_FORMAT_MSG)
warnings.warn(_LEGACY_FORMAT_MSG, DeprecationWarning)
csv_data = csv_data.to_records(index=False)

return Bunch(
Expand Down
23 changes: 13 additions & 10 deletions nilearn/decoding/tests/test_decoder.py
Original file line number Diff line number Diff line change
Expand Up @@ -265,20 +265,23 @@ def test_wrap_param_grid(param_grid):
],
)
def test_wrap_param_grid_warning(param_grid, need_wrap):
    """Check the 'sequence of iterables' warning is raised only when needed.

    When ``need_wrap`` is True, wrapping must emit the UserWarning; when
    False, no warning containing the expected substring may be emitted.
    """
    expected_warning_substring = "should be a sequence of iterables"

    if need_wrap:
        # The warning must be raised for grids that require wrapping.
        with pytest.warns(UserWarning, match=expected_warning_substring):
            _wrap_param_grid(param_grid, param_name="alphas")
        return

    # Grids that need no wrapping must not trigger the warning: record
    # everything emitted and verify the substring never appears.
    with warnings.catch_warnings(record=True) as raised_warnings:
        _wrap_param_grid(param_grid, param_name="alphas")
        emitted = [str(w.message) for w in raised_warnings]

    assert all(expected_warning_substring not in text for text in emitted)


Expand Down
13 changes: 1 addition & 12 deletions nilearn/decomposition/tests/test_canica.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,5 @@
"""Test CanICA."""

import warnings

import numpy as np
import pytest
from nibabel import Nifti1Image
Expand Down Expand Up @@ -150,18 +148,9 @@ def test_percentile_range(canica_data):
# stress thresholding via an edge case
canica = CanICA(n_components=edge_case, threshold=float(edge_case))

with warnings.catch_warnings(record=True) as warning:
with pytest.warns(UserWarning, match="obtained a critical threshold"):
canica.fit(canica_data)

# ensure a single warning is raised
# filter out deprecation warnings
warning_messages = [
"obtained a critical threshold" in str(w.message)
for w in warning
if not issubclass(w.category, (DeprecationWarning, FutureWarning))
]
assert sum(warning_messages) == 1


def test_canica_square_img(mask_img):
data, components, rng = _make_canica_test_data(n_subjects=8)
Expand Down
8 changes: 1 addition & 7 deletions nilearn/reporting/tests/test_glm_reporter.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
import warnings

import numpy as np
import pandas as pd
import pytest
Expand Down Expand Up @@ -92,13 +90,9 @@ def test_check_report_dims():
expected_warning_text = (
"Report size has invalid values. Using default 1600x800"
)
with warnings.catch_warnings(record=True) as raised_warnings:
with pytest.warns(UserWarning, match=expected_warning_text):
actual_output = glmr._check_report_dims(test_input)
raised_warnings_texts = [
str(warning_.message) for warning_ in raised_warnings
]
assert actual_output == expected_output
assert expected_warning_text in raised_warnings_texts


def test_coerce_to_dict_with_string():
Expand Down
4 changes: 1 addition & 3 deletions nilearn/tests/test_masking.py
Original file line number Diff line number Diff line change
Expand Up @@ -223,10 +223,8 @@ def test_compute_background_mask(affine_eye):
# Check that we get a useful warning for empty masks
mean_image = np.zeros((9, 9, 9))
mean_image = Nifti1Image(mean_image, affine_eye)
with warnings.catch_warnings(record=True) as w:
with pytest.warns(MaskWarning, match="Computed an empty mask"):
compute_background_mask(mean_image)
assert len(w) == 1
assert isinstance(w[0].message, masking.MaskWarning)


def test_compute_brain_mask():
Expand Down

0 comments on commit 05ae8ce

Please sign in to comment.