[MAINT] Use rng fixture instead of check_random_state (#4063)
* rm check_random_state for fixture

* adapt fixture scope

* rm RANDOM_STATE
Remi-Gau committed Oct 19, 2023
1 parent b328d6d commit 9fcc19d
Showing 8 changed files with 93 additions and 157 deletions.
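The tests below request a shared pytest fixture named `rng` instead of calling `sklearn.utils.check_random_state` themselves. The fixture is defined in nilearn's conftest.py and is not part of this diff; a minimal sketch of what such a fixture could look like (the seed, scope, and use of `np.random.RandomState` here are assumptions for illustration only):

# Hypothetical sketch of the shared fixture; the real definition lives in
# nilearn's conftest.py and is not shown in this diff.
import numpy as np
import pytest


@pytest.fixture()
def rng():
    """Return a seeded RandomState so each test gets reproducible draws."""
    return np.random.RandomState(42)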
4 changes: 1 addition & 3 deletions nilearn/_utils/tests/test_extmath.py
@@ -1,12 +1,10 @@
 """Test the _utils.extmath module."""
 import numpy as np
-from sklearn.utils import check_random_state

 from nilearn._utils.extmath import fast_abs_percentile, is_spd


-def test_fast_abs_percentile():
-    rng = check_random_state(1)
+def test_fast_abs_percentile(rng):
     data = np.arange(100)
     rng.shuffle(data)
     for p in data:
21 changes: 6 additions & 15 deletions nilearn/decoding/tests/test_graph_net.py
@@ -5,7 +5,6 @@
 from nibabel import Nifti1Image
 from numpy.testing import assert_almost_equal
 from scipy import linalg
-from sklearn.utils import check_random_state

 from nilearn.decoding.objective_functions import _div, _gradient
 from nilearn.decoding.space_net import BaseSpaceNet
@@ -60,10 +59,9 @@ def get_gradient_matrix(w_size, mask):
     return grad_matrix


-def test_grad_matrix():
+def test_grad_matrix(rng):
     """Test for matricial form of gradient."""
     _, _, w, mask, *_ = _make_data()
-    rng = check_random_state(42)

     G = get_gradient_matrix(w.size, mask)

@@ -75,9 +73,8 @@ def test_grad_matrix():
     assert_almost_equal(_gradient(image_buffer)[grad_mask], np.dot(G, v))


-def test_adjointness(size=4):
+def test_adjointness(rng, size=4):
     """Test for adjointness between gradient and div operators."""
-    rng = check_random_state(42)
     for _ in range(3):
         image_1 = rng.rand(size, size, size)
         image_2 = rng.rand(3, size, size, size)
@@ -88,11 +85,9 @@ def test_adjointness(size=4):
     assert_almost_equal(Axdoty, -xdotAty)


-def test_identity_adjointness(size=4):
+def test_identity_adjointness(rng, size=4):
     """Test adjointess between _graph_net_data_function and \
     _graph_net_adjoint_data_function, with identity design matrix."""
-    rng = check_random_state(42)
-
     # A mask full of ones
     mask = np.ones((size, size, size), dtype=bool)

@@ -113,10 +108,8 @@ def test_identity_adjointness(size=4):
     assert_almost_equal(Axdoty, xdotAty)


-def test_operators_adjointness(size=4):
+def test_operators_adjointness(rng, size=4):
     """Perform same as test_identity_adjointness with generic design matrix."""
-    rng = check_random_state(42)
-
     # A mask full of ones
     mask = np.ones((size, size, size), dtype=bool)

@@ -189,11 +182,10 @@ def func_grad(w):
     )


-def test_squared_loss_derivative_lipschitz_constant():
+def test_squared_loss_derivative_lipschitz_constant(rng):
     """Test Lipschitz-continuity of the derivative of _squared_loss loss \
     function."""
     X, y, w, mask, *_ = _make_data()
-    rng = check_random_state(42)
     grad_weight = 2.08e-1

     lipschitz_constant = _squared_loss_derivative_lipschitz_constant(
@@ -216,10 +208,9 @@ def test_squared_loss_derivative_lipschitz_constant():
     assert gradient_difference <= lipschitz_constant * point_difference


-def test_logistic_derivative_lipschitz_constant():
+def test_logistic_derivative_lipschitz_constant(rng):
     """Test Lipschitz-continuity of the derivative of logistic loss."""
     X, y, w, mask, *_ = _make_data()
-    rng = check_random_state(42)
     grad_weight = 2.08e-1

     lipschitz_constant = _logistic_derivative_lipschitz_constant(
5 changes: 1 addition & 4 deletions nilearn/decoding/tests/test_objective_functions.py
@@ -4,7 +4,6 @@
 import pytest
 from numpy.testing import assert_almost_equal, assert_array_equal
 from scipy.optimize import check_grad
-from sklearn.utils import check_random_state

 from nilearn.decoding.objective_functions import (
     _div_id,
@@ -20,10 +19,8 @@
 @pytest.mark.parametrize("ndim", range(1, 5))
 @pytest.mark.parametrize("l1_ratio", L1_RATIO)
 @pytest.mark.parametrize("size", [3, 4, 5])
-def test_grad_div_adjoint_arbitrary_ndim(ndim, l1_ratio, size):
+def test_grad_div_adjoint_arbitrary_ndim(rng, ndim, l1_ratio, size):
     # We need to check that <D x, y> = <x, DT y> for x and y random vectors
-    rng = check_random_state(42)
-
     shape = tuple([size] * ndim)
     x = rng.normal(size=shape)
     y = rng.normal(size=[ndim + 1] + list(shape))
9 changes: 3 additions & 6 deletions nilearn/decoding/tests/test_same_api.py
@@ -78,8 +78,7 @@ def to_niimgs(X, dim):
     return Nifti1Image(X, affine), Nifti1Image(mask.astype(np.float64), affine)


-def test_same_energy_calculus_pure_lasso():
-    rng = check_random_state(42)
+def test_same_energy_calculus_pure_lasso(rng):
     X, y, w, mask = _make_data(rng=rng, masked=True)

     # check funcvals
@@ -95,8 +94,7 @@ def test_same_energy_calculus_pure_lasso():
     assert_array_equal(g1, g2)


-def test_lipschitz_constant_loss_mse():
-    rng = check_random_state(42)
+def test_lipschitz_constant_loss_mse(rng):
     X, _, _, mask = _make_data(rng=rng, masked=True)
     l1_ratio = 1.0
     alpha = 0.1
@@ -109,8 +107,7 @@ def test_lipschitz_constant_loss_mse():
     assert_almost_equal(a, b)


-def test_lipschitz_constant_loss_logreg():
-    rng = check_random_state(42)
+def test_lipschitz_constant_loss_logreg(rng):
     X, _, _, mask = _make_data(rng=rng, masked=True)
     l1_ratio = 1.0
     alpha = 0.1
16 changes: 5 additions & 11 deletions nilearn/decoding/tests/test_space_net.py
@@ -8,7 +8,6 @@
 from sklearn.linear_model import Lasso, LogisticRegression
 from sklearn.linear_model._coordinate_descent import _alpha_grid
 from sklearn.metrics import accuracy_score
-from sklearn.utils import check_random_state

 from nilearn._utils.param_validation import adjust_screening_percentile
 from nilearn.decoding.space_net import (
@@ -42,9 +41,8 @@
 @pytest.mark.parametrize("l1_ratio", [0.5, 1.0])
 @pytest.mark.parametrize("n_alphas", range(1, 10))
 def test_space_net_alpha_grid(
-    is_classif, l1_ratio, n_alphas, n_samples=4, n_features=3
+    rng, is_classif, l1_ratio, n_alphas, n_samples=4, n_features=3
 ):
-    rng = check_random_state(42)
     X = rng.randn(n_samples, n_features)
     y = np.arange(n_samples)

@@ -77,10 +75,9 @@ def test_space_net_alpha_grid_same_as_sk():
     )


-def test_early_stopping_callback_object(n_samples=10, n_features=30):
+def test_early_stopping_callback_object(rng, n_samples=10, n_features=30):
     # This test evolves w so that every line of th _EarlyStoppingCallback
     # code is executed a some point. This a kind of code fuzzing.
-    rng = check_random_state(42)
     X_test = rng.randn(n_samples, n_features)
     y_test = np.dot(X_test, np.ones(n_features))
     w = np.zeros(n_features)
@@ -218,8 +215,7 @@ def test_squared_loss_path_scores():

 @pytest.mark.parametrize("l1_ratio", [1])
 @pytest.mark.parametrize("debias", [True])
-def test_tv_regression_simple(l1_ratio, debias):
-    rng = check_random_state(42)
+def test_tv_regression_simple(rng, l1_ratio, debias):
     dim = (4, 4, 4)
     W_init = np.zeros(dim)
     W_init[2:3, 1:2, -2:] = 1
@@ -245,8 +241,7 @@ def test_tv_regression_simple(l1_ratio, debias):


 @pytest.mark.parametrize("l1_ratio", [0.0, 0.5, 1.0])
-def test_tv_regression_3D_image_doesnt_crash(l1_ratio):
-    rng = check_random_state(42)
+def test_tv_regression_3D_image_doesnt_crash(rng, l1_ratio):
     dim = (3, 4, 5)
     W_init = np.zeros(dim)
     W_init[2:3, 3:, 1:3] = 1
@@ -436,8 +431,7 @@ def test_space_net_regressor_subclass(penalty, alpha, l1_ratio, verbose):


 @pytest.mark.parametrize("is_classif", IS_CLASSIF)
-def test_space_net_alpha_grid_pure_spatial(is_classif):
-    rng = check_random_state(42)
+def test_space_net_alpha_grid_pure_spatial(rng, is_classif):
     X = rng.randn(10, 100)
     y = np.arange(X.shape[0])

2 changes: 1 addition & 1 deletion nilearn/mass_univariate/permuted_least_squares.py
@@ -375,7 +375,7 @@ def permuted_ols(
         If False, only positive effects are considered as relevant. The null
         hypothesis is that the effect is zero or negative.
-    random_state : :obj:`int` or None, optional
+    random_state : :obj:`int` or np.random.RandomState or None, optional
         Seed for random number generator, to have the same permutations
         in each computing units.
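The docstring change above widens the documented types for `random_state` in `permuted_ols` to cover a `np.random.RandomState` instance as well as an integer seed. A hedged usage sketch; the toy arrays and variable names below are made up for illustration, only the `random_state` argument is what this diff documents:

# Illustrative only: tested_vars / target_vars are toy arrays, not real data.
import numpy as np
from nilearn.mass_univariate import permuted_ols

rng = np.random.RandomState(42)
tested_vars = rng.randn(20, 1)
target_vars = rng.randn(20, 5)

# Either an integer seed ...
out_seed = permuted_ols(tested_vars, target_vars, n_perm=100, random_state=0)
# ... or a RandomState instance yields reproducible permutations.
out_rng = permuted_ols(tested_vars, target_vars, n_perm=100, random_state=rng)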
