[FIX] fix SimpleRegressionResults #4071

Merged · 8 commits · Dec 16, 2023
Changes from 1 commit
7 changes: 3 additions & 4 deletions nilearn/glm/_base.py
@@ -1,4 +1,3 @@
-from nibabel.onetime import auto_attr
from sklearn.base import BaseEstimator, TransformerMixin

from nilearn._utils import CacheMixin
@@ -8,7 +7,7 @@ class BaseGLM(BaseEstimator, TransformerMixin, CacheMixin):
    """Implement a base class \
    for the :term:`General Linear Model<GLM>`."""

-    @auto_attr
+    @property
    def residuals(self):

Member
I have no issue with the change, but could you explain what it means/implies?

Collaborator Author
As far as I can tell these decorators do the same thing, but one comes from the standard library, so it feels more appropriate to use it.

Do you want me to mention it in the changelog?

Member
IIUC it's not exactly the same, as property doesn't store the value as an object attribute after the initial call (see https://nipy.org/nibabel/reference/nibabel.onetime.html#auto-attr). While using property works as well, it seems it could add function call overhead that could affect performance. You could time some tests that we already have to quickly compare one versus the other.
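
For readers unfamiliar with nibabel's auto_attr, here is a minimal sketch of the difference, using hypothetical `OneTime` and `Model` classes rather than nilearn's actual code: auto_attr computes the value on first access and caches it on the instance, while property recomputes it on every access.

```python
class OneTime:
    """Simplified stand-in for nibabel's auto_attr (a non-data descriptor)."""

    def __init__(self, func):
        self.func = func

    def __set_name__(self, owner, name):
        self.name = name

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        value = self.func(obj)           # compute once
        obj.__dict__[self.name] = value  # cache on the instance; shadows the descriptor
        return value


class Model:
    @OneTime
    def residuals(self):
        print("computing residuals")
        return [1, 2, 3]

    @property
    def predicted(self):
        print("computing predicted")
        return [4, 5, 6]


m = Model()
m.residuals  # prints "computing residuals", then caches the result
m.residuals  # served from the instance dict, no recomputation
m.predicted  # prints "computing predicted"
m.predicted  # prints again: property recomputes on every access
```

This is why repeatedly accessing model.residuals stays cheap with auto_attr but pays the full computation on every access with property, which matches the benchmark results reported below.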

Collaborator Author (@Remi-Gau, Dec 15, 2023)
> IIUC it's not exactly the same, as property doesn't store the value as an object attribute after the initial call (see https://nipy.org/nibabel/reference/nibabel.onetime.html#auto-attr). While using property works as well, it seems it could add function call overhead that could affect performance. You could time some tests that we already have to quickly compare one versus the other.

You were right @ymzayek, it would make the code quite a bit slower.

Ran the following script on this branch and on main.

On main I am at about 0.197 seconds per iteration.

On this branch it increases up to 0.943 seconds per iteration if I try to get the residuals 10 times.

So yeah will revert those decorators.

"""Benchmark for getting GLM residuals

Runs a first level GLM N_ITER time on dummy data
and gets the residuals n times (between 0 and N_ITER_RESIDUALS times).

Gets the time taken to for 1 iteration (fit GLM and get residual n times).

"""python
from nilearn._utils.data_gen import (
    generate_fake_fmri_data_and_design,
)
from nilearn.glm.first_level import (
    FirstLevelModel,
)

import time

def compute_residuals(mask, fmri_data, design_matrices):
    model = FirstLevelModel(
        mask_img=mask, minimize_memory=False, noise_model="ols"
    )
    model.fit(fmri_data, design_matrices=design_matrices)
    return model.residuals

N_ITER = 20
N_ITER_RESIDUALS = 11

def main():

    shapes, rk = [(20, 20, 20, 100)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk
    )

    for i in range(len(design_matrices)):
        design_matrices[i][design_matrices[i].columns[0]] = 1

    for n_iter_residuals in range(1, N_ITER_RESIDUALS):

        t0 = time.monotonic_ns()
        for _ in range(N_ITER):
            model = FirstLevelModel(
                mask_img=mask, minimize_memory=False, noise_model="ols"
            )
            model.fit(fmri_data, design_matrices=design_matrices)
            # get the residuals several times to see if this makes a difference
            for __ in range(n_iter_residuals):
                model.residuals
        t1 = time.monotonic_ns()

        print(f"Get residuals {n_iter_residuals} times")
        print(f"{(t1-t0)/10**9/N_ITER:0.3f} seconds per iteration")


if __name__ == "__main__":
    main()

"""Transform voxelwise residuals to the same shape \
as the input Nifti1Image(s).
Expand All @@ -23,7 +22,7 @@ def residuals(self):
"residuals", result_as_time_series=True
)

@auto_attr
@property
def predicted(self):
"""Transform voxelwise predicted values to the same shape \
as the input Nifti1Image(s).
Expand All @@ -38,7 +37,7 @@ def predicted(self):
"predicted", result_as_time_series=True
)

@auto_attr
@property
def r_square(self):
"""Transform voxelwise r-squared values to the same shape \
as the input Nifti1Image(s).
Expand Down
3 changes: 1 addition & 2 deletions nilearn/glm/model.py
@@ -3,7 +3,6 @@
Author: Bertrand Thirion, 2011--2015
"""
import numpy as np
-from nibabel.onetime import auto_attr
from scipy.linalg import inv
from scipy.stats import t as t_distribution

@@ -86,7 +85,7 @@ def __init__(
        # put this as a parameter of LikelihoodModel
        self.df_residuals = self.df_total - self.df_model

-    @auto_attr
+    @property
    def logL(self):
        """Return the maximized log-likelihood."""
        return self.model.logL(self.theta, self.Y, nuisance=self.nuisance)
34 changes: 17 additions & 17 deletions nilearn/glm/regression.py
@@ -20,7 +20,6 @@

import numpy as np
import scipy.linalg as spl
-from nibabel.onetime import auto_attr
from numpy.linalg import matrix_rank

from nilearn.glm._utils import positive_reciprocal
@@ -309,12 +308,12 @@
        self.whitened_residuals = whitened_residuals
        self.whitened_design = model.whitened_design

-    @auto_attr
+    @property
    def residuals(self):
        """Residuals from the fit."""
        return self.Y - self.predicted

-    @auto_attr
+    @property
    def normalized_residuals(self):
        """Residuals, normalized to have unit length.

@@ -337,31 +336,31 @@
        """
        return self.residuals * positive_reciprocal(np.sqrt(self.dispersion))

-    @auto_attr
+    @property
    def predicted(self):
        """Return linear predictor values from a design matrix."""
        beta = self.theta
        # the LikelihoodModelResults has parameters named 'theta'
        X = self.whitened_design
        return np.dot(X, beta)

-    @auto_attr
+    @property
    def SSE(self):
        """Error sum of squares.

        If not from an OLS model this is "pseudo"-SSE.
        """
        return (self.whitened_residuals**2).sum(0)

-    @auto_attr
+    @property
    def r_square(self):
        """Proportion of explained variance.

        If not from an OLS model this is "pseudo"-R2.
        """
        return np.var(self.predicted, 0) / np.var(self.whitened_Y, 0)

-    @auto_attr
+    @property
    def MSE(self):
        """Return Mean square (error)."""
        return self.SSE / self.df_residuals
@@ -391,15 +390,19 @@
        # put this as a parameter of LikelihoodModel
        self.df_residuals = self.df_total - self.df_model

-    def logL(self, Y):
+    def logL(self):
        """Return the maximized log-likelihood."""
-        raise ValueError("can not use this method for simple results")
+        raise NotImplementedError(
+            "logL not implemented for "
+            "SimpleRegressionsResults. "
+            "Use RegressionResults"
+        )

-    def residuals(self, Y):
+    def residuals(self, Y, X):
        """Residuals from the fit."""
-        return Y - self.predicted
+        return Y - self.predicted(X)

-    def normalized_residuals(self, Y):
+    def normalized_residuals(self, Y, X):
        """Residuals, normalized to have unit length.

        See :footcite:`Montgomery2006` and :footcite:`Davidson2004`.
@@ -419,14 +422,11 @@
        .. footbibliography::

        """
-        return self.residuals(Y) * positive_reciprocal(
+        return self.residuals(Y, X) * positive_reciprocal(
            np.sqrt(self.dispersion)
        )

-    @auto_attr
-    def predicted(self):
+    def predicted(self, X):
        """Return linear predictor values from a design matrix."""
        beta = self.theta
        # the LikelihoodModelResults has parameters named 'theta'
-        X = self.model.design
        return np.dot(X, beta)
17 changes: 15 additions & 2 deletions nilearn/glm/tests/test_regression.py
@@ -1,9 +1,13 @@
"""Test functions for models.regression"""

import pytest
-from numpy.testing import assert_almost_equal, assert_array_almost_equal
+from numpy.testing import (
+    assert_almost_equal,
+    assert_array_almost_equal,
+    assert_array_equal,
+)

-from nilearn.glm import ARModel, OLSModel
+from nilearn.glm import ARModel, OLSModel, SimpleRegressionResults


@pytest.fixture()
@@ -69,3 +73,12 @@ def test_AR_degenerate(X, Y):
    model = ARModel(design=X, rho=0.9)
    results = model.fit(Y)
    assert results.df_residuals == 31
+
+
+def test_simple_results(X, Y):
+    model = OLSModel(X)
+    results = model.fit(Y)
+
+    simple_results = SimpleRegressionResults(results)
+    assert_array_equal(results.predicted, simple_results.predicted(X))
+    assert_array_equal(results.residuals, simple_results.residuals(Y, X))