From 58ded607281911e9ad547d33ebe275211ac65654 Mon Sep 17 00:00:00 2001
From: Kshitij Chawla
Date: Mon, 21 Oct 2019 15:17:14 +0200
Subject: [PATCH 01/25] Making fetch_localizer_button_task backwards compatible (#2182)

---
 examples/01_plotting/plot_dim_plotting.py |  4 ++--
 nilearn/datasets/func.py                  | 14 +++++---------
 nilearn/datasets/tests/test_func.py       | 23 +++++++++++++++++------
 3 files changed, 24 insertions(+), 17 deletions(-)

diff --git a/examples/01_plotting/plot_dim_plotting.py b/examples/01_plotting/plot_dim_plotting.py
index 11253a33ad..afecc379c3 100644
--- a/examples/01_plotting/plot_dim_plotting.py
+++ b/examples/01_plotting/plot_dim_plotting.py
@@ -21,9 +21,9 @@
 localizer_dataset = datasets.fetch_localizer_button_task()
 
 # Contrast map of motor task
-localizer_tmap_filename = localizer_dataset.tmaps[0]
+localizer_tmap_filename = localizer_dataset.tmap
 
 # Subject specific anatomical image
-localizer_anat_filename = localizer_dataset.anats[0]
+localizer_anat_filename = localizer_dataset.anat
 ###########################################################################
 # Plotting with enhancement of background image with dim=-.5
 # --------------------------------------------------------------------------
diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py
index 38844a93b1..16316965c6 100644
--- a/nilearn/datasets/func.py
+++ b/nilearn/datasets/func.py
@@ -1073,7 +1073,7 @@ def fetch_localizer_calculation_task(n_subjects=1, data_dir=None, url=None,
     return data
 
 
-def fetch_localizer_button_task(n_subjects=None, data_dir=None, url=None,
+def fetch_localizer_button_task(data_dir=None, url=None,
                                 verbose=1):
     """Fetch left vs right button press contrast maps from the localizer.
 
@@ -1116,18 +1116,14 @@
     nilearn.datasets.fetch_localizer_contrasts
 
     """
-    if n_subjects is None:
-        n_subjects = [2]
     data = fetch_localizer_contrasts(["left vs right button press"],
-                                     n_subjects=n_subjects,
+                                     n_subjects=[2],
                                      get_tmaps=True, get_masks=False,
                                      get_anats=True, data_dir=data_dir,
                                      url=url, resume=True, verbose=verbose)
-    # TODO: remove -> only here for compatibility
-    if len(data["tmaps"]) == 1:
-        setattr(data, "tmap", data["tmaps"][0])
-    if len(data["anats"]) == 1:
-        setattr(data, "anat", data["anats"][0])
+    # Additional keys for backward compatibility
+    data['tmap'] = data['tmaps'][0]
+    data['anat'] = data['anats'][0]
     return data
 
diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py
index 9c52e3dbca..8f7de18ea4 100644
--- a/nilearn/datasets/tests/test_func.py
+++ b/nilearn/datasets/tests/test_func.py
@@ -281,12 +281,23 @@ def test_fetch_localizer_calculation_task():
 @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
 @with_setup(setup_localizer, teardown_localizer)
 def test_fetch_localizer_button_task():
-    # 2 subjects
-    dataset = func.fetch_localizer_button_task(
-        data_dir=tst.tmpdir,
-        verbose=1)
-    assert_true(isinstance(dataset.tmaps[0], _basestring))
-    assert_true(isinstance(dataset.anats[0], _basestring))
+    local_url = "file://" + tst.datadir
+
+    # Disabled: cannot be tested without actually fetching covariates CSV file
+    # Only one subject
+    dataset = func.fetch_localizer_button_task(data_dir=tst.tmpdir,
+                                               url=local_url,
+                                               verbose=1)
+
+    assert_true(isinstance(dataset.tmaps, list))
+    assert_true(isinstance(dataset.anats, list))
+
+    assert len(dataset.tmaps) == 1
+    assert len(dataset.anats) == 1
+
+    assert_true(isinstance(dataset.tmap, str))
+    assert_true(isinstance(dataset.anat,
str)) + assert_not_equal(dataset.description, '') From 7e09df29d06153b7d198f9a5b7f607d1f5b999d6 Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Mon, 21 Oct 2019 17:17:49 +0200 Subject: [PATCH 02/25] Release Nilearn 0.6.0 alpha (#2164) * Updated .mailmap * Updated contributors list * Removed unsupported python versions from PyPI tags * Added whats_new entry for PR #2167 * Updated Whats whats_new list of contributors --- .mailmap | 10 ++++++++-- doc/whats_new.rst | 40 +++++++++++++++++++++++++++++++++++++++- setup.py | 3 --- 3 files changed, 47 insertions(+), 6 deletions(-) diff --git a/.mailmap b/.mailmap index e0b2d2bbb3..3a978ca81c 100644 --- a/.mailmap +++ b/.mailmap @@ -3,16 +3,21 @@ Alexandre Abadie Alexandre Abraham Alexandre Gramfort Alexandre Savio +Andrés Hoyos Idrobo +Antoine Grigis Arthur Mensch Ben Cipollini Bertrand Thirion +Céline Delettre Chris Filo Gorgolewski Danilo Bzdok Demian Wassermann Dimitri Papadopoulos Orfanos Elvis Dohmatob Fabian Pedregosa +Franz Liem Gael Varoquaux +Greg Kiar Jan Margeta Jaques Grobler Jason Gors @@ -20,19 +25,20 @@ Jona Sassenhagen Jean Kossaifi Jean Remi King Jeff Chiang +Jerome-Alexis Chevalier +Jerome Dockes Julia Huntenburg J Necus Kamalakar Daddy Konstantin Shmelkov +Kshitij Chawla (kchawla-pi) Loïc Estève Martin Perez-Guevara Matthias Ekman -Mehdi Rahim Mehdi Rahim Michael Eickenberg Michael Hanke Michael Waskom -Moritz Boos Moritz Boos Óscar Nájera Philippe Gervais diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 1bef49c278..10c76357a5 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -1,6 +1,8 @@ 0.6.0a ====== +**Released October 2019** + NEW --- @@ -106,7 +108,43 @@ Fixes version 0.14.3 (instead of 0.8.1) by default, which includes corrected region label names along with 700 and 900 region parcelations. - Colormap creation functions have been updated to avoid matplotlib deprecation warnings - about colormap reversal + about colormap reversal. +- Neurovault fetcher no longer fails if unable to update dataset metadata file due to faulty permissions. 
+ +Contributors +------------ + +The following people contributed to this release (in alphabetical order):: + + Alexandre Abraham + Alexandre Gramfort + Ana Luisa + Ana Luisa Pinho + Andrés Hoyos Idrobo + Antoine Grigis + BAZEILLE Thomas + Bertrand Thirion + Colin Reininger + Céline Delettre + Dan Gale + Daniel Gomez + Elizabeth DuPre + Eric Larson + Franz Liem + Gael Varoquaux + Gilles de Hollander + Greg Kiar + Guillaume Lemaitre + Ian Abenes + Jake Vogel + Jerome Dockes + Jerome-Alexis Chevalier + Julia Huntenburg + Kamalakar Daddy + Kshitij Chawla (kchawla-pi) + Mehdi Rahim + Moritz Boos + Sylvain Takerkart 0.5.2 ===== diff --git a/setup.py b/setup.py index a88c17ae81..0d2147ca1d 100755 --- a/setup.py +++ b/setup.py @@ -78,9 +78,6 @@ def is_installing(): 'Operating System :: POSIX', 'Operating System :: Unix', 'Operating System :: MacOS', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', From 40e8e994be2ad5566ca1ce2df1968f4033b9e6dd Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Thu, 24 Oct 2019 14:37:13 +0200 Subject: [PATCH 03/25] Update azure-pipelines.yml for Azure Pipelines --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 5ae4de4c1a..0227e62450 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -4,7 +4,7 @@ # https://docs.microsoft.com/azure/devops/pipelines/languages/python trigger: -- master +- fix-azp jobs: From 31ae37e8837fa3c9bdcbf8d39106af2846fb7562 Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Thu, 24 Oct 2019 14:52:36 +0200 Subject: [PATCH 04/25] Updated requirements list for devs (#2190) --- requirements-build-docs.txt | 3 --- requirements-dev.txt | 5 +++++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/requirements-build-docs.txt b/requirements-build-docs.txt index 27e6262424..7e6ea6f3c9 100644 --- a/requirements-build-docs.txt +++ b/requirements-build-docs.txt @@ -10,11 +10,8 @@ sphinx sphinx-gallery numpydoc coverage -patsy -boto3 pillow pandas -nose-timer nibabel scikit-learn joblib diff --git a/requirements-dev.txt b/requirements-dev.txt index e73c47dc6a..4654660c40 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -7,3 +7,8 @@ scikit-learn joblib pandas matplotlib +nibabel +pytest +pytest-cov +numpy +cython \ No newline at end of file From 342f54fb2d95598076e85ad2b2daf8fff8ec56eb Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Fri, 25 Oct 2019 11:31:41 +0200 Subject: [PATCH 05/25] Run tests on local Windows machines & Azure Pipelines (#2191) * Removed Py2 testing from Azure Pipelines, added Py3.8 * Removed CR line-end for HTML strings comparison, prevent failure on Win - Added Py3.7 testing to Azure Pipelines * Added Py3.7 testing to Azure Pipelines * Corrected job label in AZP, added comment in check_html * Improved instructions for Pytest on Azure Pipelines * Corrected AZP commands * Added coverage upload to codecov * Added test reporting using JUnitXML * Change path for JUnitXML test reports * Install codecov python package for uploading coverage reports to codecov * Corrected indentation of config key * More robust line-feed replace, better comment * Added Azure Pipeline CI Status badge to Nilearn's README * Changed AZP Org name to Parietal, Project name to Nilearn * Changed AZP config to run for all branches & PRs --- 
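Editorial note, placed after the '---' separator so it reads as patch notes and is ignored by `git am`: the Windows fix listed in the bullets above ("Removed CR line-end for HTML strings comparison") boils down to normalizing line endings before comparing a saved file with its in-memory source. A minimal, self-contained sketch of that idea, assuming nothing beyond the standard library; the helper name is hypothetical::

    def normalize_line_endings(text):
        # Replace Windows '\r\n' line endings with Unix '\n' so that a
        # file written on Windows compares equal to the in-memory string.
        return text.replace('\r\n', '\n')

    assert normalize_line_endings('<p>hi</p>\r\n') == '<p>hi</p>\n'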
README.rst | 3 +++ azure-pipelines.yml | 16 +++++++++++----- nilearn/plotting/tests/test_js_plotting_utils.py | 8 ++++++-- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/README.rst b/README.rst index e2f42830a9..9a1efd0d2a 100644 --- a/README.rst +++ b/README.rst @@ -11,6 +11,9 @@ .. image:: https://codecov.io/gh/nilearn/nilearn/branch/master/graph/badge.svg :target: https://codecov.io/gh/nilearn/nilearn +.. image:: https://dev.azure.com/Parietal/Nilearn/_apis/build/status/nilearn.nilearn?branchName=master + :target: https://dev.azure.com/Parietal/Nilearn/_apis/build/status/nilearn.nilearn?branchName=master + nilearn ======= diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0227e62450..d6d5180a78 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -4,7 +4,7 @@ # https://docs.microsoft.com/azure/devops/pipelines/languages/python trigger: -- fix-azp +- master jobs: @@ -13,10 +13,10 @@ jobs: vmImage: 'vs2017-win2016' strategy: matrix: - Python27: - python.version: '2.7' Python35: python.version: '3.5' + Python37: + python.version: '3.7' maxParallel: 4 steps: @@ -25,14 +25,20 @@ jobs: versionSpec: '$(python.version)' architecture: 'x64' - - script: python -m pip install --upgrade pip && pip install --prefer-binary -r requirements-dev.txt + - script: | + python -m pip install --upgrade pip + pip install --prefer-binary -r requirements-dev.txt + pip install pytest-azurepipelines codecov displayName: 'Install dependencies' - script: | pip install . - pytest ./nilearn -v + python -m pytest --pyargs nilearn --cov-report=xml --cov=nilearn --cov-append --junitxml=test-results.xml displayName: 'test' + - script: | + codecov -f coverage.xml --token=$(CODECOV_TOKEN) + displayName: 'codecov' - task: PublishTestResults@2 inputs: testResultsFiles: '**/test-results.xml' diff --git a/nilearn/plotting/tests/test_js_plotting_utils.py b/nilearn/plotting/tests/test_js_plotting_utils.py index 97154dcb07..06bc608b54 100644 --- a/nilearn/plotting/tests/test_js_plotting_utils.py +++ b/nilearn/plotting/tests/test_js_plotting_utils.py @@ -11,7 +11,8 @@ from nilearn import surface from nilearn.datasets import fetch_surf_fsaverage -from numpy.testing import assert_warns +from numpy.testing import assert_warns, assert_equal + try: from lxml import etree LXML_INSTALLED = True @@ -218,7 +219,10 @@ def check_html(html, check_selects=True, plot_div_id='surface-plot'): html.save_as_html(tmpfile) with open(tmpfile) as f: saved = f.read() - assert saved == html.get_standalone() + # If present, replace Windows line-end '\r\n' with Unix's '\n' + saved = saved.replace('\r\n', '\n') + standalone = html.get_standalone().replace('\r\n', '\n') + assert_equal(saved, standalone) finally: os.remove(tmpfile) assert "INSERT" not in html.html From 45d79041de4f1251582abd4c6042cb5315476358 Mon Sep 17 00:00:00 2001 From: "Kshitij Chawla (kchawla-pi)" Date: Fri, 25 Oct 2019 13:50:48 +0200 Subject: [PATCH 06/25] Updated Appveyor status badge --- README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 9a1efd0d2a..3b21ba9de1 100644 --- a/README.rst +++ b/README.rst @@ -4,8 +4,8 @@ :target: https://travis-ci.org/nilearn/nilearn :alt: Travis Build Status -.. image:: https://ci.appveyor.com/api/projects/status/github/nilearn/nilearn?branch=master&svg=true - :target: https://ci.appveyor.com/project/nilearn-ci/nilearn +.. 
image:: https://ci.appveyor.com/api/projects/status/qynxxpx0hge4h0rn/branch/master?svg=true
+   :target: https://ci.appveyor.com/api/projects/status/qynxxpx0hge4h0rn/branch/master?svg=true
    :alt: AppVeyor Build Status
 
 .. image:: https://codecov.io/gh/nilearn/nilearn/branch/master/graph/badge.svg

From 7ca7d113a7ac1e9334cc96086b28940b00e6baaf Mon Sep 17 00:00:00 2001
From: robbisg
Date: Mon, 28 Oct 2019 10:39:50 +0100
Subject: [PATCH 07/25] FIX: marker size issue in plot_connectome #2185 (#2186)

* FIX: marker size issue in plot_connectome #2185

* Update displays.py

* Update displays.py

List and numpy array accepted

* Update nilearn/plotting/displays.py

Co-Authored-By: jeromedockes

* FIX: marker size issue in plot_connectome #2185

* Update whats_new.rst

* Update displays.py

* Fixed indentation for flake
---
 doc/whats_new.rst            | 2 ++
 nilearn/plotting/displays.py | 5 ++++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/doc/whats_new.rst b/doc/whats_new.rst
index 10c76357a5..dfc8ab2209 100644
--- a/doc/whats_new.rst
+++ b/doc/whats_new.rst
@@ -86,6 +86,8 @@ Changes
 Fixes
 -----
 
+- :func:`nilearn.plotting.plot_connectome` now correctly displays marker size on 'l'
+  and 'r' orientations, if an array or a list is passed to the function.
 - :func:`nilearn.plotting.plot_glass_brain` with colorbar=True does not crash when
   images have NaNs.
 - add_contours now accepts `threshold` argument for filled=False. Now
diff --git a/nilearn/plotting/displays.py b/nilearn/plotting/displays.py
index b6f8b46ec1..84aff68e8f 100644
--- a/nilearn/plotting/displays.py
+++ b/nilearn/plotting/displays.py
@@ -403,7 +403,7 @@ def _add_markers(self, marker_coords, marker_color, marker_size, **kwargs):
             if self.direction == 'r' and xc >= 0:
                 relevant_coords.append(cidx)
             elif self.direction == 'l' and xc <= 0:
-                    relevant_coords.append(cidx)
+                relevant_coords.append(cidx)
         xdata = xdata[relevant_coords]
         ydata = ydata[relevant_coords]
         # if marker_color is string for example 'red' or 'blue', then
@@ -415,6 +415,9 @@
                     len(marker_color) != 1:
             marker_color = marker_color[relevant_coords]
 
+        if not isinstance(marker_size, numbers.Number):
+            marker_size = np.asarray(marker_size)[relevant_coords]
+
         defaults = {'marker': 'o', 'zorder': 1000}
         for k, v in defaults.items():

From 4b1db8857bbb002e9f13fcf9c3239282b8445e51 Mon Sep 17 00:00:00 2001
From: Kshitij Chawla
Date: Mon, 28 Oct 2019 10:46:36 +0100
Subject: [PATCH 08/25] Moved new entries to next release

---
 doc/whats_new.rst | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/doc/whats_new.rst b/doc/whats_new.rst
index dfc8ab2209..657d26873f 100644
--- a/doc/whats_new.rst
+++ b/doc/whats_new.rst
@@ -1,6 +1,16 @@
-0.6.0a
+0.6.0b
 ======
 
+Fixes
+-----
+
+- :func:`nilearn.plotting.plot_connectome` now correctly displays marker size on 'l'
+  and 'r' orientations, if an array or a list is passed to the function.
+
+
+0.6.0a0
+=======
+
 **Released October 2019**
 
 NEW
 ---
@@ -86,8 +96,6 @@ Changes
 Fixes
 -----
 
-- :func:`nilearn.plotting.plot_connectome` now correctly displays marker size on 'l'
-  and 'r' orientations, if an array or a list is passed to the function.
 - :func:`nilearn.plotting.plot_glass_brain` with colorbar=True does not crash when
   images have NaNs.
 - add_contours now accepts `threshold` argument for filled=False.
Now From 053e5a8d6f336440a765df8079dc391aedf053cd Mon Sep 17 00:00:00 2001 From: jeromedockes Date: Mon, 28 Oct 2019 10:50:47 +0100 Subject: [PATCH 09/25] remove is_valid filter (#2169) --- doc/whats_new.rst | 6 ++++++ nilearn/datasets/neurovault.py | 3 +-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 657d26873f..a944205388 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -1,6 +1,12 @@ 0.6.0b ====== +Changes +------- + +- :func:`nilearn.datasets.fetch_neurovault` now does not filter out images that + have their metadata field `is_valid` cleared by default. + Fixes ----- diff --git a/nilearn/datasets/neurovault.py b/nilearn/datasets/neurovault.py index d541ce16dd..f54a7aae71 100644 --- a/nilearn/datasets/neurovault.py +++ b/nilearn/datasets/neurovault.py @@ -2080,13 +2080,12 @@ def basic_image_terms(): true: - It is not in MNI space. - - Its metadata field "is_valid" is cleared. - It is thresholded. - Its map type is one of "ROI/mask", "anatomical", or "parcellation". - Its image type is "atlas" """ - return {'not_mni': False, 'is_valid': True, 'is_thresholded': False, + return {'not_mni': False, 'is_thresholded': False, 'map_type': NotIn('ROI/mask', 'anatomical', 'parcellation'), 'image_type': NotEqual('atlas')} From bd27cf51ba5c4a766b24c3852d26346bc4dd481d Mon Sep 17 00:00:00 2001 From: jeromedockes Date: Mon, 28 Oct 2019 11:51:51 +0100 Subject: [PATCH 10/25] fix error when colorscale given boolean array (#2193) * fix error when colorscale given boolean array * add comment --- nilearn/plotting/js_plotting_utils.py | 2 ++ nilearn/plotting/tests/test_js_plotting_utils.py | 1 + 2 files changed, 3 insertions(+) diff --git a/nilearn/plotting/js_plotting_utils.py b/nilearn/plotting/js_plotting_utils.py index 5e41e2ae54..90e4efa222 100644 --- a/nilearn/plotting/js_plotting_utils.py +++ b/nilearn/plotting/js_plotting_utils.py @@ -87,6 +87,8 @@ def colorscale(cmap, values, threshold=None, symmetric_cmap=True, vmin = 0 if vmax is None: vmax = abs_values.max() + # cast to float to avoid TypeError if vmax is a numpy boolean + vmax = float(vmax) if symmetric_cmap: vmin = - vmax if vmin is None: diff --git a/nilearn/plotting/tests/test_js_plotting_utils.py b/nilearn/plotting/tests/test_js_plotting_utils.py index 06bc608b54..ffd6555ee4 100644 --- a/nilearn/plotting/tests/test_js_plotting_utils.py +++ b/nilearn/plotting/tests/test_js_plotting_utils.py @@ -62,6 +62,7 @@ def test_colorscale_no_threshold(): assert colors['cmap'].N == 256 assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13) assert colors['abs_threshold'] is None + colors = js_plotting_utils.colorscale(cmap, values > 0, .5) def test_colorscale_threshold_0(): From ee1f25a908d8357b4fba84dab1d2b7018e9f5e04 Mon Sep 17 00:00:00 2001 From: jeromedockes Date: Mon, 28 Oct 2019 15:36:57 +0100 Subject: [PATCH 11/25] [MRG] Add get data function (#2172) * add get_data * move to niimg.py * most get_data changes * iter * mni template * iter * iter * iter * fix swap_hemispheres test * fix test_niimg_conversions test * restore use dataobj in test * pep8 * simplify get_data * public get_data with check_niimg + whats new + docstring * add get_data to reference --- doc/manipulating_images/input_output.rst | 4 +- doc/modules/reference.rst | 1 + doc/whats_new.rst | 15 +++ .../plot_haxby_different_estimators.py | 3 +- .../02_decoding/plot_haxby_searchlight.py | 6 +- examples/02_decoding/plot_oasis_vbm.py | 3 +- .../plot_data_driven_parcellations.py | 7 +- 
.../plot_affine_transformation.py | 8 +- .../plot_roi_extraction.py | 5 +- .../05_advanced/plot_haxby_mass_univariate.py | 4 +- .../plot_localizer_mass_univariate_methods.py | 5 +- .../plot_localizer_simple_analysis.py | 3 +- .../plot_neurovault_meta_analysis.py | 4 +- nilearn/_utils/ndimage.py | 4 +- nilearn/_utils/niimg.py | 21 +++- nilearn/_utils/niimg_conversions.py | 29 +++-- nilearn/_utils/param_validation.py | 3 +- nilearn/datasets/atlas.py | 8 +- nilearn/datasets/func.py | 3 +- nilearn/datasets/struct.py | 4 +- nilearn/datasets/tests/test_atlas.py | 7 +- nilearn/decoding/space_net.py | 3 +- nilearn/decoding/tests/test_same_api.py | 3 +- nilearn/decoding/tests/test_space_net.py | 7 +- nilearn/decomposition/tests/test_canica.py | 7 +- .../decomposition/tests/test_dict_learning.py | 8 +- nilearn/image/__init__.py | 4 +- nilearn/image/image.py | 46 +++++-- nilearn/image/resampling.py | 9 +- nilearn/image/tests/test_image.py | 112 +++++++++++------- nilearn/image/tests/test_resampling.py | 109 ++++++++--------- nilearn/input_data/multi_nifti_masker.py | 3 +- nilearn/input_data/nifti_maps_masker.py | 3 +- nilearn/input_data/nifti_masker.py | 3 +- .../tests/test_multi_nifti_masker.py | 11 +- .../tests/test_nifti_labels_masker.py | 11 +- .../tests/test_nifti_maps_masker.py | 5 +- nilearn/input_data/tests/test_nifti_masker.py | 27 +++-- .../tests/test_nifti_spheres_masker.py | 3 +- nilearn/masking.py | 9 +- nilearn/plotting/displays.py | 3 +- nilearn/plotting/find_cuts.py | 7 +- nilearn/plotting/img_plotting.py | 15 ++- nilearn/plotting/tests/test_html_stat_map.py | 7 +- nilearn/plotting/tests/test_html_surface.py | 5 +- nilearn/plotting/tests/test_img_plotting.py | 13 +- nilearn/regions/rena_clustering.py | 5 +- .../regions/tests/test_region_extractor.py | 40 ++++--- nilearn/regions/tests/test_rena_clustering.py | 7 +- .../regions/tests/test_signal_extraction.py | 27 +++-- nilearn/surface/surface.py | 9 +- nilearn/tests/test_masking.py | 53 +++++---- nilearn/tests/test_niimg.py | 10 +- nilearn/tests/test_niimg_conversions.py | 85 +++++++------ 54 files changed, 478 insertions(+), 338 deletions(-) diff --git a/doc/manipulating_images/input_output.rst b/doc/manipulating_images/input_output.rst index 0254e0711d..5960c47f86 100644 --- a/doc/manipulating_images/input_output.rst +++ b/doc/manipulating_images/input_output.rst @@ -175,7 +175,7 @@ Analyze files) is the standard way of sharing data in neuroimaging research. Three main components are: :data: - raw scans in form of a numpy array: ``data = img.get_data()`` + raw scans in form of a numpy array: ``data = nilearn.image.get_data(img)`` :affine: returns the transformation matrix that maps from voxel indices of the numpy array to actual real-world @@ -218,7 +218,7 @@ objects": **Niimg:** A Niimg-like object can be one of the following: * A string with a file path to a Nifti or Analyse image - * An ``SpatialImage`` from nibabel, ie an object exposing ``get_data()`` + * An ``SpatialImage`` from nibabel, ie an object exposing ``get_fdata()`` method and ``affine`` attribute, typically a ``Nifti1Image`` from nibabel_. **Niimg-4D:** Similarly, some functions require 4D Nifti-like diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst index 6f43d06135..745c3acda2 100644 --- a/doc/modules/reference.rst +++ b/doc/modules/reference.rst @@ -169,6 +169,7 @@ uses. 
coord_transform copy_img crop_img + get_data high_variance_confounds index_img iter_img diff --git a/doc/whats_new.rst b/doc/whats_new.rst index a944205388..94b6a37914 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -1,9 +1,24 @@ 0.6.0b ====== +NEW +--- + +- A new function :func:`nilearn.image.get_data` to replace the deprecated + nibabel method `Nifti1Image.get_data`. Now use `nilearn.image.get_data(img)` + rather than `img.get_data()`. This is because Nibabel is removing the + `get_data` method. You may also consider using the Nibabel + `Nifti1Image.get_fdata`, which returns the data cast to floating-point. + See https://github.com/nipy/nibabel/wiki/BIAP8 . + As a benefit, the `get_data` function works on niimg-like objects such as + filenames (see http://nilearn.github.io/manipulating_images/input_output.html ). + Changes ------- +- All functions and examples now use `nilearn.image.get_data` rather than the + deprecated method `nibabel.Nifti1Image.get_data`. + - :func:`nilearn.datasets.fetch_neurovault` now does not filter out images that have their metadata field `is_valid` cleared by default. diff --git a/examples/02_decoding/plot_haxby_different_estimators.py b/examples/02_decoding/plot_haxby_different_estimators.py index 1d228c7284..b8be60c340 100644 --- a/examples/02_decoding/plot_haxby_different_estimators.py +++ b/examples/02_decoding/plot_haxby_different_estimators.py @@ -12,6 +12,7 @@ # Fetch data using nilearn dataset fetcher from nilearn import datasets +from nilearn.image import get_data # by default 2nd subject data will be fetched haxby_dataset = datasets.fetch_haxby() @@ -193,7 +194,7 @@ else: continue weight_img = masker.inverse_transform(weights) - weight_map = weight_img.get_data() + weight_map = get_data(weight_img) threshold = np.max(np.abs(weight_map)) * 1e-3 plot_stat_map(weight_img, bg_img=mean_epi_img, display_mode='z', cut_coords=[-15], diff --git a/examples/02_decoding/plot_haxby_searchlight.py b/examples/02_decoding/plot_haxby_searchlight.py index 29bdd4e23e..8da090e752 100644 --- a/examples/02_decoding/plot_haxby_searchlight.py +++ b/examples/02_decoding/plot_haxby_searchlight.py @@ -14,7 +14,7 @@ # ------------------- import pandas as pd from nilearn import datasets -from nilearn.image import new_img_like, load_img +from nilearn.image import new_img_like, load_img, get_data # We fetch 2nd subject from haxby datasets (which is default) haxby_dataset = datasets.fetch_haxby() @@ -49,7 +49,7 @@ mask_img = load_img(haxby_dataset.mask) # .astype() makes a copy. 
-process_mask = mask_img.get_data().astype(np.int) +process_mask = get_data(mask_img).astype(np.int) picked_slice = 29 process_mask[..., (picked_slice + 1):] = 0 process_mask[..., :picked_slice] = 0 @@ -96,7 +96,7 @@ f_values, p_values = f_classif(fmri_masked, y) p_values = -np.log10(p_values) p_values[p_values > 10] = 10 -p_unmasked = nifti_masker.inverse_transform(p_values).get_data() +p_unmasked = get_data(nifti_masker.inverse_transform(p_values)) ######################################################################### # Visualization diff --git a/examples/02_decoding/plot_oasis_vbm.py b/examples/02_decoding/plot_oasis_vbm.py index b8810422d8..6963f22eaa 100644 --- a/examples/02_decoding/plot_oasis_vbm.py +++ b/examples/02_decoding/plot_oasis_vbm.py @@ -43,6 +43,7 @@ import matplotlib.pyplot as plt from nilearn import datasets from nilearn.input_data import NiftiMasker +from nilearn.image import get_data n_subjects = 100 # more subjects requires more memory @@ -165,7 +166,7 @@ '\n(Non-parametric + max-type correction)') display.title(title, y=1.2) -n_detections = (signed_neg_log_pvals_unmasked.get_data() > threshold).sum() +n_detections = (get_data(signed_neg_log_pvals_unmasked) > threshold).sum() print('\n%d detections' % n_detections) show() diff --git a/examples/03_connectivity/plot_data_driven_parcellations.py b/examples/03_connectivity/plot_data_driven_parcellations.py index 552230d765..95c1bb5a49 100644 --- a/examples/03_connectivity/plot_data_driven_parcellations.py +++ b/examples/03_connectivity/plot_data_driven_parcellations.py @@ -121,15 +121,16 @@ # Grab number of voxels from attribute mask image (mask_img_). import numpy as np -original_voxels = np.sum(ward.mask_img_.get_data()) +from nilearn.image import get_data +original_voxels = np.sum(get_data(ward.mask_img_)) # Compute mean over time on the functional image to use the mean # image for compressed representation comparisons mean_func_img = mean_img(dataset.func[0]) # Compute common vmin and vmax -vmin = np.min(mean_func_img.get_data()) -vmax = np.max(mean_func_img.get_data()) +vmin = np.min(get_data(mean_func_img)) +vmax = np.max(get_data(mean_func_img)) plotting.plot_epi(mean_func_img, cut_coords=cut_coords, title='Original (%i voxels)' % original_voxels, diff --git a/examples/04_manipulating_images/plot_affine_transformation.py b/examples/04_manipulating_images/plot_affine_transformation.py index f0b10a4dbb..41e1c4466a 100644 --- a/examples/04_manipulating_images/plot_affine_transformation.py +++ b/examples/04_manipulating_images/plot_affine_transformation.py @@ -46,6 +46,8 @@ # Create the data with numpy import numpy as np +from nilearn.image import get_data + grid = np.mgrid[0:192, 0:128] circle = np.sum( (grid - np.array([32, 32])[:, np.newaxis, np.newaxis]) ** 2, @@ -111,17 +113,17 @@ plt.title("The original data in voxel space") plt.figure() -plt.imshow(img_in_mm_space.get_data()[:, :, 0], vmin=0, vmax=vmax) +plt.imshow(get_data(img_in_mm_space)[:, :, 0], vmin=0, vmax=vmax) plt.title("The original data in mm space") plt.figure() -plt.imshow(img_3d_affine_in_mm_space.get_data()[:, :, 0], +plt.imshow(get_data(img_3d_affine_in_mm_space)[:, :, 0], vmin=0, vmax=vmax) plt.title("Transformed using a 3x3 affine -\n leads to " "re-estimation of bounding box") plt.figure() -plt.imshow(img_4d_affine_in_mm_space.get_data()[:, :, 0], +plt.imshow(get_data(img_4d_affine_in_mm_space)[:, :, 0], vmin=0, vmax=vmax) plt.title("Transformed using a 4x4 affine -\n Uses affine anchor " "and estimates bounding box size") diff --git 
a/examples/04_manipulating_images/plot_roi_extraction.py b/examples/04_manipulating_images/plot_roi_extraction.py index c5b0b730f6..1bd1c54300 100644 --- a/examples/04_manipulating_images/plot_roi_extraction.py +++ b/examples/04_manipulating_images/plot_roi_extraction.py @@ -115,8 +115,9 @@ # setting, machine-learning algorithms can perform poorly due to the so-called # curse of dimensionality. However, simple means from the realms of classical # statistics can help reducing the number of voxels. +from nilearn.image import get_data -fmri_data = fmri_img.get_data() +fmri_data = get_data(fmri_img) # number of voxels being x*y*z, samples in 4th dimension print(fmri_data.shape) @@ -211,7 +212,7 @@ # numbers to boolean type from nilearn.image import load_img -vt = load_img(mask_vt_filename).get_data().astype(bool) +vt = get_data(load_img(mask_vt_filename)).astype(bool) # We can then use a logical "and" operation - numpy.logical_and - to keep only # voxels that have been selected in both masks. In neuroimaging jargon, this diff --git a/examples/05_advanced/plot_haxby_mass_univariate.py b/examples/05_advanced/plot_haxby_mass_univariate.py index 047648ed4e..31da7396f8 100644 --- a/examples/05_advanced/plot_haxby_mass_univariate.py +++ b/examples/05_advanced/plot_haxby_mass_univariate.py @@ -122,6 +122,8 @@ # Use the fmri mean image as a surrogate of anatomical data from nilearn import image +from nilearn.image import get_data + mean_fmri_img = image.mean_img(func_filename) threshold = -np.log10(0.1) # 10% corrected @@ -135,7 +137,7 @@ display_mode='z', cut_coords=[-1, ], vmax=vmax) -neg_log_pvals_bonferroni_data = neg_log_pvals_bonferroni_unmasked.get_data() +neg_log_pvals_bonferroni_data = get_data(neg_log_pvals_bonferroni_unmasked) n_detections = (neg_log_pvals_bonferroni_data > threshold).sum() title = ('Negative $\log_{10}$ p-values' '\n(Parametric two-sided F-test' diff --git a/examples/05_advanced/plot_localizer_mass_univariate_methods.py b/examples/05_advanced/plot_localizer_mass_univariate_methods.py index 12531abf6c..1d73040557 100644 --- a/examples/05_advanced/plot_localizer_mass_univariate_methods.py +++ b/examples/05_advanced/plot_localizer_mass_univariate_methods.py @@ -22,6 +22,7 @@ from nilearn import datasets from nilearn.input_data import NiftiMasker from nilearn.mass_univariate import permuted_ols +from nilearn.image import get_data ############################################################################## # Load Localizer contrast @@ -93,7 +94,7 @@ display_mode='z', cut_coords=[z_slice], figure=fig, vmax=vmax, black_bg=True) -n_detections = (neg_log_pvals_anova_unmasked.get_data() > threshold).sum() +n_detections = (get_data(neg_log_pvals_anova_unmasked) > threshold).sum() title = ('Negative $\log_{10}$ p-values' '\n(Parametric + Bonferroni correction)' '\n%d detections') % n_detections @@ -108,7 +109,7 @@ display_mode='z', cut_coords=[z_slice], figure=fig, vmax=vmax, black_bg=True) -n_detections = (neg_log_pvals_permuted_ols_unmasked.get_data() +n_detections = (get_data(neg_log_pvals_permuted_ols_unmasked) > threshold).sum() title = ('Negative $\log_{10}$ p-values' '\n(Non-parametric + max-type correction)' diff --git a/examples/05_advanced/plot_localizer_simple_analysis.py b/examples/05_advanced/plot_localizer_simple_analysis.py index 12b1483440..f8ca0c7d0a 100644 --- a/examples/05_advanced/plot_localizer_simple_analysis.py +++ b/examples/05_advanced/plot_localizer_simple_analysis.py @@ -18,6 +18,7 @@ import matplotlib.pyplot as plt from nilearn import datasets from 
nilearn.input_data import NiftiMasker +from nilearn.image import get_data ############################################################################ @@ -65,7 +66,7 @@ display_mode='z', cut_coords=[z_slice], figure=fig) -masked_pvals = np.ma.masked_less(neg_log_pvals_anova_unmasked.get_data(), +masked_pvals = np.ma.masked_less(get_data(neg_log_pvals_anova_unmasked), threshold) title = ('Negative $\log_{10}$ p-values' diff --git a/examples/05_advanced/plot_neurovault_meta_analysis.py b/examples/05_advanced/plot_neurovault_meta_analysis.py index 399ad77f3a..945361a5a8 100644 --- a/examples/05_advanced/plot_neurovault_meta_analysis.py +++ b/examples/05_advanced/plot_neurovault_meta_analysis.py @@ -15,7 +15,7 @@ from nilearn.datasets import fetch_neurovault_ids from nilearn import plotting -from nilearn.image import new_img_like, load_img, math_img +from nilearn.image import new_img_like, load_img, math_img, get_data ###################################################################### @@ -88,7 +88,7 @@ def t_to_z(t_scores, deg_of_freedom): # Convert data, create new image. z_img = new_img_like( - t_img, t_to_z(t_img.get_data(), deg_of_freedom=deg_of_freedom)) + t_img, t_to_z(get_data(t_img), deg_of_freedom=deg_of_freedom)) z_imgs.append(z_img) diff --git a/nilearn/_utils/ndimage.py b/nilearn/_utils/ndimage.py index 237b04417e..fff3befc33 100644 --- a/nilearn/_utils/ndimage.py +++ b/nilearn/_utils/ndimage.py @@ -39,8 +39,8 @@ def largest_connected_component(volume): is done inplace to avoid big-endian issues with scipy ndimage module. """ - if hasattr(volume, "get_data") \ - or isinstance(volume, _basestring): + if (hasattr(volume, "get_data") or hasattr( + volume, "get_fdata") or isinstance(volume, _basestring)): raise ValueError('Please enter a valid numpy array. For images use\ largest_connected_component_img') # Get the new byteorder to handle issues like "Big-endian buffer not diff --git a/nilearn/_utils/niimg.py b/nilearn/_utils/niimg.py index 64c62221a4..ebadcd0878 100644 --- a/nilearn/_utils/niimg.py +++ b/nilearn/_utils/niimg.py @@ -14,6 +14,17 @@ from .compat import _basestring +def _get_data(img): + # copy-pasted from https://github.com/nipy/nibabel/blob/de44a105c1267b07ef9e28f6c35b31f851d5a005/nibabel/dataobj_images.py#L204 + # get_data is removed from nibabel because: + # see https://github.com/nipy/nibabel/wiki/BIAP8 + if img._data_cache is not None: + return img._data_cache + data = np.asanyarray(img._dataobj) + img._data_cache = data + return data + + def _safe_get_data(img, ensure_finite=False): """ Get the data in the image without having a side effect on the Nifti1Image object @@ -30,7 +41,7 @@ def _safe_get_data(img, ensure_finite=False): Returns ------- data: numpy array - get_data() return from Nifti image. + nilearn.image.get_data return from Nifti image. """ if hasattr(img, '_data_cache') and img._data_cache is None: # By loading directly dataobj, we prevent caching if the data is @@ -40,7 +51,7 @@ def _safe_get_data(img, ensure_finite=False): # that's why we invoke a forced call to the garbage collector gc.collect() - data = img.get_data() + data = _get_data(img) if ensure_finite: non_finite_mask = np.logical_not(np.isfinite(data)) if non_finite_mask.sum() > 0: # any non_finite_mask values? 
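Editorial aside, not part of the diff above: `_get_data` deliberately keeps the old nibabel `get_data` semantics (dtype-preserving, cached) rather than delegating to `get_fdata`, which always casts to floating point; that is the BIAP8 rationale cited in the comment. A runnable sketch of the difference, using only public nibabel API::

    import numpy as np
    import nibabel

    img = nibabel.Nifti1Image(
        np.arange(8, dtype=np.int16).reshape(2, 2, 2), np.eye(4))
    # get_fdata always returns floating-point data (float64 by default) ...
    assert img.get_fdata().dtype == np.float64
    # ... while np.asanyarray(img.dataobj), the core of _get_data above,
    # preserves the stored dtype, matching the deprecated get_data().
    assert np.asanyarray(img.dataobj).dtype == np.int16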
@@ -113,16 +124,16 @@ def load_niimg(niimg, dtype=None):
                         " not compatible with nibabel format:\n"
                         + short_repr(niimg))
 
-    dtype = _get_target_dtype(niimg.get_data().dtype, dtype)
+    dtype = _get_target_dtype(_get_data(niimg).dtype, dtype)
 
     if dtype is not None:
         # Copyheader and set dtype in header if header exists
         if niimg.header is not None:
-            niimg = new_img_like(niimg, niimg.get_data().astype(dtype),
+            niimg = new_img_like(niimg, _get_data(niimg).astype(dtype),
                                  niimg.affine, copy_header=True)
             niimg.header.set_data_dtype(dtype)
         else:
-            niimg = new_img_like(niimg, niimg.get_data().astype(dtype),
+            niimg = new_img_like(niimg, _get_data(niimg).astype(dtype),
                                  niimg.affine)
 
     return niimg
diff --git a/nilearn/_utils/niimg_conversions.py b/nilearn/_utils/niimg_conversions.py
index 3ecf93a159..1466950dcb 100644
--- a/nilearn/_utils/niimg_conversions.py
+++ b/nilearn/_utils/niimg_conversions.py
@@ -18,6 +18,7 @@
 
 from .compat import _basestring, izip
 from .exceptions import DimensionError
+from .niimg import _get_data
 
 
 def _check_fov(img, affine, shape):
@@ -72,7 +73,7 @@ def _index_img(img, index):
     """Helper function for check_niimg_4d."""
     return new_img_like(
-        img, img.get_data()[:, :, :, index], img.affine,
+        img, _get_data(img)[:, :, :, index], img.affine,
         copy_header=True)
 
@@ -178,8 +179,8 @@
         If niimg is a string, consider it as a path to Nifti image and
         call nibabel.load on it. The '~' symbol is expanded to the user home
         folder.
-        If it is an object, check if the get_data() method
-        and affine attribute are present, raise TypeError otherwise.
+        If it is an object, check if the affine attribute is present and that
+        nilearn.image.get_data returns a result, raise TypeError otherwise.
 
     ensure_ndim: integer {3, 4}, optional
         Indicate the dimensionality of the expected niimg. An
@@ -208,7 +209,8 @@
     -------
     result: 3D/4D Niimg-like object
         Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed
-        that the returned object has get_data() method and affine attribute.
+        that the returned object has an affine attribute and that its data can
+        be retrieved with nilearn.image.get_data.
 
     Notes
     -----
@@ -266,7 +268,7 @@
             affine = niimg.affine
             niimg = new_img_like(niimg, data[:, :, :, 0], affine)
 
     if atleast_4d and len(niimg.shape) == 3:
-        data = niimg.get_data().view()
+        data = _get_data(niimg).view()
         data.shape = data.shape + (1, )
         niimg = new_img_like(niimg, data, niimg.affine)
@@ -286,8 +288,9 @@
     niimg: Niimg-like object
         See http://nilearn.github.io/manipulating_images/input_output.html
         If niimg is a string, consider it as a path to Nifti image and
-        call nibabel.load on it. If it is an object, check if the get_data()
-        method and affine attribute are present, raise TypeError otherwise.
+        call nibabel.load on it.
+        If it is an object, check if the affine attribute is present and that
+        nilearn.image.get_data returns a result, raise TypeError otherwise.
 
     dtype: {dtype, "auto"}
         Data type toward which the data should be converted. If "auto", the
@@ -298,7 +301,8 @@
     -------
     result: 3D Niimg-like object
         Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed
-        that the returned object has get_data() method and affine attribute.
+        that the returned object has an affine attribute and that its data can
+        be retrieved with nilearn.image.get_data.
 
     Notes
     -----
@@ -322,8 +326,9 @@
         If niimgs is an iterable, checks if data is really 4D. Then,
         considering that it is a list of niimg and load them one by one.
         If niimg is a string, consider it as a path to Nifti image and
-        call nibabel.load on it. If it is an object, check if the get_data()
-        method and affine attribute are present, raise an Exception otherwise.
+        call nibabel.load on it.
+        If it is an object, check if the affine attribute is present and that
+        nilearn.image.get_data returns a result, raise TypeError otherwise.
 
     dtype: {dtype, "auto"}
         Data type toward which the data should be converted. If "auto", the
@@ -445,7 +450,7 @@
         target_shape = first_niimg.shape[:3]
 
     if dtype == None:
-        dtype = first_niimg.get_data().dtype
+        dtype = _get_data(first_niimg).dtype
 
     data = np.ndarray(target_shape + (sum(lengths), ),
                       order="F", dtype=dtype)
     cur_4d_index = 0
@@ -460,7 +465,7 @@
                 nii_str = "image #" + str(index)
             print("Concatenating {0}: {1}".format(index + 1, nii_str))
 
-        data[..., cur_4d_index:cur_4d_index + size] = niimg.get_data()
+        data[..., cur_4d_index:cur_4d_index + size] = _get_data(niimg)
         cur_4d_index += size
 
     return new_img_like(first_niimg, data, first_niimg.affine, copy_header=True)
diff --git a/nilearn/_utils/param_validation.py b/nilearn/_utils/param_validation.py
index 9fd04cd946..8ebca56ad8 100644
--- a/nilearn/_utils/param_validation.py
+++ b/nilearn/_utils/param_validation.py
@@ -10,6 +10,7 @@
                                    f_classif)
 
 from .compat import _basestring
+from .niimg import _get_data
 
 
 # Volume of a standard (MNI152) brain mask in mm^3
@@ -89,7 +90,7 @@
     """
     affine = mask_img.affine
     prod_vox_dims = 1.
* np.abs(np.linalg.det(affine[:3, :3])) - return prod_vox_dims * mask_img.get_data().astype(np.bool).sum() + return prod_vox_dims * _get_data(mask_img).astype(np.bool).sum() def _adjust_screening_percentile(screening_percentile, mask_img, diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index 3897f95110..601f09920e 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -16,7 +16,7 @@ from .utils import _get_dataset_dir, _fetch_files, _get_dataset_descr from .._utils import check_niimg from .._utils.compat import _basestring -from ..image import new_img_like +from ..image import new_img_like, get_data _TALAIRACH_LEVELS = ['hemisphere', 'lobe', 'gyrus', 'tissue', 'ba'] @@ -264,7 +264,7 @@ def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, if lateralized: return Bunch(maps=atlas_img, labels=names) - atlas = atlas_img.get_data() + atlas = get_data(atlas_img) labels = np.unique(atlas) # Build a mask of both halves of the brain @@ -1080,7 +1080,7 @@ def _separate_talairach_levels(atlas_img, labels, verbose=1): level_labels = {'*': 0} for region_nb, region in enumerate(labels[:, pos]): level_labels.setdefault(region, len(level_labels)) - level_img[atlas_img.get_data() == region_nb] = level_labels[ + level_img[get_data(atlas_img) == region_nb] = level_labels[ region] # shift this level to its own octet and add it to the new image level_img <<= 8 * pos @@ -1185,7 +1185,7 @@ def fetch_atlas_talairach(level_name, data_dir=None, verbose=1): atlas_img = check_niimg(atlas_file) with open(labels_file) as fp: labels = json.load(fp)[position][1] - level_data = (atlas_img.get_data() >> 8 * position) & 255 + level_data = (get_data(atlas_img) >> 8 * position) & 255 atlas_img = new_img_like(atlas_img, data=level_data) description = _get_dataset_descr( 'talairach_atlas').decode('utf-8').format(level_name) diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 16316965c6..390b149c98 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -18,6 +18,7 @@ from .._utils.compat import BytesIO, _basestring, _urllib from .._utils.numpy_conversions import csv_to_array from .._utils.exceptions import VisibleDeprecationWarning +from nilearn.image import get_data @deprecated("fetch_haxby_simple will be removed in future releases. 
" @@ -1317,7 +1318,7 @@ def _load_mixed_gambles(zmap_imgs): mask = [] for zmap_img in zmap_imgs: # load subject data - this_X = zmap_img.get_data() + this_X = get_data(zmap_img) affine = zmap_img.affine finite_mask = np.all(np.isfinite(this_X), axis=-1) this_mask = np.logical_and(np.all(this_X != 0, axis=-1), diff --git a/nilearn/datasets/struct.py b/nilearn/datasets/struct.py index b452d589bc..18b443e073 100644 --- a/nilearn/datasets/struct.py +++ b/nilearn/datasets/struct.py @@ -12,7 +12,7 @@ from .._utils import check_niimg, niimg from .._utils.exceptions import VisibleDeprecationWarning -from ..image import new_img_like +from ..image import new_img_like, get_data _package_directory = os.path.dirname(os.path.abspath(__file__)) # Useful for the very simple examples @@ -151,7 +151,7 @@ def load_mni152_brain_mask(): """ # Load MNI template target_img = load_mni152_template() - mask_voxels = (target_img.get_data() > 0).astype(int) + mask_voxels = (get_data(target_img) > 0).astype(int) mask_img = new_img_like(target_img, mask_voxels) return mask_img diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index a2ce5b8cf1..b21b3fa041 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -25,6 +25,7 @@ from nilearn._utils.compat import _basestring, _urllib from nilearn.datasets import utils, atlas +from nilearn.image import get_data def setup_mock(): @@ -542,11 +543,11 @@ def test_fetch_atlas_talairach(data_dir=tst.tmpdir): atlas._fetch_files = _mock_talairach_fetch_files level_values = np.ones((81, 3)) * [0, 1, 2] talairach = atlas.fetch_atlas_talairach('hemisphere', data_dir=tst.tmpdir) - assert_array_equal(talairach.maps.get_data().ravel(), + assert_array_equal(get_data(talairach.maps).ravel(), level_values.T.ravel()) assert_array_equal(talairach.labels, ['Background', 'b', 'a']) talairach = atlas.fetch_atlas_talairach('ba', data_dir=tst.tmpdir) - assert_array_equal(talairach.maps.get_data().ravel(), + assert_array_equal(get_data(talairach.maps).ravel(), level_values.ravel()) assert_raises(ValueError, atlas.fetch_atlas_talairach, 'bad_level') @@ -557,7 +558,7 @@ def test_fetch_atlas_pauli_2017(): data = atlas.fetch_atlas_pauli_2017('labels', data_dir) assert_equal(len(data.labels), 16) - values = nibabel.load(data.maps).get_data() + values = get_data(nibabel.load(data.maps)) assert_equal(len(np.unique(values)), 17) data = atlas.fetch_atlas_pauli_2017('prob', data_dir) diff --git a/nilearn/decoding/space_net.py b/nilearn/decoding/space_net.py index 368513a8ef..f9dab55013 100644 --- a/nilearn/decoding/space_net.py +++ b/nilearn/decoding/space_net.py @@ -37,6 +37,7 @@ from nilearn.masking import _unmask_from_to_3d_array from .space_net_solvers import (tvl1_solver, _graph_net_logistic, _graph_net_squared_loss) +from nilearn.image import get_data def _crop_mask(mask): @@ -770,7 +771,7 @@ def fit(self, X, y): self.Xstd_ = X.std(axis=0) self.Xstd_[self.Xstd_ < 1e-8] = 1 self.mask_img_ = self.masker_.mask_img_ - self.mask_ = self.mask_img_.get_data().astype(np.bool) + self.mask_ = get_data(self.mask_img_).astype(np.bool) n_samples, _ = X.shape y = np.array(y).copy() l1_ratios = self.l1_ratios diff --git a/nilearn/decoding/tests/test_same_api.py b/nilearn/decoding/tests/test_same_api.py index 2b8e9e5b46..093f99f3dc 100644 --- a/nilearn/decoding/tests/test_same_api.py +++ b/nilearn/decoding/tests/test_same_api.py @@ -23,6 +23,7 @@ tvl1_solver) from nilearn.decoding.space_net import (BaseSpaceNet, SpaceNetClassifier, 
SpaceNetRegressor) +from nilearn.image import get_data def _make_data(rng=None, masked=False, dim=(2, 2, 2)): @@ -147,7 +148,7 @@ def test_graph_net_and_tvl1_same_for_pure_l1_logistic(max_iter=20, y = y > 0. alpha = 1. / X.shape[0] X_, mask_ = to_niimgs(X, (2, 2, 2)) - mask = mask_.get_data().astype(np.bool).ravel() + mask = get_data(mask_).astype(np.bool).ravel() # results should be exactly the same for pure lasso a = _graph_net_logistic(X, y, alpha, 1., mask=mask, diff --git a/nilearn/decoding/tests/test_space_net.py b/nilearn/decoding/tests/test_space_net.py index bd18818bb9..d418a8b118 100644 --- a/nilearn/decoding/tests/test_space_net.py +++ b/nilearn/decoding/tests/test_space_net.py @@ -18,6 +18,7 @@ from nilearn._utils.param_validation import _adjust_screening_percentile from nilearn.decoding.space_net_solvers import (_graph_net_logistic, _graph_net_squared_loss) +from nilearn.image import get_data mni152_brain_mask = ( "/usr/share/fsl/data/standard/MNI152_T1_1mm_brain_mask.nii.gz") @@ -125,7 +126,7 @@ def test_logistic_path_scores(): iris = load_iris() X, y = iris.data, iris.target _, mask = to_niimgs(X, [2, 2, 2]) - mask = mask.get_data().astype(np.bool) + mask = get_data(mask).astype(np.bool) alphas = [1., .1, .01] test_scores, best_w = logistic_path_scores( _graph_net_logistic, X, y, mask, alphas, .5, @@ -139,7 +140,7 @@ def test_squared_loss_path_scores(): iris = load_iris() X, y = iris.data, iris.target _, mask = to_niimgs(X, [2, 2, 2]) - mask = mask.get_data().astype(np.bool) + mask = get_data(mask).astype(np.bool) alphas = [1., .1, .01] test_scores, best_w = squared_loss_path_scores( _graph_net_squared_loss, X, y, mask, alphas, .5, @@ -160,7 +161,7 @@ def test_tv_regression_simple(): X += rng.randn(n, p) y = np.dot(X, W_init.ravel()) X, mask = to_niimgs(X, dim) - print("%s %s" % (X.shape, mask.get_data().sum())) + print("%s %s" % (X.shape, get_data(mask).sum())) alphas = [.1, 1.] for l1_ratio in [1.]: diff --git a/nilearn/decomposition/tests/test_canica.py b/nilearn/decomposition/tests/test_canica.py index 016ccf72c7..42646134a9 100644 --- a/nilearn/decomposition/tests/test_canica.py +++ b/nilearn/decomposition/tests/test_canica.py @@ -11,6 +11,7 @@ from nilearn.input_data import MultiNiftiMasker from nilearn.image import iter_img from nilearn.decomposition.tests.test_multi_pca import _tmp_dir +from nilearn.image import get_data def _make_data_from_components(components, affine, shape, rng=None, @@ -91,13 +92,13 @@ def test_canica_square_img(): canica = CanICA(n_components=4, random_state=rng, mask=mask_img, smoothing_fwhm=0., n_init=50) canica.fit(data) - maps = canica.components_img_.get_data() + maps = get_data(canica.components_img_) maps = np.rollaxis(maps, 3, 0) # FIXME: This could be done more efficiently, e.g. 
thanks to hungarian # Find pairs of matching components # compute the cross-correlation matrix between components - mask = mask_img.get_data() != 0 + mask = get_data(mask_img) != 0 K = np.corrcoef(components[:, mask.ravel()], maps[:, mask])[4:, :4] # K should be a permutation matrix, hence its coefficients @@ -135,7 +136,7 @@ def test_component_sign(): for _ in range(3): canica.fit(data) for mp in iter_img(canica.components_img_): - mp = mp.get_data() + mp = get_data(mp) assert_less_equal(-mp.min(), mp.max()) diff --git a/nilearn/decomposition/tests/test_dict_learning.py b/nilearn/decomposition/tests/test_dict_learning.py index 40229f0ae7..9422945756 100644 --- a/nilearn/decomposition/tests/test_dict_learning.py +++ b/nilearn/decomposition/tests/test_dict_learning.py @@ -6,7 +6,7 @@ write_tmp_imgs) from nilearn.decomposition.dict_learning import DictLearning from nilearn.decomposition.tests.test_canica import _make_canica_test_data -from nilearn.image import iter_img +from nilearn.image import iter_img, get_data from nilearn.input_data import NiftiMasker from nilearn.decomposition.tests.test_multi_pca import _tmp_dir @@ -14,7 +14,7 @@ def test_dict_learning(): data, mask_img, components, rng = _make_canica_test_data(n_subjects=8) masker = NiftiMasker(mask_img=mask_img).fit() - mask = mask_img.get_data() != 0 + mask = get_data(mask_img) != 0 flat_mask = mask.ravel() dict_init = masker.inverse_transform(components[:, flat_mask]) dict_learning = DictLearning(n_components=4, random_state=0, @@ -30,7 +30,7 @@ def test_dict_learning(): for estimator in [dict_learning, dict_learning_auto_init]: estimator.fit(data) - maps[estimator] = estimator.components_img_.get_data() + maps[estimator] = get_data(estimator.components_img_) maps[estimator] = np.reshape( np.rollaxis(maps[estimator], 3, 0)[:, mask], (4, flat_mask.sum())) @@ -74,7 +74,7 @@ def test_component_sign(): smoothing_fwhm=0., alpha=1) dict_learning.fit(data) for mp in iter_img(dict_learning.components_img_): - mp = mp.get_data() + mp = get_data(mp) assert_less_equal(np.sum(mp[mp <= 0]), np.sum(mp[mp > 0])) diff --git a/nilearn/image/__init__.py b/nilearn/image/__init__.py index 833f1f684a..d730ec74a2 100644 --- a/nilearn/image/__init__.py +++ b/nilearn/image/__init__.py @@ -6,7 +6,7 @@ coord_transform from .image import high_variance_confounds, smooth_img, crop_img, \ mean_img, swap_img_hemispheres, index_img, iter_img, threshold_img, \ - math_img, load_img, clean_img, largest_connected_component_img + math_img, load_img, clean_img, largest_connected_component_img, get_data from .image import new_img_like # imported this way to avoid circular imports from .._utils.niimg_conversions import concat_niimgs as concat_imgs from .._utils.niimg import copy_img @@ -15,5 +15,5 @@ 'smooth_img', 'crop_img', 'mean_img', 'reorder_img', 'swap_img_hemispheres', 'concat_imgs', 'copy_img', 'index_img', 'iter_img', 'new_img_like', 'threshold_img', - 'math_img', 'load_img', 'clean_img', + 'math_img', 'load_img', 'clean_img', 'get_data', 'largest_connected_component_img', 'coord_transform'] diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 61623200df..1db799bb3c 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -20,11 +20,30 @@ from .._utils import (check_niimg_4d, check_niimg_3d, check_niimg, as_ndarray, _repr_niimgs) from .._utils.niimg_conversions import _index_img, _check_same_fov -from .._utils.niimg import _safe_get_data +from .._utils.niimg import _safe_get_data, _get_data from .._utils.compat import _basestring from 
.._utils.param_validation import check_threshold +def get_data(img): + """Get the image data as a numpy array. + + Parameters + ---------- + img: Niimg-like object or iterable of Niimg-like objects + See http://nilearn.github.io/manipulating_images/input_output.html + + Returns + ------- + 3-d or 4-d numpy array depending on the shape of `img`. This function + preserves the type of the image data. If `img` is an in-memory Nifti image + it returns the image data array itself -- not a copy. + + """ + img = check_niimg(img) + return _get_data(img) + + def high_variance_confounds(imgs, n_confounds=5, percentile=2., detrend=True, mask_img=None): """ Return confounds signals extracted from input signals with highest @@ -81,7 +100,7 @@ def high_variance_confounds(imgs, n_confounds=5, percentile=2., else: # Load the data only if it doesn't need to be masked imgs = check_niimg_4d(imgs) - sigs = as_ndarray(imgs.get_data()) + sigs = as_ndarray(get_data(imgs)) # Not using apply_mask here saves memory in most cases. del imgs # help reduce memory consumption sigs = np.reshape(sigs, (-1, sigs.shape[-1])).T @@ -266,7 +285,7 @@ def smooth_img(imgs, fwhm): for img in imgs: img = check_niimg(img) affine = img.affine - filtered = _smooth_array(img.get_data(), affine, fwhm=fwhm, + filtered = _smooth_array(get_data(img), affine, fwhm=fwhm, ensure_finite=True, copy=True) ret.append(new_img_like(img, filtered, affine, copy_header=True)) @@ -308,7 +327,7 @@ def _crop_img_to(img, slices, copy=True): img = check_niimg(img) - data = img.get_data() + data = get_data(img) affine = img.affine cropped_data = data[tuple(slices)] @@ -368,7 +387,7 @@ def crop_img(img, rtol=1e-8, copy=True, pad=True, return_offset=False): """ img = check_niimg(img) - data = img.get_data() + data = get_data(img) infinity_norm = max(-data.min(), data.max()) passes_threshold = np.logical_or(data < -rtol * infinity_norm, data > rtol * infinity_norm) @@ -448,7 +467,7 @@ def _compute_mean(imgs, target_affine=None, target_affine=target_affine, target_shape=target_shape, copy=False) affine = mean_data.affine - mean_data = mean_data.get_data() + mean_data = get_data(mean_data) if smooth: nan_mask = np.isnan(mean_data) @@ -565,7 +584,7 @@ def swap_img_hemispheres(img): img = reorder_img(img) # create swapped nifti object - out_img = new_img_like(img, img.get_data()[::-1], img.affine, + out_img = new_img_like(img, get_data(img)[::-1], img.affine, copy_header=True) return out_img @@ -665,9 +684,11 @@ def new_img_like(ref_niimg, data, affine=None, copy_header=False): orig_ref_niimg = ref_niimg if (not isinstance(ref_niimg, _basestring) and not hasattr(ref_niimg, 'get_data') + and not hasattr(ref_niimg, 'get_fdata') and hasattr(ref_niimg, '__iter__')): ref_niimg = ref_niimg[0] - if not (hasattr(ref_niimg, 'get_data') + if not ((hasattr(ref_niimg, 'get_data') + or hasattr(ref_niimg, 'get_fdata')) and hasattr(ref_niimg, 'affine')): if isinstance(ref_niimg, _basestring): ref_niimg = nibabel.load(ref_niimg) @@ -972,7 +993,7 @@ def clean_img(imgs, sessions=None, detrend=True, standardize=True, if mask_img is not None: signals = masking.apply_mask(imgs_, mask_img) else: - signals = imgs_.get_data().reshape(-1, imgs_.shape[-1]).T + signals = get_data(imgs_).reshape(-1, imgs_.shape[-1]).T # Clean signal data = signal.clean( @@ -1002,8 +1023,8 @@ def load_img(img, wildcards=True, dtype=None): If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. The '~' symbol is expanded to the user home folder. 
-        If it is an object, check if get_data()
-        and affine attributes are present, raise TypeError otherwise.
+        If it is an object, check that it has an affine attribute;
+        raise a TypeError otherwise.

    wildcards: bool, optional
        Use niimg as a regular expression to get a list of matching input
@@ -1022,7 +1043,8 @@
    -------
    result: 3D/4D Niimg-like object
        Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed
-        that the returned object has get_data() and affine attributes.
+        that the returned object has an affine attribute and that
+        nilearn.image.get_data returns its data.

    """
    return check_niimg(img, wildcards=wildcards, dtype=dtype)
diff --git a/nilearn/image/resampling.py b/nilearn/image/resampling.py
index a699351341..4b1b97472d 100644
--- a/nilearn/image/resampling.py
+++ b/nilearn/image/resampling.py
@@ -16,6 +16,7 @@
 from .image import crop_img
 from .. import _utils
 from .._utils.compat import _basestring
+from .._utils.niimg import _get_data

###############################################################################
# Affine utils
@@ -207,7 +208,7 @@ def get_mask_bounds(img):

    """
    img = _utils.check_niimg_3d(img)
-    mask = _utils.numpy_conversions._asarray(img.get_data(), dtype=np.bool)
+    mask = _utils.numpy_conversions._asarray(_get_data(img), dtype=np.bool)
    affine = img.affine
    (xmin, xmax), (ymin, ymax), (zmin, zmax) = get_bounds(mask.shape, affine)
    slices = ndimage.find_objects(mask)
@@ -444,7 +445,7 @@ def resample_img(img, target_affine=None, target_shape=None,
    # We now know that some resampling must be done.
    # The value of "copy" is of no importance: output is always a separate
    # array.
-    data = img.get_data()
+    data = _get_data(img)

    # Get a bounding box for the transformed data
    # Embed target_affine in 4x4 shape if necessary
@@ -562,7 +563,7 @@
            # ensure the source image being placed isn't larger than the dest
            subset_indices = tuple(slice(0, s.stop-s.start) for s in slices)
-            resampled_data[slices] = cropped_img.get_data()[subset_indices]
+            resampled_data[slices] = _get_data(cropped_img)[subset_indices]
    else:
        # If A is diagonal, ndimage.affine_transform is clever enough to use a
        # better algorithm.
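For reference, the hunks above only swap the data access inside resample_img; its public behavior is unchanged. A minimal usage sketch, not part of the patch (the image, shapes, and target affine below are illustrative values, and get_data is the helper this patch adds):

import numpy as np
import nibabel
from nilearn.image import resample_img, get_data

# Build a small synthetic 3D image; any Niimg-like object works here.
img = nibabel.Nifti1Image(np.random.RandomState(0).rand(4, 5, 6), np.eye(4))

# Resample onto a grid twice as coarse; the output data is always a
# separate array, so the input image is never mutated.
resampled = resample_img(img, target_affine=2 * np.eye(3),
                         interpolation='nearest')
print(get_data(resampled).shape)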
@@ -706,7 +707,7 @@ def reorder_img(img, resample=None): interpolation=resample) axis_numbers = np.argmax(np.abs(A), axis=0) - data = img.get_data() + data = _get_data(img) while not np.all(np.sort(axis_numbers) == axis_numbers): first_inversion = np.argmax(np.diff(axis_numbers)<0) axis1 = first_inversion + 1 diff --git a/nilearn/image/tests/test_image.py b/nilearn/image/tests/test_image.py index fe78003965..c2681ddf9c 100644 --- a/nilearn/image/tests/test_image.py +++ b/nilearn/image/tests/test_image.py @@ -7,6 +7,8 @@ import platform import os import sys +import tempfile + import nibabel from nibabel import Nifti1Image import numpy as np @@ -21,9 +23,10 @@ from nilearn._utils import testing, niimg_conversions, data_gen from nilearn.image import new_img_like from nilearn.image import threshold_img -from nilearn.image import iter_img +from nilearn.image import iter_img, index_img from nilearn.image import math_img from nilearn.image import largest_connected_component_img +from nilearn.image import get_data try: import pandas as pd @@ -36,6 +39,25 @@ datadir = os.path.join(currdir, 'data') +def test_get_data(): + img, *_ = data_gen.generate_fake_fmri(shape=(10, 11, 12)) + data = get_data(img) + assert data.shape == img.shape + assert data is img._data_cache + mask_img = new_img_like(img, data > 0) + data = get_data(mask_img) + assert data.dtype == np.dtype('int8') + img_3d = index_img(img, 0) + with tempfile.TemporaryDirectory() as tempdir: + filename = os.path.join(tempdir, 'img_{}.nii.gz') + img_3d.to_filename(filename.format('a')) + img_3d.to_filename(filename.format('b')) + data = get_data(filename.format('a')) + assert len(data.shape) == 3 + data = get_data(filename.format('*')) + assert len(data.shape) == 4 + + def test_high_variance_confounds(): # See also test_signals.test_high_variance_confounds() # There is only tests on what is added by image.high_variance_confounds() @@ -177,7 +199,7 @@ def test_smooth_img(): # Test output equal when fwhm=None and fwhm=0 out_fwhm_none = image.smooth_img(img1, fwhm=None) out_fwhm_zero = image.smooth_img(img1, fwhm=0.) 
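The new test_get_data above pins down the contract of the public helper introduced by this patch. A minimal sketch of that contract (array values are illustrative; the behavior shown follows the get_data docstring added in nilearn/image/image.py):

import numpy as np
import nibabel
from nilearn.image import get_data

arr = np.ones((10, 11, 12), dtype=np.int8)
img = nibabel.Nifti1Image(arr, np.eye(4))

data = get_data(img)
# Unlike nibabel's get_fdata, the stored dtype is preserved.
assert data.dtype == np.dtype('int8')
# For an in-memory image, the data array itself is returned -- not a
# copy -- so in-place edits are visible through the image object.
data[0, 0, 0] = 5
assert get_data(img)[0, 0, 0] == 5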
- assert_array_equal(out_fwhm_none.get_data(), out_fwhm_zero.get_data()) + assert_array_equal(get_data(out_fwhm_none), get_data(out_fwhm_zero)) data1 = np.zeros((10, 11, 12)) data1[2:4, 1:5, 3:6] = 1 @@ -200,7 +222,7 @@ def test__crop_img_to(): new_origin = np.array((4, 3, 2)) * np.array((2, 1, 3)) # check that correct part was extracted: - assert_true((cropped_img.get_data() == 1).all()) + assert_true((get_data(cropped_img) == 1).all()) assert_true(cropped_img.shape == (2, 4, 3)) # check that affine was adjusted correctly @@ -208,12 +230,12 @@ def test__crop_img_to(): # check that data was really not copied data[2:4, 1:5, 3:6] = 2 - assert_true((cropped_img.get_data() == 2).all()) + assert_true((get_data(cropped_img) == 2).all()) # check that copying works copied_cropped_img = image._crop_img_to(img, slices) data[2:4, 1:5, 3:6] = 1 - assert_true((copied_cropped_img.get_data() == 2).all()) + assert_true((get_data(copied_cropped_img) == 2).all()) def test_crop_img(): @@ -229,7 +251,7 @@ def test_crop_img(): # check that correct part was extracted: # This also corrects for padding - assert_true((cropped_img.get_data()[1:-1, 1:-1, 1:-1] == 1).all()) + assert_true((get_data(cropped_img)[1:-1, 1:-1, 1:-1] == 1).all()) assert_true(cropped_img.shape == (2 + 2, 4 + 2, 3 + 2)) @@ -268,7 +290,7 @@ def test_mean_img(): arrays = list() # Ground-truth: for img in imgs: - img = img.get_data() + img = get_data(img) if img.ndim == 4: img = np.mean(img, axis=-1) arrays.append(img) @@ -276,20 +298,20 @@ def test_mean_img(): mean_img = image.mean_img(imgs) assert_array_equal(mean_img.affine, affine) - assert_array_equal(mean_img.get_data(), truth) + assert_array_equal(get_data(mean_img), truth) # Test with files with testing.write_tmp_imgs(*imgs) as imgs: mean_img = image.mean_img(imgs) assert_array_equal(mean_img.affine, affine) if X64: - assert_array_equal(mean_img.get_data(), truth) + assert_array_equal(get_data(mean_img), truth) else: # We don't really understand but arrays are not # exactly equal on 32bit. 
Given that you can not do # much real world data analysis with nilearn on a # 32bit machine it is not worth investigating more - assert_allclose(mean_img.get_data(), truth, + assert_allclose(get_data(mean_img), truth, rtol=np.finfo(truth.dtype).resolution, atol=0) @@ -307,8 +329,8 @@ def test_mean_img_resample(): target_affine=target_affine) resampled_mean_image = resampling.resample_img(mean_img, target_affine=target_affine) - assert_array_equal(resampled_mean_image.get_data(), - mean_img_with_resampling.get_data()) + assert_array_equal(get_data(resampled_mean_image), + get_data(mean_img_with_resampling)) assert_array_equal(resampled_mean_image.affine, mean_img_with_resampling.affine) assert_array_equal(mean_img_with_resampling.affine, target_affine) @@ -319,15 +341,15 @@ def test_swap_img_hemispheres(): data = np.random.randn(4, 5, 7) data_img = nibabel.Nifti1Image(data, np.eye(4)) image.swap_img_hemispheres(data_img) - np.testing.assert_array_equal(data_img.get_data(), data) + np.testing.assert_array_equal(get_data(data_img), data) # swapping operations work np.testing.assert_array_equal( # one turn - image.swap_img_hemispheres(data_img).get_data(), + get_data(image.swap_img_hemispheres(data_img)), data[::-1]) np.testing.assert_array_equal( # two turns -> back to original data - image.swap_img_hemispheres( - image.swap_img_hemispheres(data_img)).get_data(), + get_data(image.swap_img_hemispheres( + image.swap_img_hemispheres(data_img))), data) @@ -355,8 +377,8 @@ def test_index_img(): (np.arange(fourth_dim_size) % 3) == 1]) for i in tested_indices: this_img = image.index_img(img_4d, i) - expected_data_3d = img_4d.get_data()[..., i] - assert_array_equal(this_img.get_data(), + expected_data_3d = get_data(img_4d)[..., i] + assert_array_equal(get_data(this_img), expected_data_3d) assert_array_equal(this_img.affine, img_4d.affine) @@ -389,8 +411,8 @@ def test_pd_index_img(): np_index_img = image.index_img(img_4d, arr) pd_index_img = image.index_img(img_4d, df) - assert_array_equal(np_index_img.get_data(), - pd_index_img.get_data()) + assert_array_equal(get_data(np_index_img), + get_data(pd_index_img)) def test_iter_img(): @@ -408,15 +430,15 @@ def test_iter_img(): img_4d, _ = data_gen.generate_fake_fmri(affine=affine) for i, img in enumerate(image.iter_img(img_4d)): - expected_data_3d = img_4d.get_data()[..., i] - assert_array_equal(img.get_data(), + expected_data_3d = get_data(img_4d)[..., i] + assert_array_equal(get_data(img), expected_data_3d) assert_array_equal(img.affine, img_4d.affine) with testing.write_tmp_imgs(img_4d) as img_4d_filename: for i, img in enumerate(image.iter_img(img_4d_filename)): - expected_data_3d = img_4d.get_data()[..., i] - assert_array_equal(img.get_data(), + expected_data_3d = get_data(img_4d)[..., i] + assert_array_equal(get_data(img), expected_data_3d) assert_array_equal(img.affine, img_4d.affine) # enables to delete "img_4d_filename" on windows @@ -424,15 +446,15 @@ def test_iter_img(): img_3d_list = list(image.iter_img(img_4d)) for i, img in enumerate(image.iter_img(img_3d_list)): - expected_data_3d = img_4d.get_data()[..., i] - assert_array_equal(img.get_data(), + expected_data_3d = get_data(img_4d)[..., i] + assert_array_equal(get_data(img), expected_data_3d) assert_array_equal(img.affine, img_4d.affine) with testing.write_tmp_imgs(*img_3d_list) as img_3d_filenames: for i, img in enumerate(image.iter_img(img_3d_filenames)): - expected_data_3d = img_4d.get_data()[..., i] - assert_array_equal(img.get_data(), + expected_data_3d = get_data(img_4d)[..., i] + 
assert_array_equal(get_data(img), expected_data_3d) assert_array_equal(img.affine, img_4d.affine) # enables to delete "img_3d_filename" on windows @@ -446,7 +468,7 @@ def test_new_img_like_mgz(): """ ref_img = nibabel.load(os.path.join(datadir, 'test.mgz')) - data = np.ones(ref_img.get_data().shape, dtype=np.bool) + data = np.ones(get_data(ref_img).shape, dtype=np.bool) affine = ref_img.affine new_img_like(ref_img, data, affine, copy_header=False) @@ -458,12 +480,12 @@ def test_new_img_like(): affine = np.diag((4, 3, 2, 1)) img = nibabel.Nifti1Image(data, affine=affine) img2 = new_img_like([img, ], data) - np.testing.assert_array_equal(img.get_data(), img2.get_data()) + np.testing.assert_array_equal(get_data(img), get_data(img2)) # test_new_img_like_with_nifti2image_copy_header img_nifti2 = nibabel.Nifti2Image(data, affine=affine) img2_nifti2 = new_img_like([img_nifti2, ], data, copy_header=True) - np.testing.assert_array_equal(img_nifti2.get_data(), img2_nifti2.get_data()) + np.testing.assert_array_equal(get_data(img_nifti2), get_data(img2_nifti2)) def test_validity_threshold_value_in_threshold_img(): @@ -508,23 +530,23 @@ def test_threshold_img_copy(): thresholded = threshold_img(img_ones, 2) # threshold 2 > 1 # Original img_ones should have all ones. - assert_array_equal(img_ones.get_data(), np.ones((10, 10, 10, 10))) + assert_array_equal(get_data(img_ones), np.ones((10, 10, 10, 10))) # Thresholded should have all zeros. - assert_array_equal(thresholded.get_data(), np.zeros((10, 10, 10, 10))) + assert_array_equal(get_data(thresholded), np.zeros((10, 10, 10, 10))) # Check that not copying does mutate. img_to_mutate = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) thresholded = threshold_img(img_to_mutate, 2, copy=False) # Check that original mutates - assert_array_equal(img_to_mutate.get_data(), np.zeros((10, 10, 10, 10))) + assert_array_equal(get_data(img_to_mutate), np.zeros((10, 10, 10, 10))) # And that returned value is also thresholded. 
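A standalone sketch of the copy semantics this test pins down (the threshold and shapes are illustrative; threshold_img and get_data are the public nilearn.image functions used in the test itself):

import numpy as np
from nibabel import Nifti1Image
from nilearn.image import threshold_img, get_data

img = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4))

# Default copy=True: the input image is left untouched.
out = threshold_img(img, 2)
assert get_data(img).max() == 1

# copy=False: a threshold of 2 zeroes out the all-ones input in place,
# and the returned image shares that thresholded data.
out = threshold_img(img, 2, copy=False)
assert get_data(img).max() == 0
assert get_data(out).max() == 0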
- assert_array_equal(img_to_mutate.get_data(), thresholded.get_data()) + assert_array_equal(get_data(img_to_mutate), get_data(thresholded)) def test_isnan_threshold_img_data(): shape = (10, 10, 10) maps, _ = data_gen.generate_maps(shape, n_regions=2) - data = maps.get_data() + data = get_data(maps) data[:, :, 0] = np.nan maps_img = nibabel.Nifti1Image(data, np.eye(4)) @@ -565,8 +587,8 @@ def test_math_img(): with testing.write_tmp_imgs(img1, img2, create_files=create_files) as imgs: result = math_img(formula, img1=imgs[0], img2=imgs[1]) - assert_array_equal(result.get_data(), - expected_result.get_data()) + assert_array_equal(get_data(result), + get_data(expected_result)) assert_array_equal(result.affine, expected_result.affine) assert_equal(result.shape, expected_result.shape) @@ -587,7 +609,7 @@ def test_clean_img(): data_flat_ = signal.clean( data_flat, detrend=True, standardize=False, low_pass=0.1, t_r=1.0) - np.testing.assert_almost_equal(data_img_.get_data().T.reshape(100, -1), + np.testing.assert_almost_equal(get_data(data_img_).T.reshape(100, -1), data_flat_) # if NANs data[:, 9, 9] = np.nan @@ -595,7 +617,7 @@ def test_clean_img(): data[:, 5, 5] = np.inf nan_img = nibabel.Nifti1Image(data, np.eye(4)) clean_im = image.clean_img(nan_img, ensure_finite=True) - assert_true(np.any(np.isfinite(clean_im.get_data())), True) + assert_true(np.any(np.isfinite(get_data(clean_im))), True) # test_clean_img_passing_nifti2image data_img_nifti2 = nibabel.Nifti2Image(data, np.eye(4)) @@ -609,8 +631,8 @@ def test_clean_img(): # Checks that output with full mask and without is equal data_img_ = image.clean_img(img) - np.testing.assert_almost_equal(data_img_.get_data(), - data_img_mask_.get_data()) + np.testing.assert_almost_equal(get_data(data_img_), + get_data(data_img_mask_)) def test_largest_cc_img(): @@ -649,9 +671,9 @@ def test_largest_cc_img(): assert_raises(DimensionError, largest_connected_component_img, img_4D) # tests adapted to non-native endian data dtype - img1_change_dtype = nibabel.Nifti1Image(img1.get_data().astype('>f8'), + img1_change_dtype = nibabel.Nifti1Image(get_data(img1).astype('>f8'), affine=img1.affine) - img2_change_dtype = nibabel.Nifti1Image(img2.get_data().astype('>f8'), + img2_change_dtype = nibabel.Nifti1Image(get_data(img2).astype('>f8'), affine=img2.affine) for create_files in (False, True): @@ -673,7 +695,7 @@ def test_largest_cc_img(): out_native = largest_connected_component_img(img1) out_non_native = largest_connected_component_img(img1_change_dtype) - np.testing.assert_equal(out_native.get_data(), out_non_native.get_data()) + np.testing.assert_equal(get_data(out_native), get_data(out_non_native)) def test_new_img_like_mgh_image(): diff --git a/nilearn/image/tests/test_resampling.py b/nilearn/image/tests/test_resampling.py index 9cff7ed923..950886caaa 100644 --- a/nilearn/image/tests/test_resampling.py +++ b/nilearn/image/tests/test_resampling.py @@ -20,6 +20,7 @@ from nilearn.image.resampling import BoundingBoxError from nilearn.image.image import _pad_array, crop_img from nilearn._utils import testing +from nilearn.image import get_data ############################################################################### @@ -49,7 +50,7 @@ def test_identity_resample(): affine[:3, -1] = 0.5 * np.array(shape[:3]) rot_img = resample_img(Nifti1Image(data, affine), target_affine=affine, interpolation='nearest') - np.testing.assert_almost_equal(data, rot_img.get_data()) + np.testing.assert_almost_equal(data, get_data(rot_img)) # Smoke-test with a list affine rot_img = 
resample_img(Nifti1Image(data, affine),
                           target_affine=affine.tolist(),
@@ -58,7 +59,7 @@
    rot_img = resample_img(Nifti1Image(data, affine),
                           target_affine=affine[:3, :3],
                           interpolation='nearest')
-    np.testing.assert_almost_equal(data, rot_img.get_data())
+    np.testing.assert_almost_equal(data, get_data(rot_img))


# Test with non native endian data
@@ -67,14 +68,14 @@
        rot_img = resample_img(Nifti1Image(data.astype('>f8'), affine),
                               target_affine=affine.tolist(),
                               interpolation=interpolation)
-        np.testing.assert_almost_equal(data, rot_img.get_data())
+        np.testing.assert_almost_equal(data, get_data(rot_img))

    # Test with little endian data ('<f8')
@@ -628,11 +631,11 @@ def test_reorder_img():
        new_affine = from_matrix_vector(rot, b)
        rot_img = resample_img(ref_img, target_affine=new_affine)
        np.testing.assert_array_equal(rot_img.affine, new_affine)
-        np.testing.assert_array_equal(rot_img.get_data().shape, shape)
+        np.testing.assert_array_equal(get_data(rot_img).shape, shape)
        reordered_img = reorder_img(rot_img)
        np.testing.assert_array_equal(reordered_img.affine[:3, :3],
                                      np.eye(3))
-        np.testing.assert_almost_equal(reordered_img.get_data(),
+        np.testing.assert_almost_equal(get_data(reordered_img),
                                       data)

    # Create a non-diagonal affine, and check that we raise a sensible
@@ -651,8 +654,8 @@
            resampled_img = resample_img(ref_img,
                                         target_affine=reordered_img.affine,
                                         interpolation=interpolation)
-            np.testing.assert_array_equal(reordered_img.get_data(),
-                                          resampled_img.get_data())
+            np.testing.assert_array_equal(get_data(reordered_img),
+                                          get_data(resampled_img))

    # Make sure invalid resample argument is included in the error message
    interpolation = 'an_invalid_interpolation'
@@ -676,8 +679,8 @@
        img2 = reorder_img(img)
        # Check that img has not been changed
        np.testing.assert_array_equal(img.affine, orig_img.affine)
-        np.testing.assert_array_equal(img.get_data(),
-                                      orig_img.get_data())
+        np.testing.assert_array_equal(get_data(img),
+                                      get_data(orig_img))
        # Test that the affine is indeed diagonal:
        np.testing.assert_array_equal(img2.affine[:3, :3],
                                      np.diag(np.diag(img2.affine[:3, :3])))
@@ -706,7 +709,7 @@ def _get_resampled_img(dtype):

    img_1 = _get_resampled_img('f8')

-    np.testing.assert_equal(img_1.get_data(), img_2.get_data())
+    np.testing.assert_equal(get_data(img_1), get_data(img_2))


def test_reorder_img_mirror():
diff --git a/nilearn/input_data/multi_nifti_masker.py b/nilearn/input_data/multi_nifti_masker.py
index b70f44297e..5aa65bb826 100644
--- a/nilearn/input_data/multi_nifti_masker.py
+++ b/nilearn/input_data/multi_nifti_masker.py
@@ -18,6 +18,7 @@
 from .._utils.compat import _basestring, izip
 from .._utils.niimg_conversions import _iter_check_niimg
 from .nifti_masker import NiftiMasker, filter_and_mask
+from nilearn.image import get_data


class MultiNiftiMasker(NiftiMasker, CacheMixin):
@@ -227,7 +228,7 @@ def fit(self, imgs=None, y=None):
        else:
            self.affine_ = self.mask_img_.affine
        # Load data in memory
-        self.mask_img_.get_data()
+        get_data(self.mask_img_)
        return self

    def transform_imgs(self, imgs_list, confounds=None, copy=True, n_jobs=1):
diff --git a/nilearn/input_data/nifti_maps_masker.py b/nilearn/input_data/nifti_maps_masker.py
index c9ff8525ab..08cf949ef0 100644
--- a/nilearn/input_data/nifti_maps_masker.py
+++ b/nilearn/input_data/nifti_maps_masker.py
@@ -11,6 +11,7 @@
 from .._utils.niimg_conversions import _check_same_fov
 from ..
import image from .base_masker import filter_and_extract, BaseMasker +from nilearn.image import get_data class _ExtractionFunctor(object): @@ -291,7 +292,7 @@ def transform_single_imgs(self, imgs, confounds=None): # Check if there is an overlap. # If float, we set low values to 0 - data = self._resampled_maps_img_.get_data() + data = get_data(self._resampled_maps_img_) dtype = data.dtype if dtype.kind == 'f': data[data < np.finfo(dtype).eps] = 0. diff --git a/nilearn/input_data/nifti_masker.py b/nilearn/input_data/nifti_masker.py index cd1a29fa36..7120fb333d 100644 --- a/nilearn/input_data/nifti_masker.py +++ b/nilearn/input_data/nifti_masker.py @@ -18,6 +18,7 @@ from .._utils.class_inspect import get_params from .._utils.niimg import img_data_dtype from .._utils.niimg_conversions import _check_same_fov +from nilearn.image import get_data class _ExtractionFunctor(object): @@ -344,7 +345,7 @@ def fit(self, imgs=None, y=None): else: # resample image to mask affine self.affine_ = self.mask_img_.affine # Load data in memory - self.mask_img_.get_data() + get_data(self.mask_img_) if self.verbose > 10: print("[%s.fit] Finished fit" % self.__class__.__name__) diff --git a/nilearn/input_data/tests/test_multi_nifti_masker.py b/nilearn/input_data/tests/test_multi_nifti_masker.py index ef46fe0814..7953fb9f58 100644 --- a/nilearn/input_data/tests/test_multi_nifti_masker.py +++ b/nilearn/input_data/tests/test_multi_nifti_masker.py @@ -19,6 +19,7 @@ from nilearn._utils.exceptions import DimensionError from nilearn._utils.testing import assert_raises_regex, write_tmp_imgs from nilearn.input_data.multi_nifti_masker import MultiNiftiMasker +from nilearn.image import get_data def test_auto_mask(): @@ -41,7 +42,7 @@ def test_auto_mask(): img2 = Nifti1Image(data2, np.eye(4)) masker.fit([[img, img2]]) - assert_array_equal(masker.mask_img_.get_data(), + assert_array_equal(get_data(masker.mask_img_), np.logical_or(data, data2)) # Smoke test the transform masker.transform([[img, ]]) @@ -67,7 +68,7 @@ def test_nan(): img = Nifti1Image(data, np.eye(4)) masker = MultiNiftiMasker(mask_args=dict(opening=0)) masker.fit([img]) - mask = masker.mask_img_.get_data() + mask = get_data(masker.mask_img_) assert_true(mask[1:-1, 1:-1, 1:-1].all()) assert_false(mask[0].any()) assert_false(mask[:, 0].any()) @@ -126,7 +127,7 @@ def test_joblib_cache(): masker = MultiNiftiMasker(mask_img=filename) masker.fit() mask_hash = hash(masker.mask_img_) - masker.mask_img_.get_data() + get_data(masker.mask_img_) assert_true(mask_hash == hash(masker.mask_img_)) # enables to delete "filename" on windows del masker @@ -182,8 +183,8 @@ def test_compute_multi_gray_matter_mask(): mask_ref = np.zeros((9, 9, 5)) mask_ref[2:7, 2:7, 2] = 1 - np.testing.assert_array_equal(mask.get_data(), mask_ref) - np.testing.assert_array_equal(mask2.get_data(), mask_ref) + np.testing.assert_array_equal(get_data(mask), mask_ref) + np.testing.assert_array_equal(get_data(mask2), mask_ref) def test_dtype(): diff --git a/nilearn/input_data/tests/test_nifti_labels_masker.py b/nilearn/input_data/tests/test_nifti_labels_masker.py index fe936028a6..9a34dfbb6f 100644 --- a/nilearn/input_data/tests/test_nifti_labels_masker.py +++ b/nilearn/input_data/tests/test_nifti_labels_masker.py @@ -14,6 +14,7 @@ from nilearn._utils import testing, as_ndarray, data_gen from nilearn._utils.exceptions import DimensionError from nilearn._utils.testing import assert_less +from nilearn.image import get_data def generate_random_img(shape, length=1, affine=np.eye(4), @@ -113,7 +114,7 @@ def 
test_nifti_labels_masker_with_nans_and_infs(): affine=np.eye(4), n_regions=n_regions) # nans - mask_data = mask_img.get_data() + mask_data = get_data(mask_img) mask_data[:, :, 7] = np.nan mask_data[:, :, 4] = np.inf mask_img = nibabel.Nifti1Image(mask_data, np.eye(4)) @@ -206,7 +207,7 @@ def test_nifti_labels_masker_resampling(): masker.labels_img_.affine) assert_equal(masker.mask_img_.shape, masker.labels_img_.shape[:3]) - uniq_labels = np.unique(masker.labels_img_.get_data()) + uniq_labels = np.unique(get_data(masker.labels_img_)) assert_equal(uniq_labels[0], 0) assert_equal(len(uniq_labels) - 1, n_regions) @@ -254,7 +255,7 @@ def test_nifti_labels_masker_resampling(): resampling_target=resampling_target) transformed = masker.fit_transform(fmri_img) resampled_labels_img = masker._resampled_labels_img_ - n_resampled_labels = len(np.unique(resampled_labels_img.get_data())) + n_resampled_labels = len(np.unique(get_data(resampled_labels_img))) assert_equal(n_resampled_labels - 1, transformed.shape[1]) # inverse transform compressed_img = masker.inverse_transform(transformed) @@ -264,8 +265,8 @@ def test_nifti_labels_masker_resampling(): transformed2 = masker.fit_transform(fmri_img) # inverse transform again compressed_img2 = masker.inverse_transform(transformed2) - np.testing.assert_array_equal(compressed_img.get_data(), - compressed_img2.get_data()) + np.testing.assert_array_equal(get_data(compressed_img), + get_data(compressed_img2)) def test_standardization(): diff --git a/nilearn/input_data/tests/test_nifti_maps_masker.py b/nilearn/input_data/tests/test_nifti_maps_masker.py index e931220999..2ca18e2a54 100644 --- a/nilearn/input_data/tests/test_nifti_maps_masker.py +++ b/nilearn/input_data/tests/test_nifti_maps_masker.py @@ -14,6 +14,7 @@ from nilearn._utils import testing, as_ndarray, data_gen from nilearn._utils.exceptions import DimensionError from nilearn._utils.testing import assert_less, assert_raises_regex +from nilearn.image import get_data def generate_random_img(shape, length=1, affine=np.eye(4), @@ -128,8 +129,8 @@ def test_nifti_maps_masker_with_nans(): affine=np.eye(4)) # nans - maps_data = maps_img.get_data() - mask_data = mask_img.get_data() + maps_data = get_data(maps_img) + mask_data = get_data(mask_img) maps_data[:, 9, 9] = np.nan maps_data[:, 5, 5] = np.inf diff --git a/nilearn/input_data/tests/test_nifti_masker.py b/nilearn/input_data/tests/test_nifti_masker.py index 94eb09aa54..6596e52bdc 100644 --- a/nilearn/input_data/tests/test_nifti_masker.py +++ b/nilearn/input_data/tests/test_nifti_masker.py @@ -24,6 +24,7 @@ from nilearn._utils.testing import assert_raises_regex from nilearn.image import index_img from nilearn.input_data.nifti_masker import NiftiMasker, filter_and_mask +from nilearn.image import get_data def test_auto_mask(): @@ -97,7 +98,7 @@ def test_nan(): img = Nifti1Image(data, np.eye(4)) masker = NiftiMasker(mask_args=dict(opening=0)) masker.fit(img) - mask = masker.mask_img_.get_data() + mask = get_data(masker.mask_img_) assert_true(mask[1:-1, 1:-1, 1:-1].all()) assert_false(mask[0].any()) assert_false(mask[:, 0].any()) @@ -117,7 +118,7 @@ def test_matrix_orientation(): masker = NiftiMasker(mask_img=mask, standardize=True, detrend=True) timeseries = masker.fit_transform(fmri) assert(timeseries.shape[0] == fmri.shape[3]) - assert(timeseries.shape[1] == mask.get_data().sum()) + assert(timeseries.shape[1] == get_data(mask).sum()) std = timeseries.std(axis=0) assert(std.shape[0] == timeseries.shape[1]) # paranoid assert(not np.any(std < 0.1)) @@ -127,7 
+128,7 @@ def test_matrix_orientation(): masker.fit() timeseries = masker.transform(fmri) recovered = masker.inverse_transform(timeseries) - np.testing.assert_array_almost_equal(recovered.get_data(), fmri.get_data()) + np.testing.assert_array_almost_equal(get_data(recovered), get_data(fmri)) def test_mask_3d(): @@ -164,7 +165,7 @@ def test_mask_4d(): masker.fit() data_trans = masker.transform(data_imgs) data_trans_img = index_img(data_img_4d, sample_mask) - data_trans_direct = data_trans_img.get_data()[mask_bool, :] + data_trans_direct = get_data(data_trans_img)[mask_bool, :] data_trans_direct = np.swapaxes(data_trans_direct, 0, 1) assert_array_equal(data_trans, data_trans_direct) @@ -242,7 +243,7 @@ def test_joblib_cache(): masker = NiftiMasker(mask_img=filename) masker.fit() mask_hash = hash(masker.mask_img_) - masker.mask_img_.get_data() + get_data(masker.mask_img_) assert_true(mask_hash == hash(masker.mask_img_)) # Test a tricky issue with memmapped joblib.memory that makes @@ -290,27 +291,27 @@ def test_compute_epi_mask(): # With an array with no zeros, exclude_zeros should not make # any difference - np.testing.assert_array_equal(mask1.get_data(), mask2.get_data()) + np.testing.assert_array_equal(get_data(mask1), get_data(mask2)) # Check that padding with zeros does not change the extracted mask mean_image2 = np.zeros((30, 30, 3)) - mean_image2[3:12, 3:12, :] = mean_image.get_data() + mean_image2[3:12, 3:12, :] = get_data(mean_image) mean_image2 = Nifti1Image(mean_image2, np.eye(4)) masker3 = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=False, exclude_zeros=True)) masker3.fit(mean_image2) mask3 = masker3.mask_img_ - np.testing.assert_array_equal(mask1.get_data(), - mask3.get_data()[3:12, 3:12]) + np.testing.assert_array_equal(get_data(mask1), + get_data(mask3)[3:12, 3:12]) # However, without exclude_zeros, it does masker4 = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=False)) masker4.fit(mean_image2) mask4 = masker4.mask_img_ - assert_false(np.allclose(mask1.get_data(), - mask4.get_data()[3:12, 3:12])) + assert_false(np.allclose(get_data(mask1), + get_data(mask4)[3:12, 3:12])) def test_compute_gray_matter_mask(): @@ -333,8 +334,8 @@ def test_compute_gray_matter_mask(): mask_ref = np.zeros((9, 9, 5)) mask_ref[2:7, 2:7, 2] = 1 - np.testing.assert_array_equal(mask1.get_data(), mask_ref) - np.testing.assert_array_equal(mask2.get_data(), mask_ref) + np.testing.assert_array_equal(get_data(mask1), mask_ref) + np.testing.assert_array_equal(get_data(mask2), mask_ref) def test_filter_and_mask_error(): diff --git a/nilearn/input_data/tests/test_nifti_spheres_masker.py b/nilearn/input_data/tests/test_nifti_spheres_masker.py index 0df537ec1c..b141a13e58 100644 --- a/nilearn/input_data/tests/test_nifti_spheres_masker.py +++ b/nilearn/input_data/tests/test_nifti_spheres_masker.py @@ -4,6 +4,7 @@ from nilearn.input_data import NiftiSpheresMasker from nilearn._utils.testing import assert_raises_regex from nose.tools import assert_false +from nilearn.image import get_data def test_seed_extraction(): @@ -38,7 +39,7 @@ def test_sphere_extraction(): masker.fit() s = masker.transform(img) assert_array_equal(s[:, 0], - np.mean(data[np.logical_and(mask, mask_img.get_data())], + np.mean(data[np.logical_and(mask, get_data(mask_img))], axis=0)) diff --git a/nilearn/masking.py b/nilearn/masking.py index 6708151537..aa92504208 100644 --- a/nilearn/masking.py +++ b/nilearn/masking.py @@ -15,6 +15,7 @@ from ._utils.cache_mixin import cache from ._utils.ndimage import 
largest_connected_component, get_border_data from ._utils.niimg import _safe_get_data, img_data_dtype +from nilearn.image import get_data class MaskWarning(UserWarning): @@ -42,7 +43,7 @@ def _load_mask_img(mask_img, allow_empty=False): boolean version of the mask """ mask_img = _utils.check_niimg_3d(mask_img) - mask = mask_img.get_data() + mask = get_data(mask_img) values = np.unique(mask) if len(values) == 1: @@ -570,12 +571,12 @@ def compute_gray_matter_mask(target_img, threshold=.5, template = load_mni152_brain_mask() dtype = img_data_dtype(target_img) template = new_img_like(template, - template.get_data().astype(dtype)) + get_data(template).astype(dtype)) from .image.resampling import resample_to_img resampled_template = cache(resample_to_img, memory)(template, target_img) - mask = resampled_template.get_data() >= threshold + mask = get_data(resampled_template) >= threshold mask, affine = _post_process_mask(mask, target_img.affine, opening=opening, connected=connected, @@ -721,7 +722,7 @@ def _apply_mask_fmri(imgs, mask_img, dtype='f', mask_img = _utils.check_niimg_3d(mask_img) mask_affine = mask_img.affine - mask_data = _utils.as_ndarray(mask_img.get_data(), + mask_data = _utils.as_ndarray(get_data(mask_img), dtype=np.bool) if smoothing_fwhm is not None: diff --git a/nilearn/plotting/displays.py b/nilearn/plotting/displays.py index 84aff68e8f..5f25e5433f 100644 --- a/nilearn/plotting/displays.py +++ b/nilearn/plotting/displays.py @@ -29,6 +29,7 @@ from ..image import new_img_like from ..image.resampling import (get_bounds, reorder_img, coord_transform, get_mask_bounds) +from nilearn.image import get_data ############################################################################### @@ -892,7 +893,7 @@ def add_edges(self, img, color='r'): The color used to display the edge map """ img = reorder_img(img, resample='continuous') - data = img.get_data() + data = get_data(img) affine = img.affine single_color_cmap = colors.ListedColormap([color]) data_bounds = get_bounds(data.shape, img.affine) diff --git a/nilearn/plotting/find_cuts.py b/nilearn/plotting/find_cuts.py index 3b9a9cb2b7..636a51ed4e 100644 --- a/nilearn/plotting/find_cuts.py +++ b/nilearn/plotting/find_cuts.py @@ -19,6 +19,7 @@ from .._utils.numpy_conversions import as_ndarray from .._utils import check_niimg_3d, check_niimg_4d from .._utils.niimg import _safe_get_data +from nilearn.image import get_data ################################################################################ # Functions for automatic choice of cuts coordinates @@ -390,7 +391,7 @@ def find_parcellation_cut_coords(labels_img, background_label=0, return_label_na "of these 'left' or 'right'.".format(label_hemisphere)) # Grab data and affine labels_img = reorder_img(check_niimg_3d(labels_img)) - labels_data = labels_img.get_data() + labels_data = get_data(labels_img) labels_affine = labels_img.affine # Grab number of unique values in 3d image @@ -406,8 +407,8 @@ def find_parcellation_cut_coords(labels_img, background_label=0, return_label_na # Grab hemispheres separately x, y, z = coord_transform(0, 0, 0, np.linalg.inv(labels_affine)) - left_hemi = labels_img.get_data().copy() == cur_label - right_hemi = labels_img.get_data().copy() == cur_label + left_hemi = get_data(labels_img).copy() == cur_label + right_hemi = get_data(labels_img).copy() == cur_label left_hemi[int(x):] = 0 right_hemi[:int(x)] = 0 diff --git a/nilearn/plotting/img_plotting.py b/nilearn/plotting/img_plotting.py index 52d7867fd5..da991ecfae 100644 --- a/nilearn/plotting/img_plotting.py 
+++ b/nilearn/plotting/img_plotting.py @@ -38,6 +38,7 @@ from ..image import iter_img from .displays import get_slicer, get_projector from . import cm +from nilearn.image import get_data def show(): @@ -339,7 +340,7 @@ def __init__(self, data=None, affine=None, header=None): def load(self): if self.data is None: anat_img = load_mni152_template() - data = anat_img.get_data() + data = get_data(anat_img) data = data.astype(np.float) anat_mask = ndimage.morphology.binary_fill_holes(data > 0) data = np.ma.masked_array(data, np.logical_not(anat_mask)) @@ -348,6 +349,16 @@ def load(self): self.vmax = data.max() self._shape = anat_img.shape + @property + def _data_cache(self): + self.load() + return self.data + + @property + def _dataobj(self): + self.load() + return self.data + def get_data(self): self.load() return self.data @@ -888,7 +899,7 @@ def plot_prob_atlas(maps_img, bg_img=MNI152TEMPLATE, view_type='auto', filled = view_type.startswith('filled') for (map_img, color, thr) in zip(iter_img(maps_img), color_list, threshold): - data = map_img.get_data() + data = get_data(map_img) # To threshold or choose the level of the contours thr = check_threshold(thr, data, percentile_func=fast_abs_percentile, diff --git a/nilearn/plotting/tests/test_html_stat_map.py b/nilearn/plotting/tests/test_html_stat_map.py index 08998aa4d3..3054a1851a 100644 --- a/nilearn/plotting/tests/test_html_stat_map.py +++ b/nilearn/plotting/tests/test_html_stat_map.py @@ -9,6 +9,7 @@ from nilearn import datasets, image from nilearn.plotting import html_stat_map from nilearn.image import new_img_like +from nilearn.image import get_data from ..js_plotting_utils import colorscale from ..._utils.compat import _basestring @@ -143,12 +144,12 @@ def test_mask_stat_map(): # Try not to threshold anything mask_img, img, data_t, thre = html_stat_map._mask_stat_map(img, threshold=None) - assert np.max(mask_img.get_data()) == 0 + assert np.max(get_data(mask_img)) == 0 # Now threshold at zero mask_img, img, data_t, thre = html_stat_map._mask_stat_map(img, threshold=0) - assert np.min((data == 0) == mask_img.get_data()) + assert np.min((data == 0) == get_data(mask_img)) def test_load_bg_img(): @@ -314,7 +315,7 @@ def test_view_img(): html_view = html_stat_map.view_img(img, threshold=2., vmax=4.) _check_html(html_view) html_view = html_stat_map.view_img(img, symmetric_cmap=False) - img_4d = image.new_img_like(img, img.get_data()[:, :, :, np.newaxis]) + img_4d = image.new_img_like(img, get_data(img)[:, :, :, np.newaxis]) assert len(img_4d.shape) == 4 html_view = html_stat_map.view_img(img_4d, threshold=2., vmax=4.) 
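The test above promotes a 3D map to a single-volume 4D image before handing it to view_img. The same idiom in isolation, as a hedged sketch (random data for illustration; new_img_like reuses the affine of the reference image when none is given):

import numpy as np
from nibabel import Nifti1Image
from nilearn.image import new_img_like, get_data

img = Nifti1Image(np.random.RandomState(0).rand(6, 7, 8), np.eye(4))
# Append a trailing axis to turn the 3D volume into a 4D image with a
# single time point.
img_4d = new_img_like(img, get_data(img)[:, :, :, np.newaxis])
assert len(img_4d.shape) == 4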
_check_html(html_view) diff --git a/nilearn/plotting/tests/test_html_surface.py b/nilearn/plotting/tests/test_html_surface.py index 14a2b2950f..d51dad66b0 100644 --- a/nilearn/plotting/tests/test_html_surface.py +++ b/nilearn/plotting/tests/test_html_surface.py @@ -8,6 +8,7 @@ from nilearn.plotting.js_plotting_utils import decode from nilearn.datasets import fetch_surf_fsaverage from nilearn._utils.exceptions import DimensionError +from nilearn.image import get_data from .test_js_plotting_utils import check_colors, check_html @@ -143,10 +144,10 @@ def test_view_img_on_surf(): html = html_surface.view_img_on_surf(img, surf_mesh='fsaverage') check_html(html) assert_raises(DimensionError, html_surface.view_img_on_surf, [img, img]) - img_4d = image.new_img_like(img, img.get_data()[:, :, :, np.newaxis]) + img_4d = image.new_img_like(img, get_data(img)[:, :, :, np.newaxis]) assert len(img_4d.shape) == 4 html = html_surface.view_img_on_surf(img, threshold='92.3%') check_html(html) - np.clip(img.get_data(), 0, None, out=img.get_data()) + np.clip(get_data(img), 0, None, out=get_data(img)) html = html_surface.view_img_on_surf(img, symmetric_cmap=False) check_html(html) diff --git a/nilearn/plotting/tests/test_img_plotting.py b/nilearn/plotting/tests/test_img_plotting.py index 28d138bb8b..6822ba56fd 100644 --- a/nilearn/plotting/tests/test_img_plotting.py +++ b/nilearn/plotting/tests/test_img_plotting.py @@ -15,6 +15,7 @@ from nilearn._utils.testing import assert_raises_regex from nilearn.image.resampling import coord_transform +from nilearn.image import get_data from nilearn.datasets import load_mni152_template from nilearn.plotting.find_cuts import find_cut_slices from nilearn.plotting.img_plotting import (MNI152TEMPLATE, plot_anat, plot_img, @@ -147,7 +148,7 @@ def test_plot_stat_map(): # Smoke test coordinate finder, with and without mask masked_img = nibabel.Nifti1Image( - np.ma.masked_equal(img.get_data(), 0), + np.ma.masked_equal(get_data(img), 0), mni_affine) plot_stat_map(masked_img, display_mode='x') plot_stat_map(img, display_mode='y', cut_coords=2) @@ -293,7 +294,7 @@ def test_plot_with_axes_or_figure(): def test_plot_stat_map_colorbar_variations(): # This is only a smoke test img_positive = _generate_img() - data_positive = img_positive.get_data() + data_positive = get_data(img_positive) rng = np.random.RandomState(42) data_negative = -data_positive data_heterogeneous = data_positive * rng.randn(*data_positive.shape) @@ -343,7 +344,7 @@ def test_plot_img_with_auto_cut_coords(): def test_plot_img_with_resampling(): - data = _generate_img().get_data() + data = get_data(_generate_img()) affine = np.array([[1., -1., 0., 0.], [1., 1., 0., 0.], [0., 0., 1., 0.], @@ -1042,7 +1043,7 @@ def test_outlier_cut_coords(): def test_plot_stat_map_with_nans(): img = _generate_img() - data = img.get_data() + data = get_data(img) data[6, 5, 1] = np.nan data[1, 5, 2] = np.nan @@ -1071,7 +1072,7 @@ def test_plotting_functions_with_cmaps(): def test_plotting_functions_with_nans_in_bg_img(): bg_img = _generate_img() - bg_data = bg_img.get_data() + bg_data = get_data(bg_img) bg_data[6, 5, 1] = np.nan bg_data[1, 5, 2] = np.nan @@ -1122,7 +1123,7 @@ def test_display_methods_with_display_mode_tiled(): def test_plot_glass_brain_colorbar_having_nans(): img = _generate_img() - data = img.get_data() + data = get_data(img) data[6, 5, 2] = np.inf img = nibabel.Nifti1Image(data, np.eye(4)) diff --git a/nilearn/regions/rena_clustering.py b/nilearn/regions/rena_clustering.py index fac552aa5a..9cbfd951cf 100644 --- 
a/nilearn/regions/rena_clustering.py +++ b/nilearn/regions/rena_clustering.py @@ -16,6 +16,7 @@ from sklearn.utils.validation import check_is_fitted from sklearn.utils import check_array from nilearn.masking import _unmask_from_to_3d_array +from nilearn.image import get_data from nibabel import Nifti1Image @@ -41,7 +42,7 @@ def _compute_weights(X, mask_img): n_samples, n_features = X.shape - mask = mask_img.get_data().astype('bool') + mask = get_data(mask_img).astype('bool') shape = mask.shape data = np.empty((shape[0], shape[1], shape[2], n_samples)) @@ -117,7 +118,7 @@ def _make_edges_and_weights(X, mask_img): Weights corresponding to all edges in the mask. shape: (n_edges,) """ - mask = mask_img.get_data() + mask = get_data(mask_img) shape = mask.shape n_vertices = np.prod(shape) diff --git a/nilearn/regions/tests/test_region_extractor.py b/nilearn/regions/tests/test_region_extractor.py index ab74b7c581..10c3b76de0 100644 --- a/nilearn/regions/tests/test_region_extractor.py +++ b/nilearn/regions/tests/test_region_extractor.py @@ -15,6 +15,7 @@ from nilearn._utils.testing import assert_raises_regex from nilearn._utils.data_gen import generate_maps, generate_labeled_regions from nilearn._utils.exceptions import DimensionError +from nilearn.image import get_data def _make_random_data(shape): @@ -22,7 +23,7 @@ def _make_random_data(shape): rng = np.random.RandomState(0) data_rng = rng.normal(size=shape) img = nibabel.Nifti1Image(data_rng, affine) - data = img.get_data() + data = get_data(img) return img, data @@ -41,7 +42,7 @@ def test_invalid_thresholds_in_threshold_maps_ratio(): def test_nans_threshold_maps_ratio(): maps, _ = generate_maps((10, 10, 10), n_regions=2) - data = maps.get_data() + data = get_data(maps) data[:, :, 0] = np.nan maps_img = nibabel.Nifti1Image(data, np.eye(4)) @@ -55,10 +56,10 @@ def test_threshold_maps_ratio(): maps, _ = generate_maps((6, 8, 10), n_regions=3) # test that there is no side effect - maps.get_data()[:3] = 100 - maps_data = maps.get_data().copy() + get_data(maps)[:3] = 100 + maps_data = get_data(maps).copy() thr_maps = _threshold_maps_ratio(maps, threshold=1.0) - np.testing.assert_array_equal(maps.get_data(), maps_data) + np.testing.assert_array_equal(get_data(maps), maps_data) # make sure that n_regions (4th dimension) are kept same even # in thresholded image @@ -108,15 +109,16 @@ def test_connected_regions(): assert_true(connected_extraction_3d_img.shape[-1] >= 1) # Test input mask_img - mask = mask_img.get_data() + mask = get_data(mask_img) mask[1, 1, 1] = 0 extraction_with_mask_img, index = connected_regions(maps, mask_img=mask_img) assert_true(extraction_with_mask_img.shape[-1] >= 1) extraction_without_mask_img, index = connected_regions(maps) - assert_true(np.all(extraction_with_mask_img.get_data()[mask == 0] == 0.)) - assert_false(np.all(extraction_without_mask_img.get_data()[mask == 0] == 0.)) + assert_true(np.all(get_data(extraction_with_mask_img)[mask == 0] == 0.)) + assert_false( + np.all(get_data(extraction_without_mask_img)[mask == 0] == 0.)) # mask_img with different shape mask = np.zeros(shape=(10, 11, 12), dtype=np.int) @@ -132,8 +134,8 @@ def test_connected_regions(): assert_not_equal(mask_img.shape, extraction_not_same_fov_mask.shape[:3]) extraction_not_same_fov, _ = connected_regions(maps) - assert_greater(np.sum(extraction_not_same_fov.get_data() == 0), - np.sum(extraction_not_same_fov_mask.get_data() == 0)) + assert_greater(np.sum(get_data(extraction_not_same_fov) == 0), + np.sum(get_data(extraction_not_same_fov_mask) == 0)) def 
test_invalid_threshold_strategies(): @@ -166,16 +168,16 @@ def test_region_extractor_fit_and_transform(): maps, mask_img = generate_maps((40, 40, 40), n_regions=n_regions) # Test maps are zero in the mask - mask_data = mask_img.get_data() + mask_data = get_data(mask_img) mask_data[1, 1, 1] = 0 extractor_without_mask = RegionExtractor(maps) extractor_without_mask.fit() extractor_with_mask = RegionExtractor(maps, mask_img=mask_img) extractor_with_mask.fit() assert_false(np.all( - extractor_without_mask.regions_img_.get_data()[mask_data == 0] == 0.)) + get_data(extractor_without_mask.regions_img_)[mask_data == 0] == 0.)) assert_true(np.all( - extractor_with_mask.regions_img_.get_data()[mask_data == 0] == 0.)) + get_data(extractor_with_mask.regions_img_)[mask_data == 0] == 0.)) # smoke test to RegionExtractor with thresholding_strategy='ratio_n_voxels' extract_ratio = RegionExtractor(maps, threshold=0.2, @@ -274,12 +276,12 @@ def test_connected_label_regions(): n_regions = 9 labels_img = generate_labeled_regions(shape, affine=affine, n_regions=n_regions) - labels_data = labels_img.get_data() + labels_data = get_data(labels_img) n_labels_wo_reg_ext = len(np.unique(labels_data)) # region extraction without specifying min_size extracted_regions_on_labels_img = connected_label_regions(labels_img) - extracted_regions_labels_data = extracted_regions_on_labels_img.get_data() + extracted_regions_labels_data = get_data(extracted_regions_on_labels_img) n_labels_wo_min = len(np.unique(extracted_regions_labels_data)) assert_true(n_labels_wo_reg_ext < n_labels_wo_min) @@ -287,7 +289,7 @@ def test_connected_label_regions(): # with specifying min_size extracted_regions_with_min = connected_label_regions(labels_img, min_size=100) - extracted_regions_with_min_data = extracted_regions_with_min.get_data() + extracted_regions_with_min_data = get_data(extracted_regions_with_min) n_labels_with_min = len(np.unique(extracted_regions_with_min_data)) assert_true(n_labels_wo_min > n_labels_with_min) @@ -295,7 +297,7 @@ def test_connected_label_regions(): # Test connect_diag=False ext_reg_without_connect_diag = connected_label_regions(labels_img, connect_diag=False) - data_wo_connect_diag = ext_reg_without_connect_diag.get_data() + data_wo_connect_diag = get_data(ext_reg_without_connect_diag) n_labels_wo_connect_diag = len(np.unique(data_wo_connect_diag)) assert_true(n_labels_wo_connect_diag > n_labels_wo_reg_ext) @@ -303,7 +305,7 @@ def test_connected_label_regions(): # will be returned extract_reg_min_size_large = connected_label_regions(labels_img, min_size=500) - assert_true(np.unique(extract_reg_min_size_large.get_data()) == 0) + assert_true(np.unique(get_data(extract_reg_min_size_large)) == 0) # Test the names of the brain regions given in labels. 
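A self-contained sketch of what connected_label_regions does to a labels image, as exercised in this test (the toy blobs below are illustrative, not the generate_labeled_regions fixture used above):

import numpy as np
from nibabel import Nifti1Image
from nilearn.regions import connected_label_regions
from nilearn.image import get_data

labels = np.zeros((10, 10, 10), dtype=np.int16)
labels[1:3, 1:3, 1:3] = 1   # two disconnected blobs sharing label 1
labels[7:9, 7:9, 7:9] = 1
labels_img = Nifti1Image(labels, np.eye(4))

regions_img = connected_label_regions(labels_img)
# Each connected component gets its own label, so the number of unique
# labels grows (here from 2, counting background, to 3).
assert len(np.unique(get_data(regions_img))) == 3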
# Test labels for 9 regions in n_regions @@ -337,7 +339,7 @@ def test_connected_label_regions(): # unique labels in labels_img), then we raise an error # Test whether error raises - unique_labels = set(np.unique(np.asarray(labels_img.get_data()))) + unique_labels = set(np.unique(np.asarray(get_data(labels_img)))) unique_labels.remove(0) # labels given are less than n_regions=9 diff --git a/nilearn/regions/tests/test_rena_clustering.py b/nilearn/regions/tests/test_rena_clustering.py index 6ae6e0515e..083df5ecf4 100644 --- a/nilearn/regions/tests/test_rena_clustering.py +++ b/nilearn/regions/tests/test_rena_clustering.py @@ -7,17 +7,18 @@ from nilearn._utils.data_gen import generate_fake_fmri from nilearn.regions.rena_clustering import ReNA from nilearn.input_data import NiftiMasker +from nilearn.image import get_data def test_rena_clustering(): data_img, mask_img = generate_fake_fmri(shape=(10, 11, 12), length=5) - data = data_img.get_data() - mask = mask_img.get_data() + data = get_data(data_img) + mask = get_data(mask_img) X = np.empty((data.shape[3], int(mask.sum()))) for i in range(data.shape[3]): - X[i, :] = np.copy(data[:, :, :, i])[mask_img.get_data() != 0] + X[i, :] = np.copy(data[:, :, :, i])[get_data(mask_img) != 0] nifti_masker = NiftiMasker(mask_img=mask_img).fit() n_voxels = nifti_masker.transform(data_img).shape[1] diff --git a/nilearn/regions/tests/test_signal_extraction.py b/nilearn/regions/tests/test_signal_extraction.py index a8acf50a57..df37933866 100644 --- a/nilearn/regions/tests/test_signal_extraction.py +++ b/nilearn/regions/tests/test_signal_extraction.py @@ -15,6 +15,7 @@ from nilearn._utils.data_gen import generate_labeled_regions, generate_maps from nilearn._utils.data_gen import generate_fake_fmri from nilearn._utils.exceptions import DimensionError +from nilearn.image import get_data _TEST_DIM_ERROR_MSG = ("Input data has incompatible dimensionality: " "Expected dimension is 3D and you provided " @@ -62,7 +63,7 @@ def test_generate_labeled_regions(): n_regions = 10 regions = generate_labeled_regions(shape, n_regions) assert_true(regions.shape == shape) - assert (len(np.unique(regions.get_data())) == n_regions + 1) + assert (len(np.unique(get_data(regions))) == n_regions + 1) def test_signals_extraction_with_labels(): @@ -109,7 +110,7 @@ def test_signals_extraction_with_labels(): # Without mask # from labels data_img = signal_extraction.signals_to_img_labels(signals, labels_img) - data = data_img.get_data() + data = get_data(data_img) assert_true(data_img.shape == (shape + (n_instants,))) assert_true(np.all(data.std(axis=-1) > 0)) @@ -153,10 +154,10 @@ def test_signals_extraction_with_labels(): data_img, labels_4d_img, mask_img=mask_img) assert_true(data_img.shape == (shape + (n_instants,))) - data = data_img.get_data() + data = get_data(data_img) assert_true(abs(data).max() > 1e-9) # Zero outside of the mask - assert_true(np.all(data[np.logical_not(mask_img.get_data()) + assert_true(np.all(data[np.logical_not(get_data(mask_img)) ].std(axis=-1) < eps) ) @@ -165,16 +166,16 @@ def test_signals_extraction_with_labels(): signals, filenames[0], mask_img=filenames[1]) assert_true(data_img.shape == (shape + (n_instants,))) - data = data_img.get_data() + data = get_data(data_img) assert_true(abs(data).max() > 1e-9) # Zero outside of the mask - assert_true(np.all(data[np.logical_not(mask_img.get_data()) + assert_true(np.all(data[np.logical_not(get_data(mask_img)) ].std(axis=-1) < eps) ) # mask labels before checking masked_labels_data = labels_data.copy() - 
masked_labels_data[np.logical_not(mask_img.get_data())] = 0 + masked_labels_data[np.logical_not(get_data(mask_img))] = 0 for n in range(1, n_regions + 1): sigs = data[masked_labels_data == n, :] np.testing.assert_almost_equal(sigs[0, :], signals[:, n - 1]) @@ -219,7 +220,7 @@ def test_signal_extraction_with_maps(): rand_gen = np.random.RandomState(0) maps_img, mask_img = generate_maps(shape, n_regions, border=1) - maps_data = maps_img.get_data() + maps_data = get_data(maps_img) data = np.zeros(shape + (n_instants,), dtype=np.float32) mask_4d_img = nibabel.Nifti1Image(np.ones((shape + (2, ))), np.eye(4)) @@ -251,9 +252,9 @@ def test_signal_extraction_with_maps(): # Recover image img_r = signal_extraction.signals_to_img_maps(signals, maps_img, mask_img=mask_img) - np.testing.assert_almost_equal(img_r.get_data(), img.get_data()) + np.testing.assert_almost_equal(get_data(img_r), get_data(img)) img_r = signal_extraction.signals_to_img_maps(signals, maps_img) - np.testing.assert_almost_equal(img_r.get_data(), img.get_data()) + np.testing.assert_almost_equal(get_data(img_r), get_data(img)) # Test input validation data_img = nibabel.Nifti1Image(np.zeros((2, 3, 4, 5)), np.eye(4)) @@ -287,7 +288,7 @@ def test_signal_extraction_with_maps_and_labels(): # Generate labels labels = list(range(n_regions + 1)) # 0 is background labels_img = generate_labeled_regions(shape, n_regions, labels=labels) - labels_data = labels_img.get_data() + labels_data = get_data(labels_img) # Convert to maps maps_data = np.zeros(shape + (n_regions,)) for n, l in enumerate(labels): @@ -338,7 +339,7 @@ def test_signal_extraction_with_maps_and_labels(): # Check that NaNs in regions inside mask are preserved region1 = labels_data == 2 indices = [ind[:1] for ind in np.where(region1)] - fmri_img.get_data()[indices + [slice(None)]] = float('nan') + get_data(fmri_img)[indices + [slice(None)]] = float('nan') labels_signals, labels_labels = signal_extraction.img_to_signals_labels( fmri_img, labels_img, mask_img=mask_img) assert_true(np.all(np.isnan(labels_signals[:, labels_labels.index(2)]))) @@ -349,7 +350,7 @@ def test_generate_maps(): shape = (10, 11, 12) n_regions = 9 maps_img, _ = generate_maps(shape, n_regions, border=1) - maps = maps_img.get_data() + maps = get_data(maps_img) assert_true(maps.shape == shape + (n_regions,)) # no empty map assert_true(np.all(abs(maps).sum(axis=0).sum(axis=0).sum(axis=0) > 0)) diff --git a/nilearn/surface/surface.py b/nilearn/surface/surface.py index fac0282204..8ed57a7e5b 100644 --- a/nilearn/surface/surface.py +++ b/nilearn/surface/surface.py @@ -25,6 +25,7 @@ class EfficiencyWarning(UserWarning): from .._utils.compat import _basestring from .._utils.path_finding import _resolve_globbing from .. 
import _utils +from nilearn.image import get_data def _uniform_ball_cloud(n_points=20, dim=3, n_monte_carlo=50000): @@ -499,13 +500,13 @@ def vol_to_surf(img, surf_mesh, img = load_img(img) if mask_img is not None: mask_img = _utils.check_niimg(mask_img) - mask = resampling.resample_to_img( - mask_img, img, interpolation='nearest', copy=False).get_data() + mask = get_data(resampling.resample_to_img( + mask_img, img, interpolation='nearest', copy=False)) else: mask = None original_dimension = len(img.shape) img = _utils.check_niimg(img, atleast_4d=True) - frames = np.rollaxis(img.get_data(), -1) + frames = np.rollaxis(get_data(img), -1) mesh = load_surf_mesh(surf_mesh) sampling = sampling_schemes[interpolation] texture = sampling( @@ -583,7 +584,7 @@ def load_surf_data(surf_data): surf_data = file_list[f] if (surf_data.endswith('nii') or surf_data.endswith('nii.gz') or surf_data.endswith('mgz')): - data_part = np.squeeze(nibabel.load(surf_data).get_data()) + data_part = np.squeeze(get_data(nibabel.load(surf_data))) elif (surf_data.endswith('curv') or surf_data.endswith('sulc') or surf_data.endswith('thickness')): data_part = fs.io.read_morph_data(surf_data) diff --git a/nilearn/tests/test_masking.py b/nilearn/tests/test_masking.py index 8ece5f423c..72b3f0857b 100644 --- a/nilearn/tests/test_masking.py +++ b/nilearn/tests/test_masking.py @@ -16,6 +16,7 @@ from nibabel import Nifti1Image from nilearn import masking +from nilearn.image import get_data from nilearn.masking import (compute_epi_mask, compute_multi_epi_mask, compute_background_mask, compute_gray_matter_mask, compute_multi_gray_matter_mask, @@ -45,19 +46,19 @@ def test_compute_epi_mask(): opening=False) # With an array with no zeros, exclude_zeros should not make # any difference - np.testing.assert_array_equal(mask1.get_data(), mask2.get_data()) + np.testing.assert_array_equal(get_data(mask1), get_data(mask2)) # Check that padding with zeros does not change the extracted mask mean_image2 = np.zeros((30, 30, 3)) - mean_image2[3:12, 3:12, :] = mean_image.get_data() + mean_image2[3:12, 3:12, :] = get_data(mean_image) mean_image2 = Nifti1Image(mean_image2, np.eye(4)) mask3 = compute_epi_mask(mean_image2, exclude_zeros=True, opening=False) - np.testing.assert_array_equal(mask1.get_data(), - mask3.get_data()[3:12, 3:12]) + np.testing.assert_array_equal(get_data(mask1), + get_data(mask3)[3:12, 3:12]) # However, without exclude_zeros, it does mask3 = compute_epi_mask(mean_image2, opening=False) - assert_false(np.allclose(mask1.get_data(), - mask3.get_data()[3:12, 3:12])) + assert_false(np.allclose(get_data(mask1), + get_data(mask3)[3:12, 3:12])) # Check that we get a ValueError for incorrect shape mean_image = np.ones((9, 9)) @@ -85,7 +86,7 @@ def test_compute_background_mask(): mask = mean_image == 1 mean_image = Nifti1Image(mean_image, np.eye(4)) mask1 = compute_background_mask(mean_image, opening=False) - np.testing.assert_array_equal(mask1.get_data(), + np.testing.assert_array_equal(get_data(mask1), mask.astype(np.int8)) # Check that we get a ValueError for incorrect shape @@ -111,7 +112,7 @@ def test_compute_gray_matter_mask(): mask1 = np.zeros((9, 9, 9)) mask1[2:-2, 2:-2, 2:-2] = 1 - np.testing.assert_array_equal(mask1, mask.get_data()) + np.testing.assert_array_equal(mask1, get_data(mask)) # Check that we get a useful warning for empty masks assert_warns(masking.MaskWarning, @@ -123,8 +124,8 @@ def test_compute_gray_matter_mask(): mask_img1 = compute_gray_matter_mask(img1) mask_img2 = compute_gray_matter_mask(img2) - 
np.testing.assert_array_equal(mask_img1.get_data(), - mask_img2.get_data()) + np.testing.assert_array_equal(get_data(mask_img1), + get_data(mask_img2)) def test_apply_mask(): @@ -215,13 +216,13 @@ def test_unmask(): unmasked3D[np.logical_not(mask)] = 0 # 4D Test, test value ordering at the same time. - t = unmask(masked4D, mask_img, order="C").get_data() + t = get_data(unmask(masked4D, mask_img, order="C")) assert_equal(t.ndim, 4) assert_true(t.flags["C_CONTIGUOUS"]) assert_false(t.flags["F_CONTIGUOUS"]) assert_array_equal(t, unmasked4D) t = unmask([masked4D], mask_img, order="F") - t = [t_.get_data() for t_ in t] + t = [get_data(t_) for t_ in t] assert_true(isinstance(t, list)) assert_equal(t[0].ndim, 4) assert_false(t[0].flags["C_CONTIGUOUS"]) @@ -231,13 +232,13 @@ def test_unmask(): # 3D Test - check both with Nifti1Image and file for create_files in (False, True): with write_tmp_imgs(mask_img, create_files=create_files) as filename: - t = unmask(masked3D, filename, order="C").get_data() + t = get_data(unmask(masked3D, filename, order="C")) assert_equal(t.ndim, 3) assert_true(t.flags["C_CONTIGUOUS"]) assert_false(t.flags["F_CONTIGUOUS"]) assert_array_equal(t, unmasked3D) t = unmask([masked3D], filename, order="F") - t = [t_.get_data() for t_ in t] + t = [get_data(t_) for t_ in t] assert_true(isinstance(t, list)) assert_equal(t[0].ndim, 3) assert_false(t[0].flags["C_CONTIGUOUS"]) @@ -300,7 +301,7 @@ def test_intersect_masks_filename(): mask_ab = np.zeros((4, 4, 1), dtype=np.bool) mask_ab[2, 2] = 1 mask_ab_ = intersect_masks(filenames, threshold=1.) - assert_array_equal(mask_ab, mask_ab_.get_data()) + assert_array_equal(mask_ab, get_data(mask_ab_)) def test_intersect_masks(): @@ -354,38 +355,38 @@ def test_intersect_masks(): mask_ab = np.zeros((4, 4, 1), dtype=np.bool) mask_ab[2, 2] = 1 mask_ab_ = intersect_masks([mask_a_img, mask_b_img], threshold=1.) - assert_array_equal(mask_ab, mask_ab_.get_data()) + assert_array_equal(mask_ab, get_data(mask_ab_)) # Test intersect mask images with '>f8'. This function uses # largest_connected_component to check if intersect_masks passes with # connected=True (which is by default) - mask_a_img_change_dtype = Nifti1Image(mask_a_img.get_data().astype('>f8'), + mask_a_img_change_dtype = Nifti1Image(get_data(mask_a_img).astype('>f8'), affine=mask_a_img.affine) - mask_b_img_change_dtype = Nifti1Image(mask_b_img.get_data().astype('>f8'), + mask_b_img_change_dtype = Nifti1Image(get_data(mask_b_img).astype('>f8'), affine=mask_b_img.affine) mask_ab_change_type = intersect_masks([mask_a_img_change_dtype, mask_b_img_change_dtype], threshold=1.) - assert_array_equal(mask_ab, mask_ab_change_type.get_data()) + assert_array_equal(mask_ab, get_data(mask_ab_change_type)) mask_abc = mask_a + mask_b + mask_c mask_abc_ = intersect_masks([mask_a_img, mask_b_img, mask_c_img], threshold=0., connected=False) - assert_array_equal(mask_abc, mask_abc_.get_data()) + assert_array_equal(mask_abc, get_data(mask_abc_)) mask_abc[0, 0] = 0 mask_abc_ = intersect_masks([mask_a_img, mask_b_img, mask_c_img], threshold=0.) - assert_array_equal(mask_abc, mask_abc_.get_data()) + assert_array_equal(mask_abc, get_data(mask_abc_)) mask_abc = mask_ab mask_abc_ = intersect_masks([mask_a_img, mask_b_img, mask_c_img], threshold=1.) 
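The threshold semantics checked here, in a standalone sketch (toy masks; connected=False keeps the example independent of connected-component pruning; the voxel counts follow from the hand-built overlap):

import numpy as np
from nibabel import Nifti1Image
from nilearn.masking import intersect_masks
from nilearn.image import get_data

mask_a = np.zeros((4, 4, 1), dtype=np.int8)
mask_a[1:3, 1:3] = 1
mask_b = np.zeros((4, 4, 1), dtype=np.int8)
mask_b[2:4, 2:4] = 1
imgs = [Nifti1Image(mask_a, np.eye(4)), Nifti1Image(mask_b, np.eye(4))]

# threshold=1.: keep voxels present in every mask (strict intersection).
inter = intersect_masks(imgs, threshold=1., connected=False)
assert get_data(inter).sum() == 1
# threshold=0.: keep voxels present in any mask (union).
union = intersect_masks(imgs, threshold=0., connected=False)
assert get_data(union).sum() == 7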
- assert_array_equal(mask_abc, mask_abc_.get_data()) + assert_array_equal(mask_abc, get_data(mask_abc_)) mask_abc[1, 2] = 1 mask_abc[3, 2] = 1 mask_abc_ = intersect_masks([mask_a_img, mask_b_img, mask_c_img]) - assert_array_equal(mask_abc, mask_abc_.get_data()) + assert_array_equal(mask_abc, get_data(mask_abc_)) def test_compute_multi_epi_mask(): @@ -411,7 +412,7 @@ def test_compute_multi_epi_mask(): opening=0, target_affine=np.eye(4), target_shape=(4, 4, 1)) - assert_array_equal(mask_ab, mask_ab_.get_data()) + assert_array_equal(mask_ab, get_data(mask_ab_)) def test_compute_multi_gray_matter_mask(): @@ -431,7 +432,7 @@ def test_compute_multi_gray_matter_mask(): Nifti1Image(np.random.randn(9, 9, 9), np.eye(4))] mask2 = compute_multi_gray_matter_mask(imgs2) - assert_array_equal(mask1.get_data(), mask2.get_data()) + assert_array_equal(get_data(mask1), get_data(mask2)) def test_error_shape(random_state=42, shape=(3, 5, 7, 11)): @@ -472,7 +473,7 @@ def test_unmask_list(random_state=42): mask_img = Nifti1Image(mask_data.astype(np.uint8), affine) a = unmask(mask_data[mask_data], mask_img) b = unmask(mask_data[mask_data].tolist(), mask_img) # shouldn't crash - assert_array_equal(a.get_data(), b.get_data()) + assert_array_equal(get_data(a), get_data(b)) def test__extrapolate_out_mask(): diff --git a/nilearn/tests/test_niimg.py b/nilearn/tests/test_niimg.py index 3127c982d5..90d2f2f4e0 100644 --- a/nilearn/tests/test_niimg.py +++ b/nilearn/tests/test_niimg.py @@ -11,6 +11,7 @@ from nilearn.image import new_img_like from nilearn._utils import niimg from nilearn._utils.testing import assert_raises_regex +from nilearn.image import get_data currdir = os.path.dirname(os.path.abspath(__file__)) @@ -40,14 +41,14 @@ def test_new_img_like_side_effect(): def test_get_target_dtype(): img = Nifti1Image(np.ones((2, 2, 2), dtype=np.float64), affine=np.eye(4)) - assert_equal(img.get_data().dtype.kind, 'f') - dtype_kind_float = niimg._get_target_dtype(img.get_data().dtype, + assert_equal(get_data(img).dtype.kind, 'f') + dtype_kind_float = niimg._get_target_dtype(get_data(img).dtype, target_dtype='auto') assert_equal(dtype_kind_float, np.float32) img2 = Nifti1Image(np.ones((2, 2, 2), dtype=np.int64), affine=np.eye(4)) - assert_equal(img2.get_data().dtype.kind, 'i') - dtype_kind_int = niimg._get_target_dtype(img2.get_data().dtype, + assert_equal(get_data(img2).dtype.kind, 'i') + dtype_kind_int = niimg._get_target_dtype(get_data(img2).dtype, target_dtype='auto') assert_equal(dtype_kind_int, np.int32) @@ -71,7 +72,6 @@ def test_img_data_dtype(): # To verify later that sometimes these differ meaningfully dtype_matches.append( loaded.get_data_dtype() == niimg.img_data_dtype(loaded)) - # Use np.array(dataobj) because get_data() is to be deprecated assert_equal(np.array(loaded.dataobj).dtype, niimg.img_data_dtype(loaded)) # Verify that the distinction is worth making diff --git a/nilearn/tests/test_niimg_conversions.py b/nilearn/tests/test_niimg_conversions.py index e359915622..91296b4cd1 100644 --- a/nilearn/tests/test_niimg_conversions.py +++ b/nilearn/tests/test_niimg_conversions.py @@ -27,6 +27,7 @@ from nilearn._utils.testing import with_memory_profiler from nilearn._utils.testing import assert_memory_less_than from nilearn._utils.niimg_conversions import _iter_check_niimg +from nilearn.image import get_data class PhonyNiimage(nibabel.spatialimages.SpatialImage): @@ -45,6 +46,14 @@ def get_affine(self): def shape(self): return self.data.shape + @property + def _data_cache(self): + return self.data + + @property + def 
_dataobj(self): + return self.data + def test_check_same_fov(): @@ -116,7 +125,7 @@ def test_check_niimg_3d(): # check data dtype equal with dtype='auto' img_check = _utils.check_niimg_3d(img, dtype='auto') - assert_equal(img.get_data().dtype.kind, img_check.get_data().dtype.kind) + assert_equal(get_data(img).dtype.kind, get_data(img_check).dtype.kind) def test_check_niimg_4d(): @@ -131,11 +140,11 @@ def test_check_niimg_4d(): # Tests with return_iterator=False img_4d_1 = _utils.check_niimg_4d([img_3d, img_3d]) - assert_true(img_4d_1.get_data().shape == (10, 10, 10, 2)) + assert_true(get_data(img_4d_1).shape == (10, 10, 10, 2)) assert_array_equal(img_4d_1.affine, affine) img_4d_2 = _utils.check_niimg_4d(img_4d_1) - assert_array_equal(img_4d_2.get_data(), img_4d_2.get_data()) + assert_array_equal(get_data(img_4d_2), get_data(img_4d_2)) assert_array_equal(img_4d_2.affine, img_4d_2.affine) # Tests with return_iterator=True @@ -149,8 +158,8 @@ def test_check_niimg_4d(): img_3d_iterator_2 = _utils.check_niimg_4d(img_3d_iterator_1, return_iterator=True) for img_1, img_2 in zip(img_3d_iterator_1, img_3d_iterator_2): - assert_true(img_1.get_data().shape == (10, 10, 10)) - assert_array_equal(img_1.get_data(), img_2.get_data()) + assert_true(get_data(img_1).shape == (10, 10, 10)) + assert_array_equal(get_data(img_1), get_data(img_2)) assert_array_equal(img_1.affine, img_2.affine) img_3d_iterator_1 = _utils.check_niimg_4d([img_3d, img_3d], @@ -158,8 +167,8 @@ def test_check_niimg_4d(): img_3d_iterator_2 = _utils.check_niimg_4d(img_4d_1, return_iterator=True) for img_1, img_2 in zip(img_3d_iterator_1, img_3d_iterator_2): - assert_true(img_1.get_data().shape == (10, 10, 10)) - assert_array_equal(img_1.get_data(), img_2.get_data()) + assert_true(get_data(img_1).shape == (10, 10, 10)) + assert_array_equal(get_data(img_1), get_data(img_2)) assert_array_equal(img_1.affine, img_2.affine) # This should raise an error: a 3D img is given and we want a 4D @@ -210,10 +219,12 @@ def test_check_niimg(): # check data dtype equal with dtype='auto' img_3d_check = _utils.check_niimg(img_3d, dtype='auto') - assert_equal(img_3d.get_data().dtype.kind, img_3d_check.get_data().dtype.kind) + assert_equal( + get_data(img_3d).dtype.kind, get_data(img_3d_check).dtype.kind) img_4d_check = _utils.check_niimg(img_4d, dtype='auto') - assert_equal(img_4d.get_data().dtype.kind, img_4d_check.get_data().dtype.kind) + assert_equal( + get_data(img_4d).dtype.kind, get_data(img_4d_check).dtype.kind) def test_check_niimg_wildcards(): @@ -250,24 +261,24 @@ def test_check_niimg_wildcards(): ####### # Testing with an existing filename with testing.write_tmp_imgs(img_3d, create_files=True) as filename: - assert_array_equal(_utils.check_niimg(filename).get_data(), - img_3d.get_data()) + assert_array_equal(get_data(_utils.check_niimg(filename)), + get_data(img_3d)) # No globbing behavior with testing.write_tmp_imgs(img_3d, create_files=True) as filename: - assert_array_equal(_utils.check_niimg(filename, - wildcards=False).get_data(), - img_3d.get_data()) + assert_array_equal( + get_data(_utils.check_niimg(filename, wildcards=False)), + get_data(img_3d)) ####### # Testing with an existing filename with testing.write_tmp_imgs(img_4d, create_files=True) as filename: - assert_array_equal(_utils.check_niimg(filename).get_data(), - img_4d.get_data()) + assert_array_equal(get_data(_utils.check_niimg(filename)), + get_data(img_4d)) # No globbing behavior with testing.write_tmp_imgs(img_4d, create_files=True) as filename: - 
assert_array_equal(_utils.check_niimg(filename, - wildcards=False).get_data(), - img_4d.get_data()) + assert_array_equal(get_data(_utils.check_niimg(filename, + wildcards=False)), + get_data(img_4d)) ####### # Testing with a glob matching exactly one filename @@ -277,8 +288,8 @@ def test_check_niimg_wildcards(): create_files=True, use_wildcards=True) as globs: glob_input = tmp_dir + globs - assert_array_equal(_utils.check_niimg(glob_input).get_data()[..., 0], - img_3d.get_data()) + assert_array_equal(get_data(_utils.check_niimg(glob_input))[..., 0], + get_data(img_3d)) # Disabled globbing behavior should raise an ValueError exception with testing.write_tmp_imgs(img_3d, create_files=True, @@ -296,8 +307,8 @@ def test_check_niimg_wildcards(): with testing.write_tmp_imgs(img_3d, img_3d, create_files=True, use_wildcards=True) as globs: - assert_array_equal(_utils.check_niimg(glob_input).get_data(), - img_4d.get_data()) + assert_array_equal(get_data(_utils.check_niimg(glob_input)), + get_data(img_4d)) ####### # Test when global variable is set to False => no globbing allowed @@ -317,13 +328,13 @@ def test_check_niimg_wildcards(): # Testing with an exact filename matching (3d case) with testing.write_tmp_imgs(img_3d, create_files=True) as filename: - assert_array_equal(_utils.check_niimg(filename).get_data(), - img_3d.get_data()) + assert_array_equal(get_data(_utils.check_niimg(filename)), + get_data(img_3d)) # Testing with an exact filename matching (4d case) with testing.write_tmp_imgs(img_4d, create_files=True) as filename: - assert_array_equal(_utils.check_niimg(filename).get_data(), - img_4d.get_data()) + assert_array_equal(get_data(_utils.check_niimg(filename)), + get_data(img_4d)) # Reverting to default behavior ni.EXPAND_PATH_WILDCARDS = True @@ -351,16 +362,16 @@ def test_iter_check_niimgs(): dir=None) img_4d.to_filename(filename) niimgs = list(_iter_check_niimg([filename])) - assert_array_equal(niimgs[0].get_data(), - _utils.check_niimg(img_4d).get_data()) + assert_array_equal(get_data(niimgs[0]), + get_data(_utils.check_niimg(img_4d))) del img_4d del niimgs os.remove(filename) # Regular case niimgs = list(_iter_check_niimg(img_2_4d)) - assert_array_equal(niimgs[0].get_data(), - _utils.check_niimg(img_2_4d).get_data()) + assert_array_equal(get_data(niimgs[0]), + get_data(_utils.check_niimg(img_2_4d))) def _check_memory(list_img_3d): @@ -453,9 +464,9 @@ def test_concat_niimgs(): nibabel.save(img3, tmpimg2) concatenated = _utils.concat_niimgs(os.path.join(tempdir, '*')) assert_array_equal( - concatenated.get_data()[..., 0], img1.get_data()) + get_data(concatenated)[..., 0], get_data(img1)) assert_array_equal( - concatenated.get_data()[..., 1], img3.get_data()) + get_data(concatenated)[..., 1], get_data(img3)) finally: _remove_if_exists(tmpimg1) _remove_if_exists(tmpimg2) @@ -474,9 +485,9 @@ def test_concat_niimg_dtype(): np.zeros(shape + [n_scans]).astype(np.int16), np.eye(4)) for n_scans in [1, 5]] nimg = _utils.concat_niimgs(vols) - assert_equal(nimg.get_data().dtype, np.float32) + assert_equal(get_data(nimg).dtype, np.float32) nimg = _utils.concat_niimgs(vols, dtype=None) - assert_equal(nimg.get_data().dtype, np.int16) + assert_equal(get_data(nimg).dtype, np.int16) def nifti_generator(buffer): @@ -491,13 +502,13 @@ def test_iterator_generator(): for i in range(10)] cc = _utils.concat_niimgs(l) assert_equal(cc.shape[-1], 10) - assert_array_almost_equal(cc.get_data()[..., 0], l[0].get_data()) + assert_array_almost_equal(get_data(cc)[..., 0], get_data(l[0])) # Same with iteration i = 
image.iter_img(l) cc = _utils.concat_niimgs(i) assert_equal(cc.shape[-1], 10) - assert_array_almost_equal(cc.get_data()[..., 0], l[0].get_data()) + assert_array_almost_equal(get_data(cc)[..., 0], get_data(l[0])) # Now, a generator b = []
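The change running through the patches above is mechanical: every call to the deprecated nibabel accessor img.get_data() becomes a call to the module-level helper nilearn.image.get_data(img), which these patches import at the top of each file. A minimal sketch of the two access patterns, using a throwaway synthetic image made up for illustration:

    import numpy as np
    from nibabel import Nifti1Image
    from nilearn.image import get_data

    # Any Niimg-like object would do; this one exists only for the example.
    img = Nifti1Image(np.zeros((4, 4, 4)), affine=np.eye(4))

    data_old = img.get_data()   # deprecated method, dropped by later nibabel
    data_new = get_data(img)    # helper the patches migrate to

    np.testing.assert_array_equal(data_old, data_new)

Both calls return the same array; routing the access through one helper lets nilearn adapt in a single place when nibabel removes the method.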
From 1a821a2b8620cf56491708ca84d0bc8b1e70d9ee Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Mon, 4 Nov 2019 14:50:14 +0100 Subject: [PATCH 12/25] Installation should fail on Python < 3.5 (#2198) * Only Python3 binary distribution/wheels are built by default * Required Python is >=3.5; Updated whats_new; Bumped up the version to beta --- doc/whats_new.rst | 7 +++++++ nilearn/version.py | 2 +- setup.cfg | 3 --- setup.py | 4 +++- 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 94b6a37914..8b79860cd1 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -1,6 +1,13 @@ 0.6.0b ====== +.. warning:: + + | **Python2 and 3.4 are no longer supported. Pip will raise an error in these environments.** + | **Minimum supported version of Python is now 3.5.** + | **We recommend upgrading to Python 3.6.** + + NEW --- diff --git a/nilearn/version.py b/nilearn/version.py index 913654d4f5..1ab95ed14e 100644 --- a/nilearn/version.py +++ b/nilearn/version.py @@ -21,7 +21,7 @@ # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # -__version__ = '0.6.0a' +__version__ = '0.6.0b' _NILEARN_INSTALL_MSG = 'See %s for installation information.' % ( 'http://nilearn.github.io/introduction.html#installation') diff --git a/setup.cfg b/setup.cfg index 3e61ea08c1..0a9f856f10 100644 --- a/setup.cfg +++ b/setup.cfg @@ -3,9 +3,6 @@ [bdist_rpm] doc-files = doc -[wheel] -universal=1 - [flake8] # For PEP8 error codes see # http://pep8.readthedocs.org/en/latest/intro.html#error-codes diff --git a/setup.py b/setup.py index 0d2147ca1d..c4bc793612 100755 --- a/setup.py +++ b/setup.py @@ -96,4 +96,6 @@ def is_installing(): 'nilearn.datasets.tests.data': ['*.*'], 'nilearn.datasets.description': ['*.rst'], 'nilearn.reporting.data.html': ['*.html']}, - install_requires=install_requires,) + install_requires=install_requires, + python_requires='>=3.5', + ) From 8a623c1d9082341a33ffe7b12fd1981ebf3834c4 Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Fri, 8 Nov 2019 13:16:52 +0100 Subject: [PATCH 13/25] Refactor CircleCI config for reduced redundancy (#2204) * Reduced redundancy in CircleCI config by making custom reusable commands * Corrected indentation * Cleared the cache to address conda install issues * Added mechanism to manually clear packages cache; Cleared to fix install problems * Changed command names for easier reading * Changed more command names for easier perusal * Changed more command names for easier perusal -2 * Improved task names & descriptions to be more self-documenting --- .circleci/auto-cache-timestamp | 1 - .circleci/clean-cache.py | 6 +- .circleci/config.yml | 167 ++++++++++++++++------------- .circleci/docs-cache-timestamp | 1 + .circleci/manual-cache-timestamp | 1 - .circleci/packages-cache-timestamp | 1 + 6 files changed, 99 insertions(+), 78 deletions(-) delete mode 100644 .circleci/auto-cache-timestamp create mode 100644 .circleci/docs-cache-timestamp delete mode 100644 .circleci/manual-cache-timestamp create mode 100644 .circleci/packages-cache-timestamp diff --git a/.circleci/auto-cache-timestamp b/.circleci/auto-cache-timestamp deleted file mode 100644 index 50b266a34f..0000000000 --- a/.circleci/auto-cache-timestamp +++ /dev/null @@ -1 +0,0 @@ -2019-04-19 15:05:58.522213 \ No newline at end of file diff --git a/.circleci/clean-cache.py b/.circleci/clean-cache.py index 0b05f6f4d1..297df18b0a 100755 --- a/.circleci/clean-cache.py +++ b/.circleci/clean-cache.py @@ -7,7 +7,7 @@ def update_cache_timestamp(timestamp_filename): - """ Updates the contents of the manual-cache-timestamp file + """ Updates the contents of the docs-cache-timestamp file with current timestamp. Returns @@ -22,5 +22,5 @@ if __name__ == '__main__': - update_cache_timestamp('manual-cache-timestamp') - update_cache_timestamp('auto-cache-timestamp') + update_cache_timestamp('docs-cache-timestamp') + update_cache_timestamp('packages-cache-timestamp') diff --git a/.circleci/config.yml b/.circleci/config.yml index 9ec84ad53f..0e2e930779 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,42 +3,46 @@ # It doesn't operate on master branch. New branches are always built from scratch. # full-build always rebuilds from scratch, without any cache. Only for changes in master branch. -version: 2 +version: 2.1 -jobs: - quick-build: - docker: - - image: circleci/python:3.6 - environment: - DISTRIB: "conda" - PYTHON_VERSION: "3.6" - NUMPY_VERSION: "*" - SCIPY_VERSION: "*" - SCIKIT_LEARN_VERSION: "*" - JOBLIB_VERSION: "*" - MATPLOTLIB_VERSION: "*" +commands: + preinstall: + description: "Cleans up unused packages; Updates system packages" + steps: + - run: + name: Remove conflicting packages + command: | + # Get rid of existing virtualenvs on circle ci as they conflict with conda. + # Trick found here: + # https://discuss.circleci.com/t/disable-autodetection-of-project-or-application-of-python-venv/235/10 + cd && rm -rf ~/.pyenv && rm -rf ~/virtualenvs + + # We need to remove conflicting texlive packages. + sudo -E apt-get -yq remove texlive-binaries --purge + - run: + name: Install packages for make -C doc check + command: | + # Installing required packages for `make -C doc check command` to work. + sudo -E apt-get -yq update + sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra + + restore_from_cache: + description: "Restores the cache of previously built docs & python packages if present" + steps: - run: - name: Today & Week # Saving today's date and current week of the year in files to generate daily & weekly new cache key respectively.
+ name: Generate cache keys from today's date for built docs & week number for python packages command: | date +%F > today date +%U > week_num - restore_cache: - key: v1-packages+datasets-{{ checksum "week_num" }} + key: v1-packages+datasets-{{ checksum "week_num" }}-{{ checksum ".circleci/packages-cache-timestamp" }} - restore_cache: - key: v1-docs-{{ .Branch }}-{{ checksum "today" }}-{{ checksum ".circleci/manual-cache-timestamp" }} + key: v1-docs-{{ .Branch }}-{{ checksum "today" }}-{{ checksum ".circleci/docs-cache-timestamp" }} + cache_aware_conda_setup: + description: "Downloads & installs conda if not restored by cache" + steps: - run: name: Download & install conda if absent command: | @@ -64,91 +68,108 @@ else conda create -n testenv -yq fi + + cache_ignorant_conda_setup: + description: "Downloads & installs only the fresh copy of conda." + steps: + - run: + name: setup conda afresh + command: | + wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh + chmod +x ~/miniconda.sh && ~/miniconda.sh -b + echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV + + install_dependencies: + description: "Installs the necessary Python packages" + steps: - run: name: Install packages in conda env command: | - conda install -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ + conda install -n testenv python=3.7 numpy scipy scikit-learn matplotlib pandas \ lxml mkl sphinx numpydoc pillow pandas -yq conda install -n testenv nibabel sphinx-gallery junit-xml -c conda-forge -yq + + build_docs: + description: "Installs Nilearn & builds documentation using Sphinx's make html-strict" + steps: - run: - name: Running CircleCI test (make html) + name: Building documentation command: | source activate testenv pip install -e . set -o pipefail && cd doc && make html-strict 2>&1 | tee log.txt no_output_timeout: 7h + + store_results: + description: "Stores build times and artifacts" + steps: - store_test_results: path: doc/_build/test-results - store_artifacts: path: doc/_build/test-results + - store_artifacts: + path: doc/_build/html + - store_artifacts: + path: coverage + - store_artifacts: + path: doc/log.txt + save_to_cache: + description: "Caches the downloaded packages & built docs." + steps: - save_cache: key: v1-packages+datasets-{{ checksum "week_num" }} paths: - ../nilearn_data - ../miniconda3 - save_cache: - key: v1-docs-{{ .Branch }}-{{ checksum "today" }}-{{ checksum ".circleci/manual-cache-timestamp" }} + key: v1-docs-{{ .Branch }}-{{ checksum "today" }}-{{ checksum ".circleci/docs-cache-timestamp" }} paths: - doc - - store_artifacts: - path: doc/_build/html - - store_artifacts: - path: coverage - - store_artifacts: - path: doc/log.txt +jobs: - full-build: + quick-build: docker: - - image: circleci/python:3.6 + - image: circleci/python:3.7 environment: DISTRIB: "conda" - PYTHON_VERSION: "3.6" + PYTHON_VERSION: "3.7" NUMPY_VERSION: "*" SCIPY_VERSION: "*" SCIKIT_LEARN_VERSION: "*" + JOBLIB_VERSION: "*" MATPLOTLIB_VERSION: "*" steps: - checkout - # Get rid of existing virtualenvs on circle ci as they conflict with conda. - # Trick found here: - # https://discuss.circleci.com/t/disable-autodetection-of-project-or-application-of-python-venv/235/10 - - run: cd && rm -rf ~/.pyenv && rm -rf ~/virtualenvs - # We need to remove conflicting texlive packages. - - run: sudo -E apt-get -yq remove texlive-binaries --purge - # Installing required packages for `make -C doc check command` to work.
- - run: sudo -E apt-get -yq update - - run: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra - - run: wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh - - run: chmod +x ~/miniconda.sh && ~/miniconda.sh -b - - run: echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV - - run: - name: Install packages in conda env - command: | - conda create -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ - lxml mkl sphinx numpydoc pillow pandas -yq - conda install -n testenv nibabel sphinx-gallery junit-xml -c conda-forge -yq - - run: - name: Running CircleCI test (make html) - command: | - source activate testenv - pip install -e . - set -o pipefail && cd doc && make html-strict 2>&1 | tee log.txt - no_output_timeout: 7h - - store_test_results: - path: doc/_build/test-results - - store_artifacts: - path: doc/_build/test-results + - preinstall + - restore_from_cache + - cache_aware_conda_setup + - install_dependencies + - build_docs + - store_results + - save_to_cache - - store_artifacts: - path: doc/_build/html - - store_artifacts: - path: coverage - - store_artifacts: - path: doc/log.txt + full-build: + docker: + - image: circleci/python:3.7 + environment: + DISTRIB: "conda" + PYTHON_VERSION: "3.7" + NUMPY_VERSION: "*" + SCIPY_VERSION: "*" + SCIKIT_LEARN_VERSION: "*" + MATPLOTLIB_VERSION: "*" + + steps: + - checkout + - preinstall + - cache_ignorant_conda_setup + - install_dependencies + - build_docs + - store_results workflows: diff --git a/.circleci/docs-cache-timestamp b/.circleci/docs-cache-timestamp new file mode 100644 index 0000000000..972236cedd --- /dev/null +++ b/.circleci/docs-cache-timestamp @@ -0,0 +1 @@ +2019-11-07 13:17:41.900352 \ No newline at end of file diff --git a/.circleci/manual-cache-timestamp b/.circleci/manual-cache-timestamp deleted file mode 100644 index e3790b2eeb..0000000000 --- a/.circleci/manual-cache-timestamp +++ /dev/null @@ -1 +0,0 @@ -2019-04-19 15:05:58.522064 \ No newline at end of file diff --git a/.circleci/packages-cache-timestamp b/.circleci/packages-cache-timestamp new file mode 100644 index 0000000000..ed02d172e6 --- /dev/null +++ b/.circleci/packages-cache-timestamp @@ -0,0 +1 @@ +2019-11-07 13:17:41.900617 \ No newline at end of file From 8a1b4e6910692b4976444e2e2262b6c7c087c5c3 Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Fri, 8 Nov 2019 13:21:07 +0100 Subject: [PATCH 14/25] Conda environment is created for full-builds --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 0e2e930779..74af9b7ada 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -78,6 +78,7 @@ commands: wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh chmod +x ~/miniconda.sh && ~/miniconda.sh -b echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV + conda create -n testenv install_dependencies: description: "Installs the necessary Python packages" From 4c01e37ab53f875f7e4f21de99fd532931850252 Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Fri, 8 Nov 2019 13:31:38 +0100 Subject: [PATCH 15/25] New conda env is created once conda path has been activated --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 74af9b7ada..094db28481 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -78,7 +78,7 @@ 
commands: wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh chmod +x ~/miniconda.sh && ~/miniconda.sh -b echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV - conda create -n testenv + - run: conda create -n testenv install_dependencies: description: "Installs the necessary Python packages" From 3c117b1358f22a2abf565d91d55862924e7eb212 Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Fri, 8 Nov 2019 16:03:29 +0100 Subject: [PATCH 16/25] Verbose doc building to ease tracking of progress & diagnose stalls (#2203) --- doc/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/Makefile b/doc/Makefile index f5e4288c71..f62e78c9c6 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -2,7 +2,7 @@ # # You can set these variables from the command line. -SPHINXOPTS = +SPHINXOPTS = -v SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build From 019e37274e1c9c0eb99ee037df4769d260be0e69 Mon Sep 17 00:00:00 2001 From: Jake Vogel Date: Sat, 9 Nov 2019 14:52:17 -0500 Subject: [PATCH 17/25] Modify fetch_development_fmri to fetch adults or children (#2035) * put adult_ids before child_ids * added adults_or_children argument to fetch_dev * fixed new argument, implemented test * replaced clunky if block with one-liner * fixed n=1 issue * Update nilearn/datasets/func.py Co-Authored-By: illdopejake * made lines 2113-4 PEP8 compatible * made lines 2113-4 shorter * typo on l 2032 * removed extra line break * fixed the adult-child sorting issue in a more sustainable way * fix: return child if requested with n=1, otherwise adult * dealt with edge case n=2, added info about ratio to doc * Fix flake 8 errors * Fix flake 8 errors -2 * sty: rename adults_or_children to age_group, add test * Added resume parameter into inner function call (inadvertently missed) * Refactored the function for clarity & testing ease * Added tests to check warning and exception * Used pytest.warns for brevity, fixed failing test * Placate flake8 * Refactored function to ease writing unit test (faster) * Replaced integration with unit test for faster testing * Skipped a flake 8 warning which is unavoidable & frequent --- nilearn/datasets/func.py | 146 ++++++++++++++++++++-------- nilearn/datasets/tests/test_func.py | 48 +++++++++ setup.cfg | 3 +- 3 files changed, 157 insertions(+), 40 deletions(-) diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 390b149c98..d3e0307130 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -1910,7 +1910,8 @@ def _fetch_development_fmri_participants(data_dir, url, verbose): return participants -def _fetch_development_fmri_functional(participants, data_dir, url, verbose): +def _fetch_development_fmri_functional(participants, data_dir, url, resume, + verbose): """Helper function to fetch_development_fmri. This function helps in downloading functional MRI data in Nifti @@ -1932,6 +1933,9 @@ def _fetch_development_fmri_functional(participants, data_dir, url, verbose): Override download URL. Used for test only (or if you setup a mirror of the data). Default: None + resume: bool, optional (default True) + Whether to resume download of a partly-downloaded file. + verbose: int Defines the level of verbosity of the output. 
@@ -1981,13 +1985,15 @@ def _fetch_development_fmri_functional(participants, data_dir, url, verbose): func_url = url.format(this_osf_id['key_b'][0]) func_file = [(func.format(participant_id, participant_id), func_url, {'move': func.format(participant_id)})] - path_to_func = _fetch_files(data_dir, func_file, verbose=verbose)[0] + path_to_func = _fetch_files(data_dir, func_file, resume=resume, + verbose=verbose)[0] funcs.append(path_to_func) return funcs, regressors def fetch_development_fmri(n_subjects=None, reduce_confounds=True, - data_dir=None, resume=True, verbose=1): + data_dir=None, resume=True, verbose=1, + age_group='both'): """Fetch movie watching based brain development dataset (fMRI) The data is downsampled to 4mm resolution for convenience. The origin of @@ -2019,6 +2025,12 @@ def fetch_development_fmri(n_subjects=None, reduce_confounds=True, verbose: int, optional (default 1) Defines the level of verbosity of the output. + age_group: str, optional (default 'both') + Which age group to fetch + - 'adult' = fetch adults only (n=33, ages 18-39) + - 'child' = fetch children only (n=122, ages 3-12) + - 'both' = fetch full sample (n=155) + Returns ------- data: Bunch @@ -2044,6 +2056,10 @@ def fetch_development_fmri(n_subjects=None, reduce_confounds=True, Preprocessing details: https://osf.io/wjtyq/ + Note that if n_subjects > 2 and age_group is 'both', + the fetcher will return children and adults in a ratio representative + of the total sample. + References ---------- Please cite this paper if you are using this dataset. @@ -2070,52 +2086,104 @@ url=None, verbose=verbose) - max_subjects = len(participants) - if n_subjects is None: - n_subjects = max_subjects - - if (isinstance(n_subjects, numbers.Number) and - ((n_subjects > max_subjects) or (n_subjects < 1))): - warnings.warn("Wrong value for n_subjects={0}. The maximum " - "value will be used instead n_subjects={1}" - .format(n_subjects, max_subjects)) - n_subjects = max_subjects + adult_count, child_count = _filter_func_regressors_by_participants( + participants, age_group) # noqa: E126 + max_subjects = adult_count + child_count - # Download functional and regressors based on participants - child_count = participants['Child_Adult'].tolist().count('child') - adult_count = participants['Child_Adult'].tolist().count('adult') + n_subjects = _set_invalid_n_subjects_to_max(n_subjects, + max_subjects, + age_group) # To keep the proportion of children versus adults - n_child = np.round(float(n_subjects) / max_subjects * child_count).astype(int) - n_adult = np.round(float(n_subjects) / max_subjects * adult_count).astype(int) + percent_total = float(n_subjects) / max_subjects + n_child = np.round(percent_total * child_count).astype(int) + n_adult = np.round(percent_total * adult_count).astype(int) - # First, restrict the csv files to the adequate number of subjects - child_ids = participants[participants['Child_Adult'] == - 'child']['participant_id'][:n_child] - adult_ids = participants[participants['Child_Adult'] == - 'adult']['participant_id'][:n_adult] - ids = np.hstack([child_ids, adult_ids]) - participants = participants[np.in1d(participants['participant_id'], - ids)] + # We want to return adults by default (i.e., `age_group=both`) or + # if explicitly requested.
+ if (age_group != 'child') and (n_subjects == 1): + n_adult, n_child = 1, 0 + + if (age_group == 'both') and (n_subjects == 2): + n_adult, n_child = 1, 1 + + participants = _filter_csv_by_n_subjects(participants, n_adult, n_child) funcs, regressors = _fetch_development_fmri_functional(participants, data_dir=data_dir, url=None, + resume=resume, verbose=verbose) if reduce_confounds: - reduced_regressors = [] - for in_file in regressors: - out_file = in_file.replace('desc-confounds', - 'desc-reducedConfounds') - if not os.path.isfile(out_file): - confounds = np.recfromcsv(in_file, delimiter='\t') - selected_confounds = confounds[keep_confounds] - header = '\t'.join(selected_confounds.dtype.names) - np.savetxt(out_file, np.array(selected_confounds.tolist()), - header=header, delimiter='\t', comments='') - reduced_regressors.append(out_file) - regressors = reduced_regressors - + regressors = _reduce_confounds(regressors, keep_confounds) return Bunch(func=funcs, confounds=regressors, phenotypic=participants, description=fdescr) + + +def _filter_func_regressors_by_participants(participants, age_group): + """ Filter functional and regressors based on participants + """ + valid_age_groups = ('both', 'child', 'adult') + if age_group not in valid_age_groups: + raise ValueError("Wrong value for age_group={0}. " + "Valid arguments are: {1}".format(age_group, + valid_age_groups) + ) + + child_adult = participants['Child_Adult'].tolist() + + if age_group != 'adult': + child_count = child_adult.count('child') + else: + child_count = 0 + + if age_group != 'child': + adult_count = child_adult.count('adult') + else: + adult_count = 0 + return adult_count, child_count + + +def _filter_csv_by_n_subjects(participants, n_adult, n_child): + """Restrict the csv files to the adequate number of subjects + """ + child_ids = participants[participants['Child_Adult'] == + 'child']['participant_id'][:n_child] + adult_ids = participants[participants['Child_Adult'] == + 'adult']['participant_id'][:n_adult] + ids = np.hstack([adult_ids, child_ids]) + participants = participants[np.in1d(participants['participant_id'], ids)] + participants = participants[np.argsort(participants, order='Child_Adult')] + return participants + + +def _set_invalid_n_subjects_to_max(n_subjects, max_subjects, age_group): + """ If n_subjects is invalid, sets it to max. + """ + if n_subjects is None: + n_subjects = max_subjects + + if (isinstance(n_subjects, numbers.Number) and + ((n_subjects > max_subjects) or (n_subjects < 1))): + warnings.warn("Wrong value for n_subjects={0}. The maximum " + "value (for age_group={1}) will be used instead: " + "n_subjects={2}" + .format(n_subjects, age_group, max_subjects)) + n_subjects = max_subjects + return n_subjects + + +def _reduce_confounds(regressors, keep_confounds): + reduced_regressors = [] + for in_file in regressors: + out_file = in_file.replace('desc-confounds', + 'desc-reducedConfounds') + if not os.path.isfile(out_file): + confounds = np.recfromcsv(in_file, delimiter='\t') + selected_confounds = confounds[keep_confounds] + header = '\t'.join(selected_confounds.dtype.names) + np.savetxt(out_file, np.array(selected_confounds.tolist()), + header=header, delimiter='\t', comments='') + reduced_regressors.append(out_file) + return reduced_regressors
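With the helpers above in place, the new age_group behavior can be exercised as in the following sketch. This is an illustrative usage example, not part of the patch; calling the fetcher downloads data from OSF, and the n_subjects values here are arbitrary:

    from nilearn.datasets import fetch_development_fmri

    # Default: adults and children mixed in a ratio representative
    # of the full sample; n_subjects=2 with age_group='both' returns
    # one adult and one child, as handled above.
    mixed = fetch_development_fmri(n_subjects=4)

    # Children only; an invalid n_subjects falls back to the maximum
    # available for the requested age group, with a warning.
    children = fetch_development_fmri(n_subjects=2, age_group='child')

    print(mixed.phenotypic['Child_Adult'])
    print(children.phenotypic['Child_Adult'])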
The maximum " + "value (for age_group={1}) will be used instead: " + "n_subjects={2}" + .format(n_subjects, age_group, max_subjects)) + n_subjects = max_subjects + return n_subjects + + +def _reduce_confounds(regressors, keep_confounds): + reduced_regressors = [] + for in_file in regressors: + out_file = in_file.replace('desc-confounds', + 'desc-reducedConfounds') + if not os.path.isfile(out_file): + confounds = np.recfromcsv(in_file, delimiter='\t') + selected_confounds = confounds[keep_confounds] + header = '\t'.join(selected_confounds.dtype.names) + np.savetxt(out_file, np.array(selected_confounds.tolist()), + header=header, delimiter='\t', comments='') + reduced_regressors.append(out_file) + return reduced_regressors diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index 8f7de18ea4..aaf0e9a24c 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -6,10 +6,13 @@ import os import uuid + import numpy as np import json import nibabel import gzip + +import pytest from sklearn.utils import check_random_state from nose import with_setup @@ -609,6 +612,7 @@ def test_fetch_development_fmri_functional(): funcs, confounds = func._fetch_development_fmri_functional(csv, data_dir=tst.tmpdir, url=local_url, + resume=True, verbose=1) assert_equal(len(funcs), 8) assert_equal(len(confounds), 8) @@ -633,3 +637,47 @@ def test_fetch_development_fmri(): verbose=1) confounds = np.recfromcsv(data.confounds[0], delimiter='\t') assert_equal(len(confounds[0]), 28) + + # check first subject is an adult + data = func.fetch_development_fmri(n_subjects=1, reduce_confounds=False, + verbose=1) + age_group = data.phenotypic['Child_Adult'][0] + assert_equal(age_group, 'adult') + + # check first subject is an child if requested with age_group + data = func.fetch_development_fmri(n_subjects=1, reduce_confounds=False, + verbose=1, age_group='child') + age_group = data.phenotypic['Child_Adult'][0] + assert_equal(age_group, 'child') + + # check one of each age group returned if n_subject == 2 + # and age_group == 'both + data = func.fetch_development_fmri(n_subjects=2, reduce_confounds=False, + verbose=1, age_group='both') + age_group = data.phenotypic['Child_Adult'] + assert(all(age_group == ['adult', 'child'])) + + # check age_group + data = func.fetch_development_fmri(n_subjects=2, reduce_confounds=False, + verbose=1, age_group='child') + assert(all([x == 'child' for x in data.phenotypic['Child_Adult']])) + + +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_development_fmri_invalid_n_subjects(): + max_subjects = 155 + n_subjects = func._set_invalid_n_subjects_to_max(n_subjects=-1, + max_subjects=max_subjects, + age_group='adult') + assert n_subjects == max_subjects + with pytest.warns(UserWarning, match='Wrong value for n_subjects='): + func._set_invalid_n_subjects_to_max(n_subjects=-1, + max_subjects=max_subjects, + age_group='adult') + + +@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +def test_fetch_development_fmri_exception(): + with pytest.raises(ValueError, match='Wrong value for age_group'): + func._filter_func_regressors_by_participants(participants='junk', + age_group='junk for test') diff --git a/setup.cfg b/setup.cfg index 0a9f856f10..63789ed5bf 100644 --- a/setup.cfg +++ b/setup.cfg @@ -7,7 +7,8 @@ doc-files = doc # For PEP8 error codes see # http://pep8.readthedocs.org/en/latest/intro.html#error-codes # E402: module level import not at top of file -ignore=E402 +# W504: line break after binary operator 
+ignore=E402, W504 [tool:pytest] doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS From 4e1ccac93753ffb2eced2d7cbb20d91688d21fab Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Sat, 9 Nov 2019 22:31:46 +0100 Subject: [PATCH 18/25] Fixed the redundant & missing test case in merged PR #2035 (#2205) --- nilearn/datasets/tests/test_func.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index aaf0e9a24c..d9ad7e19e5 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -666,7 +666,7 @@ def test_fetch_development_fmri(): @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_development_fmri_invalid_n_subjects(): max_subjects = 155 - n_subjects = func._set_invalid_n_subjects_to_max(n_subjects=-1, + n_subjects = func._set_invalid_n_subjects_to_max(n_subjects=None, max_subjects=max_subjects, age_group='adult') assert n_subjects == max_subjects From a82717bca8a7444187ebcc2c14613c577c858a26 Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Sun, 10 Nov 2019 22:40:15 +0100 Subject: [PATCH 19/25] Nilearn 0.6.0b0 release (#2206) --- doc/whats_new.rst | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 8b79860cd1..bec7665473 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -1,5 +1,5 @@ -0.6.0b -====== +0.6.0b0 +======= .. warning:: @@ -35,6 +35,15 @@ Fixes - :func:`nilearn.plotting.plot_connectome` now correctly displays marker size on 'l' and 'r' orientations, if an array or a list is passed to the function. +Contributors +------------ + +The following people contributed to this release (in alphabetical order):: + + Jake Vogel + Jerome Dockes + Kshitij Chawla (kchawla-pi) + robbisg 0.6.0a0 ======= From 0941897aca6f3073221fcd3483afe91b8305b27c Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Mon, 11 Nov 2019 13:46:38 +0100 Subject: [PATCH 20/25] Rel 060b0 (#2208) * Updated whats_new * Updated whats_new and .mailmap * Updated release month in whats_new --- .mailmap | 1 + doc/whats_new.rst | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index 3a978ca81c..089bc31929 100644 --- a/.mailmap +++ b/.mailmap @@ -42,6 +42,7 @@ Michael Waskom Moritz Boos Óscar Nájera Philippe Gervais +Roberto Guidotti Ronald Phlypo Salma Bougacha Vincent Michel diff --git a/doc/whats_new.rst b/doc/whats_new.rst index bec7665473..7c900333fd 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -1,6 +1,8 @@ 0.6.0b0 ======= +**Released November 2019** + .. warning:: | **Python2 and 3.4 are no longer supported. Pip will raise an error in these environments.** @@ -29,6 +31,10 @@ Changes - :func:`nilearn.datasets.fetch_neurovault` now does not filter out images that have their metadata field `is_valid` cleared by default. +- Users can now specify fetching data for adults, children, or both from + :func:`nilearn.datasets.fetch_development_fmri` . 
+ + Fixes ----- @@ -43,7 +49,7 @@ The following people contributed to this release (in alphabetical order):: Jake Vogel Jerome Dockes Kshitij Chawla (kchawla-pi) - robbisg + Roberto Guidotti 0.6.0a0 ======= From b76551fbb2957106bddb787c4fe8bc5bea06511c Mon Sep 17 00:00:00 2001 From: Derek Pisner <16432683+dPys@users.noreply.github.com> Date: Wed, 20 Nov 2019 21:33:41 -0600 Subject: [PATCH 21/25] [DOC] Add note about decreasing memory usage (#2223) * [DOC] Add note about decreasing memory usage by decompressing nii's #2222 * Update doc/manipulating_images/input_output.rst Co-Authored-By: Elizabeth DuPre --- doc/manipulating_images/input_output.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/manipulating_images/input_output.rst b/doc/manipulating_images/input_output.rst index 5960c47f86..139d021667 100644 --- a/doc/manipulating_images/input_output.rst +++ b/doc/manipulating_images/input_output.rst @@ -234,6 +234,16 @@ data, which we call Niimgs or Niimg-4D. Accepted input arguments are: If you provide a sequence of Nifti images, all of them must have the same affine. +.. topic:: Decreasing memory used when loading Nifti images + + When Nifti images are stored compressed (.nii.gz), loading them directly + consumes more memory. As a result, large 4D images may + raise "MemoryError", especially on smaller computers and when using Nilearn + routines that require intensive 4D matrix operations. One step to improve + the situation may be to decompress the data onto disk as an initial step. + If multiple images are loaded into memory sequentially, another solution may + be to `uncache `_ one before loading and performing operations on another. + Text files: phenotype or behavior ---------------------------------- From a7e22781188aa338b69dfdbcb2f68073ffd67a6b Mon Sep 17 00:00:00 2001 From: Gael Varoquaux Date: Wed, 20 Nov 2019 22:36:59 -0500 Subject: [PATCH 22/25] DOC: title in bold --- doc/manipulating_images/input_output.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manipulating_images/input_output.rst b/doc/manipulating_images/input_output.rst index 139d021667..fd4cb36dd7 100644 --- a/doc/manipulating_images/input_output.rst +++ b/doc/manipulating_images/input_output.rst @@ -234,7 +234,7 @@ data, which we call Niimgs or Niimg-4D. Accepted input arguments are: If you provide a sequence of Nifti images, all of them must have the same affine. -.. topic:: Decreasing memory used when loading Nifti images +.. topic:: **Decreasing memory used when loading Nifti images** When Nifti images are stored compressed (.nii.gz), loading them directly consumes more memory. 
As a result, large 4D images may + raise "MemoryError", especially on smaller computers and when using Nilearn + routines that require intensive 4D matrix operations. One step to improve + the situation may be to decompress the data onto disk as an initial step. + If multiple images are loaded into memory sequentially, another solution may + be to `uncache `_ one before loading and performing operations on another. + Text files: phenotype or behavior ----------------------------------
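The two suggestions in the documentation note above can be made concrete with nibabel alone. A short sketch, assuming hypothetical local files func.nii.gz, subject1.nii.gz and subject2.nii.gz:

    import nibabel

    # Suggestion 1: decompress once on disk, then work with the .nii copy.
    img = nibabel.load('func.nii.gz')
    nibabel.save(img, 'func.nii')

    # Suggestion 2: when processing images sequentially, release the data
    # cached on one image before loading the next.
    img1 = nibabel.load('subject1.nii.gz')
    data1 = img1.get_fdata()
    # ... operate on data1, keep only the results ...
    del data1
    img1.uncache()  # drops the array cached on the image object
    img2 = nibabel.load('subject2.nii.gz')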
nilearn/surface/tests/test_surface.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/nilearn/surface/tests/test_surface.py b/nilearn/surface/tests/test_surface.py index a57032c004..f85c8f166f 100644 --- a/nilearn/surface/tests/test_surface.py +++ b/nilearn/surface/tests/test_surface.py @@ -374,6 +374,11 @@ def test_vertex_outer_normals(): def test_load_uniform_ball_cloud(): + # Note: computed and shipped point clouds may differ since KMeans results + # change after + # https://github.com/scikit-learn/scikit-learn/pull/9288 + # but the exact position of the points does not matter as long as they are + # well spread inside the unit ball for n_points in [10, 20, 40, 80, 160]: with warnings.catch_warnings(record=True) as w: points = surface._load_uniform_ball_cloud(n_points=n_points) @@ -381,10 +386,12 @@ def test_load_uniform_ball_cloud(): assert_equal(len(w), 0) assert_warns(surface.EfficiencyWarning, surface._load_uniform_ball_cloud, n_points=3) - for n_points in [3, 10, 20]: + for n_points in [3, 7]: computed = surface._uniform_ball_cloud(n_points) loaded = surface._load_uniform_ball_cloud(n_points) assert_array_almost_equal(computed, loaded) + assert (np.std(computed, axis=0) > .1).all() + assert (np.linalg.norm(computed, axis=1) <= 1).all() def test_sample_locations(): From bbc750aba1f0337a6a2299ae064bca647e66f415 Mon Sep 17 00:00:00 2001 From: Kshitij Chawla Date: Thu, 21 Nov 2019 10:38:54 +0100 Subject: [PATCH 25/25] Add testing for Nilearn setup & install & Fix the broken dependency installation (#2201) * Added job in Azure Pipelines to build & install Nilearn wheel * Fix inadvertant pip install with the intended pip list * Wheel install command is Nilearn version agnostic to work with future versions [skip ci] * Ensure all required packages are listed for `pip install` * Eanble error mode in shell * Fixed Could not find a version that satisfies the requirement sklearn>=0.19 (Replaced name sklearn with proper name scikit-learn.) * Removed deactivated code * Corrected typo in package name * Restored sklearn as module name in conf.py to fix import failure * Restored sklearn as module name in version.py to fix import failure * scikit-learn is the package name passed on to pip * Dummy sklearn added to required modules list * Flake8 fix * Tinkering with sklearn version for successful Appveyor install * Added installation testing on Windows as well - Changed next version to 0.6.0b0 as setuptools warns & normalizes to that. - Tweaked Azure job names for clarity. * Fixed spaces in Azure Pipelines job names * Fixed non-alphanumeric characters in Azure Pipelines job names * Removed code & package name redundancy * Restored previous code, (sklearn not scikit-learn is essential in versions.py) * Placate Fake8 * Added installation testing for MacOS - Renamed Azure Pipelines jobs for clarity. 
* Added test for checking the required packages are installed * `\` path separator for windows installation test; sorted jobs alphabetically * Setting Error mode for Windows Powershell * Corrected incorrect commands and path specs * Corrected installation of Nilearn wheel in Powershell * Replaced Powershell commands with cmd commands (the correct shell for AZP) * Corrected command to execute pip install on discovered file * Proactively installed sklearn to fix not found error when installing nilearn * Installation tests are only done for Py3.6, seems unnecessary for more * Expected failure tests (older python); windows install test during code test * Added expected to fail tests for older python versions * Segregated steps for easier human parsing * Removed py34 (not supported by AZP), Py27 test failure now expected in test --- appveyor.yml | 2 +- azure-pipelines.yml | 149 +++++++++++++++++- .../show-python-packages-versions.py | 3 +- nilearn/tests/test_package_installation.py | 17 ++ nilearn/version.py | 2 +- setup.py | 21 ++- 6 files changed, 184 insertions(+), 10 deletions(-) create mode 100644 nilearn/tests/test_package_installation.py diff --git a/appveyor.yml b/appveyor.yml index ed77a89874..8cc7846210 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -27,7 +27,7 @@ install: - "conda install pip numpy scipy scikit-learn joblib nose pytest wheel matplotlib -y -q" # Install other nilearn dependencies - - "pip install nibabel coverage nose-timer pytest-cov" + - "pip install nibabel coverage nose-timer pytest-cov sklearn" - "python setup.py bdist_wheel" - ps: "ls dist" diff --git a/azure-pipelines.yml b/azure-pipelines.yml index d6d5180a78..92a353925d 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -8,7 +8,8 @@ trigger: jobs: -- job: 'TestWindows' + +- job: 'run_tests_on_windows' pool: vmImage: 'vs2017-win2016' strategy: @@ -44,3 +45,149 @@ jobs: testResultsFiles: '**/test-results.xml' testRunTitle: 'Python $(python.version)' condition: succeededOrFailed() + + +- job: 'test_installing_on_linux' + pool: + vmImage: 'ubuntu-18.04' + strategy: + matrix: + Python36: + python.version: '3.6' + maxParallel: 4 + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: '$(python.version)' + architecture: 'x64' + + - script: | + set -e + python -m pip install --upgrade pip + pip install --prefer-binary setuptools wheel + displayName: 'Install dependencies for building Nilearn' + + - script: | + set -e + python setup.py sdist bdist_wheel + displayName: 'Build Nilearn' + + - script: | + set -e + pip install dist/nilearn-*-py3-none-any.whl + python -c "from nilearn.tests.test_package_installation import test_required_package_installation as test_pkg; test_pkg()" + displayName: 'Install Nilearn from wheel & check required package installation' + + - script: | + set -e + python -c "from nilearn.tests.test_package_installation import test_required_package_installation as test_pkg; test_pkg()" + displayName: 'Test required package installation' + + +- job: 'test_installing_on_macos' + pool: + vmImage: 'macOS-10.13' + strategy: + matrix: + Python36: + python.version: '3.6' + maxParallel: 4 + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: '$(python.version)' + architecture: 'x64' + + - script: | + set -e + python -m pip install --upgrade pip + pip install --prefer-binary setuptools wheel + displayName: 'Install dependencies for building Nilearn' + + - script: | + set -e + python setup.py sdist bdist_wheel + displayName: 'Build Nilearn' + + - script: | + set -e + pip install 
dist/nilearn-*-py3-none-any.whl + python -c "from nilearn.tests.test_package_installation import test_required_package_installation as test_pkg; test_pkg()" + displayName: 'Install Nilearn from wheel & check required package installation' + + - script: | + set -e + python -c "from nilearn.tests.test_package_installation import test_required_package_installation as test_pkg; test_pkg()" + displayName: 'Test required package installation' + + +- job: 'test_installing_on_windows' + pool: + vmImage: 'vs2017-win2016' + strategy: + matrix: + Python36: + python.version: '3.6' + maxParallel: 4 + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: '$(python.version)' + architecture: 'x64' + + - script: | + :; set -o errexit + python -m pip install --upgrade pip + pip install --prefer-binary setuptools wheel + python setup.py sdist bdist_wheel + displayName: 'Install dependencies for building Nilearn' + + - script: | + :; set -o errexit + python setup.py sdist bdist_wheel + displayName: 'Build Nilearn' + + - script: | + :; set -o errexit + forfiles /p dist /m nilearn-*-py3-none-any.whl /c "cmd /c pip install @file" + displayName: 'Install binary distribution' + + - script: | + :; set -o errexit + python -c "from nilearn.tests.test_package_installation import test_required_package_installation as test_pkg; test_pkg()" + displayName: 'Test that required packages are installed' + + +- job: 'test_expected_to_fail_installation_on_linux' + pool: + vmImage: 'ubuntu-18.04' + strategy: + matrix: + Python27: + python.version: '2.7' + maxParallel: 4 + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: '$(python.version)' + architecture: 'x64' + + - script: | + set -e + python -m pip install --upgrade pip + pip install --prefer-binary setuptools wheel + displayName: 'Install dependencies for building Nilearn' + + - script: | + set -e + python setup.py sdist bdist_wheel + displayName: 'Build Nilearn' + + - script: | + set -e + pip install dist/nilearn-*-py3-none-any.whl 2>&1 | grep "^ERROR" + displayName: 'Attempt to install Nilearn from wheel (expected to fail)' diff --git a/continuous_integration/show-python-packages-versions.py b/continuous_integration/show-python-packages-versions.py index 3c2f3319a3..d28ea5fc19 100644 --- a/continuous_integration/show-python-packages-versions.py +++ b/continuous_integration/show-python-packages-versions.py @@ -1,6 +1,7 @@ import sys -DEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'joblib', 'matplotlib', 'nibabel'] +DEPENDENCIES = ['numpy', 'scipy', 'scikit-learn', 'joblib', 'matplotlib', + 'nibabel'] def print_package_version(package_name, indent=' '): diff --git a/nilearn/tests/test_package_installation.py b/nilearn/tests/test_package_installation.py new file mode 100644 index 0000000000..3b436ffd4e --- /dev/null +++ b/nilearn/tests/test_package_installation.py @@ -0,0 +1,17 @@ +from distutils.version import LooseVersion + +from nilearn.version import REQUIRED_MODULE_METADATA + + +def test_required_package_installation(): + for package_specs in REQUIRED_MODULE_METADATA: + package = package_specs[0] + min_version = package_specs[1]['min_version'] + imported_package = __import__(package) + installed_version = imported_package.__version__ + assert LooseVersion(installed_version) >= LooseVersion(min_version) + print(package, 'min:', min_version, 'installed:', installed_version) + + +if __name__ == '__main__': + test_required_package_installation() diff --git a/nilearn/version.py b/nilearn/version.py index 1ab95ed14e..9e096c145b 100644 ---
a/nilearn/version.py +++ b/nilearn/version.py @@ -21,7 +21,7 @@ # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # -__version__ = '0.6.0b' +__version__ = '0.6.0b0' _NILEARN_INSTALL_MSG = 'See %s for installation information.' % ( 'http://nilearn.github.io/introduction.html#installation') diff --git a/setup.py b/setup.py index c4bc793612..0731749d76 100755 --- a/setup.py +++ b/setup.py @@ -30,6 +30,20 @@ def is_installing(): return install_commands.intersection(set(sys.argv)) +def list_required_packages(): + required_packages = [] + required_packages_orig = ['%s>=%s' % (mod, meta['min_version']) + for mod, meta + in _VERSION_GLOBALS['REQUIRED_MODULE_METADATA'] + ] + for package in required_packages_orig: + if package.startswith('sklearn'): + package = package.replace('sklearn', 'scikit-learn') + required_packages.append(package) + required_packages.append('sklearn') + return required_packages + + # Make sources available using relative paths from this file's directory. os.chdir(os.path.dirname(os.path.abspath(__file__))) @@ -51,11 +65,6 @@ def is_installing(): module_check_fn = _VERSION_GLOBALS['_check_module_dependencies'] module_check_fn(is_nilearn_installing=True) - install_requires = \ - ['%s>=%s' % (mod, meta['min_version']) - for mod, meta in _VERSION_GLOBALS['REQUIRED_MODULE_METADATA'] - if not meta['required_at_installation']] - setup(name=DISTNAME, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, @@ -96,6 +105,6 @@ def is_installing(): 'nilearn.datasets.tests.data': ['*.*'], 'nilearn.datasets.description': ['*.rst'], 'nilearn.reporting.data.html': ['*.html']}, - install_requires=install_requires, + install_requires=list_required_packages(), python_requires='>=3.5', )
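The string handling in list_required_packages() above is easy to miss: the dependency metadata uses the import name sklearn, while pip must be given the distribution name scikit-learn. A standalone sketch of the same idea, with a made-up metadata list and version numbers purely for illustration:

    # Hypothetical metadata in the shape used by nilearn/version.py.
    METADATA = [
        ('numpy', {'min_version': '1.11'}),
        ('sklearn', {'min_version': '0.19'}),
    ]

    def pip_requirements(metadata):
        requirements = []
        for module, meta in metadata:
            spec = '%s>=%s' % (module, meta['min_version'])
            # pip resolves 'scikit-learn', not the import name 'sklearn'.
            requirements.append(spec.replace('sklearn', 'scikit-learn'))
        return requirements

    print(pip_requirements(METADATA))
    # ['numpy>=1.11', 'scikit-learn>=0.19']

The patch additionally appends a bare sklearn requirement (the "dummy" sklearn distribution mentioned in the commit message), which is why both names end up in the final install_requires list.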