Commit 392a1a9

Merge branch 'master' of github.com:nilearn/nilearn into int-niftis
* 'master' of github.com:nilearn/nilearn:
  Installation should fail on Python < 3.5 (nilearn#2198)
  [MRG] Add get data function (nilearn#2172)
kchawla-pi committed Nov 4, 2019
2 parents fb88423 + 1a821a2 commit 392a1a9
Showing 57 changed files with 489 additions and 343 deletions.
4 changes: 2 additions & 2 deletions doc/manipulating_images/input_output.rst
@@ -175,7 +175,7 @@ Analyze files) is the standard way of sharing data in neuroimaging
research. Three main components are:

:data:
- raw scans in form of a numpy array: ``data = img.get_data()``
+ raw scans in form of a numpy array: ``data = nilearn.image.get_data(img)``
:affine:
returns the transformation matrix that maps
from voxel indices of the numpy array to actual real-world
@@ -218,7 +218,7 @@ objects":
**Niimg:** A Niimg-like object can be one of the following:

* A string with a file path to a Nifti or Analyse image
- * An ``SpatialImage`` from nibabel, ie an object exposing ``get_data()``
+ * An ``SpatialImage`` from nibabel, ie an object exposing ``get_fdata()``
method and ``affine`` attribute, typically a ``Nifti1Image`` from nibabel_.

**Niimg-4D:** Similarly, some functions require 4D Nifti-like
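Below is a short illustrative sketch (not part of the commit) of the conventions this documentation page describes, using the new nilearn.image.get_data accessor; the small in-memory image stands in for a real scan.

import numpy as np
import nibabel as nib
from nilearn.image import get_data

# A small in-memory image standing in for a real NIfTI scan.
img = nib.Nifti1Image(np.zeros((4, 4, 4)), affine=np.eye(4))

data = get_data(img)    # :data: raw scans as a numpy array (also accepts a file path)
affine = img.affine     # :affine: maps voxel indices to real-world positions
header = img.header     # acquisition metadata (voxel size, etc.)
print(data.shape, affine.shape)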
1 change: 1 addition & 0 deletions doc/modules/reference.rst
@@ -169,6 +169,7 @@ uses.
coord_transform
copy_img
crop_img
+ get_data
high_variance_confounds
index_img
iter_img
22 changes: 22 additions & 0 deletions doc/whats_new.rst
@@ -1,9 +1,31 @@
0.6.0b
======

.. warning::

| **Python2 and 3.4 are no longer supported. Pip will raise an error in these environments.**
| **Minimum supported version of Python is now 3.5 .**
| **We recommend upgrading to Python 3.6 .**

NEW
---

- A new function :func:`nilearn.image.get_data` to replace the deprecated
nibabel method `Nifti1Image.get_data`. Now use `nilearn.image.get_data(img)`
rather than `img.get_data()`. This is because Nibabel is removing the
`get_data` method. You may also consider using the Nibabel
`Nifti1Image.get_fdata`, which returns the data cast to floating-point.
See https://github.com/nipy/nibabel/wiki/BIAP8 .
As a benefit, the `get_data` function works on niimg-like objects such as
filenames (see http://nilearn.github.io/manipulating_images/input_output.html ).

Changes
-------

- All functions and examples now use `nilearn.image.get_data` rather than the
deprecated method `nibabel.Nifti1Image.get_data`.

- :func:`nilearn.datasets.fetch_neurovault` now does not filter out images that
have their metadata field `is_valid` cleared by default.

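The following is a minimal sketch (not part of the commit) of the migration described in the NEW entry above, assuming an in-memory nibabel image:

import numpy as np
import nibabel as nib
from nilearn.image import get_data

img = nib.Nifti1Image(np.ones((2, 2, 2), dtype=np.int16), affine=np.eye(4))

# Deprecated pattern that nibabel is removing:  data = img.get_data()
data = get_data(img)            # keeps the underlying dtype (int16 here)
fdata = img.get_fdata()         # nibabel alternative, cast to floating-point
print(data.dtype, fdata.dtype)  # int16 float64

# get_data also accepts niimg-like inputs, e.g. a filename:
# data = get_data('/path/to/image.nii.gz')  # placeholder path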
3 changes: 2 additions & 1 deletion examples/02_decoding/plot_haxby_different_estimators.py
@@ -12,6 +12,7 @@

# Fetch data using nilearn dataset fetcher
from nilearn import datasets
+ from nilearn.image import get_data
# by default 2nd subject data will be fetched
haxby_dataset = datasets.fetch_haxby()

@@ -193,7 +194,7 @@
else:
continue
weight_img = masker.inverse_transform(weights)
- weight_map = weight_img.get_data()
+ weight_map = get_data(weight_img)
threshold = np.max(np.abs(weight_map)) * 1e-3
plot_stat_map(weight_img, bg_img=mean_epi_img,
display_mode='z', cut_coords=[-15],
6 changes: 3 additions & 3 deletions examples/02_decoding/plot_haxby_searchlight.py
@@ -14,7 +14,7 @@
# -------------------
import pandas as pd
from nilearn import datasets
- from nilearn.image import new_img_like, load_img
+ from nilearn.image import new_img_like, load_img, get_data

# We fetch 2nd subject from haxby datasets (which is default)
haxby_dataset = datasets.fetch_haxby()
@@ -49,7 +49,7 @@
mask_img = load_img(haxby_dataset.mask)

# .astype() makes a copy.
- process_mask = mask_img.get_data().astype(np.int)
+ process_mask = get_data(mask_img).astype(np.int)
picked_slice = 29
process_mask[..., (picked_slice + 1):] = 0
process_mask[..., :picked_slice] = 0
@@ -96,7 +96,7 @@
f_values, p_values = f_classif(fmri_masked, y)
p_values = -np.log10(p_values)
p_values[p_values > 10] = 10
- p_unmasked = nifti_masker.inverse_transform(p_values).get_data()
+ p_unmasked = get_data(nifti_masker.inverse_transform(p_values))

#########################################################################
# Visualization
3 changes: 2 additions & 1 deletion examples/02_decoding/plot_oasis_vbm.py
@@ -43,6 +43,7 @@
import matplotlib.pyplot as plt
from nilearn import datasets
from nilearn.input_data import NiftiMasker
+ from nilearn.image import get_data

n_subjects = 100 # more subjects requires more memory

@@ -165,7 +166,7 @@
'\n(Non-parametric + max-type correction)')
display.title(title, y=1.2)

- n_detections = (signed_neg_log_pvals_unmasked.get_data() > threshold).sum()
+ n_detections = (get_data(signed_neg_log_pvals_unmasked) > threshold).sum()
print('\n%d detections' % n_detections)

show()
7 changes: 4 additions & 3 deletions examples/03_connectivity/plot_data_driven_parcellations.py
@@ -121,15 +121,16 @@

# Grab number of voxels from attribute mask image (mask_img_).
import numpy as np
- original_voxels = np.sum(ward.mask_img_.get_data())
+ from nilearn.image import get_data
+ original_voxels = np.sum(get_data(ward.mask_img_))

# Compute mean over time on the functional image to use the mean
# image for compressed representation comparisons
mean_func_img = mean_img(dataset.func[0])

# Compute common vmin and vmax
- vmin = np.min(mean_func_img.get_data())
- vmax = np.max(mean_func_img.get_data())
+ vmin = np.min(get_data(mean_func_img))
+ vmax = np.max(get_data(mean_func_img))

plotting.plot_epi(mean_func_img, cut_coords=cut_coords,
title='Original (%i voxels)' % original_voxels,
8 changes: 5 additions & 3 deletions examples/04_manipulating_images/plot_affine_transformation.py
@@ -46,6 +46,8 @@

# Create the data with numpy
import numpy as np
+ from nilearn.image import get_data
+
grid = np.mgrid[0:192, 0:128]
circle = np.sum(
(grid - np.array([32, 32])[:, np.newaxis, np.newaxis]) ** 2,
@@ -111,17 +113,17 @@
plt.title("The original data in voxel space")

plt.figure()
- plt.imshow(img_in_mm_space.get_data()[:, :, 0], vmin=0, vmax=vmax)
+ plt.imshow(get_data(img_in_mm_space)[:, :, 0], vmin=0, vmax=vmax)
plt.title("The original data in mm space")

plt.figure()
- plt.imshow(img_3d_affine_in_mm_space.get_data()[:, :, 0],
+ plt.imshow(get_data(img_3d_affine_in_mm_space)[:, :, 0],
vmin=0, vmax=vmax)
plt.title("Transformed using a 3x3 affine -\n leads to "
"re-estimation of bounding box")

plt.figure()
- plt.imshow(img_4d_affine_in_mm_space.get_data()[:, :, 0],
+ plt.imshow(get_data(img_4d_affine_in_mm_space)[:, :, 0],
vmin=0, vmax=vmax)
plt.title("Transformed using a 4x4 affine -\n Uses affine anchor "
"and estimates bounding box size")
5 changes: 3 additions & 2 deletions examples/04_manipulating_images/plot_roi_extraction.py
@@ -115,8 +115,9 @@
# setting, machine-learning algorithms can perform poorly due to the so-called
# curse of dimensionality. However, simple means from the realms of classical
# statistics can help reducing the number of voxels.
+ from nilearn.image import get_data

- fmri_data = fmri_img.get_data()
+ fmri_data = get_data(fmri_img)
# number of voxels being x*y*z, samples in 4th dimension
print(fmri_data.shape)

@@ -211,7 +212,7 @@
# numbers to boolean type
from nilearn.image import load_img

- vt = load_img(mask_vt_filename).get_data().astype(bool)
+ vt = get_data(load_img(mask_vt_filename)).astype(bool)

# We can then use a logical "and" operation - numpy.logical_and - to keep only
# voxels that have been selected in both masks. In neuroimaging jargon, this
4 changes: 3 additions & 1 deletion examples/05_advanced/plot_haxby_mass_univariate.py
@@ -122,6 +122,8 @@

# Use the fmri mean image as a surrogate of anatomical data
from nilearn import image
+ from nilearn.image import get_data
+
mean_fmri_img = image.mean_img(func_filename)

threshold = -np.log10(0.1) # 10% corrected
@@ -135,7 +137,7 @@
display_mode='z', cut_coords=[-1, ],
vmax=vmax)

- neg_log_pvals_bonferroni_data = neg_log_pvals_bonferroni_unmasked.get_data()
+ neg_log_pvals_bonferroni_data = get_data(neg_log_pvals_bonferroni_unmasked)
n_detections = (neg_log_pvals_bonferroni_data > threshold).sum()
title = ('Negative $\log_{10}$ p-values'
'\n(Parametric two-sided F-test'
(next changed file; file name not shown in this view)
@@ -22,6 +22,7 @@
from nilearn import datasets
from nilearn.input_data import NiftiMasker
from nilearn.mass_univariate import permuted_ols
+ from nilearn.image import get_data

##############################################################################
# Load Localizer contrast
@@ -93,7 +94,7 @@
display_mode='z', cut_coords=[z_slice],
figure=fig, vmax=vmax, black_bg=True)

- n_detections = (neg_log_pvals_anova_unmasked.get_data() > threshold).sum()
+ n_detections = (get_data(neg_log_pvals_anova_unmasked) > threshold).sum()
title = ('Negative $\log_{10}$ p-values'
'\n(Parametric + Bonferroni correction)'
'\n%d detections') % n_detections
@@ -108,7 +109,7 @@
display_mode='z', cut_coords=[z_slice],
figure=fig, vmax=vmax, black_bg=True)

- n_detections = (neg_log_pvals_permuted_ols_unmasked.get_data()
+ n_detections = (get_data(neg_log_pvals_permuted_ols_unmasked)
> threshold).sum()
title = ('Negative $\log_{10}$ p-values'
'\n(Non-parametric + max-type correction)'
3 changes: 2 additions & 1 deletion examples/05_advanced/plot_localizer_simple_analysis.py
@@ -18,6 +18,7 @@
import matplotlib.pyplot as plt
from nilearn import datasets
from nilearn.input_data import NiftiMasker
+ from nilearn.image import get_data


############################################################################
@@ -65,7 +66,7 @@
display_mode='z', cut_coords=[z_slice],
figure=fig)

- masked_pvals = np.ma.masked_less(neg_log_pvals_anova_unmasked.get_data(),
+ masked_pvals = np.ma.masked_less(get_data(neg_log_pvals_anova_unmasked),
threshold)

title = ('Negative $\log_{10}$ p-values'
4 changes: 2 additions & 2 deletions examples/05_advanced/plot_neurovault_meta_analysis.py
@@ -15,7 +15,7 @@

from nilearn.datasets import fetch_neurovault_ids
from nilearn import plotting
- from nilearn.image import new_img_like, load_img, math_img
+ from nilearn.image import new_img_like, load_img, math_img, get_data


######################################################################
@@ -88,7 +88,7 @@ def t_to_z(t_scores, deg_of_freedom):

# Convert data, create new image.
z_img = new_img_like(
- t_img, t_to_z(t_img.get_data(), deg_of_freedom=deg_of_freedom))
+ t_img, t_to_z(get_data(t_img), deg_of_freedom=deg_of_freedom))

z_imgs.append(z_img)

4 changes: 2 additions & 2 deletions nilearn/_utils/ndimage.py
@@ -39,8 +39,8 @@ def largest_connected_component(volume):
is done inplace to avoid big-endian issues with scipy ndimage module.
"""
- if hasattr(volume, "get_data") \
-         or isinstance(volume, _basestring):
+ if (hasattr(volume, "get_data") or hasattr(
+         volume, "get_fdata") or isinstance(volume, _basestring)):
raise ValueError('Please enter a valid numpy array. For images use\
largest_connected_component_img')
# Get the new byteorder to handle issues like "Big-endian buffer not
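A hedged usage sketch (not part of the commit) of the stricter guard above: the private helper now rejects anything that looks like an image, whether it exposes get_data or only get_fdata, and keeps accepting plain numpy arrays.

import numpy as np
from nilearn._utils.ndimage import largest_connected_component

volume = np.zeros((10, 10, 10), dtype=bool)
volume[1:3, 1:3, 1:3] = True   # small component (8 voxels)
volume[5:9, 5:9, 5:9] = True   # larger component (64 voxels)
mask = largest_connected_component(volume)
print(mask.sum())              # 64: only the largest component is kept

# Passing a nibabel image now raises ValueError; for image inputs, use
# largest_connected_component_img (from nilearn.image) instead.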
21 changes: 16 additions & 5 deletions nilearn/_utils/niimg.py
@@ -14,6 +14,17 @@
from .compat import _basestring


+ def _get_data(img):
+     # copy-pasted from https://github.com/nipy/nibabel/blob/de44a105c1267b07ef9e28f6c35b31f851d5a005/nibabel/dataobj_images.py#L204
+     # get_data is removed from nibabel because:
+     # see https://github.com/nipy/nibabel/wiki/BIAP8
+     if img._data_cache is not None:
+         return img._data_cache
+     data = np.asanyarray(img._dataobj)
+     img._data_cache = data
+     return data


def _safe_get_data(img, ensure_finite=False):
""" Get the data in the image without having a side effect on the
Nifti1Image object
@@ -30,7 +41,7 @@ def _safe_get_data(img, ensure_finite=False):
Returns
-------
data: numpy array
- get_data() return from Nifti image.
+ nilearn.image.get_data return from Nifti image.
"""
if hasattr(img, '_data_cache') and img._data_cache is None:
# By loading directly dataobj, we prevent caching if the data is
@@ -40,7 +51,7 @@ def _safe_get_data(img, ensure_finite=False):
# that's why we invoke a forced call to the garbage collector
gc.collect()

- data = img.get_data()
+ data = _get_data(img)
if ensure_finite:
non_finite_mask = np.logical_not(np.isfinite(data))
if non_finite_mask.sum() > 0: # any non_finite_mask values?
@@ -113,16 +124,16 @@ def load_niimg(niimg, dtype=None):
" not compatible with nibabel format:\n"
+ short_repr(niimg))

- dtype = _get_target_dtype(niimg.get_data().dtype, dtype)
+ dtype = _get_target_dtype(_get_data(niimg).dtype, dtype)

if dtype is not None:
# Copyheader and set dtype in header if header exists
if niimg.header is not None:
- niimg = new_img_like(niimg, niimg.get_data().astype(dtype),
+ niimg = new_img_like(niimg, _get_data(niimg).astype(dtype),
niimg.affine, copy_header=True)
niimg.header.set_data_dtype(dtype)
else:
- niimg = new_img_like(niimg, niimg.get_data().astype(dtype),
+ niimg = new_img_like(niimg, _get_data(niimg).astype(dtype),
niimg.affine)

return niimg
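A small sketch (not part of the commit) of how the private load_niimg path above uses the cached accessor when casting to an explicit dtype; load_niimg and _get_data are internal helpers, so exact behavior may differ between nilearn versions.

import numpy as np
import nibabel as nib
from nilearn._utils.niimg import load_niimg
from nilearn.image import get_data

img = nib.Nifti1Image(np.arange(8, dtype=np.int16).reshape(2, 2, 2), np.eye(4))
casted = load_niimg(img, dtype=np.float32)    # triggers the astype() branch above
print(get_data(casted).dtype)                 # float32
print(casted.header.get_data_dtype())         # float32, set on the copied header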
(Diffs for the remaining changed files are not rendered in this view.)