diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000..4bd22d0da3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,40 @@ +--- +name: Bug report +about: Something not working as described? Missing/incorrect documentation? This is the place. +title: '' +labels: 'bug' +assignees: '' + +--- + + + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000..c4b5cd3c07 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,30 @@ +--- +name: Feature request +about: Got an idea for a new feature, or changing an existing one? This is the place. +title: '' +labels: 'feature' +assignees: '' + +--- + + + diff --git a/doc/manipulating_images/masker_objects.rst b/doc/manipulating_images/masker_objects.rst index ac8e774678..f9ba26aad4 100644 --- a/doc/manipulating_images/masker_objects.rst +++ b/doc/manipulating_images/masker_objects.rst @@ -101,7 +101,7 @@ slice and create a :ref:`Niimg ` in memory: .. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py - :start-after: Load ADHD resting-state dataset + :start-after: Load MAIN resting-state dataset :end-before: # To display the background Controlling how the mask is computed from the data diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst index f0302663b0..3bd4aa1961 100644 --- a/doc/modules/reference.rst +++ b/doc/modules/reference.rst @@ -87,6 +87,7 @@ uses. fetch_localizer_contrasts fetch_localizer_calculation_task fetch_miyawaki2008 + fetch_main fetch_nyu_rest fetch_surf_nki_enhanced fetch_surf_fsaverage diff --git a/doc/plotting/index.rst b/doc/plotting/index.rst index fe30311ec3..acbc8952fc 100644 --- a/doc/plotting/index.rst +++ b/doc/plotting/index.rst @@ -166,6 +166,10 @@ Different display modes :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html :scale: 50 +.. |plot_tiled| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_more_plotting_009.png + :target: ../auto_examples/01_plotting/plot_demo_more_plotting.html + :scale: 50 + .. |plot_lzr| image:: ../auto_examples/01_plotting/images/sphx_glr_plot_demo_glass_brain_extensive_006.png :target: ../auto_examples/01_plotting/plot_demo_glass_brain_extensive.html :scale: 50 @@ -216,6 +220,11 @@ Different display modes Cutting in the y and z direction, with cuts manually positionned +|plot_tiled| `display_mode='tiled', cut_coords=[36, -27, 60]` + |hack| + Tiled slicer: 3 cuts along the x, y, z directions, + arranged in a 2x2 grid + |plot_lzr| `Glass brain display_mode='lzr'` |hack| Glass brain and Connectome provide additional display modes diff --git a/doc/whats_new.rst b/doc/whats_new.rst index ab853774b1..4b66a22b56 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -1,3 +1,37 @@ +0.5.1 +===== + +NEW +--- + +- Calculate image data dtype from header information +- New display mode 'tiled' which allows 2x2 plot arrangement when plotting three cuts + (see :ref:`plotting`). +- NiftiLabelsMasker now consumes less memory when extracting the signal from a 3D/4D + image. This is especially noteworthy when extracting signals from large 4D images. + +Changes +------- + +- Lighting used for interactive surface plots changed; plots may look a bit + different. +- plotting.view_connectome default colormap is `bwr`, consistent with plot_connectome. 
+- plotting.view_connectome parameter names are consistent with plot_connectome: + + - coords is now node_coords + - marker_size is now node_size + - cmap is now edge_cmap + - threshold is now edge_threshold + +- plotting.view_markers and plotting.view_connectome can accept different marker + sizes for each node / marker. + +- plotting.view_markers() default marker color is now 'red', consistent with add_markers(). +- plotting.view_markers() parameter names are consistent with add_markers(): + + - coords is now marker_coords + - colors is now marker_color + 0.5.0 ===== diff --git a/examples/01_plotting/plot_demo_more_plotting.py b/examples/01_plotting/plot_demo_more_plotting.py index 2557de1cea..91015158a4 100644 --- a/examples/01_plotting/plot_demo_more_plotting.py +++ b/examples/01_plotting/plot_demo_more_plotting.py @@ -15,7 +15,7 @@ The parameter `display_mode` is used to draw brain slices along given specific directions, where directions can be one of 'ortho', -'x', 'y', 'z', 'xy', 'xz', 'yz'. whereas parameter `cut_coords` +'tiled', 'x', 'y', 'z', 'yx', 'xz', 'yz', whereas the parameter `cut_coords` is used to specify a limited number of slices to visualize along given specific slice direction. The parameter `cut_coords` can also be used to draw the specific cuts in the slices by giving its particular @@ -111,7 +111,7 @@ ######################################## # Changing the views to 'coronal', 'sagittal' views with coordinates # ------------------------------------------------------------------- -# display_mode='yx' for coronal and saggital view and coordinates will be +# display_mode='yx' for coronal and sagittal view and coordinates will be # assigned in the order of direction as [x, y, z] plotting.plot_stat_map(stat_img, display_mode='yx', cut_coords=[-27, 36], @@ -125,6 +125,15 @@ cut_coords=[-27, 60], title="display_mode='yz', cut_coords=[-27, 60]") +######################################## +# Visualizing three views in 2x2 fashion +# ------------------------------------------------------------------------- +# display_mode='tiled' for sagittal, coronal and axial view + +plotting.plot_stat_map(stat_img, display_mode='tiled', + cut_coords=[36, -27, 60], + title="display_mode='tiled'") + ############################################################################### # Demonstrating various display features # --------------------------------------- diff --git a/examples/03_connectivity/plot_adhd_spheres.py b/examples/03_connectivity/plot_adhd_spheres.py index 29882c5fc3..0a0a765b0d 100644 --- a/examples/03_connectivity/plot_adhd_spheres.py +++ b/examples/03_connectivity/plot_adhd_spheres.py @@ -13,11 +13,11 @@ # Retrieve the dataset # --------------------- from nilearn import datasets -adhd_dataset = datasets.fetch_adhd(n_subjects=1) +main_dataset = datasets.fetch_main(n_subjects=1) # print basic information on the dataset print('First subject functional nifti image (4D) is at: %s' % - adhd_dataset.func[0]) # 4D data + main_dataset.func[0]) # 4D data ########################################################################## @@ -43,8 +43,8 @@ low_pass=0.1, high_pass=0.01, t_r=2.5, memory='nilearn_cache', memory_level=1, verbose=2) -func_filename = adhd_dataset.func[0] -confound_filename = adhd_dataset.confounds[0] +func_filename = main_dataset.func[0] +confound_filename = main_dataset.confounds[0] time_series = masker.fit_transform(func_filename, confounds=[confound_filename]) diff --git a/examples/03_connectivity/plot_atlas_comparison.py 
b/examples/03_connectivity/plot_atlas_comparison.py index 462c28c48d..5dad189fe6 100644 --- a/examples/03_connectivity/plot_atlas_comparison.py +++ b/examples/03_connectivity/plot_atlas_comparison.py @@ -35,7 +35,7 @@ ######################################################################### # Load functional data # -------------------- -data = datasets.fetch_adhd(n_subjects=10) +data = datasets.fetch_main(n_subjects=10) print('Functional nifti images (4D, e.g., one subject) are located at : %r' % data['func'][0]) diff --git a/examples/03_connectivity/plot_canica_resting_state.py b/examples/03_connectivity/plot_canica_resting_state.py index 8a22b60155..5f6d1153d1 100644 --- a/examples/03_connectivity/plot_canica_resting_state.py +++ b/examples/03_connectivity/plot_canica_resting_state.py @@ -35,8 +35,8 @@ # ------------------------------- from nilearn import datasets -adhd_dataset = datasets.fetch_adhd(n_subjects=30) -func_filenames = adhd_dataset.func # list of 4D nifti files for each subject +main_dataset = datasets.fetch_main(n_subjects=30) +func_filenames = main_dataset.func # list of 4D nifti files for each subject # print basic information on the dataset print('First functional nifti image (4D) is at: %s' % diff --git a/examples/03_connectivity/plot_compare_resting_state_decomposition.py b/examples/03_connectivity/plot_compare_resting_state_decomposition.py index d3f8c93bfe..d8d311c2fa 100644 --- a/examples/03_connectivity/plot_compare_resting_state_decomposition.py +++ b/examples/03_connectivity/plot_compare_resting_state_decomposition.py @@ -26,12 +26,12 @@ # ----------------------- from nilearn import datasets -adhd_dataset = datasets.fetch_adhd(n_subjects=30) -func_filenames = adhd_dataset.func # list of 4D nifti files for each subject +main_dataset = datasets.fetch_main(n_subjects=30) +func_filenames = main_dataset.func # list of 4D nifti files for each subject # print basic information on the dataset print('First functional nifti image (4D) is at: %s' % - adhd_dataset.func[0]) # 4D data + main_dataset.func[0]) # 4D data ############################################################################### # Create two decomposition estimators @@ -43,7 +43,7 @@ ############################################################################### # Dictionary learning # -------------------- -# +# # We use as "template" as a strategy to compute the mask, as this leads # to slightly faster and more reproducible results. However, the images # need to be in MNI template space @@ -90,7 +90,7 @@ from nilearn.image import index_img # Selecting specific maps to display: maps were manually chosen to be similar -indices = {dict_learning: 25, canica: 33} +indices = {dict_learning: 24, canica: 32} # We select relevant cut coordinates for displaying cut_component = index_img(components_imgs[0], indices[dict_learning]) cut_coords = find_xyz_cut_coords(cut_component) diff --git a/examples/03_connectivity/plot_extract_regions_dictlearning_maps.py b/examples/03_connectivity/plot_extract_regions_dictlearning_maps.py index fc77374edd..ab03d7d7ab 100644 --- a/examples/03_connectivity/plot_extract_regions_dictlearning_maps.py +++ b/examples/03_connectivity/plot_extract_regions_dictlearning_maps.py @@ -6,7 +6,7 @@ to extract spatially constrained brain regions from whole brain maps decomposed using dictionary learning and use them to build a functional connectome. 
-We used 20 resting state ADHD functional datasets from :func:`nilearn.datasets.fetch_adhd` +We used 20 resting state MAIN functional datasets from :func:`nilearn.datasets.fetch_main` and :class:`nilearn.decomposition.DictLearning` for set of brain atlas maps. This example can also be inspired to apply the same steps to even regions extraction @@ -32,9 +32,9 @@ # We use nilearn's datasets downloading utilities from nilearn import datasets -adhd_dataset = datasets.fetch_adhd(n_subjects=20) -func_filenames = adhd_dataset.func -confounds = adhd_dataset.confounds +main_dataset = datasets.fetch_main(n_subjects=20) +func_filenames = main_dataset.func +confounds = main_dataset.confounds ################################################################################ # Extract resting-state networks with DictionaryLearning @@ -45,7 +45,7 @@ from nilearn.decomposition import DictLearning # Initialize DictLearning object -dict_learn = DictLearning(n_components=5, smoothing_fwhm=6., +dict_learn = DictLearning(n_components=8, smoothing_fwhm=6., memory="nilearn_cache", memory_level=2, random_state=0) # Fit to the data @@ -87,7 +87,7 @@ # Visualization of region extraction results title = ('%d regions are extracted from %d components.' '\nEach separate color of region indicates extracted region' - % (n_regions_extracted, 5)) + % (n_regions_extracted, 8)) plotting.plot_prob_atlas(regions_extracted_img, view_type='filled_contours', title=title) diff --git a/examples/03_connectivity/plot_group_level_connectivity.py b/examples/03_connectivity/plot_group_level_connectivity.py index 3f29e7ca28..ed6ba7b90c 100644 --- a/examples/03_connectivity/plot_group_level_connectivity.py +++ b/examples/03_connectivity/plot_group_level_connectivity.py @@ -28,12 +28,12 @@ def plot_matrices(matrices, matrix_kind): ############################################################################### -# Load ADHD dataset and MSDL atlas +# Load MAIN dataset and MSDL atlas # -------------------------------- -# We study only 20 subjects from the ADHD dataset, to save computation time. +# We study only 20 subjects from the MAIN dataset, to save computation time. from nilearn import datasets -adhd_data = datasets.fetch_adhd(n_subjects=20) +main_data = datasets.fetch_main(n_subjects=20) ############################################################################### # We use probabilistic regions of interest (ROIs) from the MSDL atlas. @@ -57,26 +57,24 @@ def plot_matrices(matrices, matrix_kind): ############################################################################### # Then we compute region signals and extract useful phenotypic informations. 
-adhd_subjects = [] +children = [] pooled_subjects = [] -site_names = [] -adhd_labels = [] # 1 if ADHD, 0 if control +groups = [] # child or adult for func_file, confound_file, phenotypic in zip( - adhd_data.func, adhd_data.confounds, adhd_data.phenotypic): + main_data.func, main_data.confounds, main_data.phenotypic): time_series = masker.fit_transform(func_file, confounds=confound_file) pooled_subjects.append(time_series) - is_adhd = phenotypic['adhd'] - if is_adhd: - adhd_subjects.append(time_series) + is_child = phenotypic['Child_Adult'] == 'child' + if is_child: + children.append(time_series) - site_names.append(phenotypic['site']) - adhd_labels.append(is_adhd) + groups.append(phenotypic['Child_Adult']) -print('Data has {0} ADHD subjects.'.format(len(adhd_subjects))) +print('Data has {0} children.'.format(len(children))) ############################################################################### -# ROI-to-ROI correlations of ADHD patients -# ---------------------------------------- +# ROI-to-ROI correlations of children +# ----------------------------------- # The simpler and most commonly used kind of connectivity is correlation. It # models the full (marginal) connectivity between pairwise ROIs. We can # estimate it using :class:`nilearn.connectome.ConnectivityMeasure`. @@ -85,12 +83,12 @@ def plot_matrices(matrices, matrix_kind): correlation_measure = ConnectivityMeasure(kind='correlation') ############################################################################### -# From the list of ROIs time-series for ADHD subjects, the +# From the list of ROIs time-series for children, the # `correlation_measure` computes individual correlation matrices. -correlation_matrices = correlation_measure.fit_transform(adhd_subjects) +correlation_matrices = correlation_measure.fit_transform(children) # All individual coefficients are stacked in a unique 2D matrix. -print('Correlations of ADHD patients are stacked in an array of shape {0}' +print('Correlations of children are stacked in an array of shape {0}' .format(correlation_matrices.shape)) ############################################################################### @@ -99,13 +97,13 @@ def plot_matrices(matrices, matrix_kind): print('Mean correlation has shape {0}.'.format(mean_correlation_matrix.shape)) ############################################################################### -# We display the connectomes of the first 3 ADHD subjects and the mean -# correlation matrix over all ADHD patients. +# We display the connectomes of the first 4 children and the mean +# correlation matrix over all children. from nilearn import plotting plot_matrices(correlation_matrices[:4], 'correlation') plotting.plot_connectome(mean_correlation_matrix, msdl_coords, - title='mean correlation over 13 ADHD subjects') + title='mean correlation over 12 children') ############################################################################### # Look at blocks structure, reflecting functional networks. @@ -120,7 +118,7 @@ def plot_matrices(matrices, matrix_kind): ############################################################################### # and repeat the previous operation. 
partial_correlation_matrices = partial_correlation_measure.fit_transform( - adhd_subjects) + children) ############################################################################### # Most of direct connections are weaker than full connections, resulting @@ -128,7 +126,7 @@ plot_matrices(partial_correlation_matrices[:4], 'partial') plotting.plot_connectome( partial_correlation_measure.mean_, msdl_coords, - title='mean partial correlation over 13 ADHD subjects') + title='mean partial correlation over 12 children') ############################################################################### # Extract subjects variabilities around a robust group connectivity @@ -139,10 +137,10 @@ def plot_matrices(matrices, matrix_kind): tangent_measure = ConnectivityMeasure(kind='tangent') ############################################################################### -# We fit our ADHD group and get the group connectivity matrix stored as +# We fit our group of children and get the group connectivity matrix stored as # in `tangent_measure.mean_`, and individual deviation matrices of each subject # from it. -tangent_matrices = tangent_measure.fit_transform(adhd_subjects) +tangent_matrices = tangent_measure.fit_transform(children) ############################################################################### # `tangent_matrices` model individual connectivities as @@ -153,7 +151,7 @@ def plot_matrices(matrices, matrix_kind): plot_matrices(tangent_matrices[:4], 'tangent variability') plotting.plot_connectome( tangent_measure.mean_, msdl_coords, - title='mean tangent connectivity over 13 ADHD subjects') + title='mean tangent connectivity over 12 children') ############################################################################### # The mean connectome graph is not as sparse as partial correlations graph, @@ -184,8 +182,7 @@ def plot_matrices(matrices, matrix_kind): # proportion of each class as in the whole cohort from sklearn.model_selection import StratifiedKFold -classes = ['{0}{1}'.format(site_name, adhd_label) - for site_name, adhd_label in zip(site_names, adhd_labels)] +_, classes = np.unique(groups, return_inverse=True) cv = StratifiedKFold(n_splits=3) ############################################################################### -# and use the connectivity coefficients to classify ADHD patients vs controls. +# and use the connectivity coefficients to classify children vs adults.
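As a side note, here is a minimal, self-contained sketch of the labelling idiom the hunk above introduces (the `groups` list is made up for illustration; in the example it holds the 'child'/'adult' phenotypic values collected per subject):

    import numpy as np
    from sklearn.model_selection import StratifiedKFold

    # Stand-in for the per-subject 'child'/'adult' labels.
    groups = ['child', 'adult', 'child', 'child', 'adult',
              'child', 'adult', 'child', 'child']

    # return_inverse=True maps each entry to the index of its unique value:
    # sorted unique labels are ['adult', 'child'], so adult -> 0, child -> 1.
    _, classes = np.unique(groups, return_inverse=True)
    print(classes)  # [1 0 1 1 0 1 0 1 1]

    # StratifiedKFold then keeps the child/adult proportion in every fold.
    cv = StratifiedKFold(n_splits=3)
    X = np.random.rand(len(classes), 4)  # stand-in feature matrix
    for train_index, test_index in cv.split(X, classes):
        print(train_index, test_index)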
@@ -202,9 +199,9 @@ def plot_matrices(matrices, matrix_kind): svc = LinearSVC(random_state=0) cv_scores = cross_val_score(svc, connectivity_biomarkers[kind], - y=adhd_labels, + y=classes, cv=cv, - groups=adhd_labels, + groups=groups, scoring='accuracy', ) mean_scores.append(cv_scores.mean()) diff --git a/examples/03_connectivity/plot_inverse_covariance_connectome.py b/examples/03_connectivity/plot_inverse_covariance_connectome.py index b20eef1f01..4df93e1294 100644 --- a/examples/03_connectivity/plot_inverse_covariance_connectome.py +++ b/examples/03_connectivity/plot_inverse_covariance_connectome.py @@ -32,7 +32,7 @@ labels = atlas['labels'] # Loading the functional datasets -data = datasets.fetch_adhd(n_subjects=1) +data = datasets.fetch_main(n_subjects=1) # print basic information on the dataset print('First subject functional nifti images (4D) are at: %s' % diff --git a/examples/03_connectivity/plot_multi_subject_connectome.py b/examples/03_connectivity/plot_multi_subject_connectome.py index 3da6875919..db9b9e6da9 100644 --- a/examples/03_connectivity/plot_multi_subject_connectome.py +++ b/examples/03_connectivity/plot_multi_subject_connectome.py @@ -38,11 +38,11 @@ def plot_matrices(cov, prec, title, labels): # ------------------ from nilearn import datasets msdl_atlas_dataset = datasets.fetch_atlas_msdl() -adhd_dataset = datasets.fetch_adhd(n_subjects=n_subjects) +main_dataset = datasets.fetch_main(n_subjects=n_subjects) # print basic information on the dataset print('First subject functional nifti image (4D) is at: %s' % - adhd_dataset.func[0]) # 4D data + main_dataset.func[0]) # 4D data ############################################################################## @@ -62,8 +62,8 @@ def plot_matrices(cov, prec, title, labels): masker.fit() subject_time_series = [] -func_filenames = adhd_dataset.func -confound_filenames = adhd_dataset.confounds +func_filenames = main_dataset.func +confound_filenames = main_dataset.confounds for func_filename, confound_filename in zip(func_filenames, confound_filenames): print("Processing file %s" % func_filename) diff --git a/examples/03_connectivity/plot_probabilistic_atlas_extraction.py b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py index c455a266f9..1469d58ffe 100644 --- a/examples/03_connectivity/plot_probabilistic_atlas_extraction.py +++ b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py @@ -30,7 +30,7 @@ labels = atlas['labels'] # Load the functional datasets -data = datasets.fetch_adhd(n_subjects=1) +data = datasets.fetch_main(n_subjects=1) print('First subject resting-state nifti image (4D) is located at: %s' % data.func[0]) diff --git a/examples/03_connectivity/plot_rest_parcellations.py b/examples/03_connectivity/plot_rest_parcellations.py index a11e0a030b..e8c330e81e 100644 --- a/examples/03_connectivity/plot_rest_parcellations.py +++ b/examples/03_connectivity/plot_rest_parcellations.py @@ -41,10 +41,10 @@ # Download a rest dataset and turn it to a data matrix # ----------------------------------------------------- # -# We download one subject of the ADHD dataset from Internet +# We download one subject of the MAIN conference dataset from the Internet from nilearn import datasets -dataset = datasets.fetch_adhd(n_subjects=1) +dataset = datasets.fetch_main(n_subjects=1) # print basic information on the dataset print('First subject functional nifti image (4D) is at: %s' % diff --git a/examples/03_connectivity/plot_seed_to_voxel_correlation.py index 
be263bcd37..01ba3cb93c 100644 --- a/examples/03_connectivity/plot_seed_to_voxel_correlation.py +++ b/examples/03_connectivity/plot_seed_to_voxel_correlation.py @@ -26,9 +26,9 @@ # subject by indexing with [0]). from nilearn import datasets -adhd_dataset = datasets.fetch_adhd(n_subjects=1) -func_filename = adhd_dataset.func[0] -confound_filename = adhd_dataset.confounds[0] +main_dataset = datasets.fetch_main(n_subjects=1) +func_filename = main_dataset.func[0] +confound_filename = main_dataset.confounds[0] ########################################################################## # Note that func_filename and confound_filename are strings pointing to diff --git a/examples/03_connectivity/plot_signal_extraction.py b/examples/03_connectivity/plot_signal_extraction.py index b72ffc98ed..1e31b5de82 100644 --- a/examples/03_connectivity/plot_signal_extraction.py +++ b/examples/03_connectivity/plot_signal_extraction.py @@ -37,7 +37,7 @@ atlas_filename) # 4D data # One subject of resting-state data -data = datasets.fetch_adhd(n_subjects=1) +data = datasets.fetch_main(n_subjects=1) fmri_filenames = data.func[0] ############################################################################## diff --git a/examples/03_connectivity/plot_sphere_based_connectome.py b/examples/03_connectivity/plot_sphere_based_connectome.py index d52a006513..fca2553b72 100644 --- a/examples/03_connectivity/plot_sphere_based_connectome.py +++ b/examples/03_connectivity/plot_sphere_based_connectome.py @@ -21,15 +21,15 @@ # Load fMRI data and Power atlas # ------------------------------ # -# We are going to use a single subject from the ADHD dataset. +# We are going to use a single subject from the MAIN dataset. from nilearn import datasets -adhd = datasets.fetch_adhd(n_subjects=1) +main = datasets.fetch_main(n_subjects=1) ############################################################################### # We store the paths to its functional image and the confounds file. -fmri_filename = adhd.func[0] -confounds_filename = adhd.confounds[0] +fmri_filename = main.func[0] +confounds_filename = main.confounds[0] print('Functional image is {0},\nconfounds are {1}.'.format(fmri_filename, confounds_filename)) @@ -57,7 +57,7 @@ from nilearn import input_data spheres_masker = input_data.NiftiSpheresMasker( - seeds=coords, smoothing_fwhm=4, radius=5., + seeds=coords, smoothing_fwhm=6, radius=5., detrend=True, standardize=True, low_pass=0.1, high_pass=0.01, t_r=2.5) ############################################################################### @@ -80,7 +80,7 @@ # estimator captures well the covariance **structure**. from sklearn.covariance import GraphLassoCV -covariance_estimator = GraphLassoCV(verbose=1) +covariance_estimator = GraphLassoCV(cv=3, verbose=1) ############################################################################### # We just fit our regions signals into the `GraphLassoCV` object diff --git a/examples/04_manipulating_images/plot_compare_mean_image.py b/examples/04_manipulating_images/plot_compare_mean_image.py index ecdc87d824..915818bccf 100644 --- a/examples/04_manipulating_images/plot_compare_mean_image.py +++ b/examples/04_manipulating_images/plot_compare_mean_image.py @@ -12,7 +12,7 @@ ############################################################################### # Fetching 2 subject resting state functionnal MRI from datasets. 
from nilearn import datasets -dataset = datasets.fetch_adhd(n_subjects=2) +dataset = datasets.fetch_main(n_subjects=2) ############################################################################### diff --git a/examples/04_manipulating_images/plot_mask_computation.py b/examples/04_manipulating_images/plot_mask_computation.py index b7ec34b609..c6db0cc268 100644 --- a/examples/04_manipulating_images/plot_mask_computation.py +++ b/examples/04_manipulating_images/plot_mask_computation.py @@ -25,10 +25,10 @@ ############################################################################### # Computing a mask from the background ############################################################################### -# +# # The default strategy to compute a mask, eg in NiftiMasker is to try to # detect the background. -# +# # With data that has already been masked, this will work well, as it lies # on a homogeneous background @@ -60,8 +60,8 @@ # From raw EPI data, there is no uniform background, and a different # strategy is necessary -# Load ADHD resting-state dataset -dataset = datasets.fetch_adhd(n_subjects=1) +# Load MAIN resting-state dataset +dataset = datasets.fetch_main(n_subjects=1) epi_filename = dataset.func[0] # Restrict to 100 frames to speed up computation @@ -81,7 +81,7 @@ ############################################################################### # Generate mask with strong opening -# +# # We can fine-tune the outline of the mask by increasing the number of # opening steps (`opening=10`) using the `mask_args` argument of the # NiftiMasker. This effectively performs erosion and dilation @@ -94,7 +94,7 @@ ############################################################################### # Generate mask with a high lower cutoff -# +# # The NiftiMasker calls the nilearn.masking.compute_epi_mask function to # compute the mask from the EPI. It has two important parameters: # lower_cutoff and upper_cutoff. These set the grey-value bounds in which @@ -113,7 +113,7 @@ ############################################################################### # Computing the mask from the MNI template ############################################################################### -# +# # A mask can also be computed from the MNI gray matter template. 
In this # case, it is resampled to the target image @@ -126,7 +126,7 @@ ############################################################################### # After mask computation: extracting time series ############################################################################### -# +# # Extract time series # trended vs detrended diff --git a/examples/04_manipulating_images/plot_nifti_simple.py b/examples/04_manipulating_images/plot_nifti_simple.py index 50367001c9..3de8f0baff 100644 --- a/examples/04_manipulating_images/plot_nifti_simple.py +++ b/examples/04_manipulating_images/plot_nifti_simple.py @@ -7,10 +7,10 @@ """ ########################################################################### -# Retrieve the NYU test-retest dataset +# Retrieve the MAIN functional dataset from nilearn import datasets -dataset = datasets.fetch_adhd(n_subjects=1) +dataset = datasets.fetch_main(n_subjects=1) func_filename = dataset.func[0] # print basic information on the dataset diff --git a/examples/04_manipulating_images/plot_smooth_mean_image.py b/examples/04_manipulating_images/plot_smooth_mean_image.py index 1ff7dfc5e8..daf716bbd6 100644 --- a/examples/04_manipulating_images/plot_smooth_mean_image.py +++ b/examples/04_manipulating_images/plot_smooth_mean_image.py @@ -13,7 +13,7 @@ from nilearn import datasets, plotting, image -data = datasets.fetch_adhd(n_subjects=1) +data = datasets.fetch_main(n_subjects=1) # Print basic information on the dataset print('First subject functional nifti image (4D) are located at: %s' % diff --git a/examples/05_advanced/plot_ica_resting_state.py b/examples/05_advanced/plot_ica_resting_state.py index ef54643e87..3ccfa6153c 100644 --- a/examples/05_advanced/plot_ica_resting_state.py +++ b/examples/05_advanced/plot_ica_resting_state.py @@ -26,7 +26,7 @@ # Here we use only 3 subjects to get faster-running code. For better # results, simply increase this number # XXX: must get the code to run for more than 1 subject -dataset = datasets.fetch_adhd(n_subjects=1) +dataset = datasets.fetch_main(n_subjects=1) func_filename = dataset.func[0] # print basic information on the dataset diff --git a/nilearn/__init__.py b/nilearn/__init__.py index 0c59990a94..6f0bd497df 100644 --- a/nilearn/__init__.py +++ b/nilearn/__init__.py @@ -42,16 +42,34 @@ def _py2_deprecation_warning(): - warnings.simplefilter('once') py2_warning = ('Python2 support is deprecated and will be removed in ' 'a future release. Consider switching to Python3.') + warnings.filterwarnings('once', message=py2_warning) + warnings.warn(message=py2_warning, + category=DeprecationWarning, + stacklevel=3, + ) + +def _py34_deprecation_warning(): + py34_warning = ('Python 3.4 support is deprecated and will be removed in ' + 'a future release. Consider switching to Python 3.6 or 3.7.' + ) + warnings.filterwarnings('once', message=py34_warning) + warnings.warn(message=py34_warning, + category=DeprecationWarning, + stacklevel=3, + ) + + +def _python_deprecation_warnings(): if sys.version_info.major == 2: - warnings.warn(message=py2_warning, - category=DeprecationWarning, - stacklevel=3, - ) + _py2_deprecation_warning() + elif sys.version_info.major == 3 and sys.version_info.minor == 4: + _py34_deprecation_warning() + _check_module_dependencies() +_python_deprecation_warnings() # Temporary work around to address formatting issues in doc tests # with NumPy 1.14. 
NumPy had made more consistent str/repr formatting @@ -88,5 +106,3 @@ def _py2_deprecation_warning(): 'image', 'input_data', 'masking', 'mass_univariate', 'plotting', 'region', 'signal', 'surface', 'parcellations', '__version__'] - -_py2_deprecation_warning() diff --git a/nilearn/_utils/niimg.py b/nilearn/_utils/niimg.py index e27046d38e..64c62221a4 100644 --- a/nilearn/_utils/niimg.py +++ b/nilearn/_utils/niimg.py @@ -116,8 +116,15 @@ def load_niimg(niimg, dtype=None): dtype = _get_target_dtype(niimg.get_data().dtype, dtype) if dtype is not None: - niimg = new_img_like(niimg, niimg.get_data().astype(dtype), - niimg.affine) + # Copyheader and set dtype in header if header exists + if niimg.header is not None: + niimg = new_img_like(niimg, niimg.get_data().astype(dtype), + niimg.affine, copy_header=True) + niimg.header.set_data_dtype(dtype) + else: + niimg = new_img_like(niimg, niimg.get_data().astype(dtype), + niimg.affine) + return niimg @@ -173,3 +180,23 @@ def short_repr(niimg): # Shorten the repr to have a useful error message this_repr = this_repr[:18] + '...' return this_repr + + +def img_data_dtype(niimg): + """Determine type of data contained in image + + Based on the information contained in ``niimg.dataobj``, determine the + dtype of ``np.array(niimg.dataobj).dtype``. + """ + + dataobj = niimg.dataobj + + # Neuroimages that scale data should be interpreted as floating point + if nibabel.is_proxy(dataobj) and (dataobj.slope, dataobj.inter) != (1.0, 0.0): + return np.float_ + + # ArrayProxy gained the dtype attribute in nibabel 2.2 + if hasattr(dataobj, 'dtype'): + return dataobj.dtype + + return niimg.get_data_dtype() diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py index 91c8cf2f5e..38bc5750f1 100644 --- a/nilearn/datasets/__init__.py +++ b/nilearn/datasets/__init__.py @@ -13,7 +13,7 @@ fetch_localizer_button_task, fetch_localizer_calculation_task, fetch_mixed_gambles, fetch_megatrawls_netmats, fetch_cobre, - fetch_surf_nki_enhanced) + fetch_surf_nki_enhanced, fetch_main) from .atlas import (fetch_atlas_craddock_2012, fetch_atlas_destrieux_2009, fetch_atlas_harvard_oxford, fetch_atlas_msdl, fetch_coords_power_2011, @@ -46,7 +46,7 @@ 'fetch_atlas_allen_2011', 'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal', 'fetch_megatrawls_netmats', 'fetch_cobre', - 'fetch_surf_nki_enhanced', 'fetch_surf_fsaverage5', + 'fetch_surf_nki_enhanced', 'fetch_main', 'fetch_surf_fsaverage5', 'fetch_surf_fsaverage', 'fetch_atlas_basc_multiscale_2015', 'fetch_coords_dosenbach_2010', 'fetch_neurovault', 'fetch_neurovault_ids', diff --git a/nilearn/datasets/data/MAIN_osf.csv b/nilearn/datasets/data/MAIN_osf.csv new file mode 100644 index 0000000000..52c2cec017 --- /dev/null +++ b/nilearn/datasets/data/MAIN_osf.csv @@ -0,0 +1,84 @@ +participant_id,key_r,key_b +sub-pixar001,5c8ff37da743a90018606df1,5c8ff37c2286e80019c3c102 +sub-pixar003,5c8ff37d4712b400193b5b54,5c8ff37d4712b400183b7011 +sub-pixar004,5c8ff37e2286e80016c3c2cb,5c8ff3832286e80019c3c10f +sub-pixar005,5c8ff3822286e80018c3e37b,5c8ff382a743a90018606df8 +sub-pixar006,5c8ff3814712b4001a3b5561,5c8ff3832286e80016c3c2d1 +sub-pixar007,5c8ff3842286e80017c419e0,5c8ff3854712b4001a3b5568 +sub-pixar009,5c8ff3872286e80017c419ea,5c8ff3872286e80017c419e9 +sub-pixar010,5c8ff3884712b400183b7023,5c8ff3884712b400193b5b5c +sub-pixar011,5c8ff389a743a9001660a016,5c8ff38c2286e80016c3c2da +sub-pixar012,5c8ff38ca743a90018606dfe,5c8ff38ca743a9001760809e +sub-pixar015,5c8ff38c4712b4001a3b5573,5c8ff38da743a900176080a2 
+sub-pixar018,5c8ff38f2286e80018c3e38d,5c8ff3914712b4001a3b5579 +sub-pixar020,5c8ff391a743a900176080a9,5c8ff3914712b400173b5329 +sub-pixar022,5c8ff3912286e80018c3e393,5c8ff3952286e80017c41a1b +sub-pixar027,5c8ff3952286e80016c3c2e7,5c8ff3954712b400193b5b79 +sub-pixar029,5c8ff395a743a900176080af,5c8ff3964712b400193b5b7d +sub-pixar030,5c8ff399a743a9001660a031,5c8ff3982286e80017c41a29 +sub-pixar031,5c8ff39aa743a90018606e21,5c8ff39aa743a900176080ba +sub-pixar035,5c8ff39aa743a900176080bf,5c8ff39d4712b400193b5b89 +sub-pixar039,5c8ff39ca743a90019606c50,5c8ff3a2a743a9001660a048 +sub-pixar041,5c8ff3a12286e80017c41a48,5c8ff3a12286e80016c3c2fc +sub-pixar042,5c8ff39fa743a90018606e2f,5c8ff3a34712b4001a3b55a3 +sub-pixar044,5c8ff3a34712b400193b5b92,5c8ff3a84712b400183b7048 +sub-pixar046,5c8ff3a72286e80017c41a54,5c8ff3a7a743a90018606e42 +sub-pixar049,5c8ff3a74712b4001a3b55ad,5c8ff3a72286e80017c41a59 +sub-pixar050,5c8ff3aa4712b400183b704d,5c8ff3ac4712b4001a3b55b7 +sub-pixar052,5c8ff3aca743a9001660a063,5c8ff3ac4712b400183b7051 +sub-pixar057,5c8ff3ae4712b400183b7055,5c8ff3af2286e80018c3e3c0 +sub-pixar058,5c8ff3b02286e80018c3e3c4,5c8ff3b14712b400183b705a +sub-pixar061,5c8ff3b12286e80016c3c30f,5c8ff3b34712b400183b7060 +sub-pixar062,5c8ff3b2a743a9001660a07a,5c8ff3b54712b400193b5ba3 +sub-pixar063,5c8ff3b2a743a9001660a07a,5c8ff3b54712b400193b5ba3 +sub-pixar065,5c8ff3b72286e80017c41a88,5c8ff3b94712b4001a3b55bf +sub-pixar067,5c8ff3b92286e80017c41a8e,5c8ff3b92286e80018c3e3e0 +sub-pixar068,5c8ff3ba2286e80016c3c325,5c8ff3bd2286e80017c41a9e +sub-pixar072,5c8ff3be4712b400193b5bab,5c8ff3bf2286e80017c41aa8 +sub-pixar073,5c8ff3be4712b4001a3b55c4,5c8ff3c12286e80017c41ab1 +sub-pixar074,5c8ff3c34712b400173b5362,5c8ff3c42286e80017c41ab6 +sub-pixar077,5c8ff3c44712b400183b7071,5c8ff3c42286e80017c41abc +sub-pixar080,5c8ff3c7a743a90018606e5f,5c8ff3c9a743a90017608120 +sub-pixar089,5c8ff3c94712b4001a3b55d3,5c8ff3c9a743a9001760811a +sub-pixar090,5c8ff3ca4712b400183b707a,5c8ff3cc2286e80017c41adc +sub-pixar097,5c8ff3cea743a90019606c9f,5c8ff3cea743a90018606e68 +sub-pixar103,5c8ff3ce2286e80016c3c34b,5c8ff3d12286e80019c3c16f +sub-pixar109,5c8ff3d3a743a90019606caa,5c8ff3d34712b400193b5bc7 +sub-pixar110,5c8ff3d22286e80017c41af2,5c8ff3d52286e80017c41afe +sub-pixar113,5c8ff3d84712b400183b708c,5c8ff3d7a743a90017608138 +sub-pixar116,5c8ff3d8a743a90019606cb5,5c8ff3d8a743a90018606e75 +sub-pixar117,5c8ff3dba743a90018606e7e,5c8ff3de4712b4001a3b55f4 +sub-pixar121,5c8ff3dc4712b4001a3b55f0,5c8ff3df2286e80018c3e421 +sub-pixar123,5c8ff3df4712b400183b7092,5c8ff3e04712b400193b5bdf +sub-pixar124,5c8ff3e14712b400183b7097,5c8ff3e32286e80018c3e42c +sub-pixar125,5c8ff3e4a743a9001760814f,5c8ff3e54712b400183b70a5 +sub-pixar126,5c8ff3e52286e80018c3e439,5c8ff3e72286e80017c41b3d +sub-pixar127,5c8ff3e9a743a90017608158,5c8ff3e82286e80018c3e443 +sub-pixar128,5c8ff3ea4712b400183b70b7,5c8ff3eb2286e80019c3c194 +sub-pixar129,5c8ff3eb2286e80019c3c198,5c8ff3ed2286e80017c41b56 +sub-pixar130,5c8ff3ee2286e80016c3c379,5c8ff3ee4712b400183b70c3 +sub-pixar131,5c8ff3efa743a9001660a0d5,5c8ff3f14712b4001a3b560e +sub-pixar132,5c8ff3f1a743a90017608164,5c8ff3f12286e80016c3c37e +sub-pixar133,5c8ff3f34712b4001a3b5612,5c8ff3f7a743a90019606cdf +sub-pixar134,5c8ff3f6a743a90017608171,5c8ff3f64712b400183b70d8 +sub-pixar135,5c8ff3f72286e80019c3c1af,5c8ff3f92286e80018c3e463 +sub-pixar136,5c8ff4534712b400183b716d,5c8ff3fb2286e80017c41b72 +sub-pixar137,5c8ff3fb2286e80019c3c1b3,5c8ff3fd4712b400183b70e6 +sub-pixar138,5c8ff3fe4712b4001a3b5620,5c8ff3ff4712b400173b5399 
+sub-pixar139,5c8ff401a743a9001660a104,5c8ff403a743a90017608181 +sub-pixar140,5c8ff4034712b400183b70f6,5c8ff4042286e80019c3c1c2 +sub-pixar141,5c8ff4052286e80017c41b92,5c8ff4064712b400183b70fe +sub-pixar142,5c8ff4074712b400183b7104,5c8ff40aa743a9001660a119 +sub-pixar143,5c8ff4092286e80017c41ba7,5c8ff40b2286e80016c3c39a +sub-pixar144,5c8ff40d2286e80016c3c39f,5c8ff40da743a90018606eac +sub-pixar145,5c8ff40e4712b400173b53a8,5c8ff4104712b400173b53ad +sub-pixar146,5c8ff4112286e80016c3c3a5,5c8ff412a743a9001660a128 +sub-pixar147,5c8ff414a743a90019606cfc,5c8ff416a743a90019606d01 +sub-pixar148,5c8ff417a743a9001660a130,5c8ff4184712b400193b5c19 +sub-pixar149,5c8ff41a2286e80019c3c1de,5c8ff41aa743a9001660a13b +sub-pixar150,5c8ff41b2286e80016c3c3b6,5c8ff41d2286e80018c3e499 +sub-pixar151,5c8ff41da743a900176081a2,5c8ff41ea743a90018606ec7 +sub-pixar152,5c8ff4202286e80019c3c1e2,5c8ff4212286e80018c3e49d +sub-pixar153,5c8ff4212286e80019c3c1e6,5c8ff424a743a900176081af +sub-pixar154,5c8ff4264712b400193b5c2f,5c8ff4252286e80017c41bfc +sub-pixar155,5c8ff4282286e80017c41c0a,5c8ff4292286e80017c41c0f diff --git a/nilearn/datasets/description/brainomics_localizer.rst b/nilearn/datasets/description/brainomics_localizer.rst index 3685f416f6..fab9fcf995 100644 --- a/nilearn/datasets/description/brainomics_localizer.rst +++ b/nilearn/datasets/description/brainomics_localizer.rst @@ -22,9 +22,15 @@ References For more information about this dataset's structure: http://brainomics.cea.fr/localizer/ +To cite this dataset: +Papadopoulos Orfanos, Dimitri, et al. +"The Brainomics/Localizer database." +NeuroImage 144.B (2017): 309. + +For an example of scientific results obtained using this dataset: Pinel, Philippe, et al. "Fast reproducible identification and large-scale databasing of individual functional cognitive networks." -BMC neuroscience 8.1 (2007): 91. +BMC Neuroscience 8.1 (2007): 91. Licence: usage is unrestricted for non-commercial research purposes. diff --git a/nilearn/datasets/description/main.rst b/nilearn/datasets/description/main.rst new file mode 100644 index 0000000000..2d536f2cb4 --- /dev/null +++ b/nilearn/datasets/description/main.rst @@ -0,0 +1,37 @@ +Montreal Artificial Intelligence and Neuroscience conference 2018 datasets + + +Notes +----- +This functional MRI dataset is used as part of teaching how to use +machine learning to predict age from rs-fMRI with Nilearn. + +The dataset consists of 50 children (ages 3-13) and 33 young adults (ages +18-39). This rs-fMRI data can be used to try to predict which subjects are +children and which are adults. + +The data is downsampled to 4mm resolution for convenience. The original +data was downloaded from OpenNeuro. + +Here: https://openneuro.org/datasets/ds000228/versions/1.0.0 + +See the tracking issue for more information: +https://github.com/nilearn/nilearn/issues/1864 + +Content +------- + :'func': functional MRI Nifti images (4D) per subject + :'confounds': TSV file containing nuisance information per subject + :'phenotypic': Phenotypic information for each subject such as age, + age group, gender, handedness. + + +References +---------- +Please cite this paper if you are using this dataset: +Richardson, H., Lisandrelli, G., Riobueno-Naylor, A., & Saxe, R. (2018). +Development of the social brain from age three to twelve years. +Nature Communications, 9(1), 1027. +https://www.nature.com/articles/s41467-018-03399-2 + +Licence: usage is unrestricted for non-commercial research purposes. 
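Together with the fetcher added to nilearn/datasets/func.py below, the intended usage is roughly the following sketch (the attribute names come from the Bunch returned at the end of fetch_main in this diff; everything else about the call is an assumption):

    from nilearn import datasets

    # Download a few subjects of the MAIN (ds000228, downsampled) dataset;
    # the fetcher keeps the child/adult proportion of the full cohort.
    main_dataset = datasets.fetch_main(n_subjects=5)

    print(main_dataset.func[0])        # path to a 4D functional image
    print(main_dataset.confounds[0])   # path to the matching confounds TSV
    print(main_dataset.phenotypic['Child_Adult'])  # 'child'/'adult' labels
    print(main_dataset.description)    # the text of this .rst file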
diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 1d4f0663db..bca3fbd21b 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -666,7 +666,7 @@ def fetch_miyawaki2008(data_dir=None, url=None, resume=True, verbose=1): def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, get_masks=False, get_anats=False, data_dir=None, url=None, resume=True, verbose=1): - """Download and load Brainomics Localizer dataset (94 subjects). + """Download and load Brainomics/Localizer dataset (94 subjects). "The Functional Localizer is a simple and fast acquisition procedure based on a 5-minute functional magnetic resonance @@ -679,8 +679,11 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, Functional Localizer page." (see http://brainomics.cea.fr/localizer/) - "Scientific results obtained using this dataset are described in - Pinel et al., 2007" [1] + You may cite Papadopoulos Orfanos, Dimitri, *et al.* when using this + dataset [1]. + + Scientific results obtained using this dataset are described in + Pinel *et al.*, 2007 [2]. Parameters ---------- @@ -800,10 +803,14 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, References ---------- - Pinel, Philippe, et al. + [1] Papadopoulos Orfanos, Dimitri, et al. + "The Brainomics/Localizer database." + NeuroImage 144.B (2017): 309. + + [2] Pinel, Philippe, et al. "Fast reproducible identification and large-scale databasing of individual functional cognitive networks." - BMC neuroscience 8.1 (2007): 91. + BMC Neuroscience 8.1 (2007): 91. See Also --------- @@ -1861,3 +1868,227 @@ def fetch_surf_nki_enhanced(n_subjects=10, data_dir=None, phenotypic=phenotypic, description=fdescr) + +def _fetch_main_participants(data_dir, url, verbose): + """Helper function to fetch_main. + + This function helps in downloading and loading participants data from .tsv + uploaded on Open Science Framework (OSF). + + The original .tsv file contains many columns but this function picks only + those columns that are relevant. + + Parameters + ---------- + data_dir: str + Path of the data directory. Used to force data storage in a specified + location. If None is given, data is stored in home directory. + + url: str, optional + Override download URL. Used for test only (or if you setup a mirror of + the data). Default: None + + verbose: int + Defines the level of verbosity of the output. + + Returns + ------- + participants : numpy.ndarray + Contains data of each subject age, age group, child or adult, + gender, handedness. + + """ + dataset_name = 'main' + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, + verbose=verbose) + + if url is None: + url = 'https://osf.io/yr3av/download' + + files = [('participants.tsv', url, {'move': 'participants.tsv'})] + path_to_participants = _fetch_files(data_dir, files, verbose=verbose)[0] + + # Load path to participants + dtype = [('participant_id', 'U12'), ('Age', ' max_subjects) or (n_subjects < 1))): + warnings.warn("Wrong value for n_subjects={0}. 
The maximum " "value will be used instead: n_subjects={1}" .format(n_subjects, max_subjects)) + n_subjects = max_subjects + + # Download functional and regressors based on participants + child_count = participants['Child_Adult'].tolist().count('child') + adult_count = participants['Child_Adult'].tolist().count('adult') + + n_child = np.round(float(n_subjects) / max_subjects * child_count).astype(int) + n_adult = np.round(float(n_subjects) / max_subjects * adult_count).astype(int) + + # First, restrict the csv files to the adequate number of subjects + child_ids = participants[participants['Child_Adult'] == + 'child']['participant_id'][:n_child] + adult_ids = participants[participants['Child_Adult'] == + 'adult']['participant_id'][:n_adult] + ids = np.hstack([child_ids, adult_ids]) + participants = participants[np.in1d(participants['participant_id'], + ids)] + + funcs, regressors = _fetch_main_functional(participants, data_dir=data_dir, + url=None, verbose=verbose) + + return Bunch(func=funcs, confounds=regressors, phenotypic=participants, + description=fdescr) diff --git a/nilearn/datasets/neurovault.py b/nilearn/datasets/neurovault.py index 18dca08927..0f51b319ac 100644 --- a/nilearn/datasets/neurovault.py +++ b/nilearn/datasets/neurovault.py @@ -13,17 +13,18 @@ import json from glob import glob from tempfile import mkdtemp -from collections import Container try: # python3 from urllib.parse import urljoin, urlencode from urllib.request import build_opener, Request from urllib.error import URLError + from collections.abc import Container except ImportError: # python2 from urlparse import urljoin from urllib import urlencode from urllib2 import build_opener, Request, URLError + from collections import Container import numpy as np from sklearn.datasets.base import Bunch diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index 74e242f8e5..272d375a66 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -599,3 +599,60 @@ def test_fetch_surf_nki_enhanced(data_dir=tst.tmpdir, verbose=0): assert_true(isinstance(nki_data.phenotypic, np.ndarray)) assert_equal(nki_data.phenotypic.shape, (10,)) assert_not_equal(nki_data.description, '') + + +def _mock_participants_data(n_ids=5): + """At most 8 ids can be mocked + """ + ids = ['sub-pixar052', 'sub-pixar073', 'sub-pixar074', 'sub-pixar110', + 'sub-pixar042', 'sub-pixar109', 'sub-pixar068', 'sub-pixar007'] + array_ids = np.asarray(ids[:n_ids], dtype='|U12') + + age = np.ones(len(array_ids), dtype=' <title>connectome plot</title> - INSERT_JS_LIBRARIES_HERE + $INSERT_JS_LIBRARIES_HERE """.format(jquery, plotly, js_utils) - return html.replace('INSERT_JS_LIBRARIES_HERE', js_lib) + if not isinstance(html, Template): + html = Template(html) + return html.safe_substitute({'INSERT_JS_LIBRARIES_HERE': js_lib}) def get_html_template(template_name): @@ -64,7 +67,7 @@ template_path = os.path.join( os.path.dirname(__file__), 'data', 'html', template_name) with open(template_path, 'rb') as f: - return f.read().decode('utf-8') + return Template(f.read().decode('utf-8')) def _remove_after_n_seconds(file_name, n_seconds): @@ -120,7 +123,7 @@ def get_iframe(self, width=None, height=None): if height is None: height = self.height escaped = escape(self.html, quote=True) - wrapped = ('<iframe srcdoc="{}" width={} height={} frameBorder="0"></iframe>').format(escaped, width, height) + wrapped = ('<iframe srcdoc="{}" width="{}" height="{}" frameBorder="0"></iframe>').format(escaped, width, height) return wrapped diff --git a/nilearn/plotting/tests/test_displays.py index 999bd54072..d71813c5cd 100644 --- 
a/nilearn/plotting/tests/test_displays.py +++ b/nilearn/plotting/tests/test_displays.py @@ -7,6 +7,7 @@ import numpy as np from nilearn.plotting.displays import OrthoSlicer, XSlicer, OrthoProjector +from nilearn.plotting.displays import TiledSlicer from nilearn.plotting.displays import LZRYProjector from nilearn.datasets import load_mni152_template @@ -22,6 +23,13 @@ def test_demo_ortho_slicer(): oslicer.close() +def test_demo_tiled_slicer(): + tslicer = TiledSlicer(cut_coords=(0, 0, 0)) + img = load_mni152_template() + tslicer.add_overlay(img, cmap=plt.cm.gray) + tslicer.close() + + def test_stacked_slicer(): # Test stacked slicers, like the XSlicer img = load_mni152_template() @@ -33,6 +41,17 @@ slicer.close() +def test_tiled_slicer(): + img = load_mni152_template() + slicer = TiledSlicer.init_with_figure(img=img, cut_coords=(0, 0, 0), + colorbar=True) + slicer.add_overlay(img, cmap=plt.cm.gray, colorbar=True) + # Forcing a layout here, to test the locator code + with tempfile.TemporaryFile() as fp: + slicer.savefig(fp) + slicer.close() + + def test_demo_ortho_projector(): # This is only a smoke test img = load_mni152_template() diff --git a/nilearn/plotting/tests/test_html_connectome.py b/nilearn/plotting/tests/test_html_connectome.py index 43c47a1d16..c0f1c2aec6 100--- a/nilearn/plotting/tests/test_html_connectome.py +++ b/nilearn/plotting/tests/test_html_connectome.py @@ -1,5 +1,8 @@ +import warnings + import numpy as np +from nilearn.plotting import cm from nilearn.plotting.js_plotting_utils import decode from nilearn.plotting import html_connectome @@ -53,10 +56,84 @@ def test_view_connectome(): html = html_connectome.view_connectome(adj, coord, '85.3%') check_html(html, False, 'connectome-plot') html = html_connectome.view_connectome(adj, coord, '85.3%', - linewidth=8.5, marker_size=4.2) + linewidth=8.5, node_size=4.2) check_html(html, False, 'connectome-plot') + html = html_connectome.view_connectome( + adj, coord, '85.3%', linewidth=8.5, marker_size=np.arange(len(coord))) check_html(html, False, 'connectome-plot') + +def test_params_deprecation_view_connectome(): + deprecated_params = {'coords': 'node_coords', + 'threshold': 'edge_threshold', + 'cmap': 'edge_cmap', + 'marker_size': 'node_size', + } + deprecation_msg = ( + 'The parameter "{}" will be removed in Nilearn version 0.6.0. ' + 'Please use the parameter "{}" instead.' 
+ ) + warning_msgs = {old_: deprecation_msg.format(old_, new_) + for old_, new_ in deprecated_params.items() + } + + adj, coord = _make_connectome() + with warnings.catch_warnings(record=True) as raised_warnings: + html_connectome.view_connectome(adjacency_matrix=adj, + coords=coord, + edge_threshold='85.3%', + edge_cmap=cm.cyan_orange, + linewidth=8.5, node_size=4.2, + ) + + html_connectome.view_connectome(adjacency_matrix=adj, + node_coords=coord, + threshold='85.3%', + edge_cmap=cm.cyan_orange, + linewidth=8.5, + node_size=4.2, + ) + + html_connectome.view_connectome(adjacency_matrix=adj, + node_coords=coord, + edge_threshold='85.3%', + cmap=cm.cyan_orange, + linewidth=8.5, + node_size=4.2, + ) + + html_connectome.view_connectome(adjacency_matrix=adj, + node_coords=coord, + edge_threshold='85.3%', + edge_cmap=cm.cyan_orange, + linewidth=8.5, + marker_size=4.2, + ) + + html_connectome.view_connectome(adjacency_matrix=adj, + node_coords=coord, + edge_threshold='85.3%', + edge_cmap=cm.cyan_orange, + linewidth=8.5, + node_size=4.2, + ) + + html_connectome.view_connectome(adj, + coord, + '85.3%', + cm.cyan_orange, + 8.5, + 4.2, + ) + old_params = ['coords', 'threshold', 'cmap', 'marker_size'] + + assert len(raised_warnings) == 4 + for old_param_, raised_warning_ in zip(old_params, raised_warnings): + assert warning_msgs[old_param_] == str(raised_warning_.message) + assert raised_warning_.category is DeprecationWarning + + def test_get_markers(): coords = np.arange(12).reshape((4, 3)) colors = ['r', 'g', 'black', 'white'] @@ -77,3 +154,45 @@ def test_view_markers(): check_html(html, False, 'connectome-plot') html = html_connectome.view_markers(coords, marker_size=15) check_html(html, False, 'connectome-plot') + html = html_connectome.view_markers( + coords, marker_size=np.arange(len(coords))) check_html(html, False, 'connectome-plot') + html = html_connectome.view_markers( + coords, marker_size=list(range(len(coords)))) check_html(html, False, 'connectome-plot') + + +def test_params_deprecation_view_markers(): + """ Tests whether use of deprecated keyword parameters of view_markers + raises correct warnings. + """ + deprecated_params = {'coords': 'marker_coords', + 'colors': 'marker_color', + } + deprecation_msg = ( + 'The parameter "{}" will be removed in Nilearn version 0.6.0. ' + 'Please use the parameter "{}" instead.' 
+ ) + warning_msgs = {old_: deprecation_msg.format(old_, new_) + for old_, new_ in deprecated_params.items() + } + coords = np.arange(12).reshape((4, 3)) + colors = ['r', 'g', 'black', 'white'] + with warnings.catch_warnings(record=True) as raised_warnings: + html_connectome.view_markers(coords=coords, + marker_color=colors, + ) + html_connectome.view_markers(marker_coords=coords, + colors=colors, + ) + html_connectome.view_markers(marker_coords=coords, + marker_color=colors, + ) + html_connectome.view_markers(coords, + colors, + ) + old_params = ['coords', 'colors'] + assert len(raised_warnings) == 2 + for old_param_, raised_warning_ in zip(old_params, raised_warnings): + assert warning_msgs[old_param_] == str(raised_warning_.message) + assert raised_warning_.category is DeprecationWarning diff --git a/nilearn/plotting/tests/test_img_plotting.py b/nilearn/plotting/tests/test_img_plotting.py index 7eab2d6e90..5501711f51 100644 --- a/nilearn/plotting/tests/test_img_plotting.py +++ b/nilearn/plotting/tests/test_img_plotting.py @@ -872,6 +872,30 @@ def test_invalid_in_display_mode_cut_coords_all_plots(): img, display_mode='ortho', cut_coords=2) +def test_invalid_in_display_mode_tiled_cut_coords_single_all_plots(): + img = _generate_img() + + for plot_func in [plot_img, plot_anat, plot_roi, plot_epi, + plot_stat_map, plot_prob_atlas]: + assert_raises_regex(ValueError, + "The input given for display_mode='tiled' needs to " + "be a list of 3d world coordinates.", + plot_func, + img, display_mode='tiled', cut_coords=2) + + +def test_invalid_in_display_mode_tiled_cut_coords_all_plots(): + img = _generate_img() + + for plot_func in [plot_img, plot_anat, plot_roi, plot_epi, + plot_stat_map, plot_prob_atlas]: + assert_raises_regex(ValueError, + "The number cut_coords passed does not " + "match the display_mode", + plot_func, + img, display_mode='tiled', cut_coords=(2, 2)) + + def test_outlier_cut_coords(): """ Test to plot a subset of a large set of cuts found for a small area.""" bg_img = load_mni152_template() @@ -958,3 +982,20 @@ def test_add_markers_using_plot_glass_brain(): coords = [(-34, -39, -9)] fig.add_markers(coords) fig.close() + + +def test_plotting_functions_with_display_mode_tiled(): + img = _generate_img() + plot_stat_map(img, display_mode='tiled') + plot_anat(display_mode='tiled') + plot_img(img, display_mode='tiled') + plt.close() + + +def test_display_methods_with_display_mode_tiled(): + img = _generate_img() + display = plot_img(img, display_mode='tiled') + display.add_overlay(img, threshold=0) + display.add_edges(img, color='c') + display.add_contours(img, contours=2, linewidth=4, + colors=['limegreen', 'yellow']) diff --git a/nilearn/plotting/tests/test_js_plotting_utils.py b/nilearn/plotting/tests/test_js_plotting_utils.py index 84502a2018..6320afbc0f 100644 --- a/nilearn/plotting/tests/test_js_plotting_utils.py +++ b/nilearn/plotting/tests/test_js_plotting_utils.py @@ -239,8 +239,8 @@ def check_html(html, check_selects=True, plot_div_id='surface-plot'): resized = html.resize(3, 17) assert resized is html assert (html.width, html.height) == (3, 17) - assert "width=3 height=17" in html.get_iframe() - assert "width=33 height=37" in html.get_iframe(33, 37) + assert 'width="3" height="17"' in html.get_iframe() + assert 'width="33" height="37"' in html.get_iframe(33, 37) if not LXML_INSTALLED: return root = etree.HTML(html.html.encode('utf-8'), diff --git a/nilearn/tests/test_init.py index bf3c5a825c..153c1b3a05 100644 --- a/nilearn/tests/test_init.py +++ 
b/nilearn/tests/test_init.py @@ -2,11 +2,63 @@ import warnings from nose.tools import assert_true -from nilearn import _py2_deprecation_warning + +# Ensure import-time warnings don't interfere with the warnings tests +with warnings.catch_warnings(record=True): + from nilearn import _py34_deprecation_warning + from nilearn import _py2_deprecation_warning + from nilearn import _python_deprecation_warnings def test_py2_deprecation_warning(): - if sys.version_info.major == 2: - with warnings.catch_warnings(record=True) as raised_warnings: + with warnings.catch_warnings(record=True) as raised_warnings: _py2_deprecation_warning() - assert_true(raised_warnings[0].category is DeprecationWarning) + assert_true(raised_warnings[0].category is DeprecationWarning) + assert_true( + str(raised_warnings[0].message).startswith( + 'Python2 support is deprecated') + ) + + +def test_py34_deprecation_warning(): + with warnings.catch_warnings(record=True) as raised_warnings: + _py34_deprecation_warning() + assert_true(raised_warnings[0].category is DeprecationWarning) + assert_true( + str(raised_warnings[0].message).startswith( + 'Python 3.4 support is deprecated') + ) + + +def test_python_deprecation_warnings(): + with warnings.catch_warnings(record=True) as raised_warnings: + _python_deprecation_warnings() + if sys.version_info.major == 2: + assert_true(raised_warnings[0].category is DeprecationWarning) + assert_true( + str(raised_warnings[0].message).startswith( + 'Python2 support is deprecated') + ) + elif sys.version_info.major == 3 and sys.version_info.minor == 4: + assert_true(raised_warnings[0].category is DeprecationWarning) + assert_true( + str(raised_warnings[0].message).startswith( + 'Python 3.4 support is deprecated') + ) + else: + assert_true(len(raised_warnings) == 0) + + +def test_warnings_filter_scope(): + """ + Tests that warnings generated at Nilearn import in Python 2, 3.4 envs + do not change the warnings filter for subsequent warnings. + """ + with warnings.catch_warnings(record=True) as raised_warnings: + warnings.warn('Dummy warning 1') # Will be raised. + warnings.filterwarnings("ignore", message="Dummy warning") + warnings.warn('Dummy warning 2') # Will not be raised. + import nilearn # Irrespective of warning raised in py2, py3.4 ... + warnings.warn('Dummy warning 3') # ...this should not be raised. 
+ assert str(raised_warnings[0].message) == 'Dummy warning 1' + assert str(raised_warnings[-1].message) != 'Dummy warning 3' diff --git a/nilearn/tests/test_niimg.py b/nilearn/tests/test_niimg.py index 4c63c3f50e..dfc4900e02 100644 --- a/nilearn/tests/test_niimg.py +++ b/nilearn/tests/test_niimg.py @@ -3,7 +3,9 @@ from nose.tools import assert_equal +import nibabel as nb from nibabel import Nifti1Image +from nibabel.tmpdirs import InTemporaryDirectory from sklearn.externals import joblib from nilearn.image import new_img_like @@ -48,3 +50,30 @@ def test_get_target_dtype(): dtype_kind_int = niimg._get_target_dtype(img2.get_data().dtype, target_dtype='auto') assert_equal(dtype_kind_int, np.int32) + + +def test_img_data_dtype(): + # Ignoring complex, binary, 128+ bit, RGBA + nifti1_dtypes = ( + np.uint8, np.uint16, np.uint32, np.uint64, + np.int8, np.int16, np.int32, np.int64, + np.float32, np.float64) + dtype_matches = [] + with InTemporaryDirectory(): + for logical_dtype in nifti1_dtypes: + dataobj = np.random.uniform(0, 255, + size=(2, 2, 2)).astype(logical_dtype) + for on_disk_dtype in nifti1_dtypes: + img = Nifti1Image(dataobj, np.eye(4)) + img.set_data_dtype(on_disk_dtype) + img.to_filename('test.nii') + loaded = nb.load('test.nii') + # To verify later that sometimes these differ meaningfully + dtype_matches.append( + loaded.get_data_dtype() == niimg.img_data_dtype(loaded)) + # Use np.array(dataobj) because get_data() is to be deprecated + assert_equal(np.array(loaded.dataobj).dtype, + niimg.img_data_dtype(loaded)) + # Verify that the distinction is worth making + assert any(dtype_matches) + assert not all(dtype_matches)
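To make the behaviour pinned down by test_img_data_dtype concrete, here is a minimal sketch (the file name and random data are illustrative) of the case the new nilearn._utils.niimg.img_data_dtype helper exists for — an image stored as int16 on disk but scaled on read, whose effective dtype is therefore floating point:

    import numpy as np
    import nibabel as nb
    from nibabel import Nifti1Image
    from nilearn._utils import niimg

    # Float data written with an int16 on-disk dtype forces nibabel to
    # store a scaling (slope/intercept) in the header.
    data = np.random.uniform(0, 255, size=(2, 2, 2)).astype(np.float32)
    img = Nifti1Image(data, np.eye(4))
    img.set_data_dtype(np.int16)
    img.to_filename('scaled.nii')

    loaded = nb.load('scaled.nii')
    print(loaded.get_data_dtype())         # int16: the on-disk dtype
    print(np.array(loaded.dataobj).dtype)  # float64: scaling applied on read
    print(niimg.img_data_dtype(loaded))    # floating point, matching the array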