[DOC] Speed up fit computation with parallel processing (#4108)
* Speed up compute with n_jobs

* Set n_jobs to max CPU count where possible

* [full doc] test full build

* Set n_jobs to 2

* [full doc] test full build n_jobs=2

* Update examples/02_decoding/plot_haxby_searchlight.py

* Add the rest
ymzayek committed Nov 20, 2023
1 parent b8053f1 commit f4b03d4
Showing 24 changed files with 38 additions and 26 deletions.
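
Every file in this commit follows the same pattern: pass an explicit n_jobs to the nilearn estimator or function so that joblib can run the work on two CPU cores during the documentation build. A minimal sketch of the idea, using nilearn's generic Decoder as an illustration (the estimator choice and the input data below are assumptions, not part of this commit):

from nilearn.decoding import Decoder

# n_jobs is forwarded to joblib: cross-validation folds are fitted in
# parallel, two at a time. n_jobs=2 matches the value used in this commit.
decoder = Decoder(estimator="svc", standardize="zscore_sample", n_jobs=2)
# decoder.fit(fmri_imgs, labels)  # hypothetical inputs, defined elsewhere
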
2 changes: 1 addition & 1 deletion examples/02_decoding/plot_haxby_frem.py
@@ -55,7 +55,7 @@
# --------
from nilearn.decoding import FREMClassifier

decoder = FREMClassifier(cv=10, standardize="zscore_sample")
decoder = FREMClassifier(cv=10, standardize="zscore_sample", n_jobs=2)
# Fit model on train data and predict on test data
decoder.fit(X_train, y_train)
y_pred = decoder.predict(X_test)
2 changes: 1 addition & 1 deletion examples/02_decoding/plot_haxby_searchlight.py
@@ -67,7 +67,7 @@
# Make processing parallel
# /!\ As each thread will print its progress, n_jobs > 1 could mess up the
# information output.
n_jobs = 1
n_jobs = 2

# Define the cross-validation scheme used for validation.
# Here we use a KFold cross-validation on the session, which corresponds to
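
The warning in plot_haxby_searchlight.py above exists because each joblib worker prints its own progress, so with n_jobs=2 the messages interleave. A hedged sketch of how that n_jobs variable is consumed a few lines further down in the example (mask_img, process_mask_img, fmri_img, y, and cv are assumed from earlier in that example and are not shown in this diff):

from nilearn.decoding import SearchLight

# Two workers each report progress, so their output interleaves on stdout.
searchlight = SearchLight(
    mask_img,
    process_mask_img=process_mask_img,
    radius=5.6,
    n_jobs=n_jobs,  # 2 after this commit
    verbose=1,
    cv=cv,
)
# searchlight.fit(fmri_img, y)
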
3 changes: 1 addition & 2 deletions examples/02_decoding/plot_haxby_searchlight_surface.py
@@ -78,8 +78,7 @@
cv = KFold(n_splits=3, shuffle=False)

# Cross-validated search light
scores = search_light(X, y, estimator, adjacency, cv=cv, n_jobs=1)

scores = search_light(X, y, estimator, adjacency, cv=cv, n_jobs=2)
# %%
# Visualization
# -------------
1 change: 1 addition & 0 deletions examples/02_decoding/plot_mixed_gambles_frem.py
@@ -80,6 +80,7 @@
penalty="tv-l1",
eps=1e-1, # prefer large alphas
memory="nilearn_cache",
n_jobs=2,
)
# tv_l1.fit(zmap_filenames, behavioral_target)
# plot_stat_map(tv_l1.coef_img_, title="TV-L1", display_mode="yz",
5 changes: 4 additions & 1 deletion examples/02_decoding/plot_miyawaki_encoding.py
@@ -60,7 +60,10 @@
from nilearn.maskers import MultiNiftiMasker

masker = MultiNiftiMasker(
mask_img=dataset.mask, detrend=True, standardize="zscore_sample"
mask_img=dataset.mask,
detrend=True,
standardize="zscore_sample",
n_jobs=2,
)
masker.fit()
fmri_data = masker.transform(fmri_random_runs_filenames)
2 changes: 1 addition & 1 deletion examples/02_decoding/plot_miyawaki_reconstruction.py
@@ -63,7 +63,7 @@

# Load and mask fMRI data
masker = MultiNiftiMasker(
mask_img=miyawaki_dataset.mask, detrend=True, standardize=False
mask_img=miyawaki_dataset.mask, detrend=True, standardize=False, n_jobs=2
)
masker.fit()
X_train = masker.transform(X_random_filenames)
4 changes: 2 additions & 2 deletions examples/02_decoding/plot_oasis_vbm.py
@@ -128,7 +128,7 @@
mask=mask,
scoring="neg_mean_absolute_error",
screening_percentile=1,
n_jobs=1,
n_jobs=2,
standardize="zscore_sample",
)
# Fit and predict with the decoder
@@ -197,7 +197,7 @@
data, # + intercept as a covariate by default
n_perm=2000,  # 2,000 in the interest of time; 10,000 would be better
verbose=1, # display progress bar
n_jobs=1,
n_jobs=2,
)
signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)
signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(
1 change: 1 addition & 0 deletions examples/02_decoding/plot_oasis_vbm_space_net.py
@@ -60,6 +60,7 @@
screening_percentile=5.0,
memory_level=2,
standardize="zscore_sample",
n_jobs=2,
)
decoder.fit(gm_imgs_train, age_train) # fit
coef_img = decoder.coef_img_
2 changes: 1 addition & 1 deletion examples/02_decoding/plot_simulated_data.py
@@ -183,7 +183,7 @@ def plot_slices(data, title=None):
estimator=svm.SVR(kernel="linear"),
cv=KFold(n_splits=4),
verbose=1,
n_jobs=1,
n_jobs=2,
),
),
]
2 changes: 2 additions & 0 deletions examples/03_connectivity/plot_compare_decomposition.py
@@ -54,6 +54,7 @@
mask_strategy="whole-brain-template",
random_state=0,
standardize="zscore_sample",
n_jobs=2,
)
canica.fit(func_filenames)

@@ -116,6 +117,7 @@
n_epochs=1,
mask_strategy="whole-brain-template",
standardize="zscore_sample",
n_jobs=2,
)

print("[Example] Fitting dictionary learning model")
1 change: 1 addition & 0 deletions examples/04_glm_first_level/plot_bids_features.py
@@ -90,6 +90,7 @@
space_label,
smoothing_fwhm=5.0,
derivatives_folder=derivatives_folder,
n_jobs=2,
)

# %%
4 changes: 3 additions & 1 deletion examples/05_glm_second_level/plot_oasis.py
@@ -103,7 +103,9 @@
# also smooth a little bit to improve statistical behavior.
from nilearn.glm.second_level import SecondLevelModel

second_level_model = SecondLevelModel(smoothing_fwhm=2.0, mask_img=mask_img)
second_level_model = SecondLevelModel(
smoothing_fwhm=2.0, mask_img=mask_img, n_jobs=2
)
second_level_model.fit(
gray_matter_map_filenames,
design_matrix=design_matrix,
@@ -45,7 +45,7 @@
# Model specification and fit
from nilearn.glm.second_level import SecondLevelModel

second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
second_level_model = SecondLevelModel(smoothing_fwhm=8.0, n_jobs=2)
second_level_model = second_level_model.fit(
second_level_input, design_matrix=design_matrix
)
@@ -68,7 +68,7 @@
# Fit of the second-level model
from nilearn.glm.second_level import SecondLevelModel

model = SecondLevelModel(smoothing_fwhm=5.0)
model = SecondLevelModel(smoothing_fwhm=5.0, n_jobs=2)
model.fit(contrast_map_filenames, design_matrix=design_matrix)

# %%
@@ -143,7 +143,7 @@
two_sided_test=False,
mask=None,
smoothing_fwhm=5.0,
n_jobs=1,
n_jobs=2,
)

# %%
@@ -80,7 +80,7 @@
# Next, we specify the model and fit it.
from nilearn.glm.second_level import SecondLevelModel

second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
second_level_model = SecondLevelModel(smoothing_fwhm=8.0, n_jobs=2)
second_level_model = second_level_model.fit(
second_level_input,
design_matrix=design_matrix,
@@ -167,7 +167,7 @@
n_perm=500, # 500 for the sake of time. Ideally, this should be 10,000.
two_sided_test=False,
smoothing_fwhm=8.0,
n_jobs=1,
n_jobs=2,
threshold=0.001,
)

@@ -100,11 +100,11 @@
# We specify the analysis models and fit them.
from nilearn.glm.second_level import SecondLevelModel

second_level_model_unpaired = SecondLevelModel().fit(
second_level_model_unpaired = SecondLevelModel(n_jobs=2).fit(
second_level_input, design_matrix=unpaired_design_matrix
)

second_level_model_paired = SecondLevelModel().fit(
second_level_model_paired = SecondLevelModel(n_jobs=2).fit(
second_level_input, design_matrix=paired_design_matrix
)

2 changes: 1 addition & 1 deletion examples/05_glm_second_level/plot_thresholding.py
@@ -40,7 +40,7 @@
# Next, we specify and estimate the model.
from nilearn.glm.second_level import SecondLevelModel

second_level_model = SecondLevelModel().fit(
second_level_model = SecondLevelModel(n_jobs=2).fit(
cmap_filenames, design_matrix=design_matrix
)

4 changes: 2 additions & 2 deletions examples/07_advanced/plot_advanced_decoding_scikit.py
@@ -126,7 +126,7 @@
cv=cv,
scoring="roc_auc",
groups=session_label,
n_jobs=-1,
n_jobs=2,
)
print(f"SVC accuracy (tuned parameters): {cv_scores.mean():.3f}")

@@ -274,6 +274,6 @@
# fmri_masked,
# target,
# cv=cv,
# n_jobs=-1,
# n_jobs=2,
# verbose=1)
# But, be aware that this can take * A WHILE * ...
@@ -107,7 +107,7 @@
cv=cv,
verbose=1,
refit=False,
n_jobs=8,
n_jobs=2,
)
gs.fit(masked_data, classes)
mean_scores = gs.cv_results_["mean_test_score"]
1 change: 1 addition & 0 deletions examples/07_advanced/plot_beta_series.py
@@ -79,6 +79,7 @@
data_dir,
"languagelocalizer",
img_filters=[("desc", "preproc")],
n_jobs=2,
)

# Grab the first subject's model, functional file, and events DataFrame
4 changes: 2 additions & 2 deletions examples/07_advanced/plot_bids_analysis.py
@@ -57,7 +57,7 @@
models_events,
models_confounds,
) = first_level_from_bids(
data_dir, task_label, img_filters=[("desc", "preproc")]
data_dir, task_label, img_filters=[("desc", "preproc")], n_jobs=2
)

# %%
@@ -137,7 +137,7 @@

# %%
# Note that we apply a smoothing of 8mm.
second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
second_level_model = SecondLevelModel(smoothing_fwhm=8.0, n_jobs=2)
second_level_model = second_level_model.fit(second_level_input)

# %%
2 changes: 1 addition & 1 deletion examples/07_advanced/plot_haxby_mass_univariate.py
@@ -117,7 +117,7 @@
n_perm=10000,
two_sided_test=True,
verbose=1, # display progress bar
n_jobs=1, # can be changed to use more CPUs
n_jobs=2, # can be changed to use more CPUs
)
signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)
signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(
@@ -99,7 +99,7 @@
tfce=True,
n_perm=200, # 200 for the sake of time. Ideally, this should be 10000.
verbose=1, # display progress bar
n_jobs=1, # can be changed to use more CPUs
n_jobs=2, # can be changed to use more CPUs
output_type="dict",
)
neg_log_pvals_permuted_ols_unmasked = nifti_masker.inverse_transform(
4 changes: 3 additions & 1 deletion examples/07_advanced/plot_surface_bids_analysis.py
@@ -53,7 +53,9 @@
_, models_run_imgs, models_events, models_confounds = \
first_level_from_bids(
data_dir, task_label,
img_filters=[('desc', 'preproc')])
img_filters=[('desc', 'preproc')],
n_jobs=2
)

# %%
# We also need to get the :term:`TR` information.
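
The commit history above shows n_jobs was first set to the maximum available CPU count and then pinned to 2 for the documentation build. A small, hedged sketch of how a reader might cap the value on their own machine (os.cpu_count is from the Python standard library; the cap of 2 simply mirrors this commit):

import os

# Never request more workers than the machine has; fall back to 1 if the
# CPU count cannot be determined.
n_jobs = min(2, os.cpu_count() or 1)
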
