Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[REF] fix f strings #3770

Merged
merged 8 commits into from
Jun 21, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
17 changes: 8 additions & 9 deletions examples/00_tutorials/plot_decoding_tutorial.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,11 +194,10 @@
# of our model on examples it hasn't seen to examine how well the model perform
# in general.

print(
"Prediction Accuracy: {:.3f}".format(
(prediction == conditions_test).sum() / float(len(conditions_test))
)
prediction_accuracy = (prediction == conditions_test).sum() / float(
len(conditions_test)
)
print(f"Prediction Accuracy: {prediction_accuracy:.3f}")

###########################################################################
# Implementing a KFold loop
Expand All @@ -216,12 +215,12 @@
)
decoder.fit(index_img(fmri_niimgs, train), conditions[train])
prediction = decoder.predict(index_img(fmri_niimgs, test))
prediction_accuracy = (prediction == conditions[test]).sum() / float(
len(conditions[test])
)
print(
"CV Fold {:01d} | Prediction Accuracy: {:.3f}".format(
fold,
(prediction == conditions[test]).sum()
/ float(len(conditions[test])),
)
f"CV Fold {fold:01d} | "
f"Prediction Accuracy: {prediction_accuracy:.3f}"
)

###########################################################################
Expand Down
6 changes: 3 additions & 3 deletions examples/01_plotting/plot_surf_stat_map.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,9 +57,9 @@

# The nki dictionary contains file names for the data
# of all downloaded subjects.
print(('Resting state data of the first subjects on the '
'fsaverag5 surface left hemisphere is at: %s' %
nki_dataset['func_left'][0]))
print('Resting state data of the first subject on the '
f"fsaverage5 surface left hemisphere is at: {nki_dataset['func_left'][0]}"
)

# Destrieux parcellation for left hemisphere in fsaverage5 space
destrieux_atlas = datasets.fetch_atlas_surf_destrieux()
Expand Down
5 changes: 2 additions & 3 deletions examples/02_decoding/plot_haxby_glm_decoding.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,6 @@
classification_accuracy = np.mean(list(decoder.cv_scores_.values()))
chance_level = 1.0 / len(np.unique(conditions))
print(
"Classification accuracy: {:.4f} / Chance level: {}".format(
classification_accuracy, chance_level
)
f"Classification accuracy: {classification_accuracy:.4f} / "
f"Chance level: {chance_level}"
)
7 changes: 3 additions & 4 deletions examples/02_decoding/plot_simulated_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -212,10 +212,9 @@ def plot_slices(data, title=None):
coefs = estimator.coef_
coefs = np.reshape(coefs, [size, size, size])
score = estimator.score(X_test, y_test)
title = "{}: prediction score {:.3f}, training time: {:.2f}s".format(
name,
score,
elapsed_time,
title = (
f"{name}: prediction score {score:.3f}, "
f"training time: {elapsed_time:.2f}s"
)

else: # Searchlight
Expand Down
10 changes: 4 additions & 6 deletions examples/03_connectivity/plot_group_level_connectivity.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,8 @@
msdl_coords = msdl_data.region_coords
n_regions = len(msdl_coords)
print(
"MSDL has {0} ROIs, part of the following networks :\n{1}.".format(
n_regions, msdl_data.networks
)
f"MSDL has {n_regions} ROIs, "
f"part of the following networks:\n{msdl_data.networks}."
)

###############################################################################
Expand Down Expand Up @@ -90,9 +89,8 @@

# All individual coefficients are stacked in a unique 2D matrix.
print(
"Correlations of children are stacked in an array of shape {0}".format(
correlation_matrices.shape
)
"Correlations of children are stacked "
f"in an array of shape {correlation_matrices.shape}"
)

###############################################################################
Expand Down
4 changes: 2 additions & 2 deletions examples/03_connectivity/plot_seed_to_voxel_correlation.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,8 +113,8 @@
# series** is an array with shape n_volumes, 1), while the
# **brain time series** is an array with shape (n_volumes, n_voxels).

print("Seed time series shape: (%s, %s)" % seed_time_series.shape)
print("Brain time series shape: (%s, %s)" % brain_time_series.shape)
print(f"Seed time series shape: {seed_time_series.shape}")
print(f"Brain time series shape: {brain_time_series.shape}")

##########################################################################
# We can plot the **seed time series**.
Expand Down
2 changes: 1 addition & 1 deletion examples/07_advanced/plot_localizer_simple_analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@
title = (
"Negative $\\log_{10}$ p-values"
"\n(Parametric + Bonferroni correction)"
"\n%d detections" % (~masked_pvals.mask).sum()
f"\n{(~masked_pvals.mask).sum()} detections"
)

display.title(title, y=1.1, alpha=0.8)
Expand Down
9 changes: 4 additions & 5 deletions nilearn/_utils/data_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -367,9 +367,8 @@ def generate_fake_fmri(shape=(10, 11, 12),
target = np.zeros(length, dtype=int)
rest_max_size = (length - (n_blocks * block_size)) // n_blocks
if rest_max_size < 0:
raise ValueError('%s is too small '
'to put %s blocks of size %s' %
(length, n_blocks, block_size))
raise ValueError(f'{length} is too small '
f'to put {n_blocks} blocks of size {block_size}')
t_start = 0
if rest_max_size > 0:
t_start = rand_gen.randint(0, rest_max_size, 1)[0]
Expand Down Expand Up @@ -495,11 +494,11 @@ def write_fake_fmri_data_and_design(shapes,
mask_file, fmri_files, design_files = 'mask.nii', [], []
rand_gen = check_random_state(random_state)
for i, shape in enumerate(shapes):
fmri_files.append('fmri_run%d.nii' % i)
fmri_files.append(f'fmri_run{i:d}.nii')
data = rand_gen.randn(*shape)
data[1:-1, 1:-1, 1:-1] += 100
Nifti1Image(data, affine).to_filename(fmri_files[-1])
design_files.append('dmtx_%d.csv' % i)
design_files.append(f'dmtx_{i:d}.csv')
pd.DataFrame(rand_gen.randn(shape[3], rk),
columns=['', '', '']).to_csv(design_files[-1])
Nifti1Image((rand_gen.rand(*shape[:3]) > .5).astype(np.int8),
Expand Down
2 changes: 1 addition & 1 deletion nilearn/_utils/extmath.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ def is_spd(M, decimal=15, verbose=1):
"""
if not np.allclose(M, M.T, atol=0, rtol=10**-decimal):
if verbose > 0:
print("matrix not symmetric to %d decimals" % decimal)
print(f"matrix not symmetric to {decimal:d} decimals")
return False
eigvalsh = np.linalg.eigvalsh(M)
ispd = eigvalsh.min() > 0
Expand Down
17 changes: 6 additions & 11 deletions nilearn/_utils/helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,13 +70,9 @@
for deprecated_param_ in used_deprecated_params:
replacement_param = replacement_params[deprecated_param_]
param_deprecation_msg = (
'The parameter "{}" will be removed in {} release of {}. '
'Please use the parameter "{}" instead.'.format(deprecated_param_,
end_version,
lib_name,
replacement_param,
)
)
f'The parameter "{deprecated_param_}" '
f'will be removed in {end_version} release of {lib_name}. '
f'Please use the parameter "{replacement_param}" instead.')
warnings.warn(category=FutureWarning,
message=param_deprecation_msg,
stacklevel=3)
Expand Down Expand Up @@ -138,10 +134,9 @@
def wrapper(*args, **kwargs):
found = set(removed_params).intersection(kwargs)
if found:
message = ('Parameter(s) {} will be removed in version {}; '
'{}'.format(', '.join(found),
end_version, reason)
)
message = (f'Parameter(s) {", ".join(found)} '

Check warning on line 137 in nilearn/_utils/helpers.py

View check run for this annotation

Codecov / codecov/patch

nilearn/_utils/helpers.py#L137

Added line #L137 was not covered by tests
f'will be removed in version {end_version}; '
f'{reason}')
warnings.warn(category=DeprecationWarning,
message=message,
stacklevel=3)
Expand Down
18 changes: 9 additions & 9 deletions nilearn/_utils/niimg.py
Original file line number Diff line number Diff line change
Expand Up @@ -236,21 +236,21 @@ def _repr_niimgs(niimgs, shorten=True):
)
else:
tmp = [_repr_niimgs(niimg, shorten=shorten) for niimg in niimgs]
return f"[{', '.join(x for x in tmp)}]"
return f"[{', '.join(tmp)}]"
# Nibabel objects have a 'get_filename'
try:
filename = niimgs.get_filename()
if filename is not None:
return "{}('{}')".format(
niimgs.__class__.__name__,
_short_repr(filename, shorten=shorten),
return (
f"{niimgs.__class__.__name__}"
f"('{_short_repr(filename, shorten=shorten)}')"
)
else:
# No shortening in this case
return "{}(\nshape={},\naffine={}\n)".format(
niimgs.__class__.__name__,
repr(niimgs.shape),
repr(niimgs.affine),
return (
f"{niimgs.__class__.__name__}"
f"(\nshape={repr(niimgs.shape)},"
f"\naffine={repr(niimgs.affine)}\n)"
)
except Exception:
pass
Expand All @@ -267,7 +267,7 @@ def _short_repr(niimg_rep, shorten=True, truncate=20):
# If the name of the file itself is larger than
# truncate, then shorten the name only
if len(path_to_niimg.name) > truncate:
return path_to_niimg.name[: (truncate - 2)] + "..."
return f"{path_to_niimg.name[: (truncate - 2)]}..."
# Else add some folder structure if available
else:
rep = path_to_niimg.name
Expand Down
22 changes: 10 additions & 12 deletions nilearn/_utils/param_validation.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,10 +50,10 @@ def check_threshold(threshold, data, percentile_func, name="threshold"):
"""
if isinstance(threshold, str):
message = (
'If "{}" is given as string it '
f'If "{name}" is given as string it '
"should be a number followed by the percent "
'sign, e.g. "25.3%"'
).format(name)
)
if not threshold.endswith("%"):
raise ValueError(message)

Expand All @@ -70,15 +70,13 @@ def check_threshold(threshold, data, percentile_func, name="threshold"):
value_check = abs(data).max()
if abs(threshold) > value_check:
warnings.warn(
"The given float value must not exceed {}. "
"But, you have given threshold={} ".format(
value_check, threshold
)
f"The given float value must not exceed {value_check}. "
f"But, you have given threshold={threshold}."
)
else:
raise TypeError(
"%s should be either a number "
"or a string finishing with a percent sign" % (name,)
f"{name} should be either a number "
"or a string finishing with a percent sign"
)
return threshold

Expand Down Expand Up @@ -156,9 +154,9 @@ def _adjust_screening_percentile(screening_percentile, mask_img, verbose=0):
f"Mask volume = {mask_volume:g}mm^3 = {mask_volume / 1000.0:g}cm^3"
)
print(
"Standard brain volume = {:g}mm^3 = {:g}cm^3".format(
MNI152_BRAIN_VOLUME, MNI152_BRAIN_VOLUME / 1.0e3
)
"Standard brain volume "
f"= {MNI152_BRAIN_VOLUME:g}mm^3 "
f"= {MNI152_BRAIN_VOLUME / 1.0e3:g}cm^3"
)
print(
f"Original screening-percentile: {original_screening_percentile:g}"
Expand Down Expand Up @@ -208,7 +206,7 @@ def check_feature_screening(
elif not (0.0 <= screening_percentile <= 100.0):
raise ValueError(
"screening_percentile should be in the interval"
" [0, 100], got %g" % screening_percentile
f" [0, 100], got {screening_percentile:g}"
)
else:
# correct screening_percentile according to the volume of the data mask
Expand Down
8 changes: 3 additions & 5 deletions nilearn/_utils/testing.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,11 +105,9 @@ def assert_memory_less_than(
if mem_used < 50:
raise ValueError(
"Memory profiler measured an untrustable memory "
"consumption ({:.2f} MiB). The expected memory "
"limit was {:.2f} MiB. Try to bench with larger "
"objects (at least 100MiB in memory).".format(
mem_used, memory_limit
)
f"consumption ({mem_used:.2f} MiB). The expected memory "
f"limit was {memory_limit:.2f} MiB. Try to bench with larger "
"objects (at least 100MiB in memory)."
)


Expand Down
7 changes: 2 additions & 5 deletions nilearn/_utils/tests/test_niimg_conversions.py
Original file line number Diff line number Diff line change
Expand Up @@ -559,11 +559,8 @@ def test_repr_niimgs(tmp_path):
_utils._repr_niimgs(list_of_paths, shorten=True)
== shortened_list_of_paths
)

long_list_of_paths = "[%s]" % ",\n ".join(
_ for _ in [str(_) for _ in list_of_paths]
)

long_list_of_paths = ",\n ".join([str(_) for _ in list_of_paths])
long_list_of_paths = f"[{long_list_of_paths}]"
assert (
_utils._repr_niimgs(list_of_paths, shorten=False) == long_list_of_paths
)
Expand Down
2 changes: 1 addition & 1 deletion nilearn/datasets/func.py
Original file line number Diff line number Diff line change
Expand Up @@ -675,7 +675,7 @@ def fetch_localizer_contrasts(
if isinstance(contrasts, str):
raise ValueError(
"Contrasts should be a list of strings, but "
'a single string was given: "%s"' % contrasts
f'a single string was given: "{contrasts}"'
)
if n_subjects is None:
n_subjects = 94 # 94 subjects available
Expand Down
6 changes: 3 additions & 3 deletions nilearn/interfaces/bids/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,9 +141,9 @@ def _generate_dataset_description(out_file, model_level):
'GeneratedBy': {
'Name': 'nilearn',
'Version': nilearn.__version__,
'Description': 'A Nilearn {} GLM.'.format(
'first-level' if model_level == 1 else 'second-level'
),
'Description': ("A Nilearn "
f"{'first' if model_level == 1 else 'second'}"
"-level GLM."),
'CodeURL': (
f"{repo_url}/releases/tag/{nilearn.__version__}"
)
Expand Down
22 changes: 8 additions & 14 deletions nilearn/maskers/nifti_labels_masker.py
Original file line number Diff line number Diff line change
Expand Up @@ -333,22 +333,16 @@ def fit(self, imgs=None, y=None):
All parameters are unused, they are for scikit-learn compatibility.

"""
_utils.logger.log(
'loading data from %s' % _utils._repr_niimgs(
self.labels_img,
shorten=(not self.verbose),
),
verbose=self.verbose,
)
repr = _utils._repr_niimgs(self.labels_img,
shorten=(not self.verbose))
msg = f"loading data from {repr}"
_utils.logger.log(msg=msg, verbose=self.verbose)
self.labels_img_ = _utils.check_niimg_3d(self.labels_img)
if self.mask_img is not None:
_utils.logger.log(
'loading data from %s' % _utils._repr_niimgs(
self.mask_img,
shorten=(not self.verbose),
),
verbose=self.verbose,
)
repr = _utils._repr_niimgs(self.mask_img,
shorten=(not self.verbose))
msg = f"loading data from {repr}"
_utils.logger.log(msg=msg, verbose=self.verbose)
self.mask_img_ = _utils.check_niimg_3d(self.mask_img)

else:
Expand Down
22 changes: 8 additions & 14 deletions nilearn/maskers/nifti_maps_masker.py
Original file line number Diff line number Diff line change
Expand Up @@ -349,13 +349,10 @@ def fit(self, imgs=None, y=None):

"""
# Load images
_utils.logger.log(
"loading regions from %s" % _utils._repr_niimgs(
self.maps_img,
shorten=(not self.verbose),
),
verbose=self.verbose,
)
repr = _utils._repr_niimgs(self.maps_img,
shorten=(not self.verbose))
msg = f"loading regions from {repr}"
_utils.logger.log(msg=msg, verbose=self.verbose)
self.maps_img_ = _utils.check_niimg(
self.maps_img, dtype=self.dtype, atleast_4d=True
)
Expand All @@ -367,13 +364,10 @@ def fit(self, imgs=None, y=None):
)

if self.mask_img is not None:
_utils.logger.log(
"loading mask from %s" % _utils._repr_niimgs(
self.mask_img,
shorten=(not self.verbose),
),
verbose=self.verbose,
)
repr = _utils._repr_niimgs(self.mask_img,
shorten=(not self.verbose))
msg = f"loading mask from {repr}"
_utils.logger.log(msg=msg, verbose=self.verbose)
self.mask_img_ = _utils.check_niimg_3d(self.mask_img)
else:
self.mask_img_ = None
Expand Down