[REF] Fix more f strings #3776

Merged · 10 commits · Jun 29, 2023
2 changes: 1 addition & 1 deletion .github/workflows/flake8.yml
@@ -31,7 +31,7 @@ jobs:

- name: "Install Flake8"
shell: bash {0}
-run: python -m pip install --upgrade pip flake8 flake8-docstrings
+run: python -m pip install --upgrade pip flake8 flake8-docstrings flake8-use-fstring

- name: "Run Flake8 on whole file"
shell: bash {0}
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -121,4 +121,4 @@ repos:
hooks:
- id: flake8
args: [--verbose]
-additional_dependencies: [flake8-docstrings]
+additional_dependencies: [flake8-docstrings, flake8-use-fstring]
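Reviewer note: for anyone unfamiliar with the plugin enabled above, here is a sketch of what it enforces. The rule codes FS001 (%-interpolation) and FS002 (`str.format`) are quoted from the plugin's docs as I remember them, so treat them as assumptions:

```python
# Toy module illustrating what flake8-use-fstring flags (codes assumed).
name, count = "fiac", 3

print("subject %s has %d runs" % (name, count))      # FS001: % formatting
print("subject {} has {} runs".format(name, count))  # FS002: .format()
print(f"subject {name} has {count} runs")            # OK: f-string
```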
3 changes: 1 addition & 2 deletions examples/04_glm_first_level/plot_fiac_analysis.py
@@ -105,8 +105,7 @@ def pad_vector(contrast_, n_columns):

print('Computing contrasts...')
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
-print(' Contrast % 2i out of %i: %s' % (
-index + 1, len(contrasts), contrast_id))
+print(f" Contrast {index + 1: 2} out of {len(contrasts)}: {contrast_id}")
# Estimate the contrasts. Note that the model implicitly computes a fixed
# effect across the two sessions
z_map = fmri_glm.compute_contrast(
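Reviewer note: the `% 2i` → `{index + 1: 2}` translation above preserves the space-sign-plus-width-2 spec. A quick self-contained check, with made-up values:

```python
# The printf spec "% 2i" (space sign, minimum width 2) maps to the
# f-string format spec " 2" after the colon.
for n in (1, 12):
    old = " Contrast % 2i out of %i" % (n, 16)
    new = f" Contrast {n: 2} out of {16}"
    assert old == new
    print(repr(new))
# ' Contrast  1 out of 16'
# ' Contrast  12 out of 16'
```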
@@ -178,8 +178,8 @@
from nilearn.glm.contrasts import compute_contrast

for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
-print(' Contrast % i out of %i: %s, right hemisphere' %
-(index + 1, len(contrasts), contrast_id))
+print(f" Contrast {index + 1:1} out of {len(contrasts)}: "
+f"{contrast_id}, right hemisphere")
# compute contrast-related statistics
contrast = compute_contrast(labels, estimates, contrast_val,
contrast_type='t')
@@ -211,8 +211,8 @@
###############################################################################
# Finally, we create contrast-specific maps and plot them.
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
-print(' Contrast % i out of %i: %s, left hemisphere' %
-(index + 1, len(contrasts), contrast_id))
+print(f" Contrast {index + 1:1} out of {len(contrasts)}: "
+f"{contrast_id}, left hemisphere")
# compute contrasts
contrast = compute_contrast(labels, estimates, contrast_val,
contrast_type='t')
23 changes: 11 additions & 12 deletions nilearn/_utils/cache_mixin.py
@@ -48,26 +48,25 @@ def _check_memory(memory, verbose=0):
# Maybe the user wants to enable expanded user path.
error_msg = (
"Given cache path parent directory doesn't "
"exists, you gave '{}'. Enabling "
f"exists, you gave '{split_cache_dir[0]}'. Enabling "
"nilearn.EXPAND_PATH_WILDCARDS could solve "
"this issue.".format(split_cache_dir[0])
"this issue."
)
elif memory.startswith("~"):
# Path built on top of expanded user path doesn't exist.
error_msg = (
"Given cache path parent directory doesn't "
"exists, you gave '{}' which was expanded "
"as '{}' but doesn't exist either. Use "
"nilearn.EXPAND_PATH_WILDCARDS to deactivate "
"auto expand user path (~) behavior.".format(
split_cache_dir[0], os.path.dirname(memory)
)
f"exists, you gave '{split_cache_dir[0]}' "
"which was expanded as '{os.path.dirname(memory)}' "
"but doesn't exist either. "
"Use nilearn.EXPAND_PATH_WILDCARDS to deactivate "
"auto expand user path (~) behavior."
)
else:
# The given cache base path doesn't exist.
error_msg = (
"Given cache path parent directory doesn't "
"exists, you gave '{}'.".format(split_cache_dir[0])
"exists, you gave '{split_cache_dir[0]}'."
)
raise ValueError(error_msg)

@@ -159,18 +158,18 @@ def cache(
raise TypeError(
"'memory' argument must be a string or a "
"joblib.Memory object. "
"%s %s was given." % (memory, type(memory))
f"{memory} {type(memory)} was given."
)
if (
memory.location is None
and memory_level is not None
and memory_level > 1
):
warnings.warn(
"Caching has been enabled (memory_level = %d) "
f"Caching has been enabled (memory_level = {memory_level}) "
"but no Memory object or path has been provided"
" (parameter memory). Caching deactivated for "
"function %s." % (memory_level, func.__name__),
f"function {func.__name__}.",
stacklevel=2,
)
else:
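Reviewer note: a classic pitfall with implicitly concatenated f-strings is dropping the `f` prefix on one fragment, which prints the braces literally instead of interpolating. A minimal reproduction, with a made-up path:

```python
import os

memory = "~/nilearn_cache/joblib"

# Bug: the second fragment lacks the f prefix, so nothing is interpolated.
broken = (
    f"you gave '{memory}' "
    "which was expanded as '{os.path.dirname(memory)}'"
)
print(broken)  # ...expanded as '{os.path.dirname(memory)}'

# Fix: every fragment that interpolates needs its own f prefix.
fixed = (
    f"you gave '{memory}' "
    f"which was expanded as '{os.path.dirname(memory)}'"
)
print(fixed)  # ...expanded as '~/nilearn_cache'
```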
17 changes: 7 additions & 10 deletions nilearn/_utils/exceptions.py
@@ -52,19 +52,16 @@ def increment_stack_counter(self):
@property
def message(self):
"""Format error message."""
+expected_dim = self.required_dimension + self.stack_counter
+total_file_dim = f" ({self.file_dimension + self.stack_counter}D)"
return (
"Input data has incompatible dimensionality: "
-"Expected dimension is {}D and you provided a "
-"{}{}D image{}{}. "
+f"Expected dimension is {expected_dim}D and you provided a "
+f"{'list of ' * self.stack_counter}{self.file_dimension}D "
+f"image{'s' * (self.stack_counter > 0)}"
+f"{total_file_dim * (self.stack_counter > 0)}. "
"See https://nilearn.github.io/stable/manipulating_images/"
-"input_output.html.".format(
-self.required_dimension + self.stack_counter,
-"list of " * self.stack_counter,
-self.file_dimension,
-"s" * (self.stack_counter != 0),
-(" (%iD)" % (self.file_dimension + self.stack_counter))
-* (self.stack_counter > 0),
-)
+"input_output.html."
)

def __str__(self):
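Reviewer note: the rewritten `DimensionError.message` leans on string repetition with a bool (`'s' * (n > 0)` repeats zero or one time). A standalone sketch of the trick:

```python
# string * bool repeats the string 0 or 1 times; string * int repeats n times.
for stack_counter in (0, 2):
    file_dimension = 3
    total = f" ({file_dimension + stack_counter}D)"
    print(
        f"{'list of ' * stack_counter}{file_dimension}D "
        f"image{'s' * (stack_counter > 0)}"
        f"{total * (stack_counter > 0)}"
    )
# 3D image
# list of list of 3D images (5D)
```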
2 changes: 1 addition & 1 deletion nilearn/_utils/logger.py
@@ -106,6 +106,6 @@ def _compose_err_msg(msg, **kwargs):
updated_msg = msg
for k, v in sorted(kwargs.items()):
if isinstance(v, str): # print only str-like arguments
-updated_msg += "\n" + k + ": " + v
+updated_msg += f"\n{k}: {v}"

return updated_msg
6 changes: 4 additions & 2 deletions nilearn/_utils/niimg.py
@@ -226,14 +226,16 @@ def _repr_niimgs(niimgs, shorten=True):
# Collection case
if isinstance(niimgs, collections.abc.Iterable):
if shorten and len(niimgs) > list_max_display:
return "[%s]" % ",\n ...\n ".join(
tmp = ",\n ...\n ".join(
_repr_niimgs(niimg, shorten=shorten)
for niimg in [niimgs[0], niimgs[-1]]
)
return f"[{tmp}]"
elif len(niimgs) > list_max_display:
return "[%s]" % ",\n ".join(
tmp = ",\n ".join(
_repr_niimgs(niimg, shorten=shorten) for niimg in niimgs
)
return f"[{tmp}]"
else:
tmp = [_repr_niimgs(niimg, shorten=shorten) for niimg in niimgs]
return f"[{', '.join(tmp)}]"
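Reviewer note: the detour through `tmp` is not just cosmetic. Before Python 3.12, an f-string expression may not contain a backslash, so inlining the `",\n ".join(...)` inside the braces would be a SyntaxError on the Python versions nilearn supported at the time; binding it first sidesteps that. Sketch:

```python
reprs = ["img_a.nii.gz", "img_b.nii.gz"]

# f"[{',\n '.join(reprs)}]" is a SyntaxError before Python 3.12:
# f-string expressions could not contain a backslash.
tmp = ",\n ".join(reprs)
print(f"[{tmp}]")
# [img_a.nii.gz,
#  img_b.nii.gz]
```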
15 changes: 9 additions & 6 deletions nilearn/_utils/niimg_conversions.py
@@ -51,7 +51,7 @@
"""
raise_error = kwargs.pop("raise_error", False)
for i, arg in enumerate(args):
kwargs["img_#%i" % i] = arg
kwargs[f"img_#{i}"] = arg
errors = []
for (a_name, a_img), (b_name, b_img) in itertools.combinations(
kwargs.items(), 2
@@ -64,7 +64,10 @@
raise ValueError(
"Following field of view errors were detected:\n"
+ "\n".join(
["- %s and %s do not have the same %s" % e for e in errors]
[
f"- {e[0]} and {e[1]} do not have the same {e[2]}"
for e in errors
]
)
)
return len(errors) == 0
@@ -195,7 +198,7 @@
img_name = f" ({niimg}) "

exc.args = (
"Error encountered while loading image #%d%s" % (i, img_name),
f"Error encountered while loading image #{i}{img_name}",
) + exc.args
raise

@@ -504,7 +507,7 @@
if ndim not in [3, 4]:
raise TypeError(
"Concatenated images must be 3D or 4D. You gave a "
"list of %dD images" % ndim
f"list of {ndim}D images"
)

lengths = [first_niimg.shape[-1] if ndim == 4 else 1]
@@ -537,9 +540,9 @@
):
if verbose > 0:
if isinstance(niimg, str):
nii_str = "image " + niimg
nii_str = f"image {niimg}"

else:
nii_str = "image #" + str(index)
nii_str = f"image #{index}"

print(f"Concatenating {index + 1}: {nii_str}")

data[..., cur_4d_index : cur_4d_index + size] = _get_data(niimg)
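Reviewer note: the `_check_same_fov` hunk shows one real asymmetry between the two styles: `%` can consume a whole tuple in one go, while an f-string has to index or unpack it explicitly. Self-contained check:

```python
errors = [("img_#0", "img_#1", "affine")]

# Old style: the tuple e fills all three %s slots at once.
old = ["- %s and %s do not have the same %s" % e for e in errors]

# f-strings: index the tuple (as the patch does) or unpack it first.
new = [f"- {e[0]} and {e[1]} do not have the same {e[2]}" for e in errors]
unpacked = [f"- {a} and {b} do not have the same {c}" for a, b, c in errors]

assert old == new == unpacked
```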
9 changes: 4 additions & 5 deletions nilearn/_utils/testing.py
@@ -93,10 +93,9 @@ def assert_memory_less_than(

if mem_used > memory_limit * (1 + tolerance):
raise ValueError(
"Memory consumption measured ({:.2f} MiB) is "
"greater than required memory limit ({} MiB) within "
"accepted tolerance ({:.2f}%)."
"".format(mem_used, memory_limit, tolerance * 100)
f"Memory consumption measured ({mem_used:.2f} MiB) is "
f"greater than required memory limit ({memory_limit} MiB) within "
f"accepted tolerance ({tolerance * 100:.2f}%)."
)

# We are confident in memory_profiler measures above 100MiB.
@@ -185,7 +184,7 @@ def write_tmp_imgs(*imgs, **kwargs):
del img

if use_wildcards:
-yield prefix + "*" + suffix
+yield f"{prefix}*{suffix}"
else:
if len(imgs) == 1:
yield filenames[0]
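Reviewer note: format specs carry over to f-strings unchanged, and any expression may precede the colon, which is what makes `{tolerance * 100:.2f}` above work. Illustration with invented values:

```python
mem_used, memory_limit, tolerance = 123.456, 100, 0.05
print(
    f"Memory consumption measured ({mem_used:.2f} MiB) is "
    f"greater than required memory limit ({memory_limit} MiB) within "
    f"accepted tolerance ({tolerance * 100:.2f}%)."
)
# Memory consumption measured (123.46 MiB) is greater than required
# memory limit (100 MiB) within accepted tolerance (5.00%).
```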
10 changes: 10 additions & 0 deletions nilearn/_utils/tests/test_exceptions.py
@@ -0,0 +1,10 @@
import re

from nilearn._utils.exceptions import DimensionError


def test_dimension_error_message():
error = DimensionError(file_dimension=3, required_dimension=5)
error.increment_stack_counter()
error.increment_stack_counter()
assert re.match("^.*7D.*list of list of 3D images.*5D.*$", error.message)
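Reviewer note: for reference, the message this regex runs against should look roughly like the following, per the f-string logic in `exceptions.py` above (5D required plus 2 stack increments gives 7D expected):

```python
from nilearn._utils.exceptions import DimensionError

error = DimensionError(file_dimension=3, required_dimension=5)
error.increment_stack_counter()
error.increment_stack_counter()
print(error.message)
# Input data has incompatible dimensionality: Expected dimension is 7D
# and you provided a list of list of 3D images (5D). See
# https://nilearn.github.io/stable/manipulating_images/input_output.html.
```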
12 changes: 6 additions & 6 deletions nilearn/datasets/atlas.py
@@ -786,10 +786,10 @@
for idx, name in enumerate(names):
if name.endswith("L"):
names[idx] = re.sub(r" L$", "", name)
names[idx] = "Left " + name
names[idx] = f"Left {name}"

if name.endswith("R"):
names[idx] = re.sub(r" R$", "", name)
names[idx] = "Right " + name
names[idx] = f"Right {name}"


new_label = 0
new_atlas = atlas_data.copy()
@@ -808,10 +808,10 @@
new_names.append(name)
continue
new_atlas[left_atlas == label] = new_label
new_names.append("Left " + name)
new_names.append(f"Left {name}")
new_label += 1
new_atlas[right_atlas == label] = new_label
new_names.append("Right " + name)
new_names.append(f"Right {name}")
return new_atlas, new_names


@@ -1267,7 +1267,7 @@
f"Please choose one among {versions}."
)

dataset_name = "aal_" + version
dataset_name = f"aal_{version}"
opts = {"uncompress": True}

if url is None:
@@ -1428,7 +1428,7 @@
dataset_name, data_dir=data_dir, verbose=verbose
)

-folder_name = "template_cambridge_basc_multiscale_nii_" + version
+folder_name = f"template_cambridge_basc_multiscale_nii_{version}"
fdescr = _get_dataset_descr(dataset_name)

if resolution:
8 changes: 4 additions & 4 deletions nilearn/datasets/func.py
@@ -1151,7 +1151,7 @@ def fetch_abide_pcp(
# Fetch the phenotypic file and load it
csv = "Phenotypic_V1_0b_preprocessed1.csv"
path_csv = _fetch_files(
-data_dir, [(csv, url + "/" + csv, {})], verbose=verbose
+data_dir, [(csv, f"{url}/{csv}", {})], verbose=verbose
)[0]

# Note: the phenotypic file contains string that contains comma which mess
@@ -1160,7 +1160,7 @@ def fetch_abide_pcp(
# done simply with pandas but we don't want such dependency ATM
# pheno = pandas.read_csv(path_csv).to_records()
with open(path_csv) as pheno_f:
pheno = ["i" + pheno_f.readline()]
pheno = [f"i{pheno_f.readline()}"]

# This regexp replaces commas between double quotes
pheno.extend(
@@ -1202,9 +1202,9 @@ def fetch_abide_pcp(
for file_id in file_ids:
file_ = [
(
file_id + "_" + derivative + ext,
f"{file_id}_{derivative}{ext}",
"/".join(
[url, derivative, file_id + "_" + derivative + ext]
[url, derivative, f"{file_id}_{derivative}{ext}"]
),
{},
)
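Reviewer note: `pheno = [f"i{pheno_f.readline()}"]` above also shows that f-string braces accept arbitrary expressions, including calls with side effects. Toy reproduction with invented CSV content:

```python
import io

# Stand-in for the phenotypic CSV; the contents here are made up.
pheno_f = io.StringIO("SUB_ID,SITE_ID\n50003,PITT\n")

pheno = [f"i{pheno_f.readline()}"]  # reads and prefixes in one step
print(pheno)  # ['iSUB_ID,SITE_ID\n']
```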
7 changes: 4 additions & 3 deletions nilearn/datasets/neurovault.py
@@ -675,8 +675,9 @@ def __eq__(self, other):
)

def __repr__(self):
return "{}(pattern={!r}, flags={})".format(
self.__class__.__name__, self.pattern_, self.flags_
return (
f"{self.__class__.__name__}(pattern={self.pattern_!r}, "
f"flags={self.flags_})"
)


@@ -1342,7 +1343,7 @@ def _add_absolute_paths(root_dir, metadata, force=True):
for name, value in metadata.items():
match = re.match(r"(.*)relative_path(.*)", name)
if match is not None:
-abs_name = "{}absolute_path{}".format(*match.groups())
+abs_name = f"{match.groups()[0]}absolute_path{match.groups()[1]}"
absolute_paths[abs_name] = os.path.join(root_dir, value)
if not absolute_paths:
return metadata
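Reviewer note: the `__repr__` hunk uses the `!r` conversion, which is the f-string spelling of the old `{!r}` placeholder. A stripped-down stand-in for neurovault's pattern wrapper (class and attribute names invented for illustration):

```python
class _Pattern:
    """Toy stand-in for the neurovault pattern filter."""

    def __init__(self, pattern, flags=0):
        self.pattern_ = pattern
        self.flags_ = flags

    def __repr__(self):
        # {...!r} applies repr(), so the pattern keeps its quotes.
        return (
            f"{self.__class__.__name__}(pattern={self.pattern_!r}, "
            f"flags={self.flags_})"
        )

print(_Pattern("neuro.*"))  # _Pattern(pattern='neuro.*', flags=0)
```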
16 changes: 7 additions & 9 deletions nilearn/datasets/tests/test_atlas.py
@@ -155,7 +155,7 @@ def test_fetch_atlas_source(tmp_path, request_mocker):


def _write_sample_atlas_metadata(ho_dir, filename, is_symm):
-with open(os.path.join(ho_dir, filename + ".xml"), "w") as dm:
+with open(os.path.join(ho_dir, f"{filename}.xml"), "w") as dm:
if not is_symm:
dm.write(
"<?xml version='1.0' encoding='us-ascii'?>\n"
@@ -397,7 +397,7 @@ def _destrieux_data():
atlas = np.random.randint(0, 10, (10, 10, 10), dtype="int32")
atlas_img = nibabel.Nifti1Image(atlas, np.eye(4))
labels = "\n".join([f"{idx},label {idx}" for idx in range(10)])
labels = "index,name\n" + labels
labels = f"index,name\n{labels}"
for lat in ["_lateralized", ""]:
lat_data = {
f"destrieux2009_rois_labels{lat}.csv": labels,
@@ -631,8 +631,7 @@ def test_fetch_atlas_basc_multiscale_2015(tmp_path, request_mocker):
dataset_name = "basc_multiscale_2015"
name_sym = "template_cambridge_basc_multiscale_nii_sym"
basenames_sym = [
"template_cambridge_basc_multiscale_sym_" + key + ".nii.gz"
for key in keys
f"template_cambridge_basc_multiscale_sym_{key}.nii.gz" for key in keys
]
for key, basename_sym in zip(keys, basenames_sym):
assert data_sym[key] == str(
@@ -641,8 +640,7 @@

name_asym = "template_cambridge_basc_multiscale_nii_asym"
basenames_asym = [
"template_cambridge_basc_multiscale_asym_" + key + ".nii.gz"
for key in keys
f"template_cambridge_basc_multiscale_asym_{key}.nii.gz" for key in keys
]
for key, basename_asym in zip(keys, basenames_asym):
assert data_asym[key] == str(
@@ -742,9 +740,9 @@ def test_fetch_atlas_talairach(tmp_path, request_mocker):


def test_fetch_atlas_pauli_2017(tmp_path, request_mocker):
-labels = pd.DataFrame(
-{"label": list(map("label_{}".format, range(16)))}
-).to_csv(sep="\t", header=False)
+labels = pd.DataFrame({"label": [f"label_{i}" for i in range(16)]}).to_csv(
+sep="\t", header=False
+)
det_atlas = data_gen.generate_labeled_regions((7, 6, 5), 16)
prob_atlas, _ = data_gen.generate_maps((7, 6, 5), 16)
request_mocker.url_mapping["*osf.io/6qrcb/*"] = labels
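Reviewer note: the test tweaks above replace the bound-method-plus-`map` idiom with comprehensions; both produce the same list, the comprehension just reads more directly:

```python
# "label_{}".format is a bound method, so map() can call it per element.
via_map = list(map("label_{}".format, range(3)))
via_fstring = [f"label_{i}" for i in range(3)]
assert via_map == via_fstring == ["label_0", "label_1", "label_2"]
```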
2 changes: 1 addition & 1 deletion nilearn/datasets/tests/test_struct.py
@@ -98,7 +98,7 @@ def _make_oasis_data(dartel=True):
n_subjects = 457
prefix = "mwrc" if dartel else "mwc"
ids = pd.DataFrame(
{"ID": list(map("OAS1_{:04}".format, range(n_subjects)))}
{"ID": [f"OAS1_{i:04}" for i in range(n_subjects)]}
).to_csv(index=False, sep="\t")
data = {"oasis_cross-sectional.csv": ids, "data_usage_agreement.txt": ""}
path_pattern = str(
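Reviewer note: the OASIS ID change keeps the zero-padding spec: `{i:04}` pads to width 4 with zeros, exactly like the old `"OAS1_{:04}".format(i)`:

```python
ids = [f"OAS1_{i:04}" for i in range(3)]
print(ids)  # ['OAS1_0000', 'OAS1_0001', 'OAS1_0002']
```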