From 3cbf14726f59aec1c1f82784da6b006542899f1a Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Sat, 4 May 2024 23:44:42 +0200
Subject: [PATCH] STY: Apply ruff/pyupgrade rule UP031 (#3280)

New in [ruff v0.4.2](https://github.com/astral-sh/ruff/releases/tag/v0.4.2).

---------

Co-authored-by: Chris Markiewicz
---
 .maint/paper_author_list.py           | 23 +++++++++++++----------
 .maint/update_authors.py              | 24 +++++++++++++-----------
 .maint/update_zenodo.py               |  7 ++++---
 fmriprep/cli/parser.py                | 12 ++++++------
 fmriprep/cli/run.py                   |  2 +-
 fmriprep/cli/workflow.py              |  4 +---
 fmriprep/utils/bids.py                |  4 ++--
 fmriprep/workflows/base.py            |  2 +-
 fmriprep/workflows/bold/resampling.py |  2 +-
 9 files changed, 42 insertions(+), 38 deletions(-)

diff --git a/.maint/paper_author_list.py b/.maint/paper_author_list.py
index a6300c635..c10bd5c71 100755
--- a/.maint/paper_author_list.py
+++ b/.maint/paper_author_list.py
@@ -46,23 +46,26 @@ def _aslist(inlist):
     ]
 
     print(
-        'Some people made commits, but are missing in .maint/ '
-        'files: %s.' % ', '.join(unmatched),
+        'Some people made commits, but are missing in .maint/ files: {}.'.format(
+            ', '.join(unmatched)
+        ),
         file=sys.stderr,
     )
 
     print('Authors (%d):' % len(author_matches))
     print(
-        '%s.'
-        % '; '.join(
-            [
-                '{} \\ :sup:`{}`\\ '.format(i['name'], idx)
-                for i, idx in zip(author_matches, aff_indexes, strict=False)
-            ]
+        '{}.'.format(
+            '; '.join(
+                [
+                    rf'{i["name"]} \ :sup:`{idx}`\ '
+                    for i, idx in zip(author_matches, aff_indexes, strict=False)
+                ]
+            )
         )
     )
 
     print(
-        '\n\nAffiliations:\n%s'
-        % '\n'.join([f'{i + 1: >2}. {a}' for i, a in enumerate(affiliations)])
+        '\n\nAffiliations:\n{}'.format(
+            '\n'.join([f'{i + 1: >2}. {a}' for i, a in enumerate(affiliations)])
+        )
     )
diff --git a/.maint/update_authors.py b/.maint/update_authors.py
index 263f88631..55e526e92 100755
--- a/.maint/update_authors.py
+++ b/.maint/update_authors.py
@@ -187,7 +187,7 @@ def zenodo(
     misses = set(miss_creators).intersection(miss_contributors)
     if misses:
         print(
-            "Some people made commits, but are missing in .maint/ " f"files: {', '.join(misses)}",
+            f"Some people made commits, but are missing in .maint/ files: {', '.join(misses)}",
             file=sys.stderr,
         )
 
@@ -206,7 +206,7 @@ def zenodo(
         if isinstance(creator['affiliation'], list):
             creator['affiliation'] = creator['affiliation'][0]
 
-    Path(zenodo_file).write_text('%s\n' % json.dumps(zenodo, indent=2))
+    Path(zenodo_file).write_text(f'{json.dumps(zenodo, indent=2)}\n')
 
 
 @cli.command()
@@ -268,24 +268,26 @@ def _aslist(value):
 
     if misses:
         print(
-            "Some people made commits, but are missing in .maint/ " f"files: {', '.join(misses)}",
+            f"Some people made commits, but are missing in .maint/ files: {', '.join(misses)}",
             file=sys.stderr,
         )
 
     print('Authors (%d):' % len(hits))
     print(
-        '%s.'
-        % '; '.join(
-            [
-                '{} \\ :sup:`{}`\\ '.format(i['name'], idx)
-                for i, idx in zip(hits, aff_indexes, strict=False)
-            ]
+        '{}.'.format(
+            '; '.join(
+                [
+                    rf'{i["name"]} \ :sup:`{idx}`\ '
+                    for i, idx in zip(hits, aff_indexes, strict=False)
+                ]
+            )
         )
     )
 
     print(
-        '\n\nAffiliations:\n%s'
-        % '\n'.join([f'{i + 1: >2}. {a}' for i, a in enumerate(affiliations)])
+        '\n\nAffiliations:\n{}'.format(
+            '\n'.join([f'{i + 1: >2}. {a}' for i, a in enumerate(affiliations)])
+        )
     )
 
 
diff --git a/.maint/update_zenodo.py b/.maint/update_zenodo.py
index 5b30707b8..a31f535c4 100755
--- a/.maint/update_zenodo.py
+++ b/.maint/update_zenodo.py
@@ -145,8 +145,9 @@ def loads_contributors(s):
     zenodo['contributors'] = zen_contributors
 
     print(
-        'Some people made commits, but are missing in .maint/ '
-        'files: %s.' % ', '.join(set(miss_creators).intersection(miss_contributors)),
+        'Some people made commits, but are missing in .maint/ files: {}.'.format(
+            ', '.join(set(miss_creators).intersection(miss_contributors))
+        ),
         file=sys.stderr,
     )
 
@@ -162,4 +163,4 @@ def loads_contributors(s):
         if isinstance(creator['affiliation'], list):
             creator['affiliation'] = creator['affiliation'][0]
 
-    zenodo_file.write_text('%s\n' % json.dumps(zenodo, indent=2, ensure_ascii=False))
+    zenodo_file.write_text(f'{json.dumps(zenodo, indent=2, ensure_ascii=False)}\n')
diff --git a/fmriprep/cli/parser.py b/fmriprep/cli/parser.py
index 61922b6ee..63b4cd9d4 100644
--- a/fmriprep/cli/parser.py
+++ b/fmriprep/cli/parser.py
@@ -881,10 +881,9 @@ def parse_args(args=None, namespace=None):
     if output_dir == bids_dir:
         parser.error(
             'The selected output folder is the same as the input BIDS folder. '
-            'Please modify the output path (suggestion: %s).'
-            % bids_dir
-            / 'derivatives'
-            / ('fmriprep-%s' % version.split('+')[0])
+            'Please modify the output path (suggestion: {}).'.format(
+                bids_dir / 'derivatives' / f'fmriprep-{version.split("+")[0]}'
+            )
         )
 
     if bids_dir in work_dir.parents:
@@ -924,8 +923,9 @@ def parse_args(args=None, namespace=None):
     missing_subjects = participant_label - set(all_subjects)
     if missing_subjects:
         parser.error(
-            'One or more participant labels were not found in the BIDS directory: '
-            '%s.' % ', '.join(missing_subjects)
+            'One or more participant labels were not found in the BIDS directory: {}.'.format(
+                ', '.join(missing_subjects)
+            )
         )
 
     config.execution.participant_label = sorted(participant_label)
diff --git a/fmriprep/cli/run.py b/fmriprep/cli/run.py
index af4ffad4b..6c07813d6 100644
--- a/fmriprep/cli/run.py
+++ b/fmriprep/cli/run.py
@@ -147,7 +147,7 @@ def main():
 
     config.loggers.workflow.log(
         15,
-        '\n'.join(['fMRIPrep config:'] + ['\t\t%s' % s for s in config.dumps().splitlines()]),
+        '\n'.join(['fMRIPrep config:'] + [f'\t\t{s}' for s in config.dumps().splitlines()]),
     )
     config.loggers.workflow.log(25, 'fMRIPrep started!')
     errno = 1  # Default is error exit unless otherwise set
diff --git a/fmriprep/cli/workflow.py b/fmriprep/cli/workflow.py
index 5d62545e6..35140221a 100644
--- a/fmriprep/cli/workflow.py
+++ b/fmriprep/cli/workflow.py
@@ -173,9 +173,7 @@ def build_boilerplate(config_file, workflow):
     config.load(config_file)
     logs_path = config.execution.fmriprep_dir / 'logs'
     boilerplate = workflow.visit_desc()
-    citation_files = {
-        ext: logs_path / ('CITATION.%s' % ext) for ext in ('bib', 'tex', 'md', 'html')
-    }
+    citation_files = {ext: logs_path / f'CITATION.{ext}' for ext in ('bib', 'tex', 'md', 'html')}
 
     if boilerplate:
         # To please git-annex users and also to guarantee consistency
diff --git a/fmriprep/utils/bids.py b/fmriprep/utils/bids.py
index ef58916cc..0bdc03ff1 100644
--- a/fmriprep/utils/bids.py
+++ b/fmriprep/utils/bids.py
@@ -66,7 +66,7 @@ def collect_derivatives(
         item = layout.get(return_type='filename', **query)
         if not item:
             continue
-        derivs_cache['%s_boldref' % k] = item[0] if len(item) == 1 else item
+        derivs_cache[f'{k}_boldref'] = item[0] if len(item) == 1 else item
 
     for xfm, q in spec['transforms'].items():
         query = {**q, **entities}
@@ -237,7 +237,7 @@ def validate_input_dir(exec_env, bids_dir, participant_label, need_T1w=True):
         ignored_subs = all_subs.difference(selected_subs)
         if ignored_subs:
             for sub in ignored_subs:
-                validator_config_dict['ignoredFiles'].append('/sub-%s/**' % sub)
+                validator_config_dict['ignoredFiles'].append(f'/sub-{sub}/**')
         with tempfile.NamedTemporaryFile(mode='w+', suffix='.json') as temp:
             temp.write(json.dumps(validator_config_dict))
             temp.flush()
diff --git a/fmriprep/workflows/base.py b/fmriprep/workflows/base.py
index d7387b5ee..78cd17a3c 100644
--- a/fmriprep/workflows/base.py
+++ b/fmriprep/workflows/base.py
@@ -84,7 +84,7 @@ def init_fmriprep_wf():
                 spaces=config.workflow.spaces.get_fs_spaces(),
                 minimum_fs_version='7.0.0',
             ),
-            name='fsdir_run_%s' % config.execution.run_uuid.replace('-', '_'),
+            name='fsdir_run_{}'.format(config.execution.run_uuid.replace('-', '_')),
             run_without_submitting=True,
         )
         if config.execution.fs_subjects_dir is not None:
diff --git a/fmriprep/workflows/bold/resampling.py b/fmriprep/workflows/bold/resampling.py
index 696318ff4..37558f68b 100644
--- a/fmriprep/workflows/bold/resampling.py
+++ b/fmriprep/workflows/bold/resampling.py
@@ -126,7 +126,7 @@ def init_bold_surf_wf(
 The BOLD time-series were resampled onto the following surfaces (FreeSurfer reconstruction
 nomenclature):
 {out_spaces}.
-""".format(out_spaces=', '.join(['*%s*' % s for s in surface_spaces]))
+""".format(out_spaces=', '.join([f'*{s}*' for s in surface_spaces]))
 
     inputnode = pe.Node(
         niu.IdentityInterface(
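
For readers unfamiliar with the rule: UP031 is ruff's port of pyupgrade's printf-string-formatting check, which flags %-style string interpolation in favor of newer formatting (str.format or f-strings). That is the mechanical rewrite applied in every hunk above. A minimal sketch of the before/after pattern, using hypothetical variable names rather than actual fMRIPrep code:

    # Flagged by UP031: printf-style % interpolation
    subjects = ['01', '02']
    print('Missing subjects: %s.' % ', '.join(subjects))

    # Accepted rewrites: str.format or an f-string
    print('Missing subjects: {}.'.format(', '.join(subjects)))
    print(f"Missing subjects: {', '.join(subjects)}.")

Note that some %-format calls, for example print('Authors (%d):' % len(hits)), appear above only as unchanged context lines, so this patch does not remove every % interpolation in the touched files.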