
Commit

add download function to CLI
This commit adds the download functionality to the CLI via dedicated arguments.
PeerHerholz committed Jan 13, 2023
1 parent 12668a7 commit 75fc9c3
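
For orientation, the new behaviour can be condensed into the following standalone sketch. This is an assumption-laden summary of the diff below, not the actual bids_bep16_conv entry point; only the two argument names, their options, and the download_HBN call are taken from the commit, everything else is illustrative.

# Minimal sketch of the download mode this commit introduces (condensed from the diff below).
import argparse
from pathlib import Path

from bids_bep16_conv.datasets import download_HBN

parser = argparse.ArgumentParser()
parser.add_argument('--download_dataset', choices=['HBN'])
parser.add_argument('--download_path', action='store', type=Path)
args = parser.parse_args()

if args.download_dataset == "HBN":
    # Download-only mode: no processing or conversion is run.
    if args.download_path is None:
        raise SystemExit("Please indicate a path where the dataset should be downloaded to.")
    download_HBN(args.download_path)

# Hypothetical invocation (entry-point name assumed):
#   run_bids_bep16_conv --download_dataset HBN --download_path /data/example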
Showing 2 changed files with 109 additions and 88 deletions.
2 changes: 1 addition & 1 deletion bids_bep16_conv/datasets.py
@@ -204,7 +204,7 @@ def download_HBN(dataset_path=None):
if dataset_path is None:
path = Path(os.curdir + '/bids_bep16_datasets/HBN/derivatives/QSIprep/sub-NDAREK918EC2/ses-HBNsiteSI/dwi')
else:
- path = Path(dataset_path + '/bids_bep16_datasets/HBN/derivatives/QSIprep/sub-NDAREK918EC2/ses-HBNsiteSI/dwi')
+ path = Path(str(dataset_path) + '/bids_bep16_datasets/HBN/derivatives/QSIprep/sub-NDAREK918EC2/ses-HBNsiteSI/dwi')

# in either case: check if path exists and if not, create it
if not path.exists():
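
The str() wrapper above matters because --download_path is now parsed with type=Path, so dataset_path can arrive as a pathlib.Path, and concatenating a Path with a str via '+' raises a TypeError. A small illustrative sketch (hypothetical values, not part of the commit):

from pathlib import Path

dataset_path = Path('/data')  # what argparse now hands over via type=Path

# path = Path(dataset_path + '/bids_bep16_datasets')       # TypeError: Path + str is not supported
path = Path(str(dataset_path) + '/bids_bep16_datasets')    # works: /data/bids_bep16_datasets

# More idiomatic pathlib alternative (a side note, not what the commit does):
path_alt = dataset_path / 'bids_bep16_datasets'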
195 changes: 108 additions & 87 deletions bids_bep16_conv/run_bids_bep16_conv.py
@@ -3,6 +3,7 @@
from pathlib import Path
from bids_bep16_conv.converters import dipy_dti, dipy_bep16, dipy_csd
from bids_bep16_conv.utils import validate_input_dir, create_dataset_description
from bids_bep16_conv.datasets import download_HBN
from bids import BIDSLayout


@@ -39,6 +40,14 @@ def get_parser():
help='Assume the input dataset is BIDS compliant and skip the validation \
(default: True).',
action="store_true")
parser.add_argument('--download_dataset',
help='Download example dataset. If this argument is set, no processing or '
'conversion will be performed; only the indicated example '
'dataset will be downloaded to the path indicated via --download_path.',
choices=['HBN'])
parser.add_argument('--download_path',
help='Path where the example dataset should be downloaded to.',
action='store', type=Path)
parser.add_argument('-v', '--version', action='version',
version='BIDS-App version {}'.format(__version__))

@@ -60,100 +69,112 @@ def run_bids_bep16_conv():
else:
exec_env = 'local'

# check if BIDS validation should be run or skipped
if args.skip_bids_validation:
print("Input data will not be checked for BIDS compliance.")
else:
print("Making sure the input data is BIDS compliant "
"(warnings can be ignored in most cases).")
validate_input_dir(exec_env, args.bids_dir, args.participant_label)
if not args.download_dataset:

# check if BIDS validation should be run or skipped
if args.skip_bids_validation:
print("Input data will not be checked for BIDS compliance.")
else:
print("Making sure the input data is BIDS compliant "
"(warnings can be ignored in most cases).")
validate_input_dir(exec_env, args.bids_dir, args.participant_label)

# initialize BIDS dataset layout
layout = BIDSLayout(args.bids_dir, derivatives=True)

# initialize empty subject list
subjects_to_analyze = []

# check analysis level and gather subject list
if args.analysis_level == "participant":
if args.participant_label:
subjects_to_analyze = args.participant_label
else:
print("No participant label indicated. Please do so.")
else:
subjects_to_analyze = layout.get(return_type='id', target='subject')

# initialize BIDS dataset layout
layout = BIDSLayout(args.bids_dir, derivatives=True)
# check if indicated participants are missing and if so, provide a list of them
list_part_prob = []
for part in subjects_to_analyze:
if part not in layout.get_subjects():
list_part_prob.append(part)
if len(list_part_prob) >= 1:
raise Exception("The participant(s) you indicated are not present in the BIDS dataset, please check again."
"This refers to:")
print(list_part_prob)

# initialize empty subject list
subjects_to_analyze = []
# gather sessions that should be analyzed and provide a respective update
sessions_to_analyze = layout.get(return_type='id', target='session')

# check analysis level and gather subject list
if args.analysis_level == "participant":
if args.participant_label:
subjects_to_analyze = args.participant_label
if not sessions_to_analyze:
print('Processing data from one session.')
else:
print("No participant label indicated. Please do so.")
else:
subjects_to_analyze = layout.get(return_type='id', target='subject')

# check if indicated participants are missing and if so, provide a list of them
list_part_prob = []
for part in subjects_to_analyze:
if part not in layout.get_subjects():
list_part_prob.append(part)
if len(list_part_prob) >= 1:
raise Exception("The participant(s) you indicated are not present in the BIDS dataset, please check again."
"This refers to:")
print(list_part_prob)

# gather sessions that should be analyzed and provide a respective update
sessions_to_analyze = layout.get(return_type='id', target='session')

if not sessions_to_analyze:
print('Processing data from one session.')
print('Processing data from %s sessions:' % str(len(sessions_to_analyze)))
print(sessions_to_analyze)

# loop over subjects and run analysis, as well as subsequent output conversion
for subject_label in subjects_to_analyze:
# get needed files and check if data from multiple sessions should be gathered
if not sessions_to_analyze:
list_dwi_nii_gz = layout.get(subject=subject_label, extension='nii.gz', suffix='dwi',
return_type='filename')
list_dwi_bval = layout.get(subject=subject_label, extension='bval', suffix='dwi',
return_type='filename')
list_dwi_bvec = layout.get(subject=subject_label, extension='bvec', suffix='dwi',
return_type='filename')
list_dwi_mask = layout.get(subject=subject_label, extension='nii.gz', suffix='mask',
return_type='filename')
else:
list_dwi_nii_gz = layout.get(subject=subject_label, extension='nii.gz', suffix='dwi',
return_type='filename', session=sessions_to_analyze)
list_dwi_bval = layout.get(subject=subject_label, extension='bval', suffix='dwi',
return_type='filename', session=sessions_to_analyze)
list_dwi_bvec = layout.get(subject=subject_label, extension='bvec', suffix='dwi',
return_type='filename', session=sessions_to_analyze)
list_dwi_mask = layout.get(subject=subject_label, extension='nii.gz', suffix='mask',
return_type='filename', session=sessions_to_analyze)

# loop over respective sets of files
for dwi_nii_gz, bval, bvec, mask in zip(list_dwi_nii_gz, list_dwi_bval, list_dwi_bvec, list_dwi_mask):
# check if software argument was provided, if not raise error and indicate problem
if args.software is None:
raise Exception("Please indicate the software you want to use for processing."
"For DIPY use: --software dipy and for mrtrix use: --software mrtrix.")
# check if analysis argument was provided, if not raise error and indicate problem
if args.analysis is None:
raise Exception("Please indicate the analysis you want to run."
"For DTI use: --analysis DTI")

# if dipy was selected, run DIPY and define output path in derivatives folder respectively
if args.software == "dipy":
outpath = str(args.out_dir) + '/dipy/sub-' + subject_label + '/dwi/'
if sessions_to_analyze:
ses_label = dwi_nii_gz.split('/')[-1].split('_')[1].split('-')[1]
outpath = str(args.out_dir) + '/dipy/sub-' + subject_label + '/ses-' + ses_label + '/dwi/'
# if DTI analysis should be run, setup and run dipy_dti function
if args.analysis == "DTI":
dipy_dti(dwi_nii_gz, bval, bvec,
mask, outpath)
dipy_bep16(dwi_nii_gz, bval, bvec,
mask, outpath, json_metadata=args.metadata)

# if CSD analysis should be run, setup and run dipy_csd function
elif args.analysis == "CSD":
dipy_csd(dwi_nii_gz, bval, bvec,
mask, outpath)
# create the respective dataset_description.json file for the run analysis
create_dataset_description("dipy", args.out_dir)

else:
print('Processing data from %s sessions:' % str(len(sessions_to_analyze)))
print(sessions_to_analyze)

# loop over subjects and run analysis, as well as subsequent output conversion
for subject_label in subjects_to_analyze:
# get needed files and check if data from multiple sessions should be gathered
if not sessions_to_analyze:
list_dwi_nii_gz = layout.get(subject=subject_label, extension='nii.gz', suffix='dwi',
return_type='filename')
list_dwi_bval = layout.get(subject=subject_label, extension='bval', suffix='dwi',
return_type='filename')
list_dwi_bvec = layout.get(subject=subject_label, extension='bvec', suffix='dwi',
return_type='filename')
list_dwi_mask = layout.get(subject=subject_label, extension='nii.gz', suffix='mask',
return_type='filename')
if args.download_path is None:
raise Exception("Please indicate a path where the dataset should be downloaded to.")
elif os.path.exists(args.download_path) is False:
raise Exception("If you use the Docker container, please make sure that the indicated path is accessible.")
else:
list_dwi_nii_gz = layout.get(subject=subject_label, extension='nii.gz', suffix='dwi',
return_type='filename', session=sessions_to_analyze)
list_dwi_bval = layout.get(subject=subject_label, extension='bval', suffix='dwi',
return_type='filename', session=sessions_to_analyze)
list_dwi_bvec = layout.get(subject=subject_label, extension='bvec', suffix='dwi',
return_type='filename', session=sessions_to_analyze)
list_dwi_mask = layout.get(subject=subject_label, extension='nii.gz', suffix='mask',
return_type='filename', session=sessions_to_analyze)

# loop over respective sets of files
for dwi_nii_gz, bval, bvec, mask in zip(list_dwi_nii_gz, list_dwi_bval, list_dwi_bvec, list_dwi_mask):
# check if software argument was provided, if not raise error and indicate problem
if args.software is None:
raise Exception("Please indicate the software you want to use for processing."
"For DIPY use: --software dipy and for mrtrix use: --software mrtrix.")
# check if analysis argument was provided, if not raise error and indicate problem
if args.analysis is None:
raise Exception("Please indicate the analysis you want to run."
"For DTI use: --analysis DTI")

# if dipy was selected, run DIPY and define output path in derivatives folder respectively
if args.software == "dipy":
outpath = str(args.out_dir) + '/dipy/sub-' + subject_label + '/dwi/'
if sessions_to_analyze:
ses_label = dwi_nii_gz.split('/')[-1].split('_')[1].split('-')[1]
outpath = str(args.out_dir) + '/dipy/sub-' + subject_label + '/ses-' + ses_label + '/dwi/'
# if DTI analysis should be run, setup and run dipy_dti function
if args.analysis == "DTI":
dipy_dti(dwi_nii_gz, bval, bvec,
mask, outpath)
dipy_bep16(dwi_nii_gz, bval, bvec,
mask, outpath, json_metadata=args.metadata)

# if CSD analysis should be run, setup and run dipy_csd function
elif args.analysis == "CSD":
dipy_csd(dwi_nii_gz, bval, bvec,
mask, outpath)
# create the respective dataset_description.json file for the run analysis
create_dataset_description("dipy", args.out_dir)
if args.download_dataset == "HBN":
download_HBN(args.download_path)


# run the CLI
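
Outside the CLI, the download helper can presumably be called directly as well; a hedged usage sketch based only on the default-path logic visible in the datasets.py hunk above (the explicit target path is hypothetical):

from pathlib import Path

from bids_bep16_conv.datasets import download_HBN

# Default: data is placed under ./bids_bep16_datasets/HBN/... relative to the
# current working directory (per the os.curdir branch shown above).
download_HBN()

# Explicit target; after this commit a pathlib.Path works as well as a plain string.
# download_HBN(Path('/data/example'))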
