Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

MAINT: revise style of all files (except for workflows) #839

Merged
merged 1 commit on Apr 5, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
21 changes: 8 additions & 13 deletions mriqc/__about__.py
Expand Up @@ -3,18 +3,13 @@
"""MRIQC."""

from ._version import get_versions
__version__ = get_versions()['version']
del get_versions

__copyright__ = ('Copyright 2009, Center for Reproducible Neuroscience, '
'Stanford University')
__credits__ = 'Oscar Esteban'
__download__ = ('https://github.com/poldracklab/mriqc/archive/'
'{}.tar.gz'.format(__version__))
__version__ = get_versions()["version"]
del get_versions

__all__ = [
'__version__',
'__copyright__',
'__credits__',
'__download__'
]
__copyright__ = (
"Copyright 2020, Center for Reproducible Neuroscience, Stanford University"
)
__credits__ = "Oscar Esteban"
__download__ = f"https://github.com/poldracklab/mriqc/archive/{__version__}.tar.gz"
__all__ = ["__version__", "__copyright__", "__credits__", "__download__"]
6 changes: 3 additions & 3 deletions mriqc/__init__.py
Expand Up @@ -14,7 +14,7 @@


__all__ = [
'__copyright__',
'__credits__',
'__version__',
"__copyright__",
"__credits__",
"__version__",
]
4 changes: 1 addition & 3 deletions mriqc/_warnings.py
Expand Up @@ -12,9 +12,7 @@ def _warn(message, category=None, stacklevel=1, source=None):
category = type(category).__name__
category = category.replace("type", "WARNING")

logging.getLogger("py.warnings").warning(
f"{category or 'WARNING'}: {message}"
)
logging.getLogger("py.warnings").warning(f"{category or 'WARNING'}: {message}")


def _showwarning(message, category, filename, lineno, file=None, line=None):
Expand Down
95 changes: 50 additions & 45 deletions mriqc/bin/abide2bids.py
Expand Up @@ -25,30 +25,34 @@

def main():
"""Entry point"""
parser = ArgumentParser(description='ABIDE2BIDS downloader',
formatter_class=RawTextHelpFormatter)
g_input = parser.add_argument_group('Inputs')
g_input.add_argument('-i', '--input-abide-catalog', action='store',
required=True)
g_input.add_argument('-n', '--dataset-name', action='store',
default='ABIDE Dataset')
g_input.add_argument('-u', '--nitrc-user', action='store',
default=os.getenv('NITRC_USER'))
g_input.add_argument('-p', '--nitrc-password', action='store',
default=os.getenv('NITRC_PASSWORD'))

g_outputs = parser.add_argument_group('Outputs')
g_outputs.add_argument('-o', '--output-dir', action='store',
default='ABIDE-BIDS')
parser = ArgumentParser(
description="ABIDE2BIDS downloader", formatter_class=RawTextHelpFormatter
)
g_input = parser.add_argument_group("Inputs")
g_input.add_argument("-i", "--input-abide-catalog", action="store", required=True)
g_input.add_argument(
"-n", "--dataset-name", action="store", default="ABIDE Dataset"
)
g_input.add_argument(
"-u", "--nitrc-user", action="store", default=os.getenv("NITRC_USER")
)
g_input.add_argument(
"-p", "--nitrc-password", action="store", default=os.getenv("NITRC_PASSWORD")
)

g_outputs = parser.add_argument_group("Outputs")
g_outputs.add_argument("-o", "--output-dir", action="store", default="ABIDE-BIDS")

opts = parser.parse_args()

if opts.nitrc_user is None or opts.nitrc_password is None:
raise RuntimeError('NITRC user and password are required')
raise RuntimeError("NITRC user and password are required")

dataset_desc = {'BIDSVersion': '1.0.0rc3',
'License': 'CC Attribution-NonCommercial-ShareAlike 3.0 Unported',
'Name': opts.dataset_name}
dataset_desc = {
"BIDSVersion": "1.0.0rc3",
"License": "CC Attribution-NonCommercial-ShareAlike 3.0 Unported",
"Name": opts.dataset_name,
}

out_dir = op.abspath(opts.output_dir)
try:
Expand All @@ -57,19 +61,18 @@ def main():
if exc.errno != errno.EEXIST:
raise exc

with open(op.join(out_dir, 'dataset_description.json'), 'w') as dfile:
with open(op.join(out_dir, "dataset_description.json"), "w") as dfile:
json.dump(dataset_desc, dfile)

catalog = et.parse(opts.input_abide_catalog).getroot()
urls = [el.get('URI') for el in catalog.iter() if el.get('URI') is not None]
urls = [el.get("URI") for el in catalog.iter() if el.get("URI") is not None]

pool = Pool()
args_list = [(url, opts.nitrc_user, opts.nitrc_password, out_dir)
for url in urls]
args_list = [(url, opts.nitrc_user, opts.nitrc_password, out_dir) for url in urls]
res = pool.map(fetch, args_list)

tsv_data = np.array([('subject_id', 'site_name')] + res)
np.savetxt(op.join(out_dir, 'participants.tsv'), tsv_data, fmt='%s', delimiter='\t')
tsv_data = np.array([("subject_id", "site_name")] + res)
np.savetxt(op.join(out_dir, "participants.tsv"), tsv_data, fmt="%s", delimiter="\t")


def fetch(args):
Expand All @@ -86,55 +89,57 @@ def fetch(args):
else:
out_dir = op.abspath(out_dir)

pkg_id = [u[9:] for u in url.split('/') if u.startswith('NITRC_IR_')][0]
sub_file = op.join(tmpdir, '%s.zip' % pkg_id)
pkg_id = [u[9:] for u in url.split("/") if u.startswith("NITRC_IR_")][0]
sub_file = op.join(tmpdir, "%s.zip" % pkg_id)

cmd = ['curl', '-s', '-u', '%s:%s' % (user, password), '-o', sub_file, url]
cmd = ["curl", "-s", "-u", "%s:%s" % (user, password), "-o", sub_file, url]
sp.check_call(cmd)
sp.check_call(['unzip', '-qq', '-d', tmpdir, '-u', sub_file])
sp.check_call(["unzip", "-qq", "-d", tmpdir, "-u", sub_file])

abide_root = op.join(tmpdir, 'ABIDE')
abide_root = op.join(tmpdir, "ABIDE")
files = []
for root, path, fname in os.walk(abide_root):
if fname and (fname[0].endswith('nii') or fname[0].endswith('nii.gz')):
if fname and (fname[0].endswith("nii") or fname[0].endswith("nii.gz")):
if path:
root = op.join(root, path[0])
files.append(op.join(root, fname[0]))

site_name, sub_str = files[0][len(abide_root) + 1:].split('/')[0].split('_')
subject_id = 'sub-' + sub_str
site_name, sub_str = files[0][len(abide_root) + 1:].split("/")[0].split("_")
subject_id = "sub-" + sub_str

for i in files:
ext = '.nii.gz'
if i.endswith('.nii'):
ext = '.nii'
if 'mprage' in i:
bids_dir = op.join(out_dir, subject_id, 'anat')
ext = ".nii.gz"
if i.endswith(".nii"):
ext = ".nii"
if "mprage" in i:
bids_dir = op.join(out_dir, subject_id, "anat")
try:
os.makedirs(bids_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise exc
shutil.copy(i, op.join(bids_dir, subject_id + '_T1w' + ext))
shutil.copy(i, op.join(bids_dir, subject_id + "_T1w" + ext))

if 'rest' in i:
bids_dir = op.join(out_dir, subject_id, 'func')
if "rest" in i:
bids_dir = op.join(out_dir, subject_id, "func")
try:
os.makedirs(bids_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise exc
shutil.copy(i, op.join(bids_dir, subject_id + '_rest_bold' + ext))
shutil.copy(i, op.join(bids_dir, subject_id + "_rest_bold" + ext))

shutil.rmtree(tmpdir, ignore_errors=True, onerror=_myerror)

print('Successfully processed subject %s from site %s' % (subject_id[4:], site_name))
print(
"Successfully processed subject %s from site %s" % (subject_id[4:], site_name)
)
return subject_id[4:], site_name


def _myerror(msg):
print('WARNING: Error deleting temporal files: %s' % msg)
print("WARNING: Error deleting temporal files: %s" % msg)


if __name__ == '__main__':
if __name__ == "__main__":
main()
79 changes: 54 additions & 25 deletions mriqc/bin/dfcheck.py
Expand Up @@ -20,15 +20,34 @@
def main():
"""Entry point"""
from ..classifier.data import read_iqms
parser = ArgumentParser(description='compare two pandas dataframes',
formatter_class=RawTextHelpFormatter)
g_input = parser.add_argument_group('Inputs')
g_input.add_argument('-i', '--input-csv', action='store', type=Path,
required=True, help='input data frame')
g_input.add_argument('-r', '--reference-csv', action='store', type=Path,
required=True, help='reference dataframe')
g_input.add_argument('--tolerance', type=float, default=1.e-5,
help='relative tolerance for comparison')

parser = ArgumentParser(
description="compare two pandas dataframes",
formatter_class=RawTextHelpFormatter,
)
g_input = parser.add_argument_group("Inputs")
g_input.add_argument(
"-i",
"--input-csv",
action="store",
type=Path,
required=True,
help="input data frame",
)
g_input.add_argument(
"-r",
"--reference-csv",
action="store",
type=Path,
required=True,
help="reference dataframe",
)
g_input.add_argument(
"--tolerance",
type=float,
default=1.0e-5,
help="relative tolerance for comparison",
)

opts = parser.parse_args()

Expand All @@ -39,17 +58,19 @@ def main():
tst_df.set_index(tst_bids)

if sorted(ref_bids) != sorted(tst_bids):
sys.exit('Dataset has different BIDS bits w.r.t. reference')
sys.exit("Dataset has different BIDS bits w.r.t. reference")

if sorted(ref_names) != sorted(tst_names):
sys.exit('Output CSV file changed number of columns')
sys.exit("Output CSV file changed number of columns")

ref_df = ref_df.sort_values(by=ref_bids)
tst_df = tst_df.sort_values(by=tst_bids)

if len(ref_df) != len(tst_df):
print('Input datases have different lengths (input %d, reference %d).' % (
len(ref_df), len(tst_df)))
print(
"Input datases have different lengths (input %d, reference %d)."
% (len(ref_df), len(tst_df))
)
tst_rows = tst_df[tst_bids]
ref_rows = ref_df[ref_bids]

Expand All @@ -58,7 +79,9 @@ def main():
tst_keep = np.sum(tst_rows.isin(ref_rows).values.ravel().tolist())
print(tst_keep)

diff = ~np.isclose(ref_df[ref_names].values, tst_df[tst_names].values, rtol=opts.tolerance)
diff = ~np.isclose(
ref_df[ref_names].values, tst_df[tst_names].values, rtol=opts.tolerance
)
if np.any(diff):
# ne_stacked = pd.DataFrame(data=diff, columns=ref_names).stack()
# ne_stacked = np.isclose(ref_df[ref_names], tst_df[ref_names]).stack()
Expand All @@ -69,26 +92,32 @@ def main():
changed_to = tst_df[ref_names].values[difference_locations]
cols = [ref_names[v] for v in difference_locations[1]]
bids_df = ref_df.loc[difference_locations[0], ref_bids].reset_index()
chng_df = pd.DataFrame({'iqm': cols, 'from': changed_from, 'to': changed_to})
chng_df = pd.DataFrame({"iqm": cols, "from": changed_from, "to": changed_to})
table = pd.concat([bids_df, chng_df], axis=1)
print(table[ref_bids + ['iqm', 'from', 'to']].to_string(index=False))
print(table[ref_bids + ["iqm", "from", "to"]].to_string(index=False))

corr = pd.DataFrame()
corr['iqms'] = ref_names
corr['cc'] = [float(np.corrcoef(ref_df[[var]].values.ravel(),
tst_df[[var]].values.ravel(),
rowvar=False)[0, 1])
for var in ref_names]
corr["iqms"] = ref_names
corr["cc"] = [
float(
np.corrcoef(
ref_df[[var]].values.ravel(),
tst_df[[var]].values.ravel(),
rowvar=False,
)[0, 1]
)
for var in ref_names
]

if np.any(corr.cc < 0.95):
print('IQMs with Pearson correlation < 0.95:')
print("IQMs with Pearson correlation < 0.95:")
print(corr[corr.cc < 0.95])
sys.exit('Output CSV file changed one or more values')
sys.exit("Output CSV file changed one or more values")
else:
print('All IQMs show a Pearson correlation >= 0.95')
print("All IQMs show a Pearson correlation >= 0.95")

sys.exit(0)


if __name__ == '__main__':
if __name__ == "__main__":
main()