-
Notifications
You must be signed in to change notification settings - Fork 290
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[RTM] Code revision: confounds, utilities, etc. #641
Changes from 9 commits
8c609e8
8dca00d
daf1734
8b42b1f
7686ce6
92999cd
0df1c06
0772732
e25ea45
b23a111
6aad57c
7818569
1c380c7
1240890
deedd25
faa5913
7acd91a
f3795d9
0c7b3fc
75e8c4a
ce7d2d0
891f2bc
eb0fe88
6b1b20a
ce27889
3852301
220ea51
648465b
1d93c78
bec30ed
beeed3e
36b818b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,182 @@ | ||
#!/usr/bin/env python | ||
# -*- coding: utf-8 -*- | ||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- | ||
# vi: set ft=python sts=4 ts=4 sw=4 et: | ||
|
||
import os | ||
import shutil | ||
import numpy as np | ||
import pandas as pd | ||
from niworkflows.nipype import logging | ||
from niworkflows.nipype.interfaces.base import ( | ||
traits, TraitedSpec, BaseInterfaceInputSpec, File, Directory | ||
) | ||
from niworkflows.interfaces.base import SimpleInterface | ||
|
||
LOGGER = logging.getLogger('interface') | ||
|
||
|
||
class GatherConfoundsInputSpec(BaseInterfaceInputSpec):
    # BUG FIX: none of these inputs may be mandatory. Each confound source
    # comes from an independent upstream step that can be skipped or fail
    # (e.g. ICA-AROMA is optional), in which case the corresponding input
    # stays undefined and is simply omitted from the combined file.
    signals = File(exists=True, desc='input signals')
    dvars = File(exists=True, desc='file containing DVARS')
    fd = File(exists=True, desc='input framewise displacement')
    tcompcor = File(exists=True, desc='input tCompCorr')
    acompcor = File(exists=True, desc='input aCompCorr')
    cos_basis = File(exists=True, desc='input cosine basis')
    motion = File(exists=True, desc='input motion parameters')
    aroma = File(exists=True, desc='input ICA-AROMA')
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. These are not mandatory as presently written. I believe the idea is that the individual inputs can fail or (in the case of |
||
|
||
|
||
class GatherConfoundsOutputSpec(TraitedSpec):
    # Outputs filled in by GatherConfounds._run_interface.
    # confounds_file: path to the combined TSV written by _gather_confounds
    confounds_file = File(exists=True, desc='output confounds file')
    # confounds_list: human-readable names of the confound sources included
    confounds_list = traits.List(traits.Str, desc='list of headers')
|
||
|
||
class GatherConfounds(SimpleInterface):
    """
    Combine various sources of confounds in one TSV file.

    Delegates to :func:`_gather_confounds`, which concatenates the input
    TSV files horizontally and writes ``confounds.tsv`` in the working
    directory.
    """
    input_spec = GatherConfoundsInputSpec
    output_spec = GatherConfoundsOutputSpec

    def _run_interface(self, runtime):
        combined_out, confounds_list = _gather_confounds(
            self.inputs.signals,
            self.inputs.dvars,
            self.inputs.fd,
            self.inputs.tcompcor,
            self.inputs.acompcor,
            self.inputs.cos_basis,
            self.inputs.motion,
            self.inputs.aroma,
        )
        # BUG FIX: store results under the names declared in
        # GatherConfoundsOutputSpec. The previous keys ('combined_out',
        # 'confounds_file') did not match the spec and stored the header
        # list under the file-output name.
        self._results['confounds_file'] = combined_out
        self._results['confounds_list'] = confounds_list
        return runtime
|
||
|
||
class ICAConfoundsInputSpec(BaseInterfaceInputSpec):
    # Directory holding ICA-AROMA derivatives: 'melodic.ica/melodic_mix'
    # and 'classified_motion_ICs.txt' are read from here.
    in_directory = Directory(mandatory=True, desc='directory where ICA derivatives are found')
    # When True, a run that classified no usable components is tolerated
    # instead of raising RuntimeError.
    ignore_aroma_err = traits.Bool(False, usedefault=True, desc='ignore ICA-AROMA errors')
|
||
|
||
class ICAConfoundsOutputSpec(TraitedSpec):
    # BUG FIX: declare the outputs actually set by
    # ICAConfounds._run_interface ('aroma_confounds', 'motion_ics_out',
    # 'melodic_mix_out'); previously only 'out_file' existed — which was
    # never set — so the real results were not exposed by the spec.
    aroma_confounds = File(exists=True, desc='output aggregated ICA-AROMA confounds TSV')
    motion_ics_out = File(exists=True, desc='csv of noise components classified by ICA-AROMA')
    melodic_mix_out = File(exists=True, desc='tab-separated copy of the MELODIC mix matrix')
    out_file = File(exists=True, desc='output average file')
|
||
|
||
class ICAConfounds(SimpleInterface):
    """Extract aggregated ICA-AROMA confounds from an ICA derivatives directory."""
    input_spec = ICAConfoundsInputSpec
    output_spec = ICAConfoundsOutputSpec

    def _run_interface(self, runtime):
        (aroma_confounds,
         motion_ics_out,
         melodic_mix_out) = _get_ica_confounds(self.inputs.in_directory)

        if aroma_confounds is None:
            # No usable components were classified; fail loudly unless the
            # user explicitly opted to ignore ICA-AROMA errors.
            if not self.inputs.ignore_aroma_err:
                raise RuntimeError('ICA-AROMA failed')
        else:
            self._results['aroma_confounds'] = aroma_confounds

        self._results['motion_ics_out'] = motion_ics_out
        self._results['melodic_mix_out'] = melodic_mix_out
        return runtime
|
||
|
||
def _gather_confounds(signals=None, dvars=None, fdisp=None, | ||
tcompcor=None, acompcor=None, cos_basis=None, | ||
motion=None, aroma=None): | ||
''' load confounds from the filenames, concatenate together horizontally, and re-save ''' | ||
|
||
def less_breakable(a_string): | ||
''' hardens the string to different envs (i.e. case insensitive, no whitespace, '#' ''' | ||
return ''.join(a_string.split()).strip('#') | ||
|
||
def _adjust_indices(left_df, right_df): | ||
# This forces missing values to appear at the beggining of the DataFrame | ||
# instead of the end | ||
index_diff = len(left_df.index) - len(right_df.index) | ||
if index_diff > 0: | ||
right_df.index = range(index_diff, | ||
len(right_df.index) + index_diff) | ||
elif index_diff < 0: | ||
left_df.index = range(-index_diff, | ||
len(left_df.index) - index_diff) | ||
|
||
all_files = [] | ||
confounds_list = [] | ||
for confound, name in ((signals, 'Global signals'), | ||
(dvars, 'DVARS'), | ||
(fdisp, 'Framewise displacement'), | ||
(tcompcor, 'tCompCor'), | ||
(acompcor, 'aCompCor'), | ||
(cos_basis, 'Cosine basis'), | ||
(motion, 'Motion parameters'), | ||
(aroma, 'ICA-AROMA')): | ||
if confound is not None: | ||
confounds_list.append(name) | ||
if os.path.exists(confound) and os.stat(confound).st_size > 0: | ||
all_files.append(confound) | ||
|
||
confounds_data = pd.DataFrame() | ||
for file_name in all_files: # assumes they all have headings already | ||
new = pd.read_csv(file_name, sep="\t") | ||
for column_name in new.columns: | ||
new.rename(columns={column_name: less_breakable(column_name)}, | ||
inplace=True) | ||
|
||
_adjust_indices(confounds_data, new) | ||
confounds_data = pd.concat((confounds_data, new), axis=1) | ||
|
||
combined_out = os.path.abspath('confounds.tsv') | ||
confounds_data.to_csv(combined_out, sep=str("\t"), index=False, | ||
na_rep="n/a") | ||
|
||
return combined_out, confounds_list | ||
|
||
|
||
def _get_ica_confounds(ica_out_dir): | ||
# load the txt files from ICA-AROMA | ||
melodic_mix = os.path.join(ica_out_dir, 'melodic.ica/melodic_mix') | ||
motion_ics = os.path.join(ica_out_dir, 'classified_motion_ICs.txt') | ||
|
||
# Change names of motion_ics and melodic_mix for output | ||
melodic_mix_out = os.path.join(ica_out_dir, 'MELODICmix.tsv') | ||
motion_ics_out = os.path.join(ica_out_dir, 'AROMAnoiseICs.csv') | ||
|
||
# melodic_mix replace spaces with tabs | ||
with open(melodic_mix, 'r') as melodic_file: | ||
melodic_mix_out_char = melodic_file.read().replace(' ', '\t') | ||
# write to output file | ||
with open(melodic_mix_out, 'w+') as melodic_file_out: | ||
melodic_file_out.write(melodic_mix_out_char) | ||
|
||
# copy metion_ics file to derivatives name | ||
shutil.copyfile(motion_ics, motion_ics_out) | ||
|
||
# -1 since python lists start at index 0 | ||
motion_ic_indices = np.loadtxt(motion_ics, dtype=int, delimiter=',') - 1 | ||
melodic_mix_arr = np.loadtxt(melodic_mix, ndmin=2) | ||
|
||
# Return dummy list of ones if no noise compnents were found | ||
if motion_ic_indices.size == 0: | ||
LOGGER.warning('No noise components were classified') | ||
return None, motion_ics_out, melodic_mix_out | ||
|
||
# the "good" ics, (e.g. not motion related) | ||
good_ic_arr = np.delete(melodic_mix_arr, motion_ic_indices, 1).T | ||
|
||
# return dummy lists of zeros if no signal components were found | ||
if good_ic_arr.size == 0: | ||
LOGGER.warning('No signal components were classified') | ||
return None, motion_ics_out, melodic_mix_out | ||
|
||
# transpose melodic_mix_arr so x refers to the correct dimension | ||
aggr_confounds = np.asarray([melodic_mix_arr.T[x] for x in motion_ic_indices]) | ||
|
||
# add one to motion_ic_indices to match melodic report. | ||
aroma_confounds = os.path.abspath("AROMAAggrCompAROMAConfounds.tsv") | ||
pd.DataFrame(aggr_confounds.T, | ||
columns=['AROMAAggrComp%02d' % (x + 1) for x in motion_ic_indices]).to_csv( | ||
aroma_confounds, sep="\t", index=None) | ||
|
||
return aroma_confounds, motion_ics_out, melodic_mix_out |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -6,10 +6,8 @@ | |
Interfaces to deal with the various types of fieldmap sources | ||
|
||
""" | ||
from __future__ import print_function, division, absolute_import, unicode_literals | ||
|
||
import os | ||
from builtins import range | ||
import numpy as np | ||
import nibabel as nb | ||
from niworkflows.nipype import logging | ||
|
@@ -139,11 +137,11 @@ def _despike2d(data, thres, neigh=None): | |
neigh = [-1, 0, 1] | ||
nslices = data.shape[-1] | ||
|
||
for k in range(nslices): | ||
for k in list(range(nslices)): | ||
data2d = data[..., k] | ||
|
||
for i in range(data2d.shape[0]): | ||
for j in range(data2d.shape[1]): | ||
for i in list(range(data2d.shape[0])): | ||
for j in list(range(data2d.shape[1])): | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why |
||
vals = [] | ||
thisval = data2d[i, j] | ||
for ii in neigh: | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -8,7 +8,6 @@ | |
|
||
|
||
""" | ||
from __future__ import print_function, division, absolute_import, unicode_literals | ||
|
||
import os | ||
import numpy as np | ||
|
@@ -23,7 +22,6 @@ | |
from niworkflows.nipype.interfaces import fsl | ||
from niworkflows.interfaces.base import SimpleInterface | ||
|
||
from ..utils.misc import genfname | ||
|
||
LOGGER = logging.getLogger('interface') | ||
|
||
|
@@ -77,8 +75,8 @@ def _run_interface(self, runtime): | |
in_files = [self.inputs.in_files] | ||
|
||
# Generate output average name early | ||
self._results['out_avg'] = genfname(self.inputs.in_files[0], | ||
suffix='avg') | ||
self._results['out_avg'] = fname_presuffix(self.inputs.in_files[0], | ||
suffix='avg', newpath=os.getcwd()) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
|
||
|
||
if self.inputs.to_ras: | ||
in_files = [reorient(inf) for inf in in_files] | ||
|
@@ -93,7 +91,8 @@ def _run_interface(self, runtime): | |
if sqdata.ndim == 5: | ||
raise RuntimeError('Input image (%s) is 5D' % in_files[0]) | ||
else: | ||
in_files = [genfname(in_files[0], suffix='squeezed')] | ||
in_files = [fname_presuffix(in_files[0], suffix='squeezed', | ||
newpath=os.getcwd())] | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
|
||
nb.Nifti1Image(sqdata, filenii.get_affine(), | ||
filenii.get_header()).to_filename(in_files[0]) | ||
|
||
|
@@ -366,24 +365,15 @@ def _run_interface(self, runtime): | |
|
||
|
||
def reorient(in_file, out_file=None): | ||
import nibabel as nb | ||
from fmriprep.utils.misc import genfname | ||
from builtins import (str, bytes) | ||
|
||
"""Reorient Nifti files to RAS""" | ||
if out_file is None: | ||
out_file = genfname(in_file, suffix='ras') | ||
|
||
if isinstance(in_file, (str, bytes)): | ||
nii = nb.load(in_file) | ||
nii = nb.as_closest_canonical(nii) | ||
nii.to_filename(out_file) | ||
out_file = fname_presuffix(in_file, suffix='ras', newpath=os.getcwd()) | ||
nb.as_closest_canonical(nb.load(in_file)).to_filename(out_file) | ||
return out_file | ||
|
||
|
||
def _flatten_split_merge(in_files): | ||
from builtins import bytes, str | ||
|
||
if isinstance(in_files, (bytes, str)): | ||
if isinstance(in_files, str): | ||
in_files = [in_files] | ||
|
||
nfiles = len(in_files) | ||
|
@@ -398,13 +388,13 @@ def _flatten_split_merge(in_files): | |
all_nii.append(nii) | ||
|
||
if len(all_nii) == 1: | ||
LOGGER.warn('File %s cannot be split', all_nii[0]) | ||
LOGGER.warning('File %s cannot be split', all_nii[0]) | ||
return in_files[0], in_files | ||
|
||
if len(all_nii) == nfiles: | ||
flat_split = in_files | ||
else: | ||
splitname = genfname(in_files[0], suffix='split%04d') | ||
splitname = fname_presuffix(in_files[0], suffix='split%04d', newpath=os.getcwd()) | ||
flat_split = [] | ||
for i, nii in enumerate(all_nii): | ||
flat_split.append(splitname % i) | ||
|
@@ -415,24 +405,21 @@ def _flatten_split_merge(in_files): | |
merged = in_files[0] | ||
else: | ||
# More that one in_files - need merge | ||
merged = genfname(in_files[0], suffix='merged') | ||
merged = fname_presuffix(in_files[0], suffix='merged', newpath=os.getcwd()) | ||
nb.concat_images(all_nii).to_filename(merged) | ||
|
||
return merged, flat_split | ||
|
||
|
||
def _gen_reference(fixed_image, moving_image, out_file=None): | ||
import numpy | ||
from nilearn.image import resample_img, load_img | ||
|
||
if out_file is None: | ||
out_file = genfname(fixed_image, suffix='reference') | ||
new_zooms = load_img(moving_image).header.get_zooms()[:3] | ||
out_file = fname_presuffix(fixed_image, suffix='reference', newpath=os.getcwd()) | ||
new_zooms = nli.load_img(moving_image).header.get_zooms()[:3] | ||
# Avoid small differences in reported resolution to cause changes to | ||
# FOV. See https://github.com/poldracklab/fmriprep/issues/512 | ||
new_zooms_round = numpy.round(new_zooms, 3) | ||
resample_img(fixed_image, target_affine=numpy.diag(new_zooms_round), | ||
interpolation='nearest').to_filename(out_file) | ||
new_zooms_round = np.round(new_zooms, 3) | ||
nli.resample_img(fixed_image, target_affine=np.diag(new_zooms_round), | ||
interpolation='nearest').to_filename(out_file) | ||
return out_file | ||
|
||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Is the plan to completely discontinue Py2 support? At least a couple months ago, we were accepting patches from users who needed Py2, as long as they didn't complicate the code.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I would discontinue Py2 for good at this point and simplify our lives. But you are right, we haven't talked about this. WDYT @chrisfilo @rwblair ?