
Merge pull request #157 from shoshber/confounds
WIP Confounds
Shoshana Berleant committed Nov 11, 2016
2 parents 78424b0 + 9eda3a0 commit b6eb02b
Showing 18 changed files with 520 additions and 25 deletions.
1 change: 1 addition & 0 deletions Dockerfile
@@ -41,6 +41,7 @@ ENV PATH $C3DPATH:$PATH
RUN rm -rf /usr/local/miniconda/lib/python*/site-packages/nipype* && \
    pip install -e git+https://github.com/nipy/nipype.git@master#egg=nipype && \
    pip install mock && \
    pip install pandas && \
    python -c "from matplotlib import font_manager"


Empty file modified build/files/run_fmriprep
100644 → 100755
Empty file.
Empty file modified build/files/run_unittests
100644 → 100755
Empty file.
6 changes: 6 additions & 0 deletions fmriprep/data/identitytransform.txt
@@ -0,0 +1,6 @@
1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00
0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
2 changes: 2 additions & 0 deletions fmriprep/info.py
@@ -52,10 +52,12 @@
    'nilearn',
    'sklearn',
    'nibabel',
    'pandas',
    'niworkflows>=0.0.3a7',
    'grabbit',
    'nipype',
    'pybids',
    'nitime'
]

LINKS_REQUIRES = [
80 changes: 80 additions & 0 deletions fmriprep/interfaces/mask.py
@@ -0,0 +1,80 @@
#!/usr/bin/env python
''' Utility functions related to masks and masking '''
import os
import logging

import numpy as np
import nibabel as nb
from nipype.interfaces.base import (traits, TraitedSpec, BaseInterface,
                                    BaseInterfaceInputSpec, File)

LOG = logging.getLogger('binarizesegmentationinterface')

class BinarizeSegmentationInputSpec(BaseInterfaceInputSpec):
    ''' Takes an integer segmentation (e.g., from FAST) and binarizes it. '''
    in_segments = File(exists=True, mandatory=True, desc='3D tissue-class segmentation. '
                                                         'Values are integers')
    false_values = traits.List([0], usedefault=True,
                               desc='list of values in in_segments that are to be set to '
                                    'false (0). All others will be set to true (1)')
    out_mask = File('mask.nii', exists=False, usedefault=True,
                    desc='the file name to save the output to')

class BinarizeSegmentationOutputSpec(TraitedSpec):
    ''' out_mask defaults to 'mask.nii' (see input spec) '''
    out_mask = File(exists=True, desc='binarized mask')

class BinarizeSegmentation(BaseInterface):
    '''
    Utility for turning a segmentation with integer values into a binary map.
    An example input is the segmentation output by fsl.FAST.
    Use case: get a white matter mask
    >>> biseg = BinarizeSegmentation()
    >>> biseg.inputs.in_segments = 'fast_out.nii'
    >>> biseg.inputs.false_values = [0, 1, 2]
    >>> biseg.run()
    Use case: get white matter and CSF (as for aCompCor)
    >>> BinarizeSegmentation(in_segments='fast_out.nii', false_values=[0, 2]).run()
    '''
    input_spec = BinarizeSegmentationInputSpec
    output_spec = BinarizeSegmentationOutputSpec
    _results = {}

    def _run_interface(self, runtime):
        segments_data, segments_affine, output_filename = self._get_inputs()

        mapper = np.vectorize(lambda orig_val: orig_val not in self.inputs.false_values)
        bimap = mapper(segments_data)

        bimap_nii = nb.Nifti1Image(bimap.astype(int), segments_affine)
        nb.nifti1.save(bimap_nii, output_filename)
        self._results['out_mask'] = output_filename

        LOG.debug('BinarizeSegmentation interface saved mask of shape %s to file %s',
                  bimap.shape, output_filename)

        return runtime

    def _get_inputs(self):
        ''' Manipulates inputs into a useful form and does preliminary input checking. '''
        segments_nii = nb.load(self.inputs.in_segments)
        segments_data = segments_nii.get_data()
        segments_affine = segments_nii.affine
        output_filename = os.path.join(os.getcwd(), self.inputs.out_mask)

        LOG.debug('BinarizeSegmentation interface loaded segments data from %s of shape %s',
                  self.inputs.in_segments, segments_data.shape)

        if not np.issubdtype(segments_data.dtype, np.integer):
            raise ValueError('Segmentation must have integer values. Input {} had {}s'
                             .format(self.inputs.in_segments, segments_data.dtype))
        if segments_data.ndim != 3:
            raise ValueError('Segmentation must be 3-D. Input {} has shape {}'
                             .format(self.inputs.in_segments, segments_data.shape))

        return segments_data, segments_affine, output_filename

    def _list_outputs(self):
        return self._results
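For reference, the binarization above boils down to an element-wise membership test against false_values. A minimal numpy sketch of the same idea, using a toy array and assuming the usual FAST label convention (0=background, 1=CSF, 2=gray matter, 3=white matter):

import numpy as np

# toy 2x2x2 "segmentation" standing in for a FAST output
segments = np.array([[[0, 1], [2, 3]],
                     [[3, 3], [0, 2]]])
false_values = [0, 2]  # drop background and gray matter, keep CSF and WM

# equivalent to the np.vectorize(lambda ...) mapping in _run_interface
bimap = ~np.isin(segments, false_values)
print(bimap.astype(int))  # 1 where the label is CSF (1) or WM (3), 0 elsewhere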
38 changes: 33 additions & 5 deletions fmriprep/workflows/base.py
@@ -10,13 +10,12 @@
from copy import deepcopy

from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from nipype.interfaces import fsl
from nipype.interfaces import freesurfer as fs
from nipype.interfaces import io as nio

from fmriprep.interfaces import BIDSDataGrabber
from fmriprep.utils.misc import collect_bids_data
from fmriprep.workflows import confounds

from fmriprep.workflows.anatomical import t1w_preprocessing
from fmriprep.workflows.sbref import sbref_preprocess, sbref_t1_registration
from fmriprep.workflows.fieldmap import phase_diff_and_magnitudes
@@ -88,6 +87,13 @@ def wf_ds054_type(subject_data, settings, name='fMRI_prep'):
    # EPI unwarp
    epiunwarp_wf = epi_unwarp(settings=settings)

    # get confounds
    confounds_wf = confounds.discover_wf(settings)
    confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False, True]

    # create list of transforms to resample t1 -> sbref -> epi
    t1_to_epi_transforms = pe.Node(fsl.ConvertXFM(concat_xfm=True), name='T1ToEPITransforms')

    workflow.connect([
        (bidssrc, t1w_pre, [('t1w', 'inputnode.t1w')]),
        (bidssrc, fmap_est, [('fmap', 'inputnode.input_images')]),
@@ -107,7 +113,18 @@ def wf_ds054_type(subject_data, settings, name='fMRI_prep'):
        (hmcwf, epiunwarp_wf, [('inputnode.epi', 'inputnode.epi')]),
        (fmap_est, epiunwarp_wf, [('outputnode.fmap', 'inputnode.fmap'),
                                  ('outputnode.fmap_mask', 'inputnode.fmap_mask'),
                                  ('outputnode.fmap_ref', 'inputnode.fmap_ref')])
                                  ('outputnode.fmap_ref', 'inputnode.fmap_ref')]),

        (sbref_t1, t1_to_epi_transforms, [('outputnode.mat_t1_to_sbr', 'in_file')]),
        (epi2sbref, t1_to_epi_transforms, [('outputnode.out_mat_inv', 'in_file2')]),

        (t1_to_epi_transforms, confounds_wf, [('out_file', 'inputnode.t1_transform')]),

        (hmcwf, confounds_wf, [('outputnode.movpar_file', 'inputnode.movpar_file'),
                               ('outputnode.epi_mean', 'inputnode.reference_image')]),
        (epiunwarp_wf, confounds_wf, [('outputnode.epi_mask', 'inputnode.epi_mask'),
                                      ('outputnode.epi_unwarp', 'inputnode.fmri_file')]),
        (t1w_pre, confounds_wf, [('outputnode.t1_seg', 'inputnode.t1_seg')]),
    ])
    return workflow
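The T1ToEPITransforms node drives FSL's convert_xfm -concat, so the composite matrix applies in_file first and in_file2 second (T1 -> SBRef, then SBRef -> EPI). A rough numpy sketch of that composition, assuming 4x4 FLIRT-style affines (the identity matrices below are placeholders, not real outputs):

import numpy as np

t1_to_sbref = np.eye(4)   # placeholder for outputnode.mat_t1_to_sbr
sbref_to_epi = np.eye(4)  # placeholder for outputnode.out_mat_inv

# apply T1 -> SBRef first, then SBRef -> EPI
t1_to_epi = sbref_to_epi.dot(t1_to_sbref)
print(t1_to_epi)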

@@ -146,6 +163,10 @@ def wf_ds005_type(subject_data, settings, name='fMRI_prep'):
    # mean EPI registration to T1w
    epi_2_t1 = epi_mean_t1_registration(settings=settings)

    # get confounds
    confounds_wf = confounds.discover_wf(settings)
    confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    epi_mni_trans_wf = epi_mni_transformation(settings=settings)

@@ -155,14 +176,21 @@ def wf_ds005_type(subject_data, settings, name='fMRI_prep'):
        (hmcwf, epi_2_t1, [('outputnode.epi_mean', 'inputnode.epi_mean')]),
        (t1w_pre, epi_2_t1, [('outputnode.t1_brain', 'inputnode.t1_brain'),
                             ('outputnode.t1_seg', 'inputnode.t1_seg')]),

        (t1w_pre, confounds_wf, [('outputnode.t1_seg', 'inputnode.t1_seg')]),
        (hmcwf, confounds_wf, [('outputnode.movpar_file', 'inputnode.movpar_file'),
                               ('outputnode.epi_brain', 'inputnode.fmri_file'),
                               ('outputnode.epi_mean', 'inputnode.reference_image'),
                               ('outputnode.epi_mask', 'inputnode.epi_mask')]),
        (epi_2_t1, confounds_wf, [('outputnode.mat_t1_to_epi', 'inputnode.t1_transform')]),

        (hmcwf, epi_mni_trans_wf, [('inputnode.epi', 'inputnode.epi')]),
        (epi_2_t1, epi_mni_trans_wf, [('outputnode.mat_epi_to_t1', 'inputnode.mat_epi_to_t1')]),
        (hmcwf, epi_mni_trans_wf, [('outputnode.xforms', 'inputnode.hmc_xforms'),
                                   ('outputnode.epi_mask', 'inputnode.epi_mask')]),
        (t1w_pre, epi_mni_trans_wf, [('outputnode.t1_brain', 'inputnode.t1'),
                                     ('outputnode.t1_2_mni_forward_transform',
                                      'inputnode.t1_2_mni_forward_transform')])

    ])

    return workflow
129 changes: 129 additions & 0 deletions fmriprep/workflows/confounds.py
@@ -0,0 +1,129 @@
'''
Workflow for discovering confounds.
Calculates framewise displacement, segment regressors, global signal, DVARS, aCompCor, and tCompCor.
'''
from nipype.interfaces import utility, nilearn, fsl
from nipype.algorithms import confounds
from nipype.pipeline import engine as pe

from fmriprep.interfaces import mask
from fmriprep import interfaces

FAST_DEFAULT_SEGS = ['CSF', 'gray matter', 'white matter']

def discover_wf(settings, name="ConfoundDiscoverer"):
    ''' All input fields are required.
    Calculates the global signal regressor and tCompCor
    from the motion-corrected fMRI series ('inputnode.fmri_file').
    Calculates DVARS from the fMRI series and an EPI brain mask ('inputnode.epi_mask').
    Calculates framewise displacement from MCFLIRT movement parameters ('inputnode.movpar_file').
    Calculates segment regressors and aCompCor
    from the fMRI series and a white matter/gray matter/CSF segmentation ('inputnode.t1_seg'),
    after resampling the segmentation into EPI space ('inputnode.t1_transform', FSL-formatted).
    Saves the confounds to a file ('outputnode.confounds_file').'''

    inputnode = pe.Node(utility.IdentityInterface(fields=['fmri_file', 'movpar_file', 't1_seg',
                                                          'epi_mask', 't1_transform',
                                                          'reference_image']),
                        name='inputnode')
    outputnode = pe.Node(utility.IdentityInterface(fields=['confounds_file']),
                         name='outputnode')

    # resample the T1 segmentation into EPI space using FSL (FLIRT ApplyXfm)
    t1_registration = pe.Node(fsl.ApplyXfm(), name='T1Registration')

    # Global and segment regressors
    signals = pe.Node(nilearn.SignalExtraction(include_global=True, detrend=True,
                                               class_labels=FAST_DEFAULT_SEGS),
                      name="SignalExtraction")
    # DVARS
    dvars = pe.Node(confounds.ComputeDVARS(save_all=True, remove_zerovariance=True),
                    name="ComputeDVARS")
    # Framewise displacement
    frame_displace = pe.Node(confounds.FramewiseDisplacement(), name="FramewiseDisplacement")
    # CompCor
    tcompcor = pe.Node(confounds.TCompCor(components_file='tcompcor.tsv'), name="tCompCor")
    acompcor_roi = pe.Node(mask.BinarizeSegmentation(
        false_values=[FAST_DEFAULT_SEGS.index('gray matter') + 1, 0]),  # 0 denotes background
                           name="CalcaCompCorROI")
    acompcor = pe.Node(confounds.ACompCor(components_file='acompcor.tsv'), name="aCompCor")

    # misc utilities
    concat = pe.Node(utility.Function(function=_gather_confounds,
                                      input_names=['signals', 'dvars', 'frame_displace',
                                                   'tcompcor', 'acompcor'],
                                      output_names=['combined_out']),
                     name="ConcatConfounds")
    ds_confounds = pe.Node(interfaces.DerivativesDataSink(base_directory=settings['output_dir'],
                                                          suffix='confounds'),
                           name="DerivConfounds")

    workflow = pe.Workflow(name=name)
    workflow.connect([
        # connect inputnode to each non-anatomical confound node
        (inputnode, dvars, [('fmri_file', 'in_file'),
                            ('epi_mask', 'in_mask')]),
        (inputnode, frame_displace, [('movpar_file', 'in_plots')]),
        (inputnode, tcompcor, [('fmri_file', 'realigned_file')]),

        # anatomically-based confound computation requires coregistration
        (inputnode, t1_registration, [('reference_image', 'reference'),
                                      ('t1_seg', 'in_file'),
                                      ('t1_transform', 'in_matrix_file')]),

        # anatomical confound: signal extraction
        (t1_registration, signals, [('out_file', 'label_files')]),
        (inputnode, signals, [('fmri_file', 'in_file')]),
        # anatomical confound: aCompCor
        (inputnode, acompcor, [('fmri_file', 'realigned_file')]),
        (t1_registration, acompcor_roi, [('out_file', 'in_segments')]),
        (acompcor_roi, acompcor, [('out_mask', 'mask_file')]),

        # connect the confound nodes to the concatenate node
        (signals, concat, [('out_file', 'signals')]),
        (dvars, concat, [('out_all', 'dvars')]),
        (frame_displace, concat, [('out_file', 'frame_displace')]),
        (tcompcor, concat, [('components_file', 'tcompcor')]),
        (acompcor, concat, [('components_file', 'acompcor')]),

        (concat, outputnode, [('combined_out', 'confounds_file')]),

        # save the combined confounds file to the derivatives directory
        (concat, ds_confounds, [('combined_out', 'in_file')]),
        (inputnode, ds_confounds, [('fmri_file', 'source_file')])
    ])

    return workflow
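As a rough sketch of how discover_wf might be driven on its own, outside the base workflows that normally supply these connections; the output directory and all file names below are placeholders:

from fmriprep.workflows import confounds

wf = confounds.discover_wf(settings={'output_dir': 'derivatives/'})
wf.base_dir = 'work/'
wf.inputs.inputnode.fmri_file = 'epi_preproc.nii.gz'     # motion-corrected series
wf.inputs.inputnode.movpar_file = 'epi_hmc.par'          # MCFLIRT movement parameters
wf.inputs.inputnode.t1_seg = 't1_seg.nii.gz'             # FAST segmentation
wf.inputs.inputnode.epi_mask = 'epi_brainmask.nii.gz'
wf.inputs.inputnode.t1_transform = 't1_to_epi.mat'       # FSL-formatted affine
wf.inputs.inputnode.reference_image = 'epi_mean.nii.gz'
wf.run()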

def _gather_confounds(signals=None, dvars=None, frame_displace=None, tcompcor=None, acompcor=None):
    ''' Load confounds from the filenames, concatenate them horizontally, and re-save. '''
    import pandas as pd
    import os.path as op

    def less_breakable(a_string):
        ''' Hardens the string against different environments (case, whitespace, leading '#'). '''
        return ''.join(a_string.split()).lower().strip('#')

    all_files = [confound for confound in [signals, dvars, frame_displace, tcompcor, acompcor]
                 if confound is not None]

    confounds_data = pd.DataFrame()
    for file_name in all_files:  # assumes they all have headings already
        new = pd.read_csv(file_name, sep="\t")
        for column_name in new.columns:
            new.rename(columns={column_name: less_breakable(column_name)}, inplace=True)
        confounds_data = pd.concat((confounds_data, new), axis=1)

    combined_out = op.abspath('confounds.tsv')
    confounds_data.to_csv(combined_out, sep='\t', index=False)

    return combined_out
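To illustrate the horizontal concatenation, a small in-memory pandas example (toy column names and values, not output produced by the nodes above):

import pandas as pd

dvars = pd.DataFrame({'stddvars': [1.1, 0.9, 1.0]})
fd = pd.DataFrame({'framewisedisplacement': [0.02, 0.15, 0.07]})

# one row per volume, one column per confound
combined = pd.concat((dvars, fd), axis=1)
print(combined.to_csv(sep='\t', index=False))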

def reverse_order(inlist):
    ''' If given a list, return it in reversed order; otherwise return the single item unchanged. '''
    if isinstance(inlist, list):
        inlist.reverse()
    return inlist
