Estimate S0 from data (DTI) #1036

Closed
wants to merge 4 commits
81 changes: 53 additions & 28 deletions dipy/reconst/dti.py
@@ -13,12 +13,12 @@
 from dipy.utils.six.moves import range
 from dipy.utils.arrfuncs import pinv, eigh
 from dipy.data import get_sphere
-from ..core.gradients import gradient_table
-from ..core.geometry import vector_norm
-from ..core.sphere import Sphere
+from dipy.core.gradients import gradient_table
+from dipy.core.geometry import vector_norm
+from dipy.core.sphere import Sphere
 from .vec_val_sum import vec_val_vect
-from ..core.onetime import auto_attr
-from .base import ReconstModel
+from dipy.core.onetime import auto_attr
+from dipy.reconst.base import ReconstModel
 
 
 def _roll_evals(evals, axis=-1):
@@ -724,14 +724,17 @@ def __init__(self, gtab, fit_method="WLS", *args, **kwargs):

         Note
         -----
-        In order to increase speed of processing, tensor fitting is done simultaneously
-        over many voxels. Many fit_methods use the 'step' parameter to set the number of
-        voxels that will be fit at once in each iteration. This is the chunk size as a
-        number of voxels. A larger step value should speed things up, but it will also
-        take up more memory. It is advisable to keep an eye on memory consumption as
-        this value is increased.
-
-        Example : In :func:`iter_fit_tensor` we have a default step value of 1e4
+        In order to increase speed of processing, tensor fitting is done
+        simultaneously over many voxels. Many fit_methods use the 'step'
+        parameter to set the number of voxels that will be fit at once in each
+        iteration. This is the chunk size as a number of voxels. A larger step
+        value should speed things up, but it will also take up more memory. It
+        is advisable to keep an eye on memory consumption as this value is
+        increased.
+
+        Example : In :func:`iter_fit_tensor` we have a default step value
+        of 1e4
 
         References
         ----------
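A minimal sketch of how the 'step' keyword reaches the chunked fit through the model's `**kwargs`. It mirrors the data-loading pattern used in dipy's own tests; the particular step value is only illustrative:

```python
import nibabel as nib
from dipy.core.gradients import gradient_table
from dipy.data import get_data
from dipy.io import read_bvals_bvecs
from dipy.reconst.dti import TensorModel

# Small bundled dataset, following the pattern in dipy's tests.
fdata, fbvals, fbvecs = get_data()
data = nib.load(fdata).get_data()
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
gtab = gradient_table(bvals, bvecs)

# 'step' is forwarded to the fit method: fit at most 1000 voxels per
# iteration instead of the default 10,000, trading speed for memory.
model = TensorModel(gtab, fit_method="WLS", step=1000)
fit = model.fit(data)
```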
@@ -804,7 +807,17 @@ def fit(self, data, mask=None):
         dti_params = np.zeros(data.shape[:-1] + (12,))
         dti_params[mask, :] = params_in_mask
 
-        return TensorFit(self, dti_params)
+        evecs = dti_params[..., 3:12].reshape((dti_params.shape[:-1] + (3, 3)))
+        evals = dti_params[..., :3]
+        qform = vec_val_vect(evecs, evals)
+
+        sphere = Sphere(xyz=self.gtab.bvecs[~self.gtab.b0s_mask])
+        ADC = apparent_diffusion_coef(qform, sphere)
+
+        S0_hat = np.mean(data[..., ~self.gtab.b0s_mask] /
+                         np.exp(-self.gtab.bvals[~self.gtab.b0s_mask] * ADC),
+                         -1)
+        return TensorFit(self, dti_params, S0_hat=S0_hat)
 
     def predict(self, dti_params, S0=1):
         """
@@ -825,11 +838,12 @@ def predict(self, dti_params, S0=1):

 class TensorFit(object):
 
-    def __init__(self, model, model_params):
+    def __init__(self, model, model_params, S0_hat=None):
         """ Initialize a TensorFit class instance.
         """
         self.model = model
         self.model_params = model_params
+        self.S0_hat = S0_hat
 
     def __getitem__(self, index):
         model_params = self.model_params
@@ -1132,7 +1146,7 @@ def adc(self, sphere):
"""
return apparent_diffusion_coef(self.quadratic_form, sphere)

def predict(self, gtab, S0=1, step=None):
def predict(self, gtab, S0=None, step=None):
r"""
Given a model fit, predict the signal on the vertices of a sphere

@@ -1146,13 +1160,16 @@ def predict(self, gtab, S0=None, step=None):
         all voxels.
 
         step : int
-            The chunk size as a number of voxels. Optional parameter with default value 10,000.
-
-            In order to increase speed of processing, tensor fitting is done simultaneously
-            over many voxels. This parameter sets the number of voxels that will be fit at
-            once in each iteration. A larger step value should speed things up, but it will
-            also take up more memory. It is advisable to keep an eye on memory consumption
-            as this value is increased.
+            The chunk size as a number of voxels. Optional parameter with
+            default value 10,000.
+
+            In order to increase speed of processing, tensor fitting is done
+            simultaneously over many voxels. This parameter sets the number of
+            voxels that will be fit at once in each iteration. A larger step
+            value should speed things up, but it will also take up more memory.
+            It is advisable to keep an eye on memory consumption as this value
+            is increased.
 
         Notes
         -----
@@ -1170,6 +1187,9 @@
         which a signal is to be predicted and $b$ is the b value provided in
         the GradientTable input for that direction
         """
+        if S0 is None:
+            S0 = self.S0_hat
+
         shape = self.model_params.shape[:-1]
         size = np.prod(shape)
         if step is None:
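With this change a fitted model can reproduce the measured signal without the caller passing S0 explicitly. A sketch of the intended usage, reusing the `model`, `data` and `gtab` names from the earlier sketch:

```python
fit = model.fit(data)

# predict() now falls back on the S0 estimated during the fit:
predicted = fit.predict(gtab)

# An explicit S0 still takes precedence over the stored estimate:
S0 = data[..., gtab.b0s_mask].mean(-1)
predicted_explicit = fit.predict(gtab, S0=S0)
```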
@@ -1203,13 +1223,17 @@ def iter_fit_tensor(step=1e4):
     Parameters
     ----------
     step : int
-        The chunk size as a number of voxels. Optional parameter with default value 10,000.
-
-        In order to increase speed of processing, tensor fitting is done simultaneously
-        over many voxels. This parameter sets the number of voxels that will be fit at
-        once in each iteration. A larger step value should speed things up, but it will
-        also take up more memory. It is advisable to keep an eye on memory consumption
-        as this value is increased.
+        The chunk size as a number of voxels. Optional parameter with default
+        value 10,000.
+
+        In order to increase speed of processing, tensor fitting is done
+        simultaneously over many voxels. This parameter sets the number of
+        voxels that will be fit at once in each iteration. A larger step value
+        should speed things up, but it will also take up more memory. It is
+        advisable to keep an eye on memory consumption as this value is
+        increased.
+
     """
 
     def iter_decorator(fit_tensor):
@@ -1252,7 +1276,8 @@ def wrapped_fit_tensor(design_matrix, data, step=step,
             data = data.reshape(-1, data.shape[-1])
             dtiparams = np.empty((size, 12), dtype=np.float64)
             for i in range(0, size, step):
-                dtiparams[i:i + step] = fit_tensor(design_matrix, data[i:i + step],
+                dtiparams[i:i + step] = fit_tensor(design_matrix,
+                                                   data[i:i + step],
                                                    *args, **kwargs)
             return dtiparams.reshape(shape + (12, ))

10 changes: 10 additions & 0 deletions dipy/reconst/tests/test_dti.py
@@ -132,6 +132,7 @@ def test_tensor_model():
     assert_true(tensor_fit.model is tensor_model)
     assert_equal(tensor_fit.shape, Y.shape[:-1])
     assert_array_almost_equal(tensor_fit.evals[0], evals)
+    assert_array_almost_equal(tensor_fit.S0_hat, b0, decimal=3)
     # Test that the eigenvectors are correct, one-by-one:
     for i in range(3):
         # Eigenvectors have intrinsic sign ambiguity
@@ -347,6 +348,7 @@ def test_wls_and_ls_fit():
                             err_msg="Calculation of tensor from Y does not "
                                     "compare to analytical solution")
     assert_almost_equal(tensor_est.md[0], md)
+    assert_array_almost_equal(tensor_est.S0_hat[0], b0, decimal=3)
 
     # Test that we can fit a single voxel's worth of data (a 1d array)
     y = Y[0]
@@ -661,6 +663,7 @@ def test_predict():
     dm = dti.TensorModel(gtab, 'LS')
     dmfit = dm.fit(S)
     assert_array_almost_equal(dmfit.predict(gtab, S0=100), S)
+    assert_array_almost_equal(dmfit.predict(gtab), S)
     assert_array_almost_equal(dm.predict(dmfit.model_params, S0=100), S)
 
     fdata, fbvals, fbvecs = get_data()
@@ -673,6 +676,9 @@
     S0 = np.mean(data[..., gtab.b0s_mask], -1)
     p = dtif.predict(gtab, S0)
     assert_equal(p.shape, data.shape)
+    # Predict using S0_hat:
+    p = dtif.predict(gtab)
+    assert_equal(p.shape, data.shape)
 
     # Use a smaller step in predicting:
 
@@ -688,6 +694,10 @@
     # Assign the step through kwarg:
     p = dtif.predict(gtab, S0, step=1)
     assert_equal(p.shape, data.shape)
+    # And without S0:
+    p = dtif.predict(gtab, step=1)
+    assert_equal(p.shape, data.shape)
 
 
 def test_eig_from_lo_tri():
     psphere = get_sphere('symmetric362')