dwi2tensor improvements
bjeurissen committed Sep 17, 2015
1 parent 8975fa8 commit 7faeb67
Showing 2 changed files with 85 additions and 48 deletions.
95 changes: 59 additions & 36 deletions cmd/dwi2tensor.cpp
@@ -32,7 +32,7 @@ using namespace App;
using namespace std;

typedef float value_type;
constexpr int default_iter = 2;
const int default_iter = 2;

void usage ()
{
@@ -41,12 +41,14 @@ void usage ()
+ Argument ("dt", "the output dt image.").type_image_out ();

OPTIONS
+ Option ("iter","number of iterative reweightings (default: 2); set to 0 for ordinary linear least squares.")
+ Argument ("iter").type_integer (0, default_iter, 10)
+ Option ("b0", "the output b0 image.")
+ Argument ("image").type_image_out()
+ Option ("mask", "only perform computation within the specified binary brain mask image.")
+ Argument ("image").type_image_in()
+ Option ("b0", "the output b0 image.")
+ Argument ("image").type_image_out()
+ Option ("dkt", "the output dkt image.")
+ Argument ("image").type_image_out()
+ Option ("iter","number of iterative reweightings (default: 2); set to 0 for ordinary linear least squares.")
+ Argument ("integer").type_integer (0, default_iter, 10)
+ DWI::GradImportOptions();

AUTHOR = "Ben Jeurissen (ben.jeurissen@uantwerpen.be)";
@@ -56,119 +58,140 @@ void usage ()
"There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.";

DESCRIPTION
+ "Diffusion tensor estimation using iteratively reweighted linear least squares estimator.";
+ "Diffusion (kurtosis) tensor estimation using iteratively reweighted linear least squares estimator.";

REFERENCES
+ "Veraart, J.; Sijbers, J.; Sunaert, S.; Leemans, A. & Jeurissen, B. "
"Weighted linear least squares estimation of diffusion MRI parameters: strengths, limitations, and pitfalls. "
"NeuroImage, 2013, 81, 335-346";
}

template <class MASKType, class B0Type>
template <class MASKType, class B0Type, class DKTType>
class Processor
{
public:
Processor (const Eigen::MatrixXd& b, const int& iter, MASKType* mask_image, B0Type* b0_image) :
Processor (const Eigen::MatrixXd& b, const int& iter, MASKType* mask_image, B0Type* b0_image, DKTType* dkt_image) :
mask_image (mask_image),
b0_image (b0_image),
dkt_image (dkt_image),
dwi(b.rows()),
logdwi(b.rows()),
p(7),
w(b.rows(),b.rows()),
p(b.cols()),
w(b.rows()),
work(b.cols(),b.cols()),
llt(work.rows()),
b(b),
maxit(iter) { }

template <class DWIType, class DTType>
void operator() (DWIType& dwi_image, DTType& dt_image)
{
/* check mask */
if (mask_image) {
assign_pos_of (dwi_image, 0, 3).to (*mask_image);
if (!mask_image->value())
return;
}

/* input dwi */
for (auto l = Loop (3) (dwi_image); l; ++l)
dwi[dwi_image.index(3)] = dwi_image.value();

/* avoid issues with dwi <= 0.0 */
double small_intensity = 1.0e-6 * dwi.maxCoeff();
for (int i = 0; i < dwi.rows(); i++)
if (dwi[i] < small_intensity)
dwi[i] = small_intensity;

/* log dwi fit */
logdwi = dwi.array().log().matrix();
logdwi = dwi.array().log();
p = b.colPivHouseholderQr().solve(logdwi);

/* weighted linear least squares fit with iterative weights */
for (int it = 0; it < maxit; it++) {
w = (b*p).array().exp().matrix().asDiagonal();
p = (w*b).colPivHouseholderQr().solve(w*logdwi);
w = (b*p).array().exp();
p = (w.asDiagonal()*b).colPivHouseholderQr().solve(w.asDiagonal()*logdwi);
}

/* output b0 */
/*work.setZero();
work.selfadjointView<Eigen::Lower>().rankUpdate (b.transpose());
p = llt.compute (work.selfadjointView<Eigen::Lower>()).solve(b.transpose()*logdwi);
for (int it = 0; it < maxit; it++) {
w = (b*p).array().exp();
work.setZero();
work.selfadjointView<Eigen::Lower>().rankUpdate (b.transpose()*w.asDiagonal());
p = llt.compute (work.selfadjointView<Eigen::Lower>()).solve(b.transpose()*w.asDiagonal()*logdwi);
}*/

if (b0_image) {
assign_pos_of (dwi_image, 0, 3).to (*b0_image);
b0_image->value() = exp(p[6]);
}

/* output dt */
for (auto l = Loop(3)(dt_image); l; ++l)
assign_pos_of (dwi_image, 0, 3).to (dt_image);
for (auto l = Loop(3)(dt_image); l; ++l) {
dt_image.value() = p[dt_image.index(3)];
}

if (dkt_image) {
assign_pos_of (dwi_image, 0, 3).to (*dkt_image);
double adc_sq = (p[0]+p[1]+p[2])*(p[0]+p[1]+p[2])/9.0;
for (auto l = Loop(3)(*dkt_image); l; ++l) {
dkt_image->value() = p[dkt_image->index(3)+7]/adc_sq;
}
}
}

private:
copy_ptr<MASKType> mask_image;
copy_ptr<B0Type> b0_image;
copy_ptr<DKTType> dkt_image;
Eigen::VectorXd dwi;
Eigen::VectorXd logdwi;
Eigen::VectorXd p;
Eigen::MatrixXd w;
Eigen::VectorXd w;
Eigen::MatrixXd work;
Eigen::LLT<Eigen::MatrixXd> llt;
const Eigen::MatrixXd b;
const int maxit;
};


template <class MASKType, class B0Type>
inline Processor<MASKType, B0Type> processor (const Eigen::MatrixXd& b, const int& iter, MASKType* mask_image, B0Type* b0_image) {
return { b, iter, mask_image, b0_image };
template <class MASKType, class B0Type, class DKTType>
inline Processor<MASKType, B0Type, DKTType> processor (const Eigen::MatrixXd& b, const int& iter, MASKType* mask_image, B0Type* b0_image, DKTType* dkt_image) {
return { b, iter, mask_image, b0_image, dkt_image };
}




void run ()
{
auto dwi = Header::open (argument[0]).get_image<value_type>();
auto grad = DWI::get_valid_DW_scheme (dwi.header());
Eigen::MatrixXd b = -DWI::grad2bmatrix<double> (grad);


Image<bool>* mask = nullptr;
auto opt = get_options ("mask");
if (opt.size()) {
mask = new Image<bool> (Image<bool>::open (opt[0][0]));
check_dimensions (dwi, *mask, 0, 3);
}

int iter = get_option_value ("iter", default_iter);
auto iter = get_option_value ("iter", default_iter);

auto header = dwi.header();
header.datatype() = DataType::Float32;
header.set_ndim (4);
header.size(3) = 6;
auto dt = Header::create (argument[1], header).get_image<value_type>();


Image<value_type>* b0 = nullptr;
opt = get_options ("b0");
if (opt.size()) {
header.set_ndim (3);
b0 = new Image<value_type> (Image<value_type>::create (opt[0][0], header));
}

Image<value_type>* dkt = nullptr;
opt = get_options ("dkt");
if (opt.size()) {
header.set_ndim (4);
header.size(3) = 15;
dkt = new Image<value_type> (Image<value_type>::create (opt[0][0], header));
}

// TODO: fix crash if mask or b0 are not valid() (i.e. when those options are not supplied)
ThreadedLoop("computing tensors...", dwi, 0, 3).run (processor (b, iter, mask, b0), dwi, dt);
Eigen::MatrixXd b = -DWI::grad2bmatrix<double> (grad, opt.size()>0);

ThreadedLoop("computing tensors...", dwi, 0, 3).run (processor (b, iter, mask, b0, dkt), dwi, dt);
}
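As a reading aid for the fitting loop in Processor::operator() above: the estimator of Veraart et al. (2013), as implemented here, linearises the tensor model by taking the log of the signal. With design matrix B (one row per DWI volume, built by grad2bmatrix) and measured signals S,

\log S = B\,p, \qquad p = (D_{xx},\, D_{yy},\, D_{zz},\, D_{xy},\, D_{xz},\, D_{yz},\, \log S_0)^T,

and the iteratively reweighted fit proceeds as

p^{(0)} = \arg\min_p \,\| B p - \log S \|^2, \qquad W^{(k)} = \operatorname{diag}\!\big(\exp(B p^{(k-1)})\big), \qquad p^{(k)} = \arg\min_p \,\| W^{(k)} (B p - \log S) \|^2, \quad k = 1, \dots, \text{iter}.

Setting -iter 0 therefore performs a plain ordinary least-squares fit, as the option help text states. With the options added in this commit, an invocation might look like the following (file names are placeholders):

dwi2tensor dwi.mif dt.mif -mask mask.mif -b0 b0.mif -dkt dkt.mif -iter 2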

38 changes: 26 additions & 12 deletions src/dwi/tensor.h
@@ -31,24 +31,38 @@ namespace MR
{

template <typename T, class MatrixType>
inline Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> grad2bmatrix (const MatrixType& grad)
inline Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> grad2bmatrix (const MatrixType& grad, bool dki = false)
{
Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> bmat (grad.rows(),7);
Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> bmat (grad.rows(),dki ? 22 : 7);
for (ssize_t i = 0; i < grad.rows(); ++i) {
bmat (i,0) = grad (i,3) * grad (i,0) *grad (i,0);
bmat (i,1) = grad (i,3) * grad (i,1) *grad (i,1);
bmat (i,2) = grad (i,3) * grad (i,2) *grad (i,2);
bmat (i,3) = grad (i,3) * 2*grad (i,0) *grad (i,1);
bmat (i,4) = grad (i,3) * 2*grad (i,0) *grad (i,2);
bmat (i,5) = grad (i,3) * 2*grad (i,1) *grad (i,2);
bmat (i,6) = -1.0;
bmat (i,0) = grad(i,3) * grad(i,0) * grad(i,0);
bmat (i,1) = grad(i,3) * grad(i,1) * grad(i,1);
bmat (i,2) = grad(i,3) * grad(i,2) * grad(i,2);
bmat (i,3) = grad(i,3) * grad(i,0) * grad(i,1) * 2;
bmat (i,4) = grad(i,3) * grad(i,0) * grad(i,2) * 2;
bmat (i,5) = grad(i,3) * grad(i,1) * grad(i,2) * 2;
bmat (i,6) = -1.0;
if (dki) {
bmat (i,7) = -grad(i,3) * grad(i,3) * grad(i,0) * grad(i,0) * grad(i,0) * grad(i,0) * 1/6;
bmat (i,8) = -grad(i,3) * grad(i,3) * grad(i,1) * grad(i,1) * grad(i,1) * grad(i,1) * 1/6;
bmat (i,9) = -grad(i,3) * grad(i,3) * grad(i,2) * grad(i,2) * grad(i,2) * grad(i,2) * 1/6;
bmat (i,10) = -grad(i,3) * grad(i,3) * grad(i,0) * grad(i,0) * grad(i,0) * grad(i,1) * 2/3;
bmat (i,11) = -grad(i,3) * grad(i,3) * grad(i,0) * grad(i,0) * grad(i,0) * grad(i,2) * 2/3;
bmat (i,12) = -grad(i,3) * grad(i,3) * grad(i,0) * grad(i,1) * grad(i,1) * grad(i,1) * 2/3;
bmat (i,13) = -grad(i,3) * grad(i,3) * grad(i,0) * grad(i,2) * grad(i,2) * grad(i,2) * 2/3;
bmat (i,14) = -grad(i,3) * grad(i,3) * grad(i,1) * grad(i,1) * grad(i,1) * grad(i,2) * 2/3;
bmat (i,15) = -grad(i,3) * grad(i,3) * grad(i,1) * grad(i,2) * grad(i,2) * grad(i,2) * 2/3;
bmat (i,16) = -grad(i,3) * grad(i,3) * grad(i,0) * grad(i,0) * grad(i,1) * grad(i,1);
bmat (i,17) = -grad(i,3) * grad(i,3) * grad(i,0) * grad(i,0) * grad(i,2) * grad(i,2);
bmat (i,18) = -grad(i,3) * grad(i,3) * grad(i,1) * grad(i,1) * grad(i,2) * grad(i,2);
bmat (i,19) = -grad(i,3) * grad(i,3) * grad(i,0) * grad(i,0) * grad(i,1) * grad(i,2) * 2;
bmat (i,20) = -grad(i,3) * grad(i,3) * grad(i,0) * grad(i,1) * grad(i,1) * grad(i,2) * 2;
bmat (i,21) = -grad(i,3) * grad(i,3) * grad(i,0) * grad(i,1) * grad(i,2) * grad(i,2) * 2;
}
}
return bmat;
}




template <class MatrixType, class VectorTypeOut, class VectorTypeIn>
inline void dwi2tensor (VectorTypeOut& dt, const MatrixType& binv, VectorTypeIn& dwi)
{
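As a reading aid for the extra columns generated when dki is true, and for the adc_sq division in dwi2tensor.cpp above: the model being linearised is the standard diffusion kurtosis expression, which under the column ordering shown here reads

\ln S(b, \mathbf{n}) = \ln S_0 \;-\; b \sum_{i,j} n_i n_j D_{ij} \;+\; \frac{b^2}{6}\, \bar{D}^2 \sum_{i,j,k,l} n_i n_j n_k n_l W_{ijkl}, \qquad \bar{D} = \tfrac{1}{3}\,(D_{xx} + D_{yy} + D_{zz}),

with n the gradient direction (grad columns 0-2) and b the b-value (grad column 3). Columns 7 to 21 of the matrix carry the 15 independent components of \bar{D}^2 W_{ijkl}, with the permutation multiplicities 1, 4, 6 and 12 divided by 6 giving the 1/6, 2/3, 1 and 2 factors above; the -dkt output then recovers W_{ijkl} by dividing the fitted values by the squared mean diffusivity (the adc_sq term).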

3 comments on commit 7faeb67

@Lestropie
Member


Since this is in the process of modification: thoughts on #17? Independently of any GUI-related possibilities, given that the unweighted signal is part of the fit, it makes sense for it to be included in the output data rather than kept separate.

@jdtournier
Member


Well it's certainly feasible, as you say it's part of the fit. But I'm not convinced it follows that it should therefore be part of the same output image. It is different in kind to the DT coefficients (for one, you need to take its exponential before storing), and is very unlikely to be used as part of the same processing chain. I'd expect people might want to use the b=0 output as input for one thing, and the DT for another. Having them be part of the same image forces an additional file split for these cases. So I personally think it would add more confusion than anything...

The other point (although a bit speculative at this stage) is that we were talking with Ben about whether the WLLS fit should be used in the tracking also. We thought the best thing to do would actually be to allow using the DT images directly, and implement proper geodesic interpolation, which would actually be pretty simple (and faster than the current approach). That would then allow any DT image to be used, regardless of how it was produced. It would be convenient to keep the exact same command-line usage as currently, which means we'd need to be able to differentiate between DT and DWI images. Currently this wouldn't be a problem since a 6 volume image can only be a DT image (you'd need at least 7 DWIs to estimate the tensor). Obviously this assumption would no longer be guaranteed if we add an extra volume to the DT image.

On the other hand, I'm not sure the argument above is necessarily going to hold forever: we might eventually remove the ability to track on the DWI directly from tckgen, since it currently only implements the most brain-dead approach - and it's not even the same algorithm as what's used by default in dwi2tensor...

But regardless, I think my main argument against this is that in practice, people will want to use them separately.
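
For readers unfamiliar with the "geodesic interpolation" mentioned above: one simple realisation is log-Euclidean interpolation, i.e. interpolating the matrix logarithms of the tensors and exponentiating the result (the affine-invariant metric is another option). The sketch below is purely illustrative, is not part of this commit or of MRtrix3, assumes Eigen, and uses made-up function names.

#include <Eigen/Dense>

// Matrix logarithm of a symmetric positive-definite 3x3 tensor via eigendecomposition.
Eigen::Matrix3d sym_log (const Eigen::Matrix3d& D) {
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> eig (D);
  return eig.eigenvectors() * eig.eigenvalues().array().log().matrix().asDiagonal() * eig.eigenvectors().transpose();
}

// Matrix exponential of a symmetric 3x3 matrix via eigendecomposition.
Eigen::Matrix3d sym_exp (const Eigen::Matrix3d& L) {
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> eig (L);
  return eig.eigenvectors() * eig.eigenvalues().array().exp().matrix().asDiagonal() * eig.eigenvectors().transpose();
}

// Log-Euclidean interpolation between tensors D0 and D1 at position t in [0,1]:
// exp((1-t) log(D0) + t log(D1)). Trilinear interpolation in a DT image would use the
// same idea with an eight-term weighted sum of the log-tensors of the neighbouring voxels.
Eigen::Matrix3d interp_tensor (const Eigen::Matrix3d& D0, const Eigen::Matrix3d& D1, double t) {
  return sym_exp ((1.0 - t) * sym_log (D0) + t * sym_log (D1));
}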

@Lestropie
Member


Curious to know others' thoughts, or how it's handled in other packages. If other software requires both, outputting the fit with both makes more sense than outputting separate images and having to concatenate; whereas wanting just S0 from the tensor fit is a volume split not entirely dissimilar to pulling b=0 images out of a DWI series, which many users will be accustomed to. But if other applications expect 6 volumes, it's best left as-is.

Having the S0 encompassed in the image would also be of assistance if a tensor overlay were implemented in mrview, allowing more options for glyph scaling. But again, speculative.

... whether the WLLS fit should be used in the tracking also. We thought the best thing to do would actually be to allow using the DT images directly, and implement proper geodesic interpolation...

Agreed.

It would be convenient to keep the exact same command-line usage as currently, which means we'd need to be able to differentiate between DT and DWI images.

Well that assumes you want to be able to support tensor tracking on both pre-calculated tensor estimates and on the raw DWIs, which to me would seem unusual. And even then, you could just check for a valid DW gradient scheme. My earlier thinking was the other way around: If an application is loading 'orientation data' and gets an image with 6 volumes, it doesn't know whether it's a tensor volume or lmax=2.

On the other hand, I'm not sure the argument above is necessarily going to hold forever...

... Oh. You said that already. :-/
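
For reference on the 6-volume ambiguity mentioned above: a real, even-order spherical harmonic series truncated at l_max = 2 has (l_max + 1)(l_max + 2)/2 = (2+1)(2+2)/2 = 6 coefficients, the same as the number of unique diffusion tensor elements.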
