Stop using ndarray::Array::asEigen
thus stop using ndarray::EigenView in C++ code
(though it is still indirectly used in pybind11 wrappers)
r-owen committed Jun 13, 2018
1 parent 9a367b1 commit 9777421
Showing 12 changed files with 133 additions and 119 deletions.
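
The pattern is the same throughout the diff: the member function ndarray::Array::asEigen() (which returned an ndarray::EigenView) is replaced by the free functions ndarray::asEigenMatrix and ndarray::asEigenArray, which return Eigen::Map views with linear-algebra and element-wise semantics respectively. A minimal sketch of the migration follows; the function and argument names are illustrative (not part of this commit), and the free functions are assumed to come from ndarray's "ndarray/eigen.h" header:

    // Sketch only: illustrative names; header location of
    // asEigenMatrix/asEigenArray is assumed to be "ndarray/eigen.h".
    #include "ndarray.h"
    #include "ndarray/eigen.h"

    double example(ndarray::Array<double, 1, 1> const & data,
                   ndarray::Array<double, 2, -1> const & modelMatrix,
                   ndarray::Array<double, 1, 1> const & model) {
        // Before: member function returning an ndarray::EigenView.
        //   double n = data.asEigen().squaredNorm();           // matrix semantics
        //   double s = data.asEigen<Eigen::ArrayXpr>().sum();  // array semantics

        // After: free functions returning Eigen::Map views.
        double n = ndarray::asEigenMatrix(data).squaredNorm();  // linear algebra
        double s = ndarray::asEigenArray(data).sum();           // element-wise

        // The views are writable when the element type is non-const
        // (ndarray::Array has shallow-const semantics), so they can also
        // appear on the left-hand side of an assignment:
        ndarray::asEigenMatrix(model) =
                ndarray::asEigenMatrix(modelMatrix) * ndarray::asEigenMatrix(data);
        return n + s;
    }

In the hunks below, call sites that used the old Eigen::ArrayXpr template argument become asEigenArray; everything else becomes asEigenMatrix.
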
4 changes: 2 additions & 2 deletions python/lsst/meas/modelfit/mixture.cc
@@ -96,12 +96,12 @@ static PyMixture declareMixture(py::module &mod) {
cls.def("evaluate",
[](Mixture const &self, MixtureComponent const &component,
ndarray::Array<Scalar, 1, 0> const &array) -> Scalar {
-                return self.evaluate(component, array.asEigen());
+                return self.evaluate(component, ndarray::asEigenMatrix(array));
},
"component"_a, "x"_a);
cls.def("evaluate",
[](Mixture const &self, ndarray::Array<Scalar, 1, 0> const &array) -> Scalar {
-                return self.evaluate(array.asEigen());
+                return self.evaluate(ndarray::asEigenMatrix(array));
},
"x"_a);
cls.def("evaluate", (void (Mixture::*)(ndarray::Array<Scalar const, 2, 1> const &,
35 changes: 19 additions & 16 deletions src/CModel.cc
@@ -52,8 +52,8 @@ Pixel computeFluxInFootprint(
// We're only using the flux to provide a scale for the problem that eases some numerical problems,
// so for objects with SNR < 1, it's probably better to use the RMS than the flux, since the latter
// can be negative.
-    Pixel a = flat.asEigen<Eigen::ArrayXpr>().sum();
-    Pixel b = std::sqrt(flat.asEigen<Eigen::ArrayXpr>().square().sum());
+    Pixel a = ndarray::asEigenArray(flat).sum();
+    Pixel b = std::sqrt(ndarray::asEigenArray(flat).square().sum());
return std::max(a, b);
}

@@ -630,9 +630,9 @@ struct WeightSums {
ndarray::Array<Pixel const,1,1> const & data,
ndarray::Array<Pixel const,1,1> const & variance
) {
-        auto modelEigen = model.asEigen<Eigen::ArrayXpr>();
-        auto dataEigen = data.asEigen<Eigen::ArrayXpr>();
-        auto varianceEigen = variance.asEigen<Eigen::ArrayXpr>();
+        auto modelEigen = ndarray::asEigenArray(model);
+        auto dataEigen = ndarray::asEigenArray(data);
+        auto varianceEigen = ndarray::asEigenArray(variance);
double w = modelEigen.sum();
double wd = (modelEigen*dataEigen).sum();
double ww = modelEigen.square().sum();
@@ -806,11 +806,11 @@ class CModelStageImpl {
result.likelihood->getUnweightedData()
);
data.amplitudes.deep() = lstsq.getSolution();
-        result.objective
-            = 0.5*(
-                result.likelihood->getUnweightedData().asEigen().cast<Scalar>()
-                - modelMatrix.asEigen().cast<Scalar>() * lstsq.getSolution().asEigen()
-            ).squaredNorm();
+        result.objective =
+                0.5 * (ndarray::asEigenMatrix(result.likelihood->getUnweightedData()).cast<Scalar>() -
+                       ndarray::asEigenMatrix(modelMatrix).cast<Scalar>() *
+                               ndarray::asEigenMatrix(data.amplitudes))
+                              .squaredNorm();

WeightSums sums(modelMatrix, result.likelihood->getUnweightedData(), result.likelihood->getVariance());

@@ -878,12 +878,15 @@ class CModelAlgorithm::Impl {
model, fixed, expData.fitSys, expData.position,
exposure, footprint, expData.psf, UnitTransformedLikelihoodControl(false)
);
-        ndarray::Array<Pixel,2,-1> modelMatrix = makeModelMatrix(likelihood, nonlinear);
-        Vector gradient = -(modelMatrix.asEigen().adjoint() *
-                            likelihood.getUnweightedData().asEigen()).cast<Scalar>();
+        auto unweightedData = likelihood.getUnweightedData();
+        ndarray::Array<Pixel, 2, -1> modelMatrix = makeModelMatrix(likelihood, nonlinear);
+        Vector gradient = -(ndarray::asEigenMatrix(modelMatrix).adjoint() *
+                            ndarray::asEigenMatrix(unweightedData))
+                                   .cast<Scalar>();
Matrix hessian = Matrix::Zero(likelihood.getAmplitudeDim(), likelihood.getAmplitudeDim());
-        hessian.selfadjointView<Eigen::Lower>().rankUpdate(modelMatrix.asEigen().adjoint().cast<Scalar>());
-        Scalar q0 = 0.5*likelihood.getUnweightedData().asEigen().squaredNorm();
+        hessian.selfadjointView<Eigen::Lower>().rankUpdate(
+                ndarray::asEigenMatrix(modelMatrix).adjoint().cast<Scalar>());
+        Scalar q0 = 0.5 * ndarray::asEigenMatrix(unweightedData).squaredNorm();

// Use truncated Gaussian to compute the maximum-likelihood amplitudes with the constraint
// that all amplitudes must be >= 0
@@ -904,7 +907,7 @@ class CModelAlgorithm::Impl {
// on the two components, which means the actual uncertainty is neither Gaussian nor symmetric,
// which is a lot harder to compute and a lot harder to use.
ndarray::Array<Pixel,1,1> model = ndarray::allocate(likelihood.getDataDim());
-        model.asEigen() = modelMatrix.asEigen() * amplitudes.cast<Pixel>();
+        ndarray::asEigenMatrix(model) = ndarray::asEigenMatrix(modelMatrix) * amplitudes.cast<Pixel>();
WeightSums sums(model, likelihood.getUnweightedData(), likelihood.getVariance());
result.fluxInner = sums.fluxInner;
result.fluxSigma = std::sqrt(sums.fluxVar)*result.flux/result.fluxInner;
14 changes: 8 additions & 6 deletions src/DoubleShapeletPsfApprox.cc
@@ -283,8 +283,8 @@ class ProfileObjective : public OptimizerObjective {
Scalar oAlpha = parameters[1] * _normalization;
Scalar iR2 = parameters[2] * parameters[2];
Scalar oR2 = parameters[3] * parameters[3];
-        auto argEigen = _arg.asEigen<Eigen::ArrayXpr>();
-        residuals.asEigen<Eigen::ArrayXpr>() = _data.asEigen<Eigen::ArrayXpr>()
+        auto argEigen = ndarray::asEigenArray(_arg);
+        ndarray::asEigenArray(residuals) = ndarray::asEigenArray(_data)
- (iAlpha/iR2)*(argEigen/iR2).exp()
- (oAlpha/oR2)*(argEigen/oR2).exp();
}
@@ -293,8 +293,8 @@ (
ndarray::Array<Scalar const,1,1> const & parameters,
ndarray::Array<Scalar,2,-2> const & derivatives
) const {
-        auto d = derivatives.asEigen<Eigen::ArrayXpr>();
-        auto argEigen = _arg.asEigen<Eigen::ArrayXpr>();
+        auto d = ndarray::asEigenArray(derivatives);
+        auto argEigen = ndarray::asEigenArray(_arg);
Scalar iR2 = parameters[2] * parameters[2];
Scalar oR2 = parameters[3] * parameters[3];
d.col(0) = - (_normalization/iR2)*(argEigen/iR2).exp();
@@ -422,8 +422,10 @@ void DoubleShapeletPsfApproxAlgorithm::fitShapelets(
fitMatrix[ndarray::view()(n1 - 1, n1 + n2 - 2)] = outerMatrix[ndarray::view()(1, n2)];
}
// Subtract the zeroth-order model from the data.
-    data.asEigen() -= innerMatrix.asEigen().col(0)*innerComponent.getCoefficients()[0];
-    data.asEigen() -= outerMatrix.asEigen().col(0)*outerComponent.getCoefficients()[0];
+    ndarray::asEigenMatrix(data) -=
+            ndarray::asEigenMatrix(innerMatrix).col(0) * innerComponent.getCoefficients()[0];
+    ndarray::asEigenMatrix(data) -=
+            ndarray::asEigenMatrix(outerMatrix).col(0) * outerComponent.getCoefficients()[0];
// Finally, we can do the fit, and stuff the fitted coefficients back into the result.
auto lstsq = afw::math::LeastSquares::fromDesignMatrix(fitMatrix, data);
if (n1 > 1) {
15 changes: 9 additions & 6 deletions src/GeneralPsfFitter.cc
@@ -292,10 +292,13 @@ class GeneralPsfFitterPrior : public Prior {
}
}
}
-        nonlinearHessian.asEigen().selfadjointView<Eigen::Lower>().rankUpdate(nonlinearGradient.asEigen());
-        nonlinearHessian.asEigen() = nonlinearHessian.asEigen().selfadjointView<Eigen::Lower>();
-        nonlinearGradient.asEigen() *= p;
-        nonlinearHessian.asEigen() *= p;
+        ndarray::asEigenMatrix(nonlinearHessian)
+                .selfadjointView<Eigen::Lower>()
+                .rankUpdate(ndarray::asEigenMatrix(nonlinearGradient));
+        ndarray::asEigenMatrix(nonlinearHessian) =
+                ndarray::asEigenMatrix(nonlinearHessian).selfadjointView<Eigen::Lower>();
+        ndarray::asEigenMatrix(nonlinearGradient) *= p;
+        ndarray::asEigenMatrix(nonlinearHessian) *= p;
amplitudeGradient.deep() = 0.0;
amplitudeHessian.deep() = 0.0;
crossHessian.deep() = 0.0;
@@ -377,7 +380,7 @@ GeneralPsfFitter::GeneralPsfFitter(GeneralPsfFitterControl const & ctrl) :
int dim = shapelet::computeSize(i->second.order);
PTR(shapelet::MultiShapeletBasis) basis = std::make_shared<shapelet::MultiShapeletBasis>(dim);
ndarray::Array<double,2,2> matrix = ndarray::allocate(dim, dim);
-        matrix.asEigen().setIdentity();
+        ndarray::asEigenMatrix(matrix).setIdentity();
basis->addComponent(1.0, i->second.order, matrix);
basisVector.push_back(basis);

@@ -631,7 +634,7 @@ class MultiShapeletPsfLikelihood::Impl {
_builders[i](modelMatrix[ndarray::view()(amplitudeOffset, amplitudeEnd)], _ellipses[i]);
amplitudeOffset = amplitudeEnd;
}
-        modelMatrix.asEigen() /= _sigma;
+        ndarray::asEigenMatrix(modelMatrix) /= _sigma;
}

private:
29 changes: 15 additions & 14 deletions src/Mixture.cc
@@ -180,7 +180,7 @@ void Mixture::evaluate(
ndarray::Array<Scalar const,2,1>::Iterator ix = x.begin(), xEnd = x.end();
ndarray::Array<Scalar,1,0>::Iterator ip = p.begin();
for (; ix != xEnd; ++ix, ++ip) {
-        *ip = evaluate(ix->asEigen());
+        *ip = evaluate(ndarray::asEigenMatrix(*ix));
}
}

@@ -208,7 +208,7 @@ void Mixture::evaluateComponents(
for (; ix != xEnd; ++ix, ++ip) {
ndarray::Array<Scalar,2,1>::Reference::Iterator jp = ip->begin();
for (const_iterator j = begin(); j != end(); ++j, ++jp) {
-            *jp = evaluate(*j, ix->asEigen());
+            *jp = evaluate(*j, ndarray::asEigenMatrix(*ix));
}
}
}
@@ -242,7 +242,7 @@ void Mixture::evaluateDerivatives(
hessian.deep() = 0.0;
Eigen::MatrixXd sigmaInv(_dim, _dim);
for (ComponentList::const_iterator i = _components.begin(); i != _components.end(); ++i) {
-        _workspace = x.asEigen() - i->_mu;
+        _workspace = ndarray::asEigenMatrix(x) - i->_mu;
i->_sigmaLLT.matrixL().solveInPlace(_workspace);
Scalar z = _workspace.squaredNorm();
i->_sigmaLLT.matrixL().adjoint().solveInPlace(_workspace);
@@ -251,13 +251,14 @@
i->_sigmaLLT.matrixL().adjoint().solveInPlace(sigmaInv);
Scalar f = _evaluate(z) / i->_sqrtDet;
if (_isGaussian) {
-            gradient.asEigen() += -i->weight * f * _workspace;
-            hessian.asEigen() += i->weight * f * (_workspace * _workspace.adjoint() - sigmaInv);
+            ndarray::asEigenMatrix(gradient) += -i->weight * f * _workspace;
+            ndarray::asEigenMatrix(hessian) += i->weight * f * (_workspace * _workspace.adjoint() - sigmaInv);
} else {
double v = (_dim + _df) / (_df + z);
double u = v*v*(1.0 + 2.0/(_dim + _df));
-            gradient.asEigen() += -i->weight * f * v * _workspace;
-            hessian.asEigen() += i->weight * f * (u * _workspace * _workspace.adjoint() - v * sigmaInv);
+            ndarray::asEigenMatrix(gradient) += -i->weight * f * v * _workspace;
+            ndarray::asEigenMatrix(hessian) +=
+                    i->weight * f * (u * _workspace * _workspace.adjoint() - v * sigmaInv);
}
}
}
@@ -282,9 +283,9 @@ void Mixture::draw(afw::math::Random & rng, ndarray::Array<Scalar,2,1> const & x
_workspace[j] = rng.gaussian();
}
if (_isGaussian) {
-            ix->asEigen() = component._mu + (component._sigmaLLT.matrixL() * _workspace);
+            ndarray::asEigenMatrix(*ix) = component._mu + (component._sigmaLLT.matrixL() * _workspace);
} else {
-            ix->asEigen() = component._mu
+            ndarray::asEigenMatrix(*ix) = component._mu
+ std::sqrt(_df/rng.chisq(_df)) * (component._sigmaLLT.matrixL() * _workspace);
}
}
@@ -313,7 +314,7 @@ void Mixture::updateEM(
for (int i = 0; i < nSamples; ++i) {
Scalar pSum = 0.0;
for (int k = 0; k < nComponents; ++k) {
-            double z = _computeZ(_components[k], x[i].asEigen());
+            double z = _computeZ(_components[k], ndarray::asEigenMatrix(x[i]));
pSum += p(i, k) = _components[k].weight*_evaluate(z)/_components[k]._sqrtDet;
if (!_isGaussian) {
gamma(i, k) = (_df + _dim) / (_df + z);
@@ -326,11 +327,11 @@
double weight = _components[k].weight = p.col(k).sum();
Vector & mu = _components[k]._mu;
Matrix sigma = Matrix::Zero(_dim, _dim);
-            mu = (p.col(k).adjoint() * x.asEigen()) / weight;
+            mu = (p.col(k).adjoint() * ndarray::asEigenMatrix(x)) / weight;
restriction.restrictMu(mu);
Vector dx = Vector::Zero(_dim);
for (int i = 0; i < nSamples; ++i) {
-                dx = x[i].asEigen() - mu;
+                dx = ndarray::asEigenMatrix(x[i]) - mu;
sigma.selfadjointView<Eigen::Lower>().rankUpdate(dx, p(i, k));
}
sigma /= weight;
@@ -343,12 +344,12 @@
Vector & mu = _components[k]._mu;
Matrix sigma = Matrix::Zero(_dim, _dim);
mu =
-                ((p.col(k).array() * gamma.col(k).array()).matrix().adjoint() * x.asEigen())
+                ((p.col(k).array() * gamma.col(k).array()).matrix().adjoint() * ndarray::asEigenMatrix(x))
/ p.col(k).dot(gamma.col(k));
restriction.restrictMu(mu);
Vector dx = Vector::Zero(_dim);
for (int i = 0; i < nSamples; ++i) {
-                dx = x[i].asEigen() - mu;
+                dx = ndarray::asEigenMatrix(x[i]) - mu;
sigma.selfadjointView<Eigen::Lower>().rankUpdate(dx, gamma(i, k) * p(i, k));
}
sigma /= weight;
10 changes: 5 additions & 5 deletions src/MixturePrior.cc
@@ -42,7 +42,7 @@ Scalar MixturePrior::marginalize(
ndarray::Array<Scalar const,1,1> const & parameters
) const {
return TruncatedGaussian::fromSeriesParameters(0.0, gradient, hessian).getLogIntegral()
-        - std::log(_mixture->evaluate(parameters.asEigen()));
+        - std::log(_mixture->evaluate(ndarray::asEigenMatrix(parameters)));
}

Scalar MixturePrior::maximize(
@@ -51,18 +51,18 @@ Scalar MixturePrior::maximize(
ndarray::Array<Scalar,1,1> const & amplitudes
) const {
TruncatedGaussian tg = TruncatedGaussian::fromSeriesParameters(0.0, gradient, hessian);
-    amplitudes.asEigen() = tg.maximize();
-    return tg.evaluateLog()(amplitudes.asEigen());
+    ndarray::asEigenMatrix(amplitudes) = tg.maximize();
+    return tg.evaluateLog()(ndarray::asEigenMatrix(amplitudes));
}

Scalar MixturePrior::evaluate(
ndarray::Array<Scalar const,1,1> const & parameters,
ndarray::Array<Scalar const,1,1> const & amplitudes
) const {
-    if ((amplitudes.asEigen<Eigen::ArrayXpr>() < 0.0).any()) {
+    if ((ndarray::asEigenArray(amplitudes) < 0.0).any()) {
return 0.0;
} else {
-        return _mixture->evaluate(parameters.asEigen());
+        return _mixture->evaluate(ndarray::asEigenMatrix(parameters));
}
}

2 changes: 1 addition & 1 deletion src/Model.cc
@@ -368,7 +368,7 @@ void Model::transformParameters(
i->transform(transform.geometric).inPlace();
}
readEllipses(ellipses.begin(), nonlinear.begin(), fixed.begin());
-    amplitudes.asEigen() *= transform.flux;
+    ndarray::asEigenMatrix(amplitudes) *= transform.flux;
}


8 changes: 4 additions & 4 deletions src/SemiEmpiricalPrior.cc
@@ -350,7 +350,7 @@ Scalar SemiEmpiricalPrior::evaluate(
ndarray::Array<Scalar const,1,1> const & nonlinear,
ndarray::Array<Scalar const,1,1> const & amplitudes
) const {
-    if ((amplitudes.asEigen<Eigen::ArrayXpr>() < 0.0).any()) {
+    if ((ndarray::asEigenArray(amplitudes) < 0.0).any()) {
return 0.0;
} else {
return _impl->eta.p(nonlinear[0], nonlinear[1]) * _impl->lnR.p(nonlinear[2]);
@@ -373,7 +373,7 @@ void SemiEmpiricalPrior::evaluateDerivatives(
amplitudeHessian.deep() = 0.0;
crossHessian.deep() = 0.0;

-    if ((amplitudes.asEigen<Eigen::ArrayXpr>() < 0.0).any()) {
+    if ((ndarray::asEigenArray(amplitudes) < 0.0).any()) {
return;
}

@@ -420,8 +420,8 @@ Scalar SemiEmpiricalPrior::maximize(
ndarray::Array<Scalar,1,1> const & amplitudes
) const {
TruncatedGaussian tg = TruncatedGaussian::fromSeriesParameters(0.0, gradient, hessian);
-    amplitudes.asEigen() = tg.maximize();
-    return tg.evaluateLog()(amplitudes.asEigen()) -
+    ndarray::asEigenMatrix(amplitudes) = tg.maximize();
+    return tg.evaluateLog()(ndarray::asEigenMatrix(amplitudes)) -
std::log(_impl->eta.p(nonlinear[0], nonlinear[1]) * _impl->lnR.p(nonlinear[2]));
}

8 changes: 4 additions & 4 deletions src/SoftenedLinearPrior.cc
@@ -137,7 +137,7 @@ Scalar SoftenedLinearPrior::evaluate(
ndarray::Array<Scalar const,1,1> const & nonlinear,
ndarray::Array<Scalar const,1,1> const & amplitudes
) const {
-    if ((amplitudes.asEigen<Eigen::ArrayXpr>() < 0.0).any()) {
+    if ((ndarray::asEigenArray(amplitudes) < 0.0).any()) {
return 0.0;
} else {
return _evaluate(nonlinear);
@@ -164,7 +164,7 @@ void SoftenedLinearPrior::evaluateDerivatives(
amplitudeHessian.deep() = 0.0;
crossHessian.deep() = 0.0;

-    if ((amplitudes.asEigen<Eigen::ArrayXpr>() < 0.0).any()) {
+    if ((ndarray::asEigenArray(amplitudes) < 0.0).any()) {
return;
}

@@ -233,8 +233,8 @@ Scalar SoftenedLinearPrior::maximize(
ndarray::Array<Scalar,1,1> const & amplitudes
) const {
TruncatedGaussian tg = TruncatedGaussian::fromSeriesParameters(0.0, gradient, hessian);
-    amplitudes.asEigen() = tg.maximize();
-    return tg.evaluateLog()(amplitudes.asEigen()) - std::log(_evaluate(nonlinear));
+    ndarray::asEigenMatrix(amplitudes) = tg.maximize();
+    return tg.evaluateLog()(ndarray::asEigenMatrix(amplitudes)) - std::log(_evaluate(nonlinear));
}

void SoftenedLinearPrior::drawAmplitudes(
