DM-13671 add option to skip rank update #80

Merged: 5 commits, May 20, 2018
14 changes: 8 additions & 6 deletions include/lsst/jointcal/Eigenstuff.h
@@ -2,13 +2,14 @@
#ifndef LSST_JOINTCAL_EIGENSTUFF_H
#define LSST_JOINTCAL_EIGENSTUFF_H

#include "Eigen/CholmodSupport" // to switch to cholmod
#include "lsst/pex/exceptions.h"

#include "Eigen/CholmodSupport" // to switch to cholmod
#include "Eigen/Core"

typedef Eigen::Matrix<double, Eigen::Dynamic, 2> MatrixX2d;

typedef Eigen::SparseMatrix<double> SpMat;
typedef Eigen::SparseMatrix<double> SparseMatrixD;

/* Cholesky factorization class using cholmod, with the small-rank update capability.
*
@@ -38,7 +39,7 @@ class CholmodSimplicialLDLT2
}

// this routine is the one we added
int update(SpMat const &H, bool UpOrDown) {
void update(SparseMatrixD const &H, bool UpOrDown) {
// check size
Index const size = Base::m_cholmodFactor->n;
EIGEN_UNUSED_VARIABLE(size);
@@ -51,10 +52,11 @@ class CholmodSimplicialLDLT2
cholmod_submatrix(&C_cs, (int *)Base::m_cholmodFactor->Perm, Base::m_cholmodFactor->n,
nullptr, -1, true, true, &this->cholmod());
assert(C_cs_perm);
int ret = cholmod_updown(UpOrDown, C_cs_perm, Base::m_cholmodFactor, &this->cholmod());
int isOk = cholmod_updown(UpOrDown, C_cs_perm, Base::m_cholmodFactor, &this->cholmod());
cholmod_free_sparse(&C_cs_perm, &this->cholmod());
assert(ret != 0);
return ret;
if (!isOk) {
throw(LSST_EXCEPT(lsst::pex::exceptions::RuntimeError, "cholmod_update failed!"));
}
}

protected:
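
The update() routine above wraps cholmod_updown, which applies a low-rank correction to an existing LDLT factorization instead of refactorizing from scratch. The identity it exploits is simple: the fitters build the Hessian as hessian = jacobian * jacobian.transpose(), so dropping a set of measurement terms (columns of the Jacobian) is the same as subtracting the outer product of just those columns. A minimal NumPy sketch of that identity, with toy dense matrices and sizes (not jointcal code):

    import numpy as np

    rng = np.random.default_rng(42)
    nPar, nMeas, nOut = 5, 40, 3            # parameters, measurement terms, outliers (toy sizes)

    J = rng.normal(size=(nPar, nMeas))      # Jacobian, one column per measurement term
    H = J @ J.T                             # full Hessian, as the fitters build it

    C = J[:, :nOut]                         # pretend the first nOut columns are outliers
    H_rebuilt = J[:, nOut:] @ J[:, nOut:].T # Hessian recomputed without the outliers
    H_downdated = H - C @ C.T               # rank-nOut downdate of the full Hessian

    assert np.allclose(H_rebuilt, H_downdated)
    # cholmod_updown(false, ...) applies the same subtraction directly to the
    # factorization, which is much cheaper than rebuilding and refactorizing.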
6 changes: 5 additions & 1 deletion include/lsst/jointcal/FitterBase.h
@@ -50,6 +50,9 @@ class FitterBase {
* @param[in] whatToFit See child method assignIndices for valid string values.
* @param[in] nSigmaCut How many sigma to reject outliers at. Outlier
* rejection ignored for nSigmaCut=0.
* @param[in] doRankUpdate Use CholmodSimplicialLDLT2.update() to do a fast rank update after outlier
* removal; otherwise do a slower full recomputation of the matrix.
* Only matters if nSigmaCut != 0.
*
* @return Return code describing success/failure of fit.
*
@@ -59,7 +62,8 @@
* the second run with the same "whatToFit" will produce no change in
* the fitted parameters, if the calculations and indices are defined correctly.
*/
MinimizeResult minimize(std::string const &whatToFit, double nSigmaCut = 0);
MinimizeResult minimize(std::string const &whatToFit, double const nSigmaCut = 0,
bool const doRankUpdate = true);

/**
* Returns the chi2 for the current state.
2 changes: 1 addition & 1 deletion python/lsst/jointcal/fitter.cc
@@ -40,7 +40,7 @@ namespace {
void declareFitterBase(py::module &mod) {
py::class_<FitterBase, std::shared_ptr<FitterBase>> cls(mod, "FitterBase");

cls.def("minimize", &FitterBase::minimize, "whatToFit"_a, "nSigRejCut"_a = 0);
cls.def("minimize", &FitterBase::minimize, "whatToFit"_a, "nSigRejCut"_a = 0, "doRankUpdate"_a = true);
cls.def("computeChi2", &FitterBase::computeChi2);
cls.def("saveChi2Contributions", &FitterBase::saveChi2Contributions);
}
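
With the binding above, the new argument is exposed to Python as a keyword. A hedged sketch of the possible call combinations, where fit stands in for an already-constructed fitter instance (e.g. an AstrometryFit built for the current associations):

    # `fit` is assumed to be an already-constructed FitterBase subclass instance.
    fit.minimize("Distortions Positions")                         # nSigRejCut=0: no outlier rejection
    fit.minimize("Distortions Positions", 5)                      # reject at 5 sigma, fast rank update (default)
    fit.minimize("Distortions Positions", 5, doRankUpdate=False)  # reject at 5 sigma, full recomputation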
57 changes: 49 additions & 8 deletions python/lsst/jointcal/jointcal.py
@@ -191,6 +191,33 @@ class JointcalConfig(pexConfig.Config):
dtype=int,
default=7,
)
photometryDoRankUpdate = pexConfig.Field(
doc="Do the rank update step during minimization. "
"Skipping this can help deal with models that are too non-linear.",
dtype=bool,
default=True,
)
astrometryDoRankUpdate = pexConfig.Field(
doc="Do the rank update step during minimization (should not change the astrometry fit). "
"Skipping this can help deal with models that are too non-linear.",
dtype=bool,
default=True,
)
outlierRejectSigma = pexConfig.Field(
doc="How many sigma to reject outliers at during minimization.",
dtype=float,
default=5.0,
)
maxPhotometrySteps = pexConfig.Field(
doc="Maximum number of minimize iterations to take when fitting photometry.",
dtype=int,
default=20,
)
maxAstrometrySteps = pexConfig.Field(
doc="Maximum number of minimize iterations to take when fitting photometry.",
dtype=int,
default=20,
)
astrometryRefObjLoader = pexConfig.ConfigurableField(
target=LoadIndexedReferenceObjectsTask,
doc="Reference object loader for astrometric fit",
@@ -596,7 +623,13 @@ def _fit_photometry(self, associations, dataName=None):
model.freezeErrorTransform()
self.log.debug("Photometry error scales are frozen.")

chi2 = self._iterate_fit(associations, fit, model, 20, "photometry", "Model Fluxes")
chi2 = self._iterate_fit(associations,
fit,
model,
self.config.maxPhotometrySteps,
"photometry",
"Model Fluxes",
doRankUpdate=self.config.photometryDoRankUpdate)

add_measurement(self.job, 'jointcal.photometry_final_chi2', chi2.chi2)
add_measurement(self.job, 'jointcal.photometry_final_ndof', chi2.ndof)
@@ -676,7 +709,13 @@ def _fit_astrometry(self, associations, dataName=None):
raise FloatingPointError('Pre-iteration chi2 is invalid: %s'%chi2)
self.log.info("Fit prepared with %s", str(chi2))

chi2 = self._iterate_fit(associations, fit, model, 20, "astrometry", "Distortions Positions")
chi2 = self._iterate_fit(associations,
fit,
model,
self.config.maxAstrometrySteps,
"astrometry",
"Distortions Positions",
doRankUpdate=self.config.astrometryDoRankUpdate)

add_measurement(self.job, 'jointcal.astrometry_final_chi2', chi2.chi2)
add_measurement(self.job, 'jointcal.astrometry_final_ndof', chi2.ndof)
@@ -696,21 +735,23 @@ def _check_stars(self, associations):
self.log.warn("ccdImage %s has only %s RefStars (desired %s)",
ccdImage.getName(), nRefStars, self.config.minRefStarsPerCcd)

def _iterate_fit(self, associations, fit, model, max_steps, name, whatToFit):
def _iterate_fit(self, associations, fit, model, max_steps, name, whatToFit, doRankUpdate=True):
"""Run fit.minimize up to max_steps times, returning the final chi2."""

for i in range(max_steps):
r = fit.minimize(whatToFit, 5) # outlier removal at 5 sigma.
# outlier removal at self.config.outlierRejectSigma sigma.
r = fit.minimize(whatToFit, self.config.outlierRejectSigma, doRankUpdate=doRankUpdate)
chi2 = fit.computeChi2()
self._check_stars(associations)
if not np.isfinite(chi2.chi2):
raise FloatingPointError('Fit iteration chi2 is invalid: %s'%chi2)
self.log.info(str(chi2))
if r == MinimizeResult.Converged:
self.log.debug("fit has converged - no more outliers - redo minimization "
"one more time in case we have lost accuracy in rank update.")
# Redo minimization one more time in case we have lost accuracy in rank update
r = fit.minimize(whatToFit, 5) # outliers removal at 5 sigma.
if doRankUpdate:
self.log.debug("fit has converged - no more outliers - redo minimization "
"one more time in case we have lost accuracy in rank update.")
# Redo minimization one more time in case we have lost accuracy in rank update
r = fit.minimize(whatToFit, 5) # outliers removal at 5 sigma.
chi2 = fit.computeChi2()
self.log.info("Fit completed with: %s", str(chi2))
break
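
The new config fields are ordinary pex_config options, so they can be overridden on a JointcalConfig instance (or in a config override file) before the task runs. A minimal sketch; the override values below are illustrative, not recommendations:

    import lsst.jointcal.jointcal

    config = lsst.jointcal.jointcal.JointcalConfig()
    config.astrometryDoRankUpdate = False  # full matrix recomputation after outlier removal
    config.photometryDoRankUpdate = True   # keep the fast rank update for photometry (default)
    config.outlierRejectSigma = 4.0        # clip outliers more aggressively than the 5.0 default
    config.maxAstrometrySteps = 30         # allow more minimize iterations for astrometry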
2 changes: 1 addition & 1 deletion python/lsst/jointcal/utils.py
@@ -522,7 +522,7 @@ def wcs_convert(xv, yv, wcs):
yout = np.zeros((xv.shape[0], yv.shape[0]))
for i, x in enumerate(xv):
for j, y in enumerate(yv):
sky = wcs.pixelToSky(x, y).toFk5()
sky = wcs.pixelToSky(x, y)
xout[i, j] = sky.getRa()
yout[i, j] = sky.getDec()
return xout, yout
8 changes: 4 additions & 4 deletions src/AstrometryFit.cc
@@ -492,15 +492,15 @@ void AstrometryFit::offsetParams(Eigen::VectorXd const &delta) {

// should not be too large !
#ifdef STORAGE
static void write_sparse_matrix_in_fits(SpMat const &mat, std::string const &fitsName) {
static void write_sparse_matrix_in_fits(SparseMatrixD const &mat, std::string const &fitsName) {
if (mat.rows() * mat.cols() > 2e8) {
LOGLS_WARN(_log,
"write_sparse_matrix_in_fits: yout matrix is too large. " << fitsName << " not generated");
return;
}
Mat m(mat.rows(), mat.cols());
for (int k = 0; k < mat.outerSize(); ++k)
for (SpMat::InnerIterator it(mat, k); it; ++it) {
for (SparseMatrixD::InnerIterator it(mat, k); it; ++it) {
m(it.row(), it.col()) = it.value();
}
m.writeFits(fitsName);
@@ -532,9 +532,9 @@ void AstrometryFit::checkStuff() {
Eigen::VectorXd grad(_nParTot);
grad.setZero();
leastSquareDerivatives(tripletList, grad);
SpMat jacobian(_nParTot, tripletList.getNextFreeIndex());
SparseMatrixD jacobian(_nParTot, tripletList.getNextFreeIndex());
jacobian.setFromTriplets(tripletList.begin(), tripletList.end());
SpMat hessian = jacobian * jacobian.transpose();
SparseMatrixD hessian = jacobian * jacobian.transpose();
#ifdef STORAGE
char name[24];
sprintf(name, "H%d.fits", k);
65 changes: 45 additions & 20 deletions src/FitterBase.cc
@@ -116,7 +116,16 @@ unsigned FitterBase::findOutliers(double nSigmaCut, MeasuredStarList &msOutliers
return nOutliers;
}

MinimizeResult FitterBase::minimize(std::string const &whatToFit, double nSigmaCut) {
namespace {
/// Return a Hessian matrix filled from tripletList of size nParTot x nParTot.
SparseMatrixD createHessian(int nParTot, TripletList const &tripletList) {
SparseMatrixD jacobian(nParTot, tripletList.getNextFreeIndex());
jacobian.setFromTriplets(tripletList.begin(), tripletList.end());
return jacobian * jacobian.transpose();
}
} // namespace

MinimizeResult FitterBase::minimize(std::string const &whatToFit, double nSigmaCut, bool doRankUpdate) {
assignIndices(whatToFit);

MinimizeResult returnCode = MinimizeResult::Converged;
@@ -133,20 +142,14 @@ MinimizeResult FitterBase::minimize(std::string const &whatToFit, double nSigmaC

LOGLS_DEBUG(_log, "End of triplet filling, ntrip = " << tripletList.size());

SpMat hessian;
{
SpMat jacobian(_nParTot, tripletList.getNextFreeIndex());
jacobian.setFromTriplets(tripletList.begin(), tripletList.end());
// release memory shrink_to_fit is C++11
tripletList.clear(); // tripletList.shrink_to_fit();
hessian = jacobian * jacobian.transpose();
} // release the Jacobian
SparseMatrixD hessian = createHessian(_nParTot, tripletList);
tripletList.clear(); // we don't need it any more after we have the hessian.

LOGLS_DEBUG(_log, "Starting factorization, hessian: dim="
<< hessian.rows() << " non-zeros=" << hessian.nonZeros()
<< " filling-frac = " << hessian.nonZeros() / std::pow(hessian.rows(), 2));

CholmodSimplicialLDLT2<SpMat> chol(hessian);
CholmodSimplicialLDLT2<SparseMatrixD> chol(hessian);
if (chol.info() != Eigen::Success) {
LOGLS_ERROR(_log, "minimize: factorization failed ");
return MinimizeResult::Failed;
@@ -176,21 +179,43 @@ MinimizeResult FitterBase::minimize(std::string const &whatToFit, double nSigmaC
totalMeasOutliers += msOutliers.size();
totalRefOutliers += fsOutliers.size();
if (nOutliers == 0) break;
TripletList tripletList(nOutliers);
TripletList outlierTriplets(nOutliers);
grad.setZero(); // recycle the gradient
// compute the contributions of outliers to derivatives
outliersContributions(msOutliers, fsOutliers, tripletList, grad);
outliersContributions(msOutliers, fsOutliers, outlierTriplets, grad);
// Remove significant outliers
removeMeasOutliers(msOutliers);
removeRefOutliers(fsOutliers);
// convert triplet list to eigen internal format
SpMat H(_nParTot, tripletList.getNextFreeIndex());
H.setFromTriplets(tripletList.begin(), tripletList.end());
int update_status = chol.update(H, false /* means downdate */);
LOGLS_DEBUG(_log, "cholmod update_status " << update_status);
// The contribution of outliers to the gradient is the opposite
// of the contribution of all other terms, because they add up to 0
grad *= -1;
if (doRankUpdate) {
// convert triplet list to eigen internal format
SparseMatrixD H(_nParTot, outlierTriplets.getNextFreeIndex());
H.setFromTriplets(outlierTriplets.begin(), outlierTriplets.end());
chol.update(H, false /* means downdate */);
// The contribution of outliers to the gradient is the opposite
// of the contribution of all other terms, because they add up to 0
grad *= -1;
} else {
// don't reuse tripletList because we want a new nextFreeIndex.
TripletList nextTripletList(_lastNTrip);
grad.setZero();
// Rebuild the matrix and gradient
leastSquareDerivatives(nextTripletList, grad);
_lastNTrip = nextTripletList.size();
LOGLS_DEBUG(_log, "Triplets recomputed, ntrip = " << nextTripletList.size());

hessian = createHessian(_nParTot, nextTripletList);
nextTripletList.clear(); // we don't need it any more after we have the hessian.

LOGLS_DEBUG(_log,
"Restarting factorization, hessian: dim="
<< hessian.rows() << " non-zeros=" << hessian.nonZeros()
<< " filling-frac = " << hessian.nonZeros() / std::pow(hessian.rows(), 2));
chol.compute(hessian);
if (chol.info() != Eigen::Success) {
LOGLS_ERROR(_log, "minimize: factorization failed ");
return MinimizeResult::Failed;
}
}
}

// only print the outlier summary if outlier rejection was turned on.
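
The `grad *= -1` in the rank-update branch relies on the fact that, right after a minimize step, the total gradient over all measurement terms is (numerically) zero, so the inliers' contribution is exactly the negative of the outliers' contribution and only the outlier terms need to be accumulated. A small NumPy check of that identity on a toy linear least-squares problem (not jointcal code):

    import numpy as np

    rng = np.random.default_rng(0)
    nPar, nMeas, nOut = 4, 30, 5
    A = rng.normal(size=(nMeas, nPar))         # design matrix, one row per measurement term
    b = rng.normal(size=nMeas)

    x = np.linalg.lstsq(A, b, rcond=None)[0]   # solves the normal equations: A.T @ (A @ x - b) = 0
    residuals = A @ x - b

    grad_outliers = A[:nOut].T @ residuals[:nOut]   # pretend the first nOut terms are outliers
    grad_inliers = A[nOut:].T @ residuals[nOut:]

    assert np.allclose(A.T @ residuals, 0, atol=1e-9)   # full gradient vanishes at the solution
    assert np.allclose(grad_inliers, -grad_outliers)
    # This is why outliersContributions() only accumulates the outlier terms
    # and minimize() then flips the sign instead of re-summing the inliers.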
37 changes: 35 additions & 2 deletions tests/test_jointcal_decam.py
@@ -47,7 +47,8 @@ def setUp(self):
input_dir=input_dir,
all_visits=all_visits,
other_args=other_args,
do_plot=do_plot)
do_plot=do_plot,
log_level="DEBUG")

def test_jointcalTask_2_visits(self):
self.config = lsst.jointcal.jointcal.JointcalConfig()
@@ -76,7 +77,9 @@ def test_jointcalTask_2_visits(self):

self._testJointcalTask(2, relative_error, self.dist_rms_absolute, pa1, metrics=metrics)

def test_jointcalTask_2_visits_constrainedAstrometry_no_photometry(self):
def setup_jointcalTask_2_visits_constrainedAstrometry_no_photometry(self):
"""Help keep the two constrainedAstrometry tests consistent and make
the difference between them more obvious."""
self.config = lsst.jointcal.jointcal.JointcalConfig()
self.config.photometryRefObjLoader.retarget(LoadAstrometryNetObjectsTask)
self.config.astrometryRefObjLoader.retarget(LoadAstrometryNetObjectsTask)
@@ -96,6 +99,29 @@
'astrometry_final_chi2': 2072.89,
'astrometry_final_ndof': 3970,
}
return relative_error, pa1, metrics

def test_jointcalTask_2_visits_constrainedAstrometry_no_photometry(self):
relative_error, pa1, metrics = self.setup_jointcalTask_2_visits_constrainedAstrometry_no_photometry()

self._testJointcalTask(2, relative_error, self.dist_rms_absolute, pa1, metrics=metrics)

def test_jointcalTask_2_visits_constrainedAstrometry_no_rank_update(self):
"""Demonstrate that skipping the rank update doesn't significantly affect astrometry.
"""
relative_error, pa1, metrics = self.setup_jointcalTask_2_visits_constrainedAstrometry_no_photometry()
self.config.astrometryDoRankUpdate = False

self._testJointcalTask(2, relative_error, self.dist_rms_absolute, pa1, metrics=metrics)

def test_jointcalTask_2_visits_constrainedAstrometry_4sigma_outliers(self):
"""4 sigma outlier rejection means a fewer available sources, resulting
in a smaller ndof and chi2.
"""
relative_error, pa1, metrics = self.setup_jointcalTask_2_visits_constrainedAstrometry_no_photometry()
self.config.outlierRejectSigma = 4
metrics['astrometry_final_chi2'] = 1506.7
metrics['astrometry_final_ndof'] = 3682

self._testJointcalTask(2, relative_error, self.dist_rms_absolute, pa1, metrics=metrics)

@@ -148,6 +174,13 @@ def test_jointcalTask_2_visits_constrainedPhotometry_flagged_selector(self):

self._testJointcalTask(2, None, None, pa1, metrics=metrics)

@unittest.skip("DM-14439 : This test produces different chi2/ndof on Linux and macOS.")
def test_jointcalTask_2_visits_constrainedPhotometry_no_rank_update(self):
pa1, metrics = self.setup_jointcalTask_2_visits_constrainedPhotometry_no_astrometry()
self.config.photometryDoRankUpdate = False

self._testJointcalTask(2, None, None, pa1, metrics=metrics)


class MemoryTester(lsst.utils.tests.MemoryTestCase):
pass