Merge pull request #235 from lsst/tickets/DM-16468
DM-16468: Use measured convergence to predict gain
isullivan committed Nov 23, 2018
2 parents be3b003 + 0de0f62 commit 7949643
Showing 2 changed files with 213 additions and 14 deletions.
96 changes: 82 additions & 14 deletions python/lsst/pipe/tasks/dcrAssembleCoadd.py
@@ -63,13 +63,19 @@ class DcrAssembleCoaddConfig(CompareWarpAssembleCoaddConfig):
     )
     baseGain = pexConfig.Field(
         dtype=float,
-        doc="Relative weight to give the new solution when updating the model."
-            "A value of 1.0 gives equal weight to both solutions.",
-        default=1.,
+        doc="Relative weight to give the new solution vs. the last solution when updating the model. "
+            "A value of 1.0 gives equal weight to both solutions. "
+            "Small values imply slower convergence of the solution, but can "
+            "help prevent overshooting and failures in the fit. "
+            "If ``baseGain`` is None, a conservative gain "
+            "will be calculated from the number of subfilters.",
+        default=None,
     )
     useProgressiveGain = pexConfig.Field(
         dtype=bool,
-        doc="Use a gain that slowly increases above ``baseGain`` to accelerate convergence?",
+        doc="Use a gain that slowly increases above ``baseGain`` to accelerate convergence? "
+            "When calculating the next gain, we use up to 5 previous gains and convergence values. "
+            "Can be set to False to force the model to change at the rate of ``baseGain``.",
         default=True,
     )
     doAirmassWeight = pexConfig.Field(
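For intuition, the gain configured here sets the relative weighting between the previous model and the newly fitted solution when the model is updated each iteration. A minimal standalone sketch of that weighted average (the helper name update_model is illustrative, not the task's API):

import numpy as np

def update_model(old_model, new_solution, gain):
    # Gain-weighted average: gain=1.0 weights both solutions equally;
    # smaller gains stay closer to the old model, trading convergence
    # speed for stability.
    return (gain*new_solution + old_model)/(1. + gain)

# With the new default baseGain=None, the task instead derives a
# conservative gain of 1/(dcrNumSubfilters - 1), e.g. 0.5 for 3 subfilters.
old = np.array([1., 2.])
new = np.array([3., 4.])
print(update_model(old, new, gain=1.0))  # [2. 3.]
print(update_model(old, new, gain=0.5))  # [1.66666667 2.66666667]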
@@ -103,7 +109,7 @@ class DcrAssembleCoaddConfig(CompareWarpAssembleCoaddConfig):
         dtype=float,
         doc="Maximum relative change of the model allowed between subfilters."
             "Set to zero to disable.",
-        default=2.,
+        default=4.,
     )
     convergenceMaskPlanes = pexConfig.ListField(
         dtype=str,
@@ -377,11 +383,12 @@ def run(self, skyInfo, tempExpRefList, imageScalerList, weightList,
                                                          stats.ctrl)
             self.log.info("Initial convergence : %s", convergenceMetric)
             convergenceList = [convergenceMetric]
+            gainList = []
             convergenceCheck = 1.
             subfilterVariance = None
             while (convergenceCheck > self.config.convergenceThreshold or
                    modelIter < self.config.minNumIter):
-                gain = self.calculateGain(modelIter)
+                gain = self.calculateGain(convergenceList, gainList)
                 self.dcrAssembleSubregion(dcrModels, subBBox, dcrBBox, tempExpRefList, imageScalerList,
                                           weightList, spanSetMaskList, stats.flags, stats.ctrl,
                                           convergenceMetric, baseMask, subfilterVariance, gain,
@@ -392,6 +399,12 @@ def run(self, skyInfo, tempExpRefList, imageScalerList, weightList,
                                                              spanSetMaskList,
                                                              stats.ctrl)
                 convergenceCheck = (convergenceList[-1] - convergenceMetric)/convergenceMetric
+                if convergenceCheck < 0:
+                    self.log.warn("Coadd %s diverged before reaching maximum iterations or"
+                                  " desired convergence improvement of %s."
+                                  " Divergence: %s",
+                                  subBBox, self.config.convergenceThreshold, convergenceCheck)
+                    break
                 convergenceList.append(convergenceMetric)
                 if modelIter > self.config.maxNumIter:
                     if self.config.useConvergence:
@@ -402,7 +415,7 @@ def run(self, skyInfo, tempExpRefList, imageScalerList, weightList,
                         break

                 if self.config.useConvergence:
-                    self.log.info("Iteration %s with convergence metric %s, %.4f%% improvement (gain: %.1f)",
+                    self.log.info("Iteration %s with convergence metric %s, %.4f%% improvement (gain: %.2f)",
                                   modelIter, convergenceMetric, 100.*convergenceCheck, gain)
                     modelIter += 1
                 else:
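The iteration logic above reduces to the skeleton below. Here measure, step, and calculate_gain stand in for calculateConvergence, dcrAssembleSubregion, and calculateGain; this is an illustrative sketch of the control flow, not the task's code verbatim:

def iterate_model(measure, step, calculate_gain, threshold, min_iter, max_iter):
    # Iterate until the relative improvement in the convergence metric
    # falls below ``threshold``, bailing out early on divergence.
    convergence_list = [measure()]
    gain_list = []
    convergence_check = 1.
    model_iter = 0
    while convergence_check > threshold or model_iter < min_iter:
        gain = calculate_gain(convergence_list, gain_list)
        step(gain)  # run one iteration of forward modeling
        metric = measure()
        convergence_check = (convergence_list[-1] - metric)/metric
        if convergence_check < 0:
            break  # the metric got worse: the solution is diverging
        convergence_list.append(metric)
        if model_iter > max_iter:
            break
        model_iter += 1
    return convergence_list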
@@ -773,7 +786,7 @@ def fillCoadd(self, dcrModels, skyInfo, tempExpRefList, weightList, calibration=
             dcrCoadds.append(coaddExposure)
         return dcrCoadds

-    def calculateGain(self, modelIter):
+    def calculateGain(self, convergenceList, gainList):
         """Calculate the gain to use for the current iteration.

         After calculating a new DcrModel, each value is averaged with the
@@ -785,19 +798,74 @@ def calculateGain(self, modelIter):
         Parameters
         ----------
-        modelIter : `int`
-            The current iteration of forward modeling.
+        convergenceList : `list` of `float`
+            The quality of fit metric from each previous iteration.
+        gainList : `list` of `float`
+            The gains used in each previous iteration; the new gain is
+            appended to this list for use in the next iteration.
+            Gains are numbers between ``self.config.baseGain`` and 1.

         Returns
         -------
         gain : `float`
             Relative weight to give the new solution when updating the model.
             A value of 1.0 gives equal weight to both solutions.
+
+        Raises
+        ------
+        ValueError
+            If ``len(convergenceList) != len(gainList) + 1``.
         """
-        if self.config.useProgressiveGain:
-            iterGain = np.log(modelIter)*self.config.baseGain if modelIter > 0 else self.config.baseGain
-            return max(self.config.baseGain, iterGain)
-        return self.config.baseGain
+        nIter = len(convergenceList)
+        if nIter != len(gainList) + 1:
+            raise ValueError("convergenceList (%d) must be one element longer than gainList (%d)."
+                             % (len(convergenceList), len(gainList)))
+
+        if self.config.baseGain is None:
+            # If ``baseGain`` is not set, calculate it from the number of DCR subfilters.
+            # The more subfilters being modeled, the lower the gain should be.
+            baseGain = 1./(self.config.dcrNumSubfilters - 1)
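+            # For example, modeling 3 subfilters gives a baseGain of 0.5.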
+        else:
+            baseGain = self.config.baseGain
+
+        if self.config.useProgressiveGain and nIter > 2:
+            # To calculate the best gain for this iteration, compare the past
+            # gains with the resulting convergence values.
+            # Algorithmically, this is a Kalman filter.
+            # If forward modeling proceeds perfectly, the convergence metric should
+            # asymptotically approach a final value.
+            # We can estimate that value from the measured changes in convergence
+            # weighted by the gains used in each previous iteration.
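+            # Inverting the gain-weighted update conv[i + 1] = (gain*estFinalConv
+            # + conv[i])/(1 + gain) for estFinalConv gives the expression below.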
+            estFinalConv = [((1 + gainList[i])*convergenceList[i + 1] - convergenceList[i])/gainList[i]
+                            for i in range(nIter - 1)]
+            # The convergence metric is strictly positive, so if the estimated final convergence is
+            # less than zero, force it to zero.
+            estFinalConv = np.array(estFinalConv)
+            estFinalConv[estFinalConv < 0] = 0
+            # Because the estimate may slowly change over time, only use the most recent measurements.
+            estFinalConv = np.median(estFinalConv[max(nIter - 5, 0):])
+            lastGain = gainList[-1]
+            lastConv = convergenceList[-2]
+            newConv = convergenceList[-1]
+            # The predicted convergence is the value we would get if the new model calculated
+            # in the previous iteration was perfect. Recall that the updated model that is
+            # actually used is the gain-weighted average of the new and old model,
+            # so the convergence would be similarly weighted.
+            predictedConv = (estFinalConv*lastGain + lastConv)/(1. + lastGain)
+            # If the measured and predicted convergence are very close, our forward model
+            # is accurate and we can use a more aggressive gain.
+            # If the measured convergence is significantly worse (or better!) than predicted,
+            # the model is not converging as expected and we should use a more
+            # conservative gain.
+            delta = (predictedConv - newConv)/((lastConv - estFinalConv)/(1 + lastGain))
+            newGain = 1 - abs(delta)
+            # Average the gains to prevent oscillating solutions.
+            newGain = (newGain + lastGain)/2.
+            gain = max(baseGain, newGain)
+        else:
+            gain = baseGain
+        gainList.append(gain)
+        return gain

     def calculateModelWeights(self, dcrModels, dcrBBox):
         """Build an array that smoothly tapers to 0 away from detected sources.
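Because the progressive gain is the heart of this change, here is a trimmed, standalone transcription of the logic above, driven with a synthetic, perfectly converging history. The snake_case names and the hard-coded base_gain=0.5 are illustrative assumptions, and only the progressive-gain path is kept:

import numpy as np

def calculate_gain(convergence_list, gain_list, base_gain=0.5):
    # Standalone sketch of DcrAssembleCoaddTask.calculateGain().
    n_iter = len(convergence_list)
    if n_iter != len(gain_list) + 1:
        raise ValueError("convergence_list must be one element longer than gain_list.")
    if n_iter > 2:
        # Estimate the final convergence implied by each past (gain, improvement) pair.
        est_final = np.array([((1 + gain_list[i])*convergence_list[i + 1]
                               - convergence_list[i])/gain_list[i]
                              for i in range(n_iter - 1)])
        est_final[est_final < 0] = 0
        est_final = np.median(est_final[max(n_iter - 5, 0):])
        last_gain = gain_list[-1]
        last_conv, new_conv = convergence_list[-2], convergence_list[-1]
        predicted = (est_final*last_gain + last_conv)/(1. + last_gain)
        delta = (predicted - new_conv)/((last_conv - est_final)/(1 + last_gain))
        gain = max(base_gain, (1 - abs(delta) + last_gain)/2.)
    else:
        gain = base_gain
    gain_list.append(gain)
    return gain

# A perfectly converging history improves by 1/(1 + gain) each iteration,
# so the estimated final convergence is 0 and the gain ramps toward 1.
conv, gains = [0.2], []
for _ in range(5):
    gain = calculate_gain(conv, gains)
    conv.append(conv[-1]/(1 + gain))
print(gains)  # [0.5, 0.5, 0.75, 0.875, 0.9375]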
131 changes: 131 additions & 0 deletions tests/test_dcrAssembleCoadd.py
@@ -0,0 +1,131 @@
# This file is part of pipe_tasks.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import unittest

import lsst.utils.tests

from lsst.pipe.tasks.dcrAssembleCoadd import DcrAssembleCoaddTask, DcrAssembleCoaddConfig


class DcrAssembleCoaddCalculateGainTestCase(lsst.utils.tests.TestCase):
"""Tests of dcrAssembleCoaddTask.calculateGain()."""
def setUp(self):
self.baseGain = 0.5
self.gainList = [self.baseGain, self.baseGain]
self.convergenceList = [0.2]
# Calculate the convergence we would expect if the model was converging perfectly,
# so that the improvement is limited only by our conservative gain.
for i in range(2):
self.convergenceList.append(self.convergenceList[i]/(self.baseGain + 1))
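        # For a perfectly converging model, estFinalConv is 0 and the predicted
        # convergence equals the measured one, so the raw new gain is 1.0;
        # calculateGain then averages it with the previous gain.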
self.nextGain = (1 + self.baseGain) / 2

self.config = DcrAssembleCoaddConfig()
self.task = DcrAssembleCoaddTask(self.config)

def testUnbalancedLists(self):
gainList = [1, 2, 3, 4]
convergenceList = [1, 2]
with self.assertRaises(ValueError):
self.task.calculateGain(convergenceList, gainList)

def testNoProgressiveGain(self):
self.config.useProgressiveGain = False
self.config.baseGain = self.baseGain
expectGain = self.baseGain
expectGainList = self.gainList + [expectGain]
result = self.task.calculateGain(self.convergenceList, self.gainList)
self.assertEqual(result, expectGain)
self.assertEqual(self.gainList, expectGainList)

def testBaseGainNone(self):
"""If baseGain is None, gain is calculated from the default values."""
self.config.useProgressiveGain = False
expectGain = 1 / (self.config.dcrNumSubfilters - 1)
expectGainList = self.gainList + [expectGain]
result = self.task.calculateGain(self.convergenceList, self.gainList)
self.assertEqual(result, expectGain)
self.assertEqual(self.gainList, expectGainList)

def testProgressiveFirstStep(self):
"""The first and second steps always return baseGain."""
convergenceList = self.convergenceList[:1]
gainList = []
self.config.baseGain = self.baseGain
expectGain = self.baseGain
expectGainList = [expectGain]
result = self.task.calculateGain(convergenceList, gainList)
self.assertEqual(result, expectGain)
self.assertEqual(gainList, expectGainList)

def testProgressiveSecondStep(self):
"""The first and second steps always return baseGain."""
convergenceList = self.convergenceList[:2]
gainList = self.gainList[:1]
self.config.baseGain = self.baseGain
expectGain = self.baseGain
expectGainList = gainList + [expectGain]
result = self.task.calculateGain(convergenceList, gainList)
self.assertEqual(result, expectGain)
self.assertEqual(gainList, expectGainList)

def testProgressiveGain(self):
"""Test that gain follows the "perfect" situation defined in setUp."""
self.config.baseGain = self.baseGain
expectGain = self.nextGain
expectGainList = self.gainList + [expectGain]
result = self.task.calculateGain(self.convergenceList, self.gainList)
self.assertFloatsAlmostEqual(result, expectGain)
self.assertEqual(self.gainList, expectGainList)

def testProgressiveGainBadFit(self):
"""Test that gain is reduced if the predicted convergence does not
match the measured convergence (in this case, converging too quickly).
"""
wrongGain = 1.0
gainList = [self.baseGain, self.baseGain]
convergenceList = [0.2]
for i in range(2):
convergenceList.append(convergenceList[i]/(wrongGain + 1))
# The below math is a simplified version of the full algorithm,
        # assuming the estimated final convergence is zero.
# Note that in this case, nextGain is smaller than wrongGain.
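        # Derivation: estFinalConv clamps to 0, so predictedConv = lastConv/(1 + baseGain)
        # while the measured newConv = lastConv/(1 + wrongGain); the raw new gain is then
        # (1 + baseGain)/(1 + wrongGain), which calculateGain averages with baseGain.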
nextGain = (self.baseGain + (1 + self.baseGain) / (1 + wrongGain)) / 2

self.config.baseGain = self.baseGain
expectGain = nextGain
        expectGainList = gainList + [expectGain]
result = self.task.calculateGain(convergenceList, gainList)
self.assertFloatsAlmostEqual(result, nextGain)
self.assertEqual(gainList, expectGainList)


def setup_module(module):
lsst.utils.tests.init()


class MatchMemoryTestCase(lsst.utils.tests.MemoryTestCase):
pass


if __name__ == "__main__":
lsst.utils.tests.init()
unittest.main()
