Skip to content

Commit

Permalink
Predict best gain to use from past measurements of convergence.
Browse files Browse the repository at this point in the history
The implementation is essentially a Kalman filter.
  • Loading branch information
isullivan committed Nov 9, 2018
1 parent 32af22e commit 037bc97
Showing 1 changed file with 52 additions and 10 deletions.
62 changes: 52 additions & 10 deletions python/lsst/pipe/tasks/dcrAssembleCoadd.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,8 +64,10 @@ class DcrAssembleCoaddConfig(CompareWarpAssembleCoaddConfig):
baseGain = pexConfig.Field(
dtype=float,
doc="Relative weight to give the new solution when updating the model."
"A value of 1.0 gives equal weight to both solutions.",
default=1.,
"A value of 1.0 gives equal weight to both solutions."
"If ``baseGain`` is set to zero, a conservative gain "
"will be calculated from the number of subfilters",
default=0.,
)
useProgressiveGain = pexConfig.Field(
dtype=bool,
Expand Down Expand Up @@ -367,11 +369,12 @@ def run(self, skyInfo, tempExpRefList, imageScalerList, weightList,
stats.ctrl)
self.log.info("Initial convergence : %s", convergenceMetric)
convergenceList = [convergenceMetric]
gainList = []
convergenceCheck = 1.
subfilterVariance = None
while (convergenceCheck > self.config.convergenceThreshold or
modelIter < self.config.minNumIter):
gain = self.calculateGain(modelIter)
gain = self.calculateGain(convergenceList, gainList)
self.dcrAssembleSubregion(dcrModels, subBBox, tempExpRefList, imageScalerList,
weightList, spanSetMaskList, stats.flags, stats.ctrl,
convergenceMetric, baseMask, subfilterVariance, gain,
Expand Down Expand Up @@ -771,7 +774,7 @@ def fillCoadd(self, dcrModels, skyInfo, tempExpRefList, weightList, calibration=
dcrCoadds.append(coaddExposure)
return dcrCoadds

def calculateGain(self, convergenceList, gainList):
    """Calculate the gain to use for the current iteration.

    After calculating a new DcrModel, each value is averaged with the
    value in the corresponding pixel from the previous iteration, weighted
    by the gain. The gain is adapted from the history of convergence
    measurements: if past predictions matched the measured convergence,
    a more aggressive (larger) gain is used; otherwise a conservative one.
    Algorithmically, this is a Kalman filter.

    Parameters
    ----------
    convergenceList : `list` of `float`
        The quality of fit metric from each previous iteration.
    gainList : `list` of `float`
        The gains used in each previous iteration.
        Side effect: the gain chosen here is appended to this list.

    Returns
    -------
    gain : `float`
        Relative weight to give the new solution when updating the model.
        A value of 1.0 gives equal weight to both solutions.
    """
    if self.config.baseGain <= 0:
        # If ``baseGain`` is not set, calculate it from the number of DCR subfilters.
        # The more subfilters being modeled, the lower the gain should be.
        baseGain = 1./(self.config.dcrNumSubfilters - 1)
    else:
        baseGain = self.config.baseGain
    nIter = len(convergenceList)
    if self.config.useProgressiveGain and nIter > 2:
        # To calculate the best gain to use, compare the past gains that have been used
        # with the resulting convergences to estimate the best gain to use.
        # If forward modeling proceeds perfectly, the convergence metric should
        # asymptotically approach a final value.
        # We can estimate that value from the measured changes in convergence
        # weighted by the gains used in each previous iteration.
        estFinalConv = [((1 + gainList[i])*convergenceList[i + 1] - convergenceList[i])/gainList[i]
                        for i in range(nIter - 1)]
        # Because the estimate may slowly change over time, only use the most recent measurements.
        estFinalConv = np.median(estFinalConv[max(nIter - 5, 0):])
        lastGain = gainList[nIter - 2]
        lastConv = convergenceList[nIter - 2]
        newConv = convergenceList[nIter - 1]
        # The predicted convergence is the value we would get if the new model calculated
        # in the previous iteration was perfect. Recall that the updated model that is
        # actually used is the gain-weighted average of the new and old model,
        # so the convergence would be similarly weighted.
        predictedConv = (estFinalConv*lastGain + lastConv)/(1. + lastGain)
        # If the measured and predicted convergence are very close, that indicates
        # that our forward model is accurate and we can use a more aggressive gain.
        # If the measured convergence is significantly worse (or better!) than predicted,
        # that indicates that the model is not converging as expected and
        # we should use a more conservative gain.
        scale = (lastConv - estFinalConv)/(1. + lastGain)
        if scale == 0.:
            # The last convergence already matches the estimated final value,
            # so no prediction error can be measured; fall back to the
            # conservative base gain rather than dividing by zero.
            newGain = baseGain
        else:
            delta = (predictedConv - newConv)/scale
            newGain = 1 - abs(delta)
        # Average the gains to prevent oscillating solutions.
        newGain = (newGain + lastGain)/2.
        gain = max(baseGain, newGain)
    else:
        gain = baseGain
    gainList.append(gain)
    return gain

def calculateModelWeights(self, dcrModels, bbox):
"""Build an array that smoothly tapers to 0 away from detected sources.
Expand Down

0 comments on commit 037bc97

Please sign in to comment.