DM-34644: Clean up decorrelation code #214

Merged · 8 commits · May 5, 2022
127 changes: 64 additions & 63 deletions python/lsst/ip/diffim/imageDecorrelation.py
@@ -129,7 +129,7 @@ def run(self, scienceExposure, templateExposure, subtractedExposure, psfMatching
scienceExposure : `lsst.afw.image.Exposure`
The original science exposure (before pre-convolution, if ``preConvMode==True``).
templateExposure : `lsst.afw.image.Exposure`
The original template exposure warped into the science exposure dimensions.
The original template exposure warped, but not psf-matched, to the science exposure.
subtractedExposure : `lsst.afw.image.Exposure`
the subtracted exposure produced by
`ip_diffim.ImagePsfMatchTask.subtractExposures()`. The `subtractedExposure` must
@@ -181,8 +181,10 @@ def run(self, scienceExposure, templateExposure, subtractedExposure, psfMatching
image PSF corrected by the center of image matching kernel.

If ``templateMatched==True``, the templateExposure was matched (convolved)
to the ``scienceExposure`` by ``psfMatchingKernel``. Otherwise the ``scienceExposure``
was matched (convolved) by ``psfMatchingKernel``.
to the ``scienceExposure`` by ``psfMatchingKernel`` during image differencing.
Otherwise the ``scienceExposure`` was matched (convolved) by ``psfMatchingKernel``.
In either case, note that the original template and science images are required,
not the psf-matched version.

This task discards the variance plane of ``subtractedExposure`` and re-computes
it from the variance planes of ``scienceExposure`` and ``templateExposure``.
@@ -228,103 +230,97 @@ def run(self, scienceExposure, templateExposure, subtractedExposure, psfMatching
self.log.info("Original variance plane means. Science:%.5e, warped template:%.5e)",
svar, tvar)

if templateMatched:
# Regular subtraction, we convolved the template
self.log.info("Decorrelation after template image convolution")
expVar = svar
matchedVar = tvar
exposure = scienceExposure
matchedExposure = templateExposure
else:
# We convolved the science image
self.log.info("Decorrelation after science image convolution")
expVar = tvar
matchedVar = svar
exposure = templateExposure
matchedExposure = scienceExposure

# Should not happen unless entire image has been masked, which could happen
# if this is a small subimage of the main exposure. In this case, just return a full NaN
# exposure
if np.isnan(expVar) or np.isnan(matchedVar):
if np.isnan(svar) or np.isnan(tvar):
# Double check that one of the exposures is all NaNs
if (np.all(np.isnan(exposure.image.array))
or np.all(np.isnan(matchedExposure.image.array))):
if (np.all(np.isnan(scienceExposure.image.array))
or np.all(np.isnan(templateExposure.image.array))):
self.log.warning('Template or science image is entirely NaNs: skipping decorrelation.')
outExposure = subtractedExposure.clone()
return pipeBase.Struct(correctedExposure=outExposure, )

# The maximal correction value converges to sqrt(matchedVar/expVar).
if templateMatched:
# Regular subtraction, we convolved the template
self.log.info("Decorrelation after template image convolution")
varianceMean = svar
targetVarianceMean = tvar
# variance plane of the image that is not convolved
variance = scienceExposure.variance.array
# Variance plane of the convolved image, before convolution.
targetVariance = templateExposure.variance.array
else:
# We convolved the science image
self.log.info("Decorrelation after science image convolution")
varianceMean = tvar
targetVarianceMean = svar
# variance plane of the image that is not convolved
variance = templateExposure.variance.array
# Variance plane of the convolved image, before convolution.
targetVariance = scienceExposure.variance.array

# The maximal correction value converges to sqrt(targetVarianceMean/varianceMean).
# Correction divergence warning if the correction exceeds 4 orders of magnitude.
mOverExpVar = matchedVar/expVar
mOverExpVar = targetVarianceMean/varianceMean
if mOverExpVar > 1e8:
self.log.warning("Diverging correction: matched image variance is "
" much larger than the unconvolved one's"
", matchedVar/expVar:%.2e", mOverExpVar)
", targetVarianceMean/varianceMean:%.2e", mOverExpVar)

oldVarMean = self.computeVarianceMean(subtractedExposure)
self.log.info("Variance plane mean of uncorrected diffim: %f", oldVarMean)

kArr = kimg.array
diffExpArr = subtractedExposure.image.array
diffimShape = subtractedExposure.image.array.shape
psfImg = subtractedExposure.getPsf().computeKernelImage(geom.Point2D(xcen, ycen))
psfDim = psfImg.getDimensions()
psfArr = psfImg.array

# Determine the common shape
kSum = np.sum(kArr)
kSumSq = kSum*kSum
self.log.debug("Matching kernel sum: %.3e", kSum)
psfShape = psfImg.array.shape

if preConvMode:
self.log.info("Decorrelation of likelihood image")
self.computeCommonShape(preConvImg.array.shape, kArr.shape,
psfArr.shape, diffExpArr.shape)
corr = self.computeScoreCorrection(kArr, expVar, matchedVar, preConvImg.array)
psfShape, diffimShape)
corr = self.computeScoreCorrection(kArr, varianceMean, targetVarianceMean, preConvImg.array)
else:
self.log.info("Decorrelation of difference image")
self.computeCommonShape(kArr.shape, psfArr.shape, diffExpArr.shape)
corr = self.computeDiffimCorrection(kArr, expVar, matchedVar)

diffExpArr = self.computeCorrectedImage(corr.corrft, diffExpArr)
self.computeCommonShape(kArr.shape, psfShape, diffimShape)
corr = self.computeDiffimCorrection(kArr, varianceMean, targetVarianceMean)

corrPsfArr = self.computeCorrectedDiffimPsf(corr.corrft, psfArr)
psfcI = afwImage.ImageD(psfDim)
psfcI.array = corrPsfArr
psfcK = afwMath.FixedKernel(psfcI)
psfNew = measAlg.KernelPsf(psfcK)
correctedImage = self.computeCorrectedImage(corr.corrft, subtractedExposure.image.array)
correctedPsf = self.computeCorrectedDiffimPsf(corr.corrft, psfImg.array)

correctedExposure = subtractedExposure.clone()
correctedExposure.image.array[...] = diffExpArr # Allow for numpy type casting
# The subtracted exposure variance plane is already correlated, we cannot propagate
# it through another convolution; instead we need to use the uncorrelated originals
# The whitening should scale it to expVar + matchedVar on average
# The whitening should scale it to varianceMean + targetVarianceMean on average
if self.config.completeVarPlanePropagation:
self.log.debug("Using full variance plane calculation in decorrelation")
newVarArr = self.calculateVariancePlane(
exposure.variance.array, matchedExposure.variance.array,
expVar, matchedVar, corr.cnft, corr.crft)
correctedVariance = self.calculateVariancePlane(
variance, targetVariance,
varianceMean, targetVarianceMean, corr.cnft, corr.crft)
else:
self.log.debug("Using estimated variance plane calculation in decorrelation")
newVarArr = self.estimateVariancePlane(
exposure.variance.array, matchedExposure.variance.array,
correctedVariance = self.estimateVariancePlane(
variance, targetVariance,
corr.cnft, corr.crft)

corrExpVarArr = correctedExposure.variance.array
corrExpVarArr[...] = newVarArr # Allow for numpy type casting

# Determine the common shape
kSum = np.sum(kArr)
kSumSq = kSum*kSum
self.log.debug("Matching kernel sum: %.3e", kSum)
if not templateMatched:
# ImagePsfMatch.subtractExposures re-scales the difference in
# the science image convolution mode
corrExpVarArr /= kSumSq
correctedExposure.setPsf(psfNew)
correctedVariance /= kSumSq
subtractedExposure.image.array[...] = correctedImage # Allow for numpy type casting
subtractedExposure.variance.array[...] = correctedVariance
subtractedExposure.setPsf(correctedPsf)

newVarMean = self.computeVarianceMean(correctedExposure)
newVarMean = self.computeVarianceMean(subtractedExposure)
self.log.info("Variance plane mean of corrected diffim: %.5e", newVarMean)

# TODO DM-23857 As part of the spatially varying correction implementation
# consider whether returning a Struct is still necessary.
return pipeBase.Struct(correctedExposure=correctedExposure, )
return pipeBase.Struct(correctedExposure=subtractedExposure, )

def computeCommonShape(self, *shapes):
"""Calculate the common shape for FFT operations. Set `self.freqSpaceShape`
@@ -579,15 +575,15 @@ def calculateVariancePlane(self, vplane1, vplane2, varMean1, varMean2, c1ft, c2f
v1[filtInf] = np.inf

D = np.real(np.fft.ifft2(c2ft))
c2ft = np.fft.fft2(D*D)
c2SqFt = np.fft.fft2(D*D)

v2shape = vplane2.shape
filtInf = np.isinf(vplane2)
filtNan = np.isnan(vplane2)
vplane2 = np.copy(vplane2)
vplane2[filtInf | filtNan] = varMean2
D = self.padCenterOriginArray(vplane2, self.freqSpaceShape)
v2 = np.real(np.fft.ifft2(np.fft.fft2(D) * c2ft))
v2 = np.real(np.fft.ifft2(np.fft.fft2(D) * c2SqFt))
v2 = self.padCenterOriginArray(v2, v2shape, useInverse=True)
v2[filtNan] = np.nan
v2[filtInf] = np.inf
@@ -607,7 +603,7 @@ def computeCorrectedDiffimPsf(self, corrft, psfOld):

Returns
-------
psfNew : `numpy.ndarray`
correctedPsf : `lsst.meas.algorithms.KernelPsf`
The corrected psf, same shape as `psfOld`, sum normed to 1.

Notes
Expand All @@ -623,7 +619,12 @@ def computeCorrectedDiffimPsf(self, corrft, psfOld):
psfNew = psfNew.real
psfNew = self.padCenterOriginArray(psfNew, psfShape, useInverse=True)
psfNew = psfNew/psfNew.sum()
return psfNew

psfcI = afwImage.ImageD(geom.Extent2I(*psfShape))
psfcI.array = psfNew
psfcK = afwMath.FixedKernel(psfcI)
correctedPsf = measAlg.KernelPsf(psfcK)
return correctedPsf

def computeCorrectedImage(self, corrft, imgOld):
"""Compute the decorrelated difference image.
@@ -737,7 +738,7 @@ def run(self, subExposure, expandedSubExposure, fullBBox,
subExp1 = templateExposure.Factory(templateExposure, expandedSubExposure.getBBox())

# Prevent too much log INFO verbosity from DecorrelateALKernelTask.run
logLevel = self.log.getLevel()
logLevel = self.log.level
self.log.setLevel(self.log.WARNING)
@timj (Member), May 4, 2022:
I really need to add the logging context manager to the utils package so that the log level resets automatically when the task below completes. I'm not overly happy with how we handle this since it really is saying that this task is too verbose by default and it's impossible to ever see the info messages. As written, if someone sets the log level to DEBUG in pipetask they won't get any debug messages out of this task and that seems wrong.
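
A minimal sketch of such a context manager, assuming the task logger behaves like a standard logging.Logger; the name temporary_log_level is hypothetical, not an existing lsst.utils API:

import logging
from contextlib import contextmanager

@contextmanager
def temporary_log_level(log, level):
    """Temporarily set ``log`` to ``level``, restoring the previous level on exit."""
    old_level = log.level
    log.setLevel(level)
    try:
        yield log
    finally:
        # Restore even if the wrapped task raises, so the caller's level survives.
        log.setLevel(old_level)

Used around the call below, the level would reset automatically once DecorrelateALKernelTask.run returns, instead of requiring a manual setLevel afterwards.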

Contributor Author:
I completely agree that code that forces the log level like this is problematic. This is code that is not used outside of the unit tests, for now at least, and I just wanted to change this line to fix a warning while building the package. This particular task will need to be refactored if we want to use it in production.

res = DecorrelateALKernelTask.run(self, subExp2, subExp1, expandedSubExposure,
psfMatchingKernel, preConvKernel, **kwargs)
2 changes: 1 addition & 1 deletion python/lsst/ip/diffim/imageMapReduce.py
@@ -294,7 +294,7 @@ def run(self, mapperResults, exposure, **kwargs):
mask.getArray()[isNan[0], isNan[1]] |= bad

if reduceOp == 'average':
wts = weights.getArray().astype(np.float)
wts = weights.getArray().astype(float)
self.log.info('AVERAGE: Maximum overlap: %f', np.nanmax(wts))
self.log.info('AVERAGE: Average overlap: %f', np.nanmean(wts))
self.log.info('AVERAGE: Minimum overlap: %f', np.nanmin(wts))
4 changes: 2 additions & 2 deletions tests/test_imageDecorrelation.py
@@ -449,7 +449,7 @@ def testNoiseDiffimCorrection(self):

self._setUpSourcelessImages(svar=svar, tvar=tvar)
diffExp, mKernel, expected_var = self._makeAndTestUncorrectedDiffim()
corrected_diffExp = self._runDecorrelationTask(diffExp, mKernel)
corrected_diffExp = self._runDecorrelationTask(diffExp.clone(), mKernel)
Contributor:
Just for my edification, why the change to .clone() here?

Contributor:
Other places too, actually....

Contributor Author:
That's so I can modify the difference exposure in place in the decorrelation task, and not affect the tests.
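
As an illustration of the point (a toy stand-in, not the actual afw Exposure or task code): passing a clone lets the task overwrite its argument's pixels while the test's original exposure stays untouched.

import numpy as np

class FakeExposure:
    """Toy stand-in for an afw Exposure, for illustration only."""
    def __init__(self, image):
        self.image = np.asarray(image, dtype=float)

    def clone(self):
        return FakeExposure(self.image.copy())

def decorrelate_in_place(exposure):
    # Stand-in for a task that writes corrected pixels back into its argument.
    exposure.image *= 0.5
    return exposure

diffExp = FakeExposure([1.0, 2.0, 4.0])
corrected = decorrelate_in_place(diffExp.clone())  # mirrors the updated test call
assert np.allclose(diffExp.image, [1.0, 2.0, 4.0])  # original unchanged
assert np.allclose(corrected.image, [0.5, 1.0, 2.0])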


rho_sci = estimatePixelCorrelation(self.im1ex.getImage().getArray())
rho_rawdiff = estimatePixelCorrelation(diffExp.getImage().getArray())
@@ -529,7 +529,7 @@ def _testDiffimCorrection_spatialTask(self, svar, tvar, varyPsf=0.0):
diffExp, mKernel, expected_var = self._makeAndTestUncorrectedDiffim()
variances = []
for spatiallyVarying in [False, True]:
corrected_diffExp = self._runDecorrelationSpatialTask(diffExp, mKernel,
corrected_diffExp = self._runDecorrelationSpatialTask(diffExp.clone(), mKernel,
spatiallyVarying)
var, mn = self._testDecorrelation(expected_var, corrected_diffExp)
variances.append(var)