
Commit

Merge branch 'tickets/DM-30172'
czwa committed Dec 9, 2021
2 parents 491245d + 52c56b1 commit 0db887b
Showing 3 changed files with 34 additions and 19 deletions.
17 changes: 14 additions & 3 deletions python/lsst/cp/pipe/makeBrighterFatterKernel.py
@@ -200,6 +200,7 @@ def run(self, inputPtc, dummy, camera, inputDims):

if self.config.level == 'DETECTOR':
detectorCorrList = list()
detectorFluxes = list()

bfk = BrighterFatterKernel(camera=camera, detectorId=detector.getId(), level=self.config.level)
bfk.means = inputPtc.finalMeans # ADU
@@ -263,6 +264,10 @@ def run(self, inputPtc, dummy, camera, inputDims):
if q[0][0] > 0.0:
self.log.warning("Amp: %s %d skipped due to value of (variance-mean)=%f",
ampName, xcorrNum, q[0][0])
# If we drop an element of ``scaledCorrList``
# (which is what this does), we need to ensure we
# drop the flux entry as well.
fluxes = np.delete(fluxes, xcorrNum)
continue

# This removes the "t (I_a^2 + I_b^2)" factor in
@@ -274,6 +279,7 @@ def run(self, inputPtc, dummy, camera, inputDims):
if (xcorrCheck > self.config.xcorrCheckRejectLevel) or not (np.isfinite(xcorrCheck)):
self.log.warning("Amp: %s %d skipped due to value of triangle-inequality sum %f",
ampName, xcorrNum, xcorrCheck)
fluxes = np.delete(fluxes, xcorrNum)
continue

scaledCorrList.append(scaled)
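
For context, here is a standalone sketch of the bookkeeping the two new np.delete calls above perform: whenever a cross-correlation is rejected, the flux for the same exposure pair has to be dropped too, so that the later per-element fit can pair entry k of the correlation list with entry k of the flux array. The data and names below are toy illustrations, not the pipeline's actual objects; rejected indices are collected first and removed in one np.delete call purely to keep the sketch self-contained.

import numpy as np

fluxes = np.array([100.0, 200.0, 300.0, 400.0])        # one flux per exposure pair
corrs = [f * 1e-6 * np.ones((3, 3)) for f in fluxes]   # toy scaled cross-correlations

scaledCorrList = []
rejected = []
for xcorrNum, corr in enumerate(corrs):
    if corr[0, 0] > 2.5e-4:                            # stand-in for the quality cuts
        rejected.append(xcorrNum)                      # remember which flux to drop
        continue
    scaledCorrList.append(corr)

fluxes = np.delete(fluxes, rejected)                   # keep fluxes index-aligned
assert len(scaledCorrList) == len(fluxes)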
@@ -320,13 +326,17 @@ def run(self, inputPtc, dummy, camera, inputDims):
bfk.ampKernels[ampName] = postKernel
if self.config.level == 'DETECTOR':
detectorCorrList.extend(scaledCorrList)
detectorFluxes.extend(fluxes)
bfk.valid[ampName] = True
self.log.info("Amp: %s Sum: %g Center Info Pre: %g Post: %g",
ampName, finalSum, preKernel[center, center], postKernel[center, center])

# Assemble a detector kernel?
if self.config.level == 'DETECTOR':
preKernel = self.averageCorrelations(detectorCorrList, f"Det: {detName}")
if self.config.correlationQuadraticFit:
preKernel = self.quadraticCorrelations(detectorCorrList, detectorFluxes, f"Amp: {ampName}")
else:
preKernel = self.averageCorrelations(detectorCorrList, f"Det: {detName}")
finalSum = np.sum(preKernel)
center = int((bfk.shape[0] - 1) / 2)

@@ -397,9 +407,10 @@ def quadraticCorrelations(self, xCorrList, fluxList, name):
# i,j indices are inverted to apply the transposition,
# as is done in the averaging case.
linearFit, linearFitErr, chiSq, weights = irlsFit([0.0, 1e-4], fluxList,
xCorrList[:, j, i], funcPolynomial)
xCorrList[:, j, i], funcPolynomial,
scaleResidual=False)
meanXcorr[i, j] = linearFit[1] # Discard the intercept.
self.log.debug("Quad fit meanXcorr[%d,%d] = %g", i, j, linearFit[1])
self.log.info("Quad fit meanXcorr[%d,%d] = %g", i, j, linearFit[1])

# To match previous definitions, pad by one element.
meanXcorr = np.pad(meanXcorr, ((1, 1)))
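
To make the fit above concrete, here is an illustrative sketch of what quadraticCorrelations does per kernel element: each pixel's scaled correlation is fit against flux, the slope is kept, the intercept is discarded, and the result is padded by one element on each side. Plain numpy.polyfit stands in for irlsFit and the toy data are invented, so treat this as a sketch of the idea rather than the pipeline's implementation.

import numpy as np

rng = np.random.default_rng(12345)
fluxes = np.array([1000.0, 2000.0, 4000.0, 8000.0])
# One toy 5x5 scaled correlation per flux, growing roughly linearly with flux.
xCorrList = np.stack([f * 1e-8 * np.ones((5, 5)) + rng.normal(0.0, 1e-7, (5, 5))
                      for f in fluxes])

meanXcorr = np.zeros((5, 5))
for i in range(5):
    for j in range(5):
        # i, j are swapped on the stack to mimic the transposition noted above.
        slope, intercept = np.polyfit(fluxes, xCorrList[:, j, i], deg=1)
        meanXcorr[i, j] = slope                        # discard the intercept

meanXcorr = np.pad(meanXcorr, ((1, 1)))                # match the kernel-shape padding
print(meanXcorr.shape)                                 # (7, 7)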
8 changes: 6 additions & 2 deletions python/lsst/cp/pipe/utils.py
@@ -352,7 +352,7 @@ def makeDataRefList(self, namespace):
self.refList += refList


def irlsFit(initialParams, dataX, dataY, function, weightsY=None, weightType='Cauchy'):
def irlsFit(initialParams, dataX, dataY, function, weightsY=None, weightType='Cauchy', scaleResidual=True):
"""Iteratively reweighted least squares fit.
This uses the `lsst.cp.pipe.utils.fitLeastSq`, but applies weights
@@ -375,6 +375,8 @@ def irlsFit(initialParams, dataX, dataY, function, weightsY=None, weightType='Ca
weightType : `str`, optional
Type of weighting to use. One of Cauchy, Anderson, bisquare,
box, Welsch, Huber, logistic, or Fair.
scaleResidual : `bool`, optional
If true, the residual is scaled by the sqrt of the Y values.
Returns
-------
@@ -397,7 +399,9 @@ def irlsFit(initialParams, dataX, dataY, function, weightsY=None, weightType='Ca

polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)
for iteration in range(10):
resid = np.abs(dataY - function(polyFit, dataX)) / np.sqrt(dataY)
resid = np.abs(dataY - function(polyFit, dataX))
if scaleResidual:
resid = resid / np.sqrt(dataY)
if weightType == 'Cauchy':
# Use Cauchy weighting. This is a soft weight.
# At [2, 3, 5, 10] sigma, weights are [.59, .39, .19, .05].
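
As a self-contained illustration of the loop above, the following sketch reimplements the iteratively reweighted least-squares idea with the new scaleResidual switch and the Cauchy weighting quoted in the comment. The function names and the explicit lstsq backend are inventions for this sketch, not the lsst.cp.pipe API.

import numpy as np


def weightedLinearFit(x, y, w):
    # Solve min_p sum_i w_i * (y_i - (p[0] + p[1] * x_i))**2 explicitly.
    design = np.vstack([np.ones_like(x), x]).T
    sw = np.sqrt(w)
    params, *_ = np.linalg.lstsq(design * sw[:, None], y * sw, rcond=None)
    return params


def irlsLinear(x, y, nIter=10, scaleResidual=True, c=2.385):
    # Robust linear fit of y against x using Cauchy reweighting.
    params = weightedLinearFit(x, y, np.ones_like(y))      # unweighted first pass
    for _ in range(nIter):
        resid = np.abs(y - (params[0] + params[1] * x))
        if scaleResidual:
            resid = resid / np.sqrt(y)                     # approximate Poisson scaling
        weights = 1.0 / (1.0 + np.square(resid / c))       # soft Cauchy downweighting
        params = weightedLinearFit(x, y, weights)
    return params


x = np.linspace(1.0, 10.0, 20)
y = 3.0 + 0.5 * x
y[5] += 10.0                                               # one gross outlier
print(irlsLinear(x, y, scaleResidual=False))               # approx [3.0, 0.5]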
28 changes: 14 additions & 14 deletions tests/test_brighterFatterKernel.py
@@ -135,20 +135,20 @@ def test_quadratic(self):
ptc.noise['amp 1'] = 5.0

results = task.run(ptc, ['this is a dummy exposure'], self.camera, {'detector': 1})
expectation = np.array([[-3.12500e-06, -6.25000e-06, 3.12500e-06, 1.25000e-05,
3.12500e-06, -6.25000e-06, -3.12500e-06],
[-6.25000e-06, -2.50000e-05, 6.25000e-06, 4.37500e-05,
6.25000e-06, -2.50000e-05, -6.25000e-06],
[3.12500e-06, 6.25000e-06, 1.03125e-04, 2.50000e-04,
1.03125e-04, 6.25000e-06, 3.12500e-06],
[1.25000e-05, 4.37500e-05, 2.50000e-04, 8.50000e-04,
2.50000e-04, 4.37500e-05, 1.25000e-05],
[3.12500e-06, 6.25000e-06, 1.03125e-04, 2.50000e-04,
1.03125e-04, 6.25000e-06, 3.12500e-06],
[-6.25000e-06, -2.50000e-05, 6.25000e-06, 4.37500e-05,
6.25000e-06, -2.50000e-05, -6.25000e-06],
[-3.12500e-06, -6.25000e-06, 3.12500e-06, 1.25000e-05,
3.12500e-06, -6.25000e-06, -3.12500e-06]])
expectation = np.array([[1.55337977e-30, 3.13979520e-30, 4.60562292e-30, 5.19037630e-30,
4.60562292e-30, 3.13979520e-30, 1.55337977e-30],
[3.07372386e-30, 6.40017812e-30, 1.00923202e-29, 1.15502594e-29,
1.00923202e-29, 6.40017812e-30, 3.07372386e-30],
[4.34133757e-30, 9.45605038e-30, 1.43881780e-29, 1.39759388e-29,
1.43881780e-29, 9.45605038e-30, 4.34133757e-30],
[4.83557602e-30, 1.06596290e-29, 1.71783205e-29, 1.55771296e-29,
1.71783205e-29, 1.06596290e-29, 4.83557602e-30],
[4.34133757e-30, 9.45605038e-30, 1.43881780e-29, 1.39759388e-29,
1.43881780e-29, 9.45605038e-30, 4.34133757e-30],
[3.07372386e-30, 6.40017812e-30, 1.00923202e-29, 1.15502594e-29,
1.00923202e-29, 6.40017812e-30, 3.07372386e-30],
[1.55337977e-30, 3.13979520e-30, 4.60562292e-30, 5.19037630e-30,
4.60562292e-30, 3.13979520e-30, 1.55337977e-30]])

self.assertFloatsAlmostEqual(results.outputBFK.ampKernels['amp 1'], expectation, atol=1e-5)

