Skip to content

Commit

Permalink
Merge pull request #240 from lsst/tickets/DM-44025
Browse files Browse the repository at this point in the history
DM-44025: Update ptc turnoff computation code and improve tests with real data.
  • Loading branch information
erykoff committed Apr 26, 2024
2 parents b0b17cf + 30067e9 commit 31fd3c8
Show file tree
Hide file tree
Showing 3 changed files with 819 additions and 18 deletions.
101 changes: 83 additions & 18 deletions python/lsst/cp/pipe/ptc/cpSolvePtcTask.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,11 +174,21 @@ class PhotonTransferCurveSolveConfig(pipeBase.PipelineTaskConfig,
"otherwise ADU.",
default=50_000.,
)
maxDeltaInitialPtcOutlierFit = pexConfig.Field(
dtype=float,
doc="If there are any outliers in the initial fit that have mean greater than "
"maxSignalInitialPtcOutlierFit, then no points that have this delta "
"mean from the previous ``good`` point are allowed. If "
"scaleMaxSignalInitialPtcOutlierFit=True then the units are electrons; "
"otherwise ADU.",
default=9_000.,
)
scaleMaxSignalInitialPtcOutlierFit = pexConfig.Field(
dtype=bool,
doc="Scale maxSignalInitialPtcOutlierFit by approximate gain? If yes then "
"maxSignalInitialPtcOutlierFit is assumed to have units of electrons, "
"otherwise ADU.",
doc="Scale maxSignalInitialPtcOutlierFit and maxDeltaInitialPtcOutlierFit by "
"approximate gain? If yes then "
"maxSignalInitialPtcOutlierFit and maxDeltaInitialPtcOutlierFit are assumed "
"to have units of electrons, otherwise ADU.",
default=True,
)
minVarPivotSearch = pexConfig.Field(
Expand Down Expand Up @@ -1118,17 +1128,24 @@ def errFunc(p, x, y):
varVecOriginal = dataset.rawVars[ampName].copy()
varVecOriginal = self._makeZeroSafe(varVecOriginal)

# These must be sorted for the given amplifier.
meanVecSort = np.argsort(meanVecOriginal)
meanVecSorted = meanVecOriginal[meanVecSort]
varVecSorted = varVecOriginal[meanVecSort]

if self.config.doLegacyTurnoffSelection:
# Discard points when the variance starts to decrease after two
# consecutive signal levels
goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
goodPoints = self._getInitialGoodPoints(meanVecSorted, varVecSorted,
self.config.minVarPivotSearch,
self.config.consecutivePointsVarDecreases)
else:
goodPoints = dataset.expIdMask[ampName]
# Make sure we have this properly sorted.
goodPoints = dataset.expIdMask[ampName].copy()
goodPoints = goodPoints[meanVecSort]

# Check if all points are bad from the 'cpExtractPtcTask'
initialExpIdMask = dataset.expIdMask[ampName]
initialExpIdMask = dataset.expIdMask[ampName].copy()

if not (goodPoints.any() and initialExpIdMask.any()):
msg = (f"SERIOUS: All points in goodPoints: {goodPoints} or "
Expand All @@ -1139,7 +1156,7 @@ def errFunc(p, x, y):
self.fillBadAmp(dataset, ptcFitType, ampName)
continue

mask = goodPoints
mask = goodPoints.copy()

if ptcFitType == 'EXPAPPROXIMATION':
ptcFunc = funcAstier
Expand Down Expand Up @@ -1167,16 +1184,20 @@ def errFunc(p, x, y):
# the EO Testing pipeline.

if self.config.scaleMaxSignalInitialPtcOutlierFit:
approxGain = np.nanmedian(meanVecOriginal/varVecOriginal)
approxGain = np.nanmedian(meanVecSorted/varVecSorted)
maxADUInitialPtcOutlierFit = self.config.maxSignalInitialPtcOutlierFit/approxGain
maxDeltaADUInitialPtcOutlierFit = self.config.maxDeltaInitialPtcOutlierFit/approxGain
self.log.info(
"Using approximate gain %.3f and ADU signal cutoff of %.1f for amplifier %s",
"Using approximate gain %.3f and ADU signal cutoff of %.1f and delta %.1f "
"for amplifier %s",
approxGain,
maxADUInitialPtcOutlierFit,
maxDeltaADUInitialPtcOutlierFit,
ampName,
)
else:
maxADUInitialPtcOutlierFit = self.config.maxSignalInitialPtcOutlierFit
maxDeltaADUInitialPtcOutlierFit = self.config.maxDeltaInitialPtcOutlierFit

if maxIterationsPtcOutliers == 0:
# We are not doing any outlier rejection here, but we do want
Expand All @@ -1185,25 +1206,26 @@ def errFunc(p, x, y):
errFunc,
parsIniPtc,
bounds=bounds,
args=(meanVecOriginal[mask], varVecOriginal[mask]),
args=(meanVecSorted[mask], varVecSorted[mask]),
)
pars = res.x
newMask = mask.copy()
else:
newMask = (mask & (meanVecOriginal <= maxADUInitialPtcOutlierFit))
newMask = (mask & (meanVecSorted <= maxADUInitialPtcOutlierFit))

converged = False
count = 0
lastMask = mask.copy()
while count < maxIterationsPtcOutliers:
res = least_squares(
errFunc,
parsIniPtc,
bounds=bounds,
args=(meanVecOriginal[newMask], varVecOriginal[newMask]),
args=(meanVecSorted[newMask], varVecSorted[newMask]),
)
pars = res.x

sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
sigResids = (varVecSorted - ptcFunc(pars, meanVecSorted))/np.sqrt(varVecSorted)
# The new mask includes points where the residuals are
# finite, are less than the cut, and include the original
# mask of known points that should not be used.
Expand All @@ -1212,7 +1234,8 @@ def errFunc(p, x, y):
& (np.abs(np.nan_to_num(sigResids)) < sigmaCutPtcOutliers)
& mask
)
if np.count_nonzero(newMask) == 0:
# Demand at least 2 points to continue.
if np.count_nonzero(newMask) < 2:
msg = (f"SERIOUS: All points after outlier rejection are bad. "
f"Setting {ampName} to BAD.")
self.log.warning(msg)
Expand All @@ -1227,17 +1250,45 @@ def errFunc(p, x, y):
ampName,
)

# Loop over all used (True) points. If one of them follows
# a False point, then it must be within
# maxDeltaADUInitialPtcOutlierFit of a True point. If it
# is a large gap, everything above is marked False.
useMask, = np.where(newMask)
for useIndex, usePoint in enumerate(useMask):
if useIndex == 0 or newMask[usePoint - 1]:
# The previous point was good; continue.
continue
deltaADU = meanVecSorted[usePoint] - meanVecSorted[useMask[useIndex - 1]]
if deltaADU < maxDeltaADUInitialPtcOutlierFit:
# This jump is fine; continue.
continue

# Mark all further points bad.
newMask[usePoint:] = False
break

# If the mask hasn't changed then break out.
if np.all(newMask == lastMask):
self.log.debug("Convergence at iteration %d; breaking loop for %s.", count, ampName)
converged = True
break

lastMask = newMask.copy()

count += 1

# Set the mask to the new mask
mask = newMask.copy()
if not converged:
self.log.warning(
"Outlier detection was not converged prior to %d iteration for %s",
count,
ampName
)

# Set the mask to the new mask, and reset the sorting.
mask = np.zeros(len(meanVecSort), dtype=np.bool_)
mask[meanVecSort[newMask]] = True
maskSorted = newMask.copy()

if not mask.any():
# We have already filled the bad amp above, so continue.
Expand All @@ -1250,8 +1301,22 @@ def errFunc(p, x, y):
varVecFinal = varVecOriginal[mask]

# Save the maximum point after outlier detection as the
# PTC turnoff point.
dataset.ptcTurnoff[ampName] = meanVecFinal[-1]
# PTC turnoff point. We need to pay attention to the sorting
# here.
dataset.ptcTurnoff[ampName] = np.max(meanVecFinal)
# And compute the ptcTurnoffSamplingError as one half the
# difference between the previous and next point.
lastGoodIndex = np.where(maskSorted)[0][-1]
ptcTurnoffLow = meanVecSorted[lastGoodIndex - 1]
if lastGoodIndex == (len(meanVecSorted) - 1):
# If it's the last index, just use the interval.
ptcTurnoffSamplingError = dataset.ptcTurnoff[ampName] - ptcTurnoffLow
elif not np.isfinite(meanVecSorted[lastGoodIndex + 1]):
# If the next index is not finite, just use the interval.
ptcTurnoffSamplingError = dataset.ptcTurnoff[ampName] - ptcTurnoffLow
else:
ptcTurnoffSamplingError = (meanVecSorted[lastGoodIndex + 1] - ptcTurnoffLow)/2.
dataset.ptcTurnoffSamplingError[ampName] = ptcTurnoffSamplingError

if Counter(mask)[False] > 0:
self.log.info("Number of points discarded in PTC of amplifier %s:"
Expand Down
1 change: 1 addition & 0 deletions tests/test_linearity.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,7 @@ def _create_ptc(self, amp_names, exp_times, means, ccobcurr=None, photo_charges=

config = PhotonTransferCurveSolveTask.ConfigClass()
config.maximumRangeCovariancesAstier = 1
config.maxDeltaInitialPtcOutlierFit = 100_000.0
solve_task = PhotonTransferCurveSolveTask(config=config)
ptc = solve_task.run(datasets).outputPtcDataset

Expand Down

0 comments on commit 31fd3c8

Please sign in to comment.