Merge pull request #177 from lsst/tickets/DM-38309

DM-38309: Update to use new PhotonTransferCurveDataset internal format.

erykoff committed Mar 22, 2023
2 parents 07ccae0 + c2e90bd commit 2f872fb
Showing 3 changed files with 91 additions and 64 deletions.
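Before the diffs, a minimal sketch of the per-pair convention this commit adopts: the extract task builds one PARTIAL-type PhotonTransferCurveDataset per exposure pair and fills it with setAmpValuesPartialDataset, passing scalars and 2-D covariance slices instead of single-element lists. The import path and all numeric values below are assumptions for illustration only.

import numpy as np
from lsst.ip.isr import PhotonTransferCurveDataset  # assumed import path

side = 8  # stands in for config.maximumRangeCovariancesAstier
partial = PhotonTransferCurveDataset(["C:0,0"], "PARTIAL", side)

# Fill one amplifier from a single flat pair; every value is a placeholder.
partial.setAmpValuesPartialDataset(
    "C:0,0",
    inputExpIdPair=(12345, 12346),
    rawExpTime=30.0,
    rawMean=15000.0,
    rawVar=10500.0,
    expIdMask=True,
    covariance=np.zeros((side, side)),
    covSqrtWeights=np.zeros((side, side)),
    gain=1.5,
    noise=8.0,
)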
37 changes: 27 additions & 10 deletions python/lsst/cp/pipe/ptc/cpExtractPtcTask.py
@@ -290,7 +290,7 @@ def _guaranteeOutputs(self, inputDims, outputs, outputRefs):
newCovariances.append(outputs.outputCovariances[entry])
else:
newPtc = PhotonTransferCurveDataset(['no amp'], 'DUMMY', 1)
newPtc.setAmpValues('no amp')
newPtc.setAmpValuesPartialDataset('no amp')
newCovariances.append(newPtc)
return pipeBase.Struct(outputCovariances=newCovariances)

@@ -350,7 +350,7 @@ def run(self, inputExp, inputDims, taskMetadata):
dummyPtcDataset = PhotonTransferCurveDataset(ampNames, 'DUMMY',
self.config.maximumRangeCovariancesAstier)
for ampName in ampNames:
dummyPtcDataset.setAmpValues(ampName)
dummyPtcDataset.setAmpValuesPartialDataset(ampName)
# Get read noise. Try from the exposure, then try
# taskMetadata. This adds a get() for the exposures.
readNoiseLists = {}
@@ -480,22 +480,39 @@ def run(self, inputExp, inputDims, taskMetadata):
tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime,
ampName) for covRow in covAstier]
tempStructArray = np.array(tupleRows, dtype=tags)

covArray, vcov, _ = self.makeCovArray(tempStructArray,
self.config.maximumRangeCovariancesAstier)

# The returned covArray should only have 1 entry;
# raise if this is not the case.
if covArray.shape[0] != 1:
raise RuntimeError("Serious programming error in covArray shape.")

covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov))

# Correct covArray for sigma clipping:
# 1) Apply varFactor twice for the whole covariance matrix
covArray *= varFactor**2
# 2) But, only once for the variance element of the
# matrix, covArray[0,0] (so divide one factor out).
covArray[0, 0] /= varFactor

partialPtcDataset.setAmpValues(ampName, rawExpTime=[expTime], rawMean=[muDiff],
rawVar=[varDiff], inputExpIdPair=[(expId1, expId2)],
expIdMask=[expIdMask], covArray=covArray,
covSqrtWeights=covSqrtWeights, gain=gain,
noise=readNoiseDict[ampName])
# matrix, covArray[0, 0, 0] (so divide one factor out).
# (the first 0 is because this is a 3D array for insertion into
# the combined dataset).
covArray[0, 0, 0] /= varFactor

partialPtcDataset.setAmpValuesPartialDataset(
ampName,
inputExpIdPair=(expId1, expId2),
rawExpTime=expTime,
rawMean=muDiff,
rawVar=varDiff,
expIdMask=expIdMask,
covariance=covArray[0, :, :],
covSqrtWeights=covSqrtWeights[0, :, :],
gain=gain,
noise=readNoiseDict[ampName],
)

# Use location of exp1 to save PTC dataset from (exp1, exp2) pair.
# Below, np.where(expId1 == np.array(inputDims)) returns a tuple
# with a single-element array, so [0][0]
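In the hunk above, makeCovArray now returns a 3-D array with one entry per exposure pair, so the variance element is covArray[0, 0, 0] and the 2-D slice covArray[0, :, :] is what goes into the partial dataset. A numpy-only sketch of the sigma-clipping correction applied there, with an illustrative varFactor:

import numpy as np

side = 8          # covariance matrix side (illustrative)
varFactor = 1.2   # sigma-clipping variance correction factor (illustrative)

# One exposure pair's worth of covariances, shape (1, side, side).
covArray = np.ones((1, side, side))

# Apply varFactor twice to the whole covariance matrix...
covArray *= varFactor**2
# ...but only once to the variance element, so divide one factor back out.
covArray[0, 0, 0] /= varFactor

# The 2-D slice is what setAmpValuesPartialDataset stores.
covariance = covArray[0, :, :]
print(covariance.shape)  # (8, 8)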
74 changes: 45 additions & 29 deletions python/lsst/cp/pipe/ptc/cpSolvePtcTask.py
@@ -235,38 +235,54 @@ def run(self, inputCovariances, camera=None, detId=0):
if partialPtcDataset.ptcFitType == 'DUMMY':
continue
for ampName in ampNames:
datasetPtc.inputExpIdPairs[ampName].append(partialPtcDataset.inputExpIdPairs[ampName])
if type(partialPtcDataset.rawExpTimes[ampName]) is list:
datasetPtc.rawExpTimes[ampName].append(partialPtcDataset.rawExpTimes[ampName][0])
else:
datasetPtc.rawExpTimes[ampName].append(partialPtcDataset.rawExpTimes[ampName])
if type(partialPtcDataset.rawMeans[ampName]) is list:
datasetPtc.rawMeans[ampName].append(partialPtcDataset.rawMeans[ampName][0])
else:
datasetPtc.rawMeans[ampName].append(partialPtcDataset.rawMeans[ampName])
if type(partialPtcDataset.rawVars[ampName]) is list:
datasetPtc.rawVars[ampName].append(partialPtcDataset.rawVars[ampName][0])
else:
datasetPtc.rawVars[ampName].append(partialPtcDataset.rawVars[ampName])
if type(partialPtcDataset.expIdMask[ampName]) is list:
datasetPtc.expIdMask[ampName].append(partialPtcDataset.expIdMask[ampName][0])
else:
datasetPtc.expIdMask[ampName].append(partialPtcDataset.expIdMask[ampName])
datasetPtc.covariances[ampName].append(np.array(partialPtcDataset.covariances[ampName][0]))
datasetPtc.covariancesSqrtWeights[ampName].append(
np.array(partialPtcDataset.covariancesSqrtWeights[ampName][0]))
# The partial dataset consists of lists of values for each
# quantity. In the case of the input exposure pairs, this is a
# list of tuples. In all cases we only want the first
# (and only) element of the list.
datasetPtc.inputExpIdPairs[ampName].append(partialPtcDataset.inputExpIdPairs[ampName][0])
datasetPtc.rawExpTimes[ampName] = np.append(datasetPtc.rawExpTimes[ampName],
partialPtcDataset.rawExpTimes[ampName][0])
datasetPtc.rawMeans[ampName] = np.append(datasetPtc.rawMeans[ampName],
partialPtcDataset.rawMeans[ampName][0])
datasetPtc.rawVars[ampName] = np.append(datasetPtc.rawVars[ampName],
partialPtcDataset.rawVars[ampName][0])
datasetPtc.expIdMask[ampName] = np.append(datasetPtc.expIdMask[ampName],
partialPtcDataset.expIdMask[ampName][0])
datasetPtc.covariances[ampName] = np.append(
datasetPtc.covariances[ampName].ravel(),
partialPtcDataset.covariances[ampName].ravel()
).reshape(
(
len(datasetPtc.rawExpTimes[ampName]),
datasetPtc.covMatrixSide,
datasetPtc.covMatrixSide,
)
)
datasetPtc.covariancesSqrtWeights[ampName] = np.append(
datasetPtc.covariancesSqrtWeights[ampName].ravel(),
partialPtcDataset.covariancesSqrtWeights[ampName].ravel()
).reshape(
(
len(datasetPtc.rawExpTimes[ampName]),
datasetPtc.covMatrixSide,
datasetPtc.covMatrixSide,
)
)

# Sort arrays that are filled so far in the final dataset by
# rawMeans index
for ampName in ampNames:
index = np.argsort(np.ravel(np.array(datasetPtc.rawMeans[ampName])))
datasetPtc.inputExpIdPairs[ampName] = np.array(datasetPtc.inputExpIdPairs[ampName])[index]
datasetPtc.rawExpTimes[ampName] = np.array(datasetPtc.rawExpTimes[ampName])[index]
datasetPtc.rawMeans[ampName] = np.array(datasetPtc.rawMeans[ampName])[index]
datasetPtc.rawVars[ampName] = np.array(datasetPtc.rawVars[ampName])[index]
datasetPtc.expIdMask[ampName] = np.array(datasetPtc.expIdMask[ampName])[index]
datasetPtc.covariances[ampName] = np.array(datasetPtc.covariances[ampName])[index]
datasetPtc.covariancesSqrtWeights[ampName] = np.array(
datasetPtc.covariancesSqrtWeights[ampName])[index]
index = np.argsort(datasetPtc.rawMeans[ampName])
datasetPtc.inputExpIdPairs[ampName] = np.array(
datasetPtc.inputExpIdPairs[ampName]
)[index].tolist()
datasetPtc.rawExpTimes[ampName] = datasetPtc.rawExpTimes[ampName][index]
datasetPtc.rawMeans[ampName] = datasetPtc.rawMeans[ampName][index]
datasetPtc.rawVars[ampName] = datasetPtc.rawVars[ampName][index]
datasetPtc.expIdMask[ampName] = datasetPtc.expIdMask[ampName][index]
datasetPtc.covariances[ampName] = datasetPtc.covariances[ampName][index]
datasetPtc.covariancesSqrtWeights[ampName] = datasetPtc.covariances[ampName][index]

if self.config.ptcFitType == "FULLCOVARIANCE":
# Fit the measured covariances vs mean signal to
# the Astier+19 full model (Eq. 20). Before that
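The solve-task change above replaces type-dependent list handling with flat numpy accumulation: scalar quantities go through np.append, the per-pair covariance matrices are concatenated and reshaped to (nPairs, side, side), and everything is then re-ordered by the raw mean signal. A stand-alone sketch of that accumulate-and-sort pattern (variable names are illustrative, not the task's attributes):

import numpy as np

side = 8
rawMeans = np.zeros(0)
covariances = np.zeros((0, side, side))

# Accumulate three mock exposure pairs the way the task does.
for mean in [3000.0, 1000.0, 2000.0]:
    newCov = np.full((side, side), mean)  # placeholder covariance matrix
    rawMeans = np.append(rawMeans, mean)
    covariances = np.append(covariances.ravel(), newCov.ravel()).reshape(
        (len(rawMeans), side, side)
    )

# Re-order every accumulated array by the raw mean signal.
index = np.argsort(rawMeans)
rawMeans = rawMeans[index]
covariances = covariances[index]
print(rawMeans)  # [1000. 2000. 3000.]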
44 changes: 19 additions & 25 deletions tests/test_ptc.py
@@ -24,7 +24,6 @@
#
"""Test cases for cp_pipe."""

from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
import copy
@@ -108,12 +107,14 @@ def setUp(self):
self.c3 = -4.7e-12 # tuned so that it turns over for 200k mean

self.ampNames = [amp.getName() for amp in self.flatExp1.getDetector().getAmplifiers()]
self.dataset = PhotonTransferCurveDataset(self.ampNames, " ") # pack raw data for fitting
self.dataset = PhotonTransferCurveDataset(self.ampNames, ptcFitType="PARTIAL")
self.covariancesSqrtWeights = {}
for ampName in self.ampNames: # just the expTimes and means here - vars vary per function
self.dataset.rawExpTimes[ampName] = self.timeVec
self.dataset.rawMeans[ampName] = muVec
self.covariancesSqrtWeights[ampName] = []
self.dataset.covariancesSqrtWeights[ampName] = np.zeros((1,
self.dataset.covMatrixSide,
self.dataset.covMatrixSide))

# ISR metadata
self.metadataContents = TaskMetadata()
@@ -173,8 +174,8 @@ def test_covAstier(self):
# Force the last PTC dataset to have a NaN, and ensure that the
# task runs (DM-38029). This is a minor perturbation and does not
# affect the output comparison.
resultsExtract.outputCovariances[-2].rawMeans['C:0,0'] = [np.nan]
resultsExtract.outputCovariances[-2].rawVars['C:0,0'] = [np.nan]
resultsExtract.outputCovariances[-2].rawMeans['C:0,0'] = np.array([np.nan])
resultsExtract.outputCovariances[-2].rawVars['C:0,0'] = np.array([np.nan])

resultsSolve = solveTask.run(resultsExtract.outputCovariances,
camera=FakeCamera([self.flatExp1.getDetector()]))
@@ -184,6 +185,17 @@
for v1, v2 in zip(varStandard[amp], resultsSolve.outputPtcDataset.finalVars[amp]):
self.assertAlmostEqual(v1/v2, 1.0, places=1)

# Test various operations on the PTC output from the task.
ptc = resultsSolve.outputPtcDataset

expIdsUsed = ptc.getExpIdsUsed("C:0,0")
# Check that these are the same as the inputs, paired up, with the
# final two removed.
self.assertTrue(np.all(expIdsUsed == np.array(expIds).reshape(len(expIds) // 2, 2)[:-1]))

goodAmps = ptc.getGoodAmps()
self.assertEqual(goodAmps, self.ampNames)

def ptcFitAndCheckPtc(self, order=None, fitType=None, doTableArray=False, doFitBootstrap=False):
localDataset = copy.deepcopy(self.dataset)
localDataset.ptcFitType = fitType
@@ -217,7 +229,7 @@ def ptcFitAndCheckPtc(self, order=None, fitType=None, doTableArray=False, doFitB
+ self.noiseSq/(g*g))
for mu in localDataset.rawMeans[ampName]]
else:
RuntimeError("Enter a fit function type: 'POLYNOMIAL' or 'EXPAPPROXIMATION'")
raise RuntimeError("Enter a fit function type: 'POLYNOMIAL' or 'EXPAPPROXIMATION'")

# Initialize mask and covariance weights that will be used in fits.
# Covariance weights values empirically determined from one of
@@ -237,6 +249,7 @@ def ptcFitAndCheckPtc(self, order=None, fitType=None, doTableArray=False, doFitB
0.00131929, 0.00121935, 0.0011334,
0.00105893, 0.00099357, 0.0009358,
0.00088439, 0.00083833]

configLin.maxLookupTableAdu = 200000 # Max ADU in input mock flats
configLin.maxLinearAdu = 100000
configLin.minLinearAdu = 50000
@@ -424,25 +437,6 @@ def test_getInitialGoodPoints(self):
consecutivePointsVarDecreases=2)
assert np.all(points) == np.all(np.array([True, True, True, True, False]))

def test_getExpIdsUsed(self):
localDataset = copy.copy(self.dataset)

for pair in [(12, 34), (56, 78), (90, 10)]:
localDataset.inputExpIdPairs["C:0,0"].append(pair)
localDataset.expIdMask["C:0,0"] = np.array([True, False, True])
self.assertTrue(np.all(localDataset.getExpIdsUsed("C:0,0") == [(12, 34), (90, 10)]))

localDataset.expIdMask["C:0,0"] = np.array([True, False, True, True]) # wrong length now
with self.assertRaises(AssertionError):
localDataset.getExpIdsUsed("C:0,0")

def test_getGoodAmps(self):
dataset = self.dataset

self.assertTrue(dataset.ampNames == self.ampNames)
dataset.badAmps.append("C:0,1")
self.assertTrue(dataset.getGoodAmps() == [amp for amp in self.ampNames if amp != "C:0,1"])

def runGetGainFromFlatPair(self, correctionType='NONE'):
extractConfig = self.defaultConfigExtract
extractConfig.gainCorrectionType = correctionType
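The standalone test_getExpIdsUsed and test_getGoodAmps cases are folded into test_covAstier above, which exercises the same accessors on the dataset returned by the solve task. A minimal sketch mirroring the removed tests; the import path and direct attribute assignment are assumptions for illustration only:

import numpy as np
from lsst.ip.isr import PhotonTransferCurveDataset  # assumed import path

ptc = PhotonTransferCurveDataset(["C:0,0", "C:0,1"], ptcFitType="PARTIAL")
ptc.inputExpIdPairs["C:0,0"] = [(12, 34), (56, 78), (90, 10)]
ptc.expIdMask["C:0,0"] = np.array([True, False, True])

# Pairs surviving the mask for this amplifier: (12, 34) and (90, 10).
print(ptc.getExpIdsUsed("C:0,0"))

# Amplifiers not listed in badAmps.
ptc.badAmps.append("C:0,1")
print(ptc.getGoodAmps())  # ['C:0,0']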
