Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

DM-30170: Define crosstalk tests for cp_verify #15

Merged
merged 7 commits into from
May 10, 2022
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
979 changes: 979 additions & 0 deletions examples/cpVerifyCrosstalk.ipynb

Large diffs are not rendered by default.

29 changes: 29 additions & 0 deletions pipelines/VerifyCrosstalk.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# Pipeline verifying a CROSSTALK calibration: apply the existing crosstalk
# calibration during ISR, re-measure crosstalk from the corrected images
# (residual coefficients should be consistent with zero), and run the
# cp_verify statistics/merge stages on the re-measured solution.
description: cp_verify CROSSTALK calibration verification
tasks:
  # Run ISR on the raws with crosstalk correction enabled.
  verifyCrosstalkApply:
    class: lsst.ip.isr.isrTask.IsrTask
    config:
      connections.ccdExposure: 'raw'
      connections.outputExposure: 'verifyCrosstalkProc'
      connections.crosstalk: 'crosstalk'
      doCrosstalk: true
  # Measure inter-amplifier flux ratios from the corrected exposures.
  verifyCrosstalkExtract:
    class: lsst.cp.pipe.measureCrosstalk.CrosstalkExtractTask
    config:
      connections.inputExp: 'verifyCrosstalkProc'
      connections.outputRatios: 'verifyCrosstalkRatio'
  # Solve for residual crosstalk coefficients from the measured ratios.
  verifyCrosstalkSolve:
    class: lsst.cp.pipe.measureCrosstalk.CrosstalkSolveTask
    config:
      connections.inputRatios: 'verifyCrosstalkRatio'
      connections.outputCrosstalk: 'verifyCrosstalk'
  # Per-detector verification statistics on the residual solution.
  verifyCrosstalkCalib:
    class: lsst.cp.verify.verifyCrosstalk.CpVerifyCrosstalkTask
    config:
      connections.inputCalib: 'verifyCrosstalk'
      connections.outputStats: 'verifyCrosstalkDetStats'
  # Merge the per-detector statistics into instrument-level results.
  verifyCrosstalk:
    class: lsst.cp.verify.CpVerifyCalibMergeTask
    config:
      connections.inputStats: 'verifyCrosstalkDetStats'
      connections.outputStats: 'verifyCrosstalkStats'
2 changes: 2 additions & 0 deletions python/lsst/cp/verify/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,11 @@

from .version import *
from .verifyStats import *
from .verifyCalib import *
from .mergeResults import *

from .verifyBias import *
from .verifyDefects import *
from .verifyDark import *
from .verifyFlat import *
from .verifyCrosstalk import *
143 changes: 138 additions & 5 deletions python/lsst/cp/verify/mergeResults.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,8 @@
# Public API: one Config/Task pair per merge stage (exposure-level,
# run-level, their visit variants, and the detector-level calib merge).
__all__ = ['CpVerifyExpMergeConfig', 'CpVerifyExpMergeTask',
           'CpVerifyRunMergeConfig', 'CpVerifyRunMergeTask',
           'CpVerifyVisitExpMergeConfig', 'CpVerifyVisitExpMergeTask',
           'CpVerifyVisitRunMergeConfig', 'CpVerifyVisitRunMergeTask',
           'CpVerifyCalibMergeConfig', 'CpVerifyCalibMergeTask']


class CpVerifyExpMergeConnections(pipeBase.PipelineTaskConnections,
Expand Down Expand Up @@ -299,12 +300,12 @@ def run(self, inputStats, inputDims):

VERIFY:
ExposureId1:
VERIFY_MEAN: boolean
VERIFY_SIGMA: boolean
VERIFY_TEST1: boolean
VERIFY_TEST2: boolean
ExposureId2:
[...]
MEAN_UNIMODAL: boolean
SIGMA_UNIMODAL: boolean
TEST_VALUE: boolean
TEST_VALUE2: boolean
"""
outputStats = {}
success = True
Expand Down Expand Up @@ -439,3 +440,135 @@ class CpVerifyVisitRunMergeTask(CpVerifyRunMergeTask):
_DefaultName = 'cpVerifyVisitRunMerge'

pass


class CpVerifyCalibMergeConnections(pipeBase.PipelineTaskConnections,
                                    dimensions={"instrument"},
                                    defaultTemplates={}):
    """Connections for merging per-detector calibration statistics.

    Gathers the per-detector statistics dictionaries and emits a
    single instrument-level merged dictionary.
    """
    # One statistics dictionary per detector; ``multiple=True`` makes the
    # task receive them as a list in a single quantum.
    inputStats = cT.Input(
        name="exposureStats",
        doc="Input statistics to merge.",
        storageClass="StructuredDataDict",
        dimensions=["instrument", "detector"],
        multiple=True,
    )

    # Merged statistics keyed only by instrument (detector dimension
    # dropped after merging).
    outputStats = cT.Output(
        name="exposureStats",
        doc="Output statistics.",
        storageClass="StructuredDataDict",
        dimensions=["instrument"],
    )


class CpVerifyCalibMergeConfig(pipeBase.PipelineTaskConfig,
                               pipelineConnections=CpVerifyCalibMergeConnections):
    """Configuration parameters for detector-level calib stats merging.
    """
    # Optional run-level statistics; when non-empty, CpVerifyCalibMergeTask
    # calls its (subclass-implemented) verify() method on the merged results.
    runStatKeywords = pexConfig.DictField(
        keytype=str,
        itemtype=str,
        doc="Dictionary of statistics to run on the set of exposure values. The key should be the test "
            "name to record in the output, and the value should be the `lsst.afw.math` statistic name string.",
        default={},
    )


class CpVerifyCalibMergeTask(pipeBase.PipelineTask, pipeBase.CmdLineTask):
    """Merge statistics from detectors together.
    """
    ConfigClass = CpVerifyCalibMergeConfig
    _DefaultName = 'cpVerifyCalibMerge'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        # Standard quantum unpacking, augmented with each input's
        # dimension values so run() can label results by detector.
        inputs = butlerQC.get(inputRefs)

        dimensions = [exp.dataId.byName() for exp in inputRefs.inputStats]
        inputs['inputDims'] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputStats, inputDims):
        """Merge statistics.

        Parameters
        ----------
        inputStats : `list` [`dict`]
            Measured statistics for a detector.
        inputDims : `list` [`dict`]
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"detector"``
                detector id value (`int`)

        Returns
        -------
        outputStats : `dict`
            Merged full exposure statistics.

        Notes
        -----
        The outputStats should have a yaml representation as follows.

        Detector detId:
          FAILURES:
          - Detector detId TEST_NAME
          SUCCESS: boolean
        """
        outputStats = {}
        success = True
        for detStats, dimensions in zip(inputStats, inputDims):
            detId = dimensions['detector']
            detName = f"Detector {detId}"
            calcStats = {}

            # Use get() rather than pop() so the caller's input
            # dictionaries are not mutated as a side effect; a missing
            # SUCCESS key is treated as a failure rather than a KeyError.
            detSuccess = detStats.get('SUCCESS', False)
            if detSuccess:
                calcStats['SUCCESS'] = True
            else:
                calcStats['FAILURES'] = list()
                success = False
                # Record only the tests that actually failed (value is
                # falsy); previously every test name was appended, which
                # mislabeled passing tests as failures.
                for testName, passed in detStats.get('VERIFY', {}).items():
                    if not passed:
                        calcStats['FAILURES'].append(detName + " " + testName)

            outputStats[detName] = calcStats

        # Run-level verification only happens when the config requests it.
        runSuccess = True
        if len(self.config.runStatKeywords):
            outputStats['VERIFY'], runSuccess = self.verify(outputStats)

        # Logical and, not bitwise: overall success requires every
        # detector and the run-level checks to pass.
        outputStats['SUCCESS'] = success and runSuccess

        return pipeBase.Struct(
            outputStats=outputStats,
        )

    def verify(self, statisticsDictionary):
        """Verify if the measured statistics meet the verification criteria.

        Parameters
        ----------
        statisticsDictionary : `dict` [`str`, `dict`],
            Dictionary of measured statistics.  The inner dictionary
            should have keys that are statistic names (`str`) with
            values that are some sort of scalar (`int` or `float` are
            the most likely types).

        Returns
        -------
        outputStatistics : `dict` [`str`, `dict` [`str`, `bool`]]
            A dictionary indexed by the amplifier name, containing
            dictionaries of the verification criteria.
        success : `bool`
            A boolean indicating if all tests have passed.

        Raises
        ------
        NotImplementedError :
            This method must be implemented by the calibration-type
            subclass.
        """
        raise NotImplementedError("Subclasses must implement verification criteria.")
177 changes: 177 additions & 0 deletions python/lsst/cp/verify/verifyCalib.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,177 @@
# This file is part of cp_verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT


# Public API: the generic calibration-verification config/task pair that
# calibration-specific subclasses build on.
__all__ = ['CpVerifyCalibConfig', 'CpVerifyCalibTask']


class CpVerifyCalibConnections(pipeBase.PipelineTaskConnections,
                               dimensions={"instrument", "detector"},
                               defaultTemplates={}):
    """Connections for the per-detector calibration verification task.

    Takes a single calibration product and emits a statistics
    dictionary for the same (instrument, detector).
    """
    # The calibration product to measure; flagged as a calibration so the
    # butler resolves it through the calibration-collection lookup.
    inputCalib = cT.Input(
        name="calib",
        doc="Input calib to calculate statistics for.",
        storageClass="IsrCalib",
        dimensions=["instrument", "detector"],
        isCalibration=True
    )

    # Measured/verified statistics for this detector's calibration.
    outputStats = cT.Output(
        name="calibStats",
        doc="Output statistics from cp_verify.",
        storageClass="StructuredDataDict",
        dimensions=["instrument", "detector"],
    )


class CpVerifyCalibConfig(pipeBase.PipelineTaskConfig,
                          pipelineConnections=CpVerifyCalibConnections):
    """Configuration parameters for CpVerifyCalibTask.
    """
    # Statistics options.
    # NOTE(review): these three options are not referenced by the visible
    # base-class code; presumably they are consumed by calibration-type
    # subclasses when computing clipped statistics — confirm against the
    # subclasses before removing.
    useReadNoise = pexConfig.Field(
        dtype=bool,
        doc="Compare sigma against read noise?",
        default=True,
    )
    numSigmaClip = pexConfig.Field(
        dtype=float,
        doc="Rejection threshold (sigma) for statistics clipping.",
        default=5.0,
    )
    clipMaxIter = pexConfig.Field(
        dtype=int,
        doc="Max number of clipping iterations to apply.",
        default=3,
    )

    # Keywords and statistics to measure from different sources.
    calibStatKeywords = pexConfig.DictField(
        keytype=str,
        itemtype=str,
        doc="Calib statistics to run.",
        default={},
    )


class CpVerifyCalibTask(pipeBase.PipelineTask, pipeBase.CmdLineTask):
    """Main statistic measurement and validation class.

    This operates on a generic calibration, and is designed to be
    subclassed so specific calibrations can apply their own validation
    methods.
    """

    ConfigClass = CpVerifyCalibConfig
    _DefaultName = 'cpVerifyCalib'

    def run(self, inputCalib):
        """Calculate quality statistics and verify they meet the requirements
        for a calibration.

        Parameters
        ----------
        inputCalib : `lsst.ip.isr.IsrCalib`
            The calibration to be measured.

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            Result struct with components:
            - ``outputStats`` : `dict`
                  The output measured statistics.

        Notes
        -----
        The outputStats should have a yaml representation of the form
        (with STAT and TEST being the appropriate statistic and test
        names)

        DET:
          STAT: value
          STAT2: value
        VERIFY:
          TEST: boolean
        SUCCESS: boolean

        """
        # Detector-level statistics come first; verify() receives the
        # partially filled dictionary so it can inspect those values.
        resultStats = {'DET': self.detectorStatistics(inputCalib)}
        verifyResults, allPassed = self.verify(inputCalib, resultStats)
        resultStats['VERIFY'] = verifyResults
        resultStats['SUCCESS'] = allPassed

        return pipeBase.Struct(outputStats=resultStats)

    # Methods that need to be implemented by the calibration-level subclasses.
    def detectorStatistics(self, inputCalib):
        """Calculate detector level statistics from the calibration.

        Parameters
        ----------
        inputCalib : `lsst.ip.isr.IsrCalib`
            The calibration to verify.

        Returns
        -------
        outputStatistics : `dict` [`str`, scalar]
            A dictionary of the statistics measured and their values.

        Raises
        ------
        NotImplementedError :
            This method must be implemented by the calibration-type
            subclass.
        """
        raise NotImplementedError("Subclasses must implement detector statistics method.")

    def verify(self, inputCalib, statisticsDict):
        """Verify that the measured calibration meet the verification criteria.

        Parameters
        ----------
        inputCalib : `lsst.ip.isr.IsrCalib`
            The calibration to verify.
        statisticsDict : `dict` [`str`, `dict` [`str`, scalar]],
            Dictionary of measured statistics.  The inner dictionary
            should have keys that are statistic names (`str`) with
            values that are some sort of scalar (`int` or `float` are
            the most likely types).

        Returns
        -------
        outputStatistics : `dict` [`str`, `dict` [`str`, `bool`]]
            A dictionary indexed by the amplifier name, containing
            dictionaries of the verification criteria.
        success : `bool`
            A boolean indicating whether all tests have passed.

        Raises
        ------
        NotImplementedError :
            This method must be implemented by the calibration-type
            subclass.
        """
        raise NotImplementedError("Subclasses must implement verification criteria.")