Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

U/pgee/dm 1013 #7

Merged
merged 3 commits into from
Jan 21, 2015
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
27 changes: 21 additions & 6 deletions python/lsst/meas/base/plugins.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,23 +137,38 @@ def __init__(self, config, name, schema, metadata):
SingleFramePlugin.__init__(self, config, name, schema, metadata)
self.keyProbability = schema.addField(name + "_value", type="D",
doc="Set to 1 for extended sources, 0 for point sources.")
self.keyFlag = schema.addField(name + "_flag", type="Flag", doc="Set to 1 for any fatal failure.")

def measure(self, measRecord, exposure):
modelFlux = measRecord.getModelFlux()
modelFluxErr = measRecord.getModelFluxErr()
modelFluxFlag = measRecord.getModelFluxFlag()
psfFlux = measRecord.getPsfFlux()
psfFluxErr = measRecord.getPsfFluxErr()
flux1 = self.config.fluxRatio*modelFlux + self.config.modelErrFactor*modelFluxErr
flux2 = psfFlux + self.config.psfErrFactor*psfFluxErr
if flux1 < flux2:
measRecord.set(self.keyProbability, 0.0)
psfFluxFlag = measRecord.getPsfFluxFlag()
flux1 = self.config.fluxRatio*modelFlux
if not self.config.modelErrFactor == 0:
flux1 += self.config.modelErrFactor*modelFluxErr
flux2 = psfFlux
if not self.config.psfErrFactor == 0:
flux2 += self.config.psfErrFactor*psfFluxErr
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The logic here appears to have got more complicated for no very clear reason. If self.config.modelErrFactor == 0, then adding something multiplied by it has no effect (and the same for self.config.psfErrFactor, of course). What are the if statements adding?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This code is basically coding around the possibility that the Err is a NAN.


# A generic failure occurs when either FluxFlag is set to True
# A generic failure also occurs if either calculated flux value is NAN:
# this can occur if the Flux field itself is NAN,
# or the ErrFactor != 0 and the FluxErr is NAN
if numpy.isnan(flux1) or numpy.isnan(flux2) or modelFluxFlag or psfFluxFlag:
self.fail(measRecord)
else:
measRecord.set(self.keyProbability, 1.0);
if flux1 < flux2:
measRecord.set(self.keyProbability, 0.0)
else:
measRecord.set(self.keyProbability, 1.0)

def fail(self, measRecord, error=None):
# Override fail() to do nothing in the case of an exception. We should be setting a flag
# instead.
pass
measRecord.set(self.keyFlag, True)


# --- Forced Plugins ---
Expand Down
112 changes: 111 additions & 1 deletion tests/testClassification.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@

DATA_DIR = os.path.join(os.environ["MEAS_BASE_DIR"], "tests")

class SFMTestCase(lsst.utils.tests.TestCase):
class SFMTestCase(lsst.meas.base.tests.AlgorithmTestCase):

def testAlgorithm(self):

Expand Down Expand Up @@ -75,6 +75,116 @@ def testAlgorithm(self):
else:
self.assertEqual(probability, 1.0)

def testFlags(self):
    """Test all the failure modes of this algorithm, as well as checking that it succeeds when it should.

    Since this algorithm depends on having a ModelFlux and a PsfFlux measurement, it is a failure
    mode when either is NAN, or when ModelFluxFlag or PsfFluxFlag is True.

    When psfErrFactor != 0, the PsfFluxErr cannot be NAN, but otherwise is ignored.

    When modelErrFactor != 0, the ModelFluxErr cannot be NAN, but otherwise is ignored.
    """
    exp = afwImage.ExposureF()
    schema = lsst.afw.table.SourceTable.makeMinimalSchema()

    # Configure a measurement task with only the plugins this test needs; the
    # classification plugin reads the psfFlux and modelFlux slots set up here.
    sfm_config = lsst.meas.base.sfm.SingleFrameMeasurementConfig()
    sfm_config.plugins = ["base_SdssCentroid", "base_PsfFlux", "base_SincFlux",
                          "base_ClassificationExtendedness"]
    sfm_config.slots.centroid = "base_SdssCentroid"
    sfm_config.slots.shape = None
    sfm_config.slots.psfFlux = "base_PsfFlux"
    sfm_config.slots.modelFlux = "base_SincFlux"
    sfm_config.slots.apFlux = None
    sfm_config.slots.instFlux = None
    task = SingleFrameMeasurementTask(schema, config=sfm_config)
    measCat = SourceCatalog(schema)
    plugin = task.plugins["base_ClassificationExtendedness"]

    def checkCase(expectedFlag, flags=(), **fields):
        # Helper: make a fresh record, set only the named fields (any flux or
        # error field left unset stays NAN, the schema default), raise any
        # requested failure flags, run the plugin once, and check the
        # resulting classification failure flag.
        source = measCat.addNew()
        for fieldName, value in fields.items():
            source.set(fieldName, value)
        for flagName in flags:
            source.set(flagName, True)
        plugin.measure(source, exp)
        self.assertEqual(source.get("base_ClassificationExtendedness_flag"), expectedFlag)

    # Test no error case - all necessary values are set
    checkCase(False,
              base_PsfFlux_flux=100, base_PsfFlux_fluxSigma=1,
              base_SincFlux_flux=200, base_SincFlux_fluxSigma=2)

    # Test psfFlux flag case - failure in PsfFlux
    checkCase(True, flags=["base_PsfFlux_flag"],
              base_PsfFlux_flux=100, base_PsfFlux_fluxSigma=1,
              base_SincFlux_flux=200, base_SincFlux_fluxSigma=2)

    # Test modelFlux flag case - failure in ModelFlux
    checkCase(True, flags=["base_SincFlux_flag"],
              base_PsfFlux_flux=100, base_PsfFlux_fluxSigma=1,
              base_SincFlux_flux=200, base_SincFlux_fluxSigma=2)

    # Test modelFlux NAN case: base_SincFlux_flux is deliberately left unset.
    # NOTE(review): the previous version also raised base_SincFlux_flag here,
    # which made this pass for the wrong reason; only the NAN should trigger it.
    checkCase(True,
              base_PsfFlux_flux=100, base_PsfFlux_fluxSigma=1,
              base_SincFlux_fluxSigma=2)

    # Test psfFlux NAN case: base_PsfFlux_flux is deliberately left unset.
    checkCase(True,
              base_PsfFlux_fluxSigma=1,
              base_SincFlux_flux=200, base_SincFlux_fluxSigma=2)

    # Test modelFluxErr NAN case: ignored when modelErrFactor is zero,
    # a failure when it is non-zero.
    sfm_config.plugins["base_ClassificationExtendedness"].modelErrFactor = 0.
    checkCase(False,
              base_PsfFlux_flux=100, base_PsfFlux_fluxSigma=1,
              base_SincFlux_flux=200)

    sfm_config.plugins["base_ClassificationExtendedness"].modelErrFactor = 1.
    checkCase(True,
              base_PsfFlux_flux=100, base_PsfFlux_fluxSigma=1,
              base_SincFlux_flux=200)

    # Test psfFluxErr NAN case: ignored when psfErrFactor is zero,
    # a failure when it is non-zero.
    sfm_config.plugins["base_ClassificationExtendedness"].psfErrFactor = 0.
    checkCase(False,
              base_PsfFlux_flux=100,
              base_SincFlux_flux=200, base_SincFlux_fluxSigma=1)

    sfm_config.plugins["base_ClassificationExtendedness"].psfErrFactor = 1.
    checkCase(True,
              base_PsfFlux_flux=100,
              base_SincFlux_flux=200, base_SincFlux_fluxSigma=1)
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You could write the same tests with a lot less code by defining a method which takes the source parameters and expected flag value as arguments and then calling it repeatedly.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I thought of that too, but it didn't seem like it mattered for a unit test. And there are little differences from test to test.


def suite():
"""Returns a suite containing all the test cases in this module."""

Expand Down