DM-39605: Use butler.dimensions rather than butler.registry.dimensions #456

Merged 2 commits on Jun 13, 2023
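
For context, a minimal sketch of the pattern this PR adopts throughout: the dimension universe is read directly from the `Butler` instead of going through `butler.registry`. The repository path and the `writeable` flag below are illustrative assumptions; the `"camera"` dataset type and `"DummyCam"` data ID mirror the diffs that follow.

```python
from lsst.daf.butler import Butler, DataCoordinate, DatasetType

# Hypothetical repository path; any butler repo behaves the same way.
butler = Butler("REPO_PATH", writeable=True)

# Old spelling (what this PR replaces):
#     universe = butler.registry.dimensions
# New spelling: the dimension universe is available directly on the Butler.
universe = butler.dimensions

# The universe is passed wherever a DimensionUniverse is required, e.g. when
# constructing a DatasetType (as in _instrument.py below) ...
camera_type = DatasetType(
    "camera",
    ("instrument",),
    "Camera",
    isCalibration=True,
    universe=universe,
)

# ... or when standardizing a data ID (as in the test changes below).
data_id = DataCoordinate.standardize(
    instrument="DummyCam", exposure=100, universe=universe
)
```
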
8 changes: 4 additions & 4 deletions python/lsst/obs/base/_instrument.py
@@ -351,7 +351,7 @@ def writeCameraGeom(
run = self.makeUnboundedCalibrationRunName(*labels)
butler.registry.registerRun(run)
datasetType = DatasetType(
"camera", ("instrument",), "Camera", isCalibration=True, universe=butler.registry.dimensions
"camera", ("instrument",), "Camera", isCalibration=True, universe=butler.dimensions
)
butler.registry.registerDatasetType(datasetType)
camera = self.getCamera()
@@ -403,7 +403,7 @@ def writeStandardTextCuratedCalibrations(
definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName]
datasetType = DatasetType(
datasetTypeName,
-universe=butler.registry.dimensions,
+universe=butler.dimensions,
isCalibration=True,
# MyPy should be able to figure out that the kwargs here have
# the right types, but it can't.
@@ -544,7 +544,7 @@ class attribute for curated calibrations corresponding to the
dimension_arguments["physical_filter"] = md["FILTER"]

dataId = DataCoordinate.standardize(
-universe=butler.registry.dimensions,
+universe=butler.dimensions,
instrument=self.getName(),
**dimension_arguments,
)
@@ -679,7 +679,7 @@ def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> Tu
# to ensure it only happens once.
# This will also catch problems with the data ID not having keys we need.
try:
-dataId = butler.registry.expandDataId(dataId, graph=butler.registry.dimensions["exposure"].graph)
+dataId = butler.registry.expandDataId(dataId, graph=butler.dimensions["exposure"].graph)
except DataIdError as exc:
raise LookupError(str(exc)) from exc
try:
2 changes: 1 addition & 1 deletion python/lsst/obs/base/defineVisits.py
@@ -461,7 +461,7 @@ def __init__(self, config: DefineVisitsConfig, *, butler: Butler, **kwargs: Any)
config.validate() # Not a CmdlineTask nor PipelineTask, so have to validate the config here.
super().__init__(config, **kwargs)
self.butler = butler
-self.universe = self.butler.registry.dimensions
+self.universe = self.butler.dimensions
self.progress = Progress("obs.base.DefineVisitsTask")
self.makeSubtask("groupExposures")
self.makeSubtask("computeVisitRegions", butler=self.butler)
23 changes: 0 additions & 23 deletions python/lsst/obs/base/exposureAssembler.py
@@ -120,29 +120,6 @@ def getComponent(self, composite: lsst.afw.image.Exposure, componentName: str) -
f"Do not know how to retrieve component {componentName} from {type(composite)}"
)

-def getValidComponents(self, composite: Exposure) -> Dict[str, Any]:
-    """Extract all non-None components from a composite.
-
-    Parameters
-    ----------
-    composite : `object`
-        Composite from which to extract components.
-
-    Returns
-    -------
-    comps : `dict`
-        Non-None components extracted from the composite, indexed by the
-        component name as derived from the `self.storageClass`.
-    """
-    # For Exposure we call the generic version twice: once for top level
-    # components, and again for ExposureInfo.
-    expItems, expInfoItems = self._groupRequestedComponents()
-
-    components = super().getValidComponents(composite)
-    infoComps = super().getValidComponents(composite.getInfo())
-    components.update(infoComps)
-    return components

def disassemble(
self, composite: Any, subset: Optional[Iterable] = None, override: Optional[Any] = None
) -> Dict[str, DatasetComponent]:
6 changes: 3 additions & 3 deletions python/lsst/obs/base/ingest.py
@@ -290,7 +290,7 @@ def getDatasetType(self) -> DatasetType:
"raw",
("instrument", "detector", "exposure"),
"Exposure",
-universe=self.butler.registry.dimensions,
+universe=self.butler.dimensions,
)

# Mypy can not determine that the config passed to super() is this type.
@@ -309,7 +309,7 @@ def __init__(
config.validate() # Not a CmdlineTask nor PipelineTask, so have to validate the config here.
super().__init__(config, **kwargs)
self.butler = butler
-self.universe = self.butler.registry.dimensions
+self.universe = self.butler.dimensions
self.datasetType = self.getDatasetType()
self._on_success = on_success
self._on_metadata_failure = on_metadata_failure
@@ -1230,7 +1230,7 @@ def ingestFiles(
datasetTypeName, dimensions, storageClass = raw_definition
if not (datasetType := datasetTypes.get(datasetTypeName)):
datasetType = DatasetType(
-datasetTypeName, dimensions, storageClass, universe=self.butler.registry.dimensions
+datasetTypeName, dimensions, storageClass, universe=self.butler.dimensions
)
else:
datasetType = self.datasetType
2 changes: 1 addition & 1 deletion python/lsst/obs/base/instrument_tests.py
@@ -172,7 +172,7 @@ def writeAdditionalCuratedCalibrations(

datasetType = DatasetType(
"testCalib",
-universe=butler.registry.dimensions,
+universe=butler.dimensions,
isCalibration=True,
dimensions=("instrument", "detector"),
storageClass="CuratedCalibration",
2 changes: 1 addition & 1 deletion python/lsst/obs/base/script/defineVisits.py
@@ -83,7 +83,7 @@ def defineVisits(
# If this is old schema but is using modern visit grouping algorithm,
# (which is the default for new code) revert to one-to-one (which
# was the old default).
-exposure_dimension = butler.registry.dimensions["exposure"]
+exposure_dimension = butler.dimensions["exposure"]
modern = "one-to-one-and-by-counter"
if "seq_end" not in exposure_dimension.metadata and config.groupExposures.name == modern:
legacy = "one-to-one"
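
The script change above also reads dimension schema metadata straight off `butler.dimensions`. A small hedged sketch of that lookup in isolation (the repository path is an assumption; the `seq_end` check mirrors the hunk above):

```python
from lsst.daf.butler import Butler

butler = Butler("REPO_PATH")  # hypothetical repository path

# The "exposure" dimension element comes from the universe on the Butler,
# with no registry indirection.
exposure_dimension = butler.dimensions["exposure"]

# Older dimension schemas do not define seq_end; defineVisits.py uses this
# test to fall back from the modern grouping algorithm to one-to-one.
if "seq_end" not in exposure_dimension.metadata:
    print("old exposure schema: seq_end metadata not available")
```
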
2 changes: 1 addition & 1 deletion tests/test_defineVisits.py
@@ -115,7 +115,7 @@ def define_visits_incrementally(self, exposure: DimensionRecord) -> None:
self.butler.registry.insertDimensionData("exposure", exposure)
dataIds = [
DataCoordinate.standardize(
instrument="DummyCam", exposure=exposure.id, universe=self.butler.registry.dimensions
instrument="DummyCam", exposure=exposure.id, universe=self.butler.dimensions
)
]
self.task.run(dataIds, incremental=True)
8 changes: 2 additions & 6 deletions tests/test_ingest.py
@@ -54,12 +54,8 @@ class RawIngestTestCase(IngestTestBase, unittest.TestCase):
def visits(self):
butler = Butler(self.root, collections=[self.outputRun])
return {
-DataCoordinate.standardize(
-instrument="DummyCam", visit=100, universe=butler.registry.dimensions
-): [
-DataCoordinate.standardize(
-instrument="DummyCam", exposure=100, universe=butler.registry.dimensions
-)
+DataCoordinate.standardize(instrument="DummyCam", visit=100, universe=butler.dimensions): [
+DataCoordinate.standardize(instrument="DummyCam", exposure=100, universe=butler.dimensions)
]
}
