diff --git a/bin.src/make_latiss_export.py b/bin.src/make_export.py
similarity index 53%
rename from bin.src/make_latiss_export.py
rename to bin.src/make_export.py
index 0dff97c6..97417528 100755
--- a/bin.src/make_latiss_export.py
+++ b/bin.src/make_export.py
@@ -21,9 +21,9 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-"""Selectively export the contents of the LATISS dataset.
+"""Selectively export some contents from a butler repo.
-This script selects some LATISS data in a source butler repo, and makes an export
+This script selects some data in a source butler repo, and makes an export
file for importing to the test central prompt processing repository.
"""
@@ -32,6 +32,7 @@
import logging
import sys
import tempfile
+import yaml
import lsst.daf.butler as daf_butler
from lsst.utils.timer import time_this
@@ -54,6 +55,13 @@ def _make_parser():
"exported from the source repo. If no target repo is given, all "
"selected datasets in the source repo will be exported.",
)
+ parser.add_argument(
+ "--select",
+ required=True,
+ help="URI to a YAML file containing expressions to identify the "
+ "datasets and collections to be exported. An example is at "
+ "etc/export_latiss.yaml."
+ )
return parser
@@ -62,6 +70,8 @@ def main():
args = _make_parser().parse_args()
src_butler = daf_butler.Butler(args.src_repo)
+ with open(args.select, "r") as file:
+ wants = yaml.safe_load(file)
with tempfile.TemporaryDirectory() as temp_repo:
if args.target_repo:
@@ -72,10 +82,10 @@ def main():
target_butler = daf_butler.Butler(config)
with time_this(msg="Datasets and collections exported", level=logging.INFO):
- _export_for_copy(src_butler, target_butler)
+ _export_for_copy(src_butler, target_butler, wants)
-def _export_for_copy(butler, target_butler):
+def _export_for_copy(butler, target_butler, wants):
"""Export selected data to make copies in another butler repo.
Parameters
@@ -86,65 +96,43 @@ def _export_for_copy(butler, target_butler):
The target Butler to which datasets are exported. It is checked
to avoid exporting existing datasets. No checks are done to
verify if datasets are really identical.
+ wants : `dict`
+ A dictionary identifying the selections, with optional keys:
+
+ ``"datasets"``, optional
+ A list of dataset selection expressions (`list` of `dict`).
+ The list is iterated over to find matching datasets in the butler,
+ with the matching criteria given by the selection expressions.
+ Each selection expression is a keyword-argument dictionary to be
+ passed to the butler to query datasets; its keys have the same
+ meanings as the parameters of `lsst.daf.butler.Registry.queryDatasets`.
+ ``"collections"``, optional
+ A list of collection selection expressions (`list` of `dict`).
+ The list is iterated over to find matching collections in the butler,
+ with the matching criteria given by the selection expressions.
+ Each selection expression is a keyword-argument dictionary to be
+ passed to the butler to query collections; its keys have the same
+ meanings as the parameters of `lsst.daf.butler.Registry.queryCollections`.
"""
with butler.export(format="yaml") as contents:
- logging.debug("Selecting goodSeeingCoadd datasets")
- records = _filter_datasets(
- butler,
- target_butler,
- datasetType="goodSeeingCoadd",
- collections="LATISS/templates",
- )
- contents.saveDatasets(records)
-
- refcats = {"atlas_refcat2_20220201", "gaia_dr3_20230707"}
- logging.debug(f"Selecting refcats datasets {refcats}")
- records = _filter_datasets(
- butler, target_butler, datasetType=refcats, collections="refcats*"
- )
- contents.saveDatasets(records)
-
- logging.debug("Selecting skymaps dataset")
- records = _filter_datasets(
- butler, target_butler, datasetType="skyMap", collections="skymaps"
- )
- contents.saveDatasets(records)
-
- logging.debug("Selecting datasets in LATISS/calib")
- records = _filter_datasets(
- butler,
- target_butler,
- datasetType=...,
- # Workaround: use a matching expression rather than a specific
- # string "LATISS/calib" for the collection argument, so to avoid
- # MissingCollectionError when the collection does not exist in
- # the target repo.
- collections="*LATISS/calib",
- )
- contents.saveDatasets(records)
-
- logging.debug("Selecting pretrained ML model dataset")
- records = _filter_datasets(
- butler, target_butler, datasetType="pretrainedModelPackage", collections="pretrained_models"
- )
- contents.saveDatasets(records)
+ if "datasets" in wants:
+ for selection in wants["datasets"]:
+ logging.debug(f"Selecting datasets: {selection}")
+ if "datasetType" not in selection:
+ selection["datasetType"] = ...
+ records = _filter_datasets(butler, target_butler, **selection)
+ contents.saveDatasets(records)
# Save selected collections and chains
- for collection in butler.registry.queryCollections(
- expression="LATISS/calib",
- flattenChains=True,
- includeChains=True,
- ) + [
- "LATISS/templates",
- "LATISS/calib/unbounded",
- "pretrained_models",
- ]:
- logging.debug(f"Selecting collection {collection}")
- try:
- target_butler.registry.queryCollections(collection)
- except daf_butler.registry.MissingCollectionError:
- # MissingCollectionError is raised if the collection does not exist in target_butler.
- contents.saveCollection(collection)
+ if "collections" in wants:
+ for selection in wants["collections"]:
+ for collection in butler.registry.queryCollections(**selection):
+ logging.debug(f"Selecting collection {collection}")
+ try:
+ target_butler.registry.queryCollections(collection)
+ except daf_butler.registry.MissingCollectionError:
+ # MissingCollectionError is raised if the collection does not exist in target_butler.
+ contents.saveCollection(collection)
if __name__ == "__main__":
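
For orientation, here is a sketch of what ``yaml.safe_load`` yields for a
selection file, and how each entry feeds the queries above. The entries are
abridged from etc/export_latiss.yaml elsewhere in this change; treat this as
an illustration, not code from the patch.

    # Abridged parse of a selection file such as etc/export_latiss.yaml.
    wants = {
        "datasets": [
            {"collections": "LATISS/templates", "datasetType": "goodSeeingCoadd"},
            # No "datasetType" key: _export_for_copy fills in `...`, which
            # queryDatasets interprets as "all dataset types".
            {"collections": "*LATISS/calib"},
        ],
        "collections": [
            {"expression": "LATISS/calib", "flattenChains": True, "includeChains": True},
        ],
    }
    # Each entry is passed through unchanged as keyword arguments:
    #     _filter_datasets(butler, target_butler, **selection)
    #     butler.registry.queryCollections(**selection)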
diff --git a/bin.src/make_hsc_rc2_export.py b/bin.src/make_hsc_rc2_export.py
deleted file mode 100755
index cd70e75d..00000000
--- a/bin.src/make_hsc_rc2_export.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python
-# This file is part of prompt_processing.
-#
-# Developed for the LSST Data Management System.
-# This product includes software developed by the LSST Project
-# (https://www.lsst.org).
-# See the COPYRIGHT file at the top-level directory of this distribution
-# for details of code ownership.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-
-"""Selectively export the contents of the HSC-RC2 dataset.
-
-This script selects some HSC-RC2 data in a source butler repo, and makes an export
-file for making a central prompt processing repository.
-"""
-
-
-import argparse
-import logging
-import sys
-import time
-
-import lsst.daf.butler as daf_butler
-
-
-def _make_parser():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--src-repo",
- required=True,
- help="The location of the repository to be exported.",
- )
- return parser
-
-
-def main():
- logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
-
- args = _make_parser().parse_args()
- butler = daf_butler.Butler(args.src_repo)
-
- logging.info("Exporting Gen 3 registry to configure new repos...")
- start = time.time_ns()
- _export_for_copy(butler)
- end = time.time_ns()
- logging.info("Export finished in %.3fs.", 1e-9 * (end - start))
-
-
-def _export_for_copy(butler):
- """Export selected data to make copies in another butler repo.
-
- Parameters
- ----------
- butler: `lsst.daf.butler.Butler`
- The source Butler from which datasets are exported
- """
- with butler.export(format="yaml") as contents:
- logging.debug("Selecting goodSeeingCoadd datasets")
- records = butler.registry.queryDatasets(
- datasetType="goodSeeingCoadd",
- collections="HSC/runs/RC2/w_2022_44/DM-36763",
- )
- contents.saveDatasets(records)
-
- logging.debug("Selecting refcats datasets")
- records = butler.registry.queryDatasets(
- datasetType=..., collections="refcats"
- )
- contents.saveDatasets(records)
-
- logging.debug("Selecting skymaps dataset")
- records = butler.registry.queryDatasets(
- datasetType="skyMap", collections="skymaps", dataId={"skymap": "hsc_rings_v1"})
- contents.saveDatasets(records)
-
- logging.debug("Selecting datasets in HSC/calib")
- records = butler.registry.queryDatasets(
- datasetType=..., collections="HSC/calib"
- )
- contents.saveDatasets(records)
-
- # Save calibration collection
- for collection in butler.registry.queryCollections(
- expression="HSC/calib*",
- collectionTypes=daf_butler.CollectionType.CALIBRATION,
- ):
- contents.saveCollection(collection)
- # Do not export chains, as they will need to be reworked to satisfy
- # prompt processing's assumptions.
-
-
-if __name__ == "__main__":
- main()
diff --git a/bin.src/make_remote_butler.py b/bin.src/make_remote_butler.py
index 4ad6c05e..8b7cd22a 100755
--- a/bin.src/make_remote_butler.py
+++ b/bin.src/make_remote_butler.py
@@ -55,7 +55,6 @@ def _make_parser():
help="The export file containing the repository contents. Defaults to ./export.yaml.")
parser.add_argument("--instrument",
help="The short name of the instrument (HSC, LATISS, etc).")
- parser.add_argument("--hsc-rc2", action="store_true", help="Extra fix up for HSC-RC2 dataset.")
return parser
@@ -97,46 +96,6 @@ def _add_chains(butler, instrument_name):
)
-def _hsc_rc2(butler):
- """fix up some specifics of the HSC-RC2 dataset export
-
- Parameters
- ----------
- butler: `lsst.daf.butler.Butler`
- The source Butler from which datasets are exported
- """
- # Chain calibration collections
- instrument = Instrument.fromName("HSC", butler.registry)
- butler.registry.setCollectionChain(
- instrument.makeCalibrationCollectionName(),
- [
- "HSC/calib/DM-32378",
- "HSC/calib/gen2/20180117",
- "HSC/calib/DM-28636",
- ],
- )
-
- butler.registry.registerCollection(
- instrument.makeUnboundedCalibrationRunName(),
- type=CollectionType.CHAINED
- )
- butler.registry.setCollectionChain(
- instrument.makeUnboundedCalibrationRunName(),
- [
- "HSC/calib/gen2/20180117/unbounded",
- "HSC/calib/DM-28636/unbounded",
- ],
- )
- # Chain rerun collections to templates
- # The export script should have guaranteed that there are only coadds in these collections.
- current = butler.registry.getCollectionChain("templates")
- addition = butler.registry.queryCollections("HSC/runs/*",
- collectionTypes=CollectionType.RUN)
- butler.registry.setCollectionChain("templates",
- list(addition) + list(current),
- flatten=False)
-
-
def main():
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
@@ -150,8 +109,6 @@ def main():
with time_this(msg="Import", level=logging.INFO):
butler.import_(directory=args.src_repo, filename=args.export_file, transfer="auto")
_add_chains(butler, args.instrument)
- if args.hsc_rc2:
- _hsc_rc2(butler)
if __name__ == "__main__":
diff --git a/bin.src/make_template_export.py b/bin.src/make_template_export.py
deleted file mode 100755
index b50ec081..00000000
--- a/bin.src/make_template_export.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-# This file is part of prompt_processing.
-#
-# Developed for the LSST Data Management System.
-# This product includes software developed by the LSST Project
-# (https://www.lsst.org).
-# See the COPYRIGHT file at the top-level directory of this distribution
-# for details of code ownership.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-
-"""Export goodSeeingCoadd from a collection and make an export file
-for importing those data to a central prompt processing repository.
-All of the goodSeeingCoadd datasets in this collection are exported
-without selection.
-"""
-
-
-import argparse
-import logging
-import sys
-import time
-
-import lsst.daf.butler as daf_butler
-
-
-def _make_parser():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--src-repo",
- required=True,
- help="The location of the repository to be exported.",
- )
- parser.add_argument(
- "--collection",
- required=True,
- help="The collection to query data to be exported.",
- )
- return parser
-
-
-def main():
- logging.basicConfig(level=logging.INFO, stream=sys.stdout)
-
- args = _make_parser().parse_args()
- butler = daf_butler.Butler(args.src_repo, collections=args.collection)
-
- logging.info("Exporting from %s", butler)
- start = time.time_ns()
- _export_for_copy(butler)
- end = time.time_ns()
- logging.info("Export finished in %.3fs.", 1e-9 * (end - start))
-
-
-def _export_for_copy(butler):
- """Export selected data to make copies in another butler repo.
-
- Parameters
- ----------
- butler: `lsst.daf.butler.Butler`
- The source Butler from which datasets are exported
- """
- with butler.export(format="yaml") as contents:
- logging.debug("Selecting goodSeeingCoadd datasets")
- records = butler.registry.queryDatasets(
- datasetType="goodSeeingCoadd",
- )
- contents.saveDatasets(records)
-
-
-if __name__ == "__main__":
- main()
diff --git a/doc/playbook.rst b/doc/playbook.rst
index caaa9f42..b712f15b 100644
--- a/doc/playbook.rst
+++ b/doc/playbook.rst
@@ -151,7 +151,7 @@ The bucket ``rubin-pp-dev`` holds incoming raw images.
The bucket ``rubin-pp-dev-users`` holds:
* ``rubin-pp-dev-users/central_repo/`` contains the central repository described in `DMTN-219`_.
- This repository currently contains HSC and LATISS data, uploaded with ``make_hsc_rc2_export.py``, ``make_latiss_export.py``, and ``make_template_export.py``.
+ This repository currently contains HSC, LATISS, and LSSTComCamSim data, uploaded with ``make_export.py``.
* ``rubin-pp-dev-users/unobserved/`` contains raw files that the upload scripts can draw from to create incoming raws.
@@ -372,7 +372,7 @@ Install the Prompt Processing code, and set it up before use:
The tester scripts send ``next_visit`` events for each detector via Kafka on the ``next-visit-topic`` topic.
They then upload a batch of files representing the snaps of the visit to the ``rubin-pp-dev`` S3 bucket, simulating incoming raw images.
-``python/tester/upload.py``: Command line arguments are the instrument name (currently HSC or LATISS) and the number of groups of images to send.
+``python/tester/upload.py``: Command line arguments are the instrument name (currently HSC, LATISS, or LSSTComCamSim) and the number of groups of images to send.
Sample command line:
@@ -380,6 +380,7 @@ Sample command line:
python upload.py HSC 3
python upload.py LATISS 3
+ python upload.py LSSTComCamSim 1
This script draws images stored in the ``rubin-pp-dev-users`` bucket.
@@ -389,6 +390,7 @@ This script draws images stored in the ``rubin-pp-dev-users`` bucket.
One of the files, the unobserved group `2023-10-11T01:45:47.810`, has modified RA at a location with no templates.
Astrometry is also expected to fail in WCS fitting.
This visit can test pipeline fallback features.
+* For LSSTComCamSim, 2 groups are curated, totaling 18 raw FITS files and their corresponding JSON metadata files.
``python/tester/upload_hsc_rc2.py``: Command line argument is the number of groups of images to send.
@@ -444,6 +446,7 @@ For passwordless login, create a ``~/.pgpass`` file with contents:
usdf-prompt-processing-dev.slac.stanford.edu:5432:lsst-devl:rubin:PASSWORD
usdf-prompt-processing-dev.slac.stanford.edu:5432:ppcentralbutler:latiss_prompt:PASSWORD
usdf-prompt-processing-dev.slac.stanford.edu:5432:ppcentralbutler:hsc_prompt:PASSWORD
+ usdf-prompt-processing-dev.slac.stanford.edu:5432:ppcentralbutler:lsstcomcamsim_prompt:PASSWORD
and execute ``chmod 0600 ~/.pgpass``.
@@ -455,6 +458,8 @@ From ``rubin-devl``, new APDB schemas can be created in the usual way:
-c db_url="postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu/lsst-devl"
make_apdb.py -c namespace="pp_apdb_hsc" \
-c db_url="postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu/lsst-devl"
+ make_apdb.py -c namespace="pp_apdb_lsstcomcamsim" \
+ -c db_url="postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu/lsst-devl"
Resetting the APDB
------------------
@@ -472,3 +477,9 @@ To restore the APDB to a clean state, run the following:
psql -h usdf-prompt-processing-dev.slac.stanford.edu lsst-devl rubin -c 'drop schema "pp_apdb_hsc" cascade;'
make_apdb.py -c namespace="pp_apdb_hsc" \
-c db_url="postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu/lsst-devl"
+
+.. code-block:: sh
+
+ psql -h usdf-prompt-processing-dev.slac.stanford.edu lsst-devl rubin -c 'drop schema "pp_apdb_lsstcomcamsim" cascade;'
+ make_apdb.py -c namespace="pp_apdb_lsstcomcamsim" \
+ -c db_url="postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu/lsst-devl"
diff --git a/etc/export_comCamSim.yaml b/etc/export_comCamSim.yaml
new file mode 100644
index 00000000..811a085d
--- /dev/null
+++ b/etc/export_comCamSim.yaml
@@ -0,0 +1,16 @@
+datasets:
+- collections: refcats
+ datasetType: uw_stars_20240228
+- collections: skymaps
+ datasetType: skyMap
+# Workaround for DM-43294: use a matching expression rather than
+# a specific collection string for the collection argument, so as
+# to avoid MissingCollectionError when the collection does not
+# exist in the target repo.
+- collections: "*LSSTComCamSim/calib"
+
+collections:
+- expression: LSSTComCamSim/calib
+ flattenChains: True
+ includeChains: True
+- expression: LSSTComCamSim/calib/unbounded
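
The wildcard entry above matters because of the behavior sketched below; this
is a minimal illustration, assuming ``target_butler`` points at a repo that
lacks the calib collection, not code from the patch.

    import lsst.daf.butler as daf_butler

    # An exact collection name raises when the collection is absent.
    try:
        target_butler.registry.queryCollections("LSSTComCamSim/calib")
    except daf_butler.registry.MissingCollectionError:
        pass  # the error the wildcard avoids

    # A glob expression never raises; it simply matches nothing, so the
    # dataset query in _filter_datasets degrades gracefully.
    target_butler.registry.queryCollections("*LSSTComCamSim/calib")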
diff --git a/etc/export_hsc_rc2.yaml b/etc/export_hsc_rc2.yaml
new file mode 100644
index 00000000..94bf7c0b
--- /dev/null
+++ b/etc/export_hsc_rc2.yaml
@@ -0,0 +1,19 @@
+datasets:
+- collections: HSC/runs/RC2/w_2022_44/DM-36763
+ datasetType: goodSeeingCoadd
+- collections: refcats*
+- collections: skymaps
+ dataId:
+ skymap: hsc_rings_v1
+ datasetType: skyMap
+# Workaround for DM-43294: use a matching expression rather than
+# a specific string "HSC/calib" for the collection argument, so as
+# to avoid MissingCollectionError when the collection does not
+# exist in the target repo.
+- collections: "*HSC/calib"
+collections:
+- expression: HSC/calib
+ flattenChains: True
+ includeChains: True
+- expression: HSC/templates
+- expression: HSC/calib/unbounded
diff --git a/etc/export_latiss.yaml b/etc/export_latiss.yaml
new file mode 100644
index 00000000..83a04fdf
--- /dev/null
+++ b/etc/export_latiss.yaml
@@ -0,0 +1,23 @@
+datasets:
+- collections: LATISS/templates
+ datasetType: goodSeeingCoadd
+- collections: refcats*
+ datasetType: atlas_refcat2_20220201
+- collections: refcats*
+ datasetType: gaia_dr3_20230707
+- collections: skymaps
+ datasetType: skyMap
+# Workaround for DM-43294: use a matching expression rather than
+# a specific string "LATISS/calib" for the collection argument, so as
+# to avoid MissingCollectionError when the collection does not
+# exist in the target repo.
+- collections: "*LATISS/calib"
+- collections: pretrained_models
+ datasetType: pretrainedModelPackage
+collections:
+- expression: LATISS/calib
+ flattenChains: True
+ includeChains: True
+- expression: LATISS/templates
+- expression: LATISS/calib/unbounded
+- expression: pretrained_models
diff --git a/etc/export_templates.yaml b/etc/export_templates.yaml
new file mode 100644
index 00000000..9e6834d6
--- /dev/null
+++ b/etc/export_templates.yaml
@@ -0,0 +1,3 @@
+datasets:
+- collections: u/hchiang2/w_2023_01/DM-37751/templates
+ datasetType: goodSeeingCoadd
diff --git a/python/activator/middleware_interface.py b/python/activator/middleware_interface.py
index a44a2a48..dd605ce4 100644
--- a/python/activator/middleware_interface.py
+++ b/python/activator/middleware_interface.py
@@ -538,7 +538,7 @@ def _export_refcats(self, center, radius):
# collection, so we have to specify a list here. Replace this
# with another solution ASAP.
possible_refcats = ["gaia", "panstarrs", "gaia_dr2_20200414", "ps1_pv3_3pi_20170110",
- "atlas_refcat2_20220201", "gaia_dr3_20230707"]
+ "atlas_refcat2_20220201", "gaia_dr3_20230707", "uw_stars_20240228"]
_log.debug("Searching for refcats in %s...", shard_ids)
refcats = set(_filter_datasets(
self.central_butler, self.butler,
diff --git a/python/activator/raw.py b/python/activator/raw.py
index 7aa13736..0c38c204 100644
--- a/python/activator/raw.py
+++ b/python/activator/raw.py
@@ -40,7 +40,7 @@
import re
import time
-from lsst.obs.lsst import LsstCam, LsstComCam
+from lsst.obs.lsst import LsstCam, LsstComCam, LsstComCamSim
from lsst.obs.lsst.translators.lsst import LsstBaseTranslator
from lsst.resources import ResourcePath
@@ -65,7 +65,7 @@
################################
# The list of camera names that might be used for LSST
-_LSST_CAMERA_LIST = ("LATISS", "ComCam", "LSSTComCam", "LSSTCam", "TS8", "LSST-TS8")
+_LSST_CAMERA_LIST = ("LATISS", "ComCam", "LSSTComCam", "LSSTComCamSim", "LSSTCam", "TS8", "LSST-TS8")
# Translate from Camera path prefixes to official names.
_TRANSLATE_INSTRUMENT = {
@@ -77,6 +77,7 @@
_CAMERA_ABBREV = {
"LATISS": "AT",
"LSSTComCam": "CC",
+ "LSSTComCamSim": "CC",
"LSSTCam": "MC",
"LSST-TS8": "TS",
}
@@ -90,10 +91,12 @@
_LSSTCAM = LsstCam.getCamera().getNameMap()
_LSSTCOMCAM = LsstComCam.getCamera().getNameMap()
+_LSSTCOMCAMSIM = LsstComCamSim.getCamera().getNameMap()
_DETECTOR_FROM_RS = {
"LATISS": {"R00_S00": 0},
"LSSTComCam": {name: value.getId() for name, value in _LSSTCOMCAM.items()},
+ "LSSTComCamSim": {name: value.getId() for name, value in _LSSTCOMCAMSIM.items()},
"LSST-TS8": {f"R22_S{x}{y}": x * 3 + y for x in range(3) for y in range(3)},
"LSSTCam": {name: value.getId() for name, value in _LSSTCAM.items()},
}
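
A small sketch of how the new LSSTComCamSim entry resolves a raft/sensor name
to a detector ID; the key "R22_S01" is an assumed example of a ComCam-style
sensor name.

    from lsst.obs.lsst import LsstComCamSim

    # Same construction as _LSSTCOMCAMSIM and _DETECTOR_FROM_RS above.
    name_map = LsstComCamSim.getCamera().getNameMap()
    detector_from_rs = {name: det.getId() for name, det in name_map.items()}
    detector_id = detector_from_rs["R22_S01"]  # integer ID used when fanning out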
diff --git a/python/tester/upload.py b/python/tester/upload.py
index 05588d9a..7fee9d19 100644
--- a/python/tester/upload.py
+++ b/python/tester/upload.py
@@ -56,12 +56,14 @@
class Instrument:
n_snaps: int
n_detectors: int
+ sal_index: int = 0
INSTRUMENTS = {
- "LSSTCam": Instrument(2, 189 + 8 + 8),
- "LSSTComCam": Instrument(2, 9),
- "LATISS": Instrument(1, 1),
+ "LSSTCam": Instrument(2, 189 + 8 + 8, 1),
+ "LSSTComCam": Instrument(2, 9, 1),
+ "LSSTComCamSim": Instrument(1, 9, 3),
+ "LATISS": Instrument(1, 1, 2),
"DECam": Instrument(1, 62),
"HSC": Instrument(1, 112),
}
@@ -153,6 +155,38 @@ def main():
_log.error(f"No raw files found for {instrument}, aborting.")
+def _add_to_raw_pool(raw_pool, snap_num, visit, blob):
+ """Add a detector-snap to the raw pool for uploading.
+
+ Parameters
+ ----------
+ raw_pool : mapping [`str`, mapping [`int`, mapping [`activator.FannedOutVisit`, `s3.ObjectSummary`]]]
+ Available raws, as a mapping from group ID to a mapping from snap ID
+ to an innermost mapping, whose keys are the observation metadata for
+ each detector and whose values are Blobs representing the images
+ taken in those detector-snaps.
+ snap_num : `int`
+ The snap number for this raw.
+ visit : `activator.visit.FannedOutVisit`
+ The visit-detector combination to be added with this raw.
+ blob : `s3.ObjectSummary`
+ The raw image for this detector-snap.
+ """
+ group = visit.groupId
+ if group in raw_pool:
+ snap_dict = raw_pool[group]
+ if snap_num in snap_dict:
+ _log.debug(f"New detector {visit.detector} added to snap {snap_num} of group {group}.")
+ detector_dict = snap_dict[snap_num]
+ detector_dict[visit] = blob
+ else:
+ _log.debug(f"New snap {snap_num} added to group {group}.")
+ snap_dict[snap_num] = {visit: blob}
+ else:
+ _log.debug(f"New group {group} registered.")
+ raw_pool[group] = {snap_num: {visit: blob}}
+
+
def get_samples_non_lsst(bucket, instrument):
"""Return any predefined raw exposures for a non-LSST instrument.
@@ -222,18 +256,7 @@ def get_samples_non_lsst(bucket, instrument):
private_sndStamp=hsc_metadata[exp_id]["time"]-2*duration,
)
_log.debug(f"File {blob.key} parsed as snap {snap_num} of visit {visit}.")
- if group in result:
- snap_dict = result[group]
- if snap_num in snap_dict:
- _log.debug(f"New detector {visit.detector} added to snap {snap_num} of group {group}.")
- detector_dict = snap_dict[snap_num]
- detector_dict[visit] = blob
- else:
- _log.debug(f"New snap {snap_num} added to group {group}.")
- snap_dict[snap_num] = {visit: blob}
- else:
- _log.debug(f"New group {group} registered.")
- result[group] = {snap_num: {visit: blob}}
+ _add_to_raw_pool(result, snap_num, visit, blob)
return result
@@ -282,14 +305,15 @@ def get_samples_lsst(bucket, instrument):
detector=_DETECTOR_FROM_RS[instrument][m["raft_sensor"]],
groupId=md["GROUPID"],
nimages=INSTRUMENTS[instrument].n_snaps,
- filters=md["FILTBAND"],
+ # ComCam currently sets the FILTBAND header to null.
+ filters=md["FILTBAND"] or md["FILTER"],
coordinateSystem=FannedOutVisit.CoordSys.ICRS,
position=[md["RA"], md["DEC"]],
startTime=astropy.time.Time(md["DATE-BEG"], format="isot", scale="tai").unix_tai,
rotationSystem=FannedOutVisit.RotSys.SKY,
cameraAngle=md["ROTPA"],
survey="SURVEY",
- salIndex=2, # 2 is LATISS
+ salIndex=INSTRUMENTS[instrument].sal_index,
scriptSalIndex=2,
dome=FannedOutVisit.Dome.OPEN,
duration=duration,
@@ -298,7 +322,7 @@ def get_samples_lsst(bucket, instrument):
).unix_tai-2*duration,
)
_log.debug(f"File {blob.key} parsed as visit {visit} and registered as group {md['GROUPID']}.")
- result[md["GROUPID"]] = {0: {visit: blob}}
+ _add_to_raw_pool(result, 0, visit, blob)
return result
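
To make the refactoring concrete, here is a hypothetical call sequence and
the pool it builds; ``visit_a`` and ``visit_b`` stand for FannedOutVisit
objects sharing a group ID, and ``blob_a``/``blob_b`` for s3.ObjectSummary
instances.

    raw_pool = {}
    _add_to_raw_pool(raw_pool, snap_num=0, visit=visit_a, blob=blob_a)
    _add_to_raw_pool(raw_pool, snap_num=0, visit=visit_b, blob=blob_b)
    # Assuming both visits carry groupId "g1", raw_pool is now:
    #     {"g1": {0: {visit_a: blob_a, visit_b: blob_b}}}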
diff --git a/python/tester/utils.py b/python/tester/utils.py
index 80f3121e..4f00238f 100644
--- a/python/tester/utils.py
+++ b/python/tester/utils.py
@@ -39,7 +39,7 @@
from lsst.obs.lsst.translators.lsst import LsstBaseTranslator
-from activator.raw import _LSST_CAMERA_LIST
+from activator.raw import _LSST_CAMERA_LIST, _CAMERA_ABBREV
_log = logging.getLogger("lsst." + __name__)
_log.setLevel(logging.INFO)
@@ -128,13 +128,13 @@ def make_exposure_id(instrument, group_id, snap):
The header key-value pairs to accompany with the exposure ID in the
format for ``instrument``'s header.
"""
- match instrument:
- case "HSC":
- return make_hsc_id(group_id, snap)
- case "LATISS":
- return make_latiss_id(group_id, snap)
- case _:
- raise NotImplementedError(f"Exposure ID generation not supported for {instrument}.")
+ if instrument in _LSST_CAMERA_LIST:
+ abbrev = _CAMERA_ABBREV[instrument]
+ return make_lsst_id(group_id, snap, abbrev)
+ elif instrument == "HSC":
+ return make_hsc_id(group_id, snap)
+ else:
+ raise NotImplementedError(f"Exposure ID generation not supported for {instrument}.")
def make_hsc_id(group_id, snap):
@@ -175,8 +175,8 @@ def make_hsc_id(group_id, snap):
return exposure_id, {"EXP-ID": f"HSCE{exposure_id:08d}"}
-def make_latiss_id(group_id, snap):
- """Generate an exposure ID that the Butler can parse as a valid LATISS ID.
+def make_lsst_id(group_id, snap, abbrev):
+ """Generate an exposure ID that the Butler can parse as a valid LSST ID.
Parameters
----------
@@ -184,22 +184,28 @@ def make_latiss_id(group_id, snap):
The mocked group ID.
snap : `int`
A snap ID.
+ abbrev : `str`
+ The abbreviation of the LSST camera.
Returns
-------
exposure_number :
An exposure ID in the format expected by Gen 3 Middleware.
headers : `dict`
- The key-value pairs are in the form to appear in LATISS headers.
+ The key-value pairs are in the form to appear in LSST headers.
"""
day_obs, seq_num = decode_group(group_id)
exposure_num = LsstBaseTranslator.compute_exposure_id(day_obs, seq_num)
- obs_id = f"AT_O_{day_obs}_{seq_num:06d}"
+ # Just use the default ``O``, which may differ from the original
+ # controller in the header.
+ controller = "O"
+ obs_id = f"{abbrev}_{controller}_{day_obs}_{seq_num:06d}"
return exposure_num, {
"DAYOBS": day_obs,
"SEQNUM": seq_num,
"OBSID": obs_id,
"GROUPID": group_id,
+ "CONTRLLR": controller,
}
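
For illustration, these are the pieces ``make_lsst_id`` assembles for
LSSTComCamSim (abbreviation "CC"); the day_obs and seq_num values are
hypothetical stand-ins for what decode_group would return.

    from lsst.obs.lsst.translators.lsst import LsstBaseTranslator

    day_obs, seq_num = 20240320, 42
    exposure_num = LsstBaseTranslator.compute_exposure_id(day_obs, seq_num)
    obs_id = f"CC_O_{day_obs}_{seq_num:06d}"  # "CC_O_20240320_000042"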