Conform to LSST naming style.
kfindeisen committed Oct 12, 2017
1 parent e53d35d · commit c8b686a
Showing 11 changed files with 252 additions and 252 deletions.
4 changes: 2 additions & 2 deletions bin.src/ap_verify.py
@@ -21,7 +21,7 @@
 # see <http://www.lsstcorp.org/LegalNotices/>.
 #
 
-from lsst.ap.verify import run_ap_verify
+from lsst.ap.verify import runApVerify
 
 if __name__ == "__main__":
-    run_ap_verify()
+    runApVerify()
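
The launcher delegates everything to the library function. A minimal sketch of invoking the renamed entry point programmatically; the argument values are hypothetical, and running it requires the LSST stack plus an installed dataset package:

    import sys

    from lsst.ap.verify import runApVerify

    # runApVerify() parses the command line itself, so set sys.argv first.
    # --dataset is required; exactly one of --output or --rerun must be given.
    sys.argv = ['ap_verify.py',
                '--dataset', 'SomeDataset',    # hypothetical dataset ID
                '--output', '/tmp/ap_output']  # hypothetical output repo
    runApVerify()
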
71 changes: 36 additions & 35 deletions python/lsst/ap/verify/ap_verify.py
@@ -28,17 +28,17 @@
 
 from __future__ import absolute_import, division, print_function
 
-__all__ = ["run_ap_verify"]
+__all__ = ["runApVerify"]
 
 import argparse
 import os
 import re
 
 import lsst.log
 from .dataset import Dataset
-from .metrics import MetricsParser, check_squash_ready, AutoJob
-from .pipeline_driver import ApPipeParser, run_ap_pipe
-from .measurements import measure_from_metadata
+from .metrics import MetricsParser, checkSquashReady, AutoJob
+from .pipeline_driver import ApPipeParser, runApPipe
+from .measurements import measureFromMetadata
 
 
 class _VerifyApParser(argparse.ArgumentParser):
@@ -52,7 +52,7 @@ def __init__(self):
             epilog='',
             parents=[ApPipeParser(), MetricsParser()],
             add_help=True)
-        self.add_argument('--dataset', choices=Dataset.get_supported_datasets(), required=True,
+        self.add_argument('--dataset', choices=Dataset.getSupportedDatasets(), required=True,
                           help='The source of data to pass through the pipeline.')
 
         output = self.add_mutually_exclusive_group(required=True)
@@ -83,12 +83,12 @@ class _FormattedType:
    invalid argument.
    """
    def __init__(self, fmt, msg='"%s" does not have the expected format.'):
-        full_format = fmt
-        if not full_format.startswith('^'):
-            full_format = '^' + full_format
-        if not full_format.endswith('$'):
-            full_format += '$'
-        self._format = re.compile(full_format)
+        fullFormat = fmt
+        if not fullFormat.startswith('^'):
+            fullFormat = '^' + fullFormat
+        if not fullFormat.endswith('$'):
+            fullFormat += '$'
+        self._format = re.compile(fullFormat)
         self._message = msg
 
     def __call__(self, value):
@@ -98,65 +98,66 @@ def __call__(self, value):
         raise argparse.ArgumentTypeError(self._message % value)
 
 
-def _get_output_dir(input_dir, output_arg, rerun_arg):
+def _getOutputDir(inputDir, outputArg, rerunArg):
     """Choose an output directory based on program arguments.
 
     Parameters
     ----------
-    input_dir: `str`
+    inputDir: `str`
         The root directory of the input dataset.
-    output_arg: `str`
-        The directory given using the `--output` command line argument.
-    rerun_arg: `str`
-        The subdirectory given using the `--rerun` command line argument. Must
-        be relative to `input_rerun`.
+    outputArg: `str`
+        The directory given using the `--output` command line argument. May
+        be None.
+    rerunArg: `str`
+        The subdirectory given using the `--rerun` command line argument. May
+        be None, otherwise must be relative to `inputDir`.
 
     Raises
     ------
     `ValueError`:
-        Neither `output_arg` nor `rerun_arg` is None, or both are.
+        Neither `outputArg` nor `rerunArg` is None, or both are.
     """
-    if output_arg and rerun_arg:
+    if outputArg and rerunArg:
         raise ValueError('Cannot provide both --output and --rerun.')
-    if not output_arg and not rerun_arg:
+    if not outputArg and not rerunArg:
         raise ValueError('Must provide either --output or --rerun.')
-    if output_arg:
-        return output_arg
+    if outputArg:
+        return outputArg
     else:
-        return os.path.join(input_dir, "rerun", rerun_arg)
+        return os.path.join(inputDir, "rerun", rerunArg)
 
 
-def _measure_final_properties(metadata, metrics_job):
+def _measureFinalProperties(metadata, metricsJob):
     """Measure any metrics that apply to the final result of the AP pipeline,
     rather than to a particular processing stage.
 
     Parameters
     ----------
     metadata: `lsst.daf.base.PropertySet`
         The metadata produced by the AP pipeline.
-    metrics_job: `verify.Job`
+    metricsJob: `verify.Job`
         The Job object to which to add any metric measurements made.
     """
-    measurements = measure_from_metadata(metadata)
+    measurements = measureFromMetadata(metadata)
     for measurement in measurements:
-        metrics_job.measurements.insert(measurement)
+        metricsJob.measurements.insert(measurement)
 
 
-def run_ap_verify():
+def runApVerify():
     lsst.log.configure()
     log = lsst.log.Log.getLogger('ap.verify.ap_verify.main')
     # TODO: what is LSST's policy on exceptions escaping into main()?
     args = _VerifyApParser().parse_args()
-    check_squash_ready(args)
+    checkSquashReady(args)
     log.debug('Command-line arguments: %s', args)
 
-    test_data = Dataset(args.dataset)
+    testData = Dataset(args.dataset)
     log.info('Dataset %s set up.', args.dataset)
-    output = _get_output_dir(test_data.dataset_root, args.output, args.rerun)
-    test_data.make_output_repo(output)
+    output = _getOutputDir(testData.datasetRoot, args.output, args.rerun)
+    testData.makeOutputRepo(output)
     log.info('Output repo at %s created.', output)
 
     with AutoJob(args) as job:
         log.info('Running pipeline...')
-        metadata = run_ap_pipe(test_data, output, args, job)
-        _measure_final_properties(metadata, job)
+        metadata = runApPipe(testData, output, args, job)
+        _measureFinalProperties(metadata, job)
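
Two sketches may make the machinery above concrete. First, _FormattedType follows the standard argparse custom-type pattern; since part of its __call__ body is elided in this view, the class below is a simplified, self-contained stand-in rather than the module's own code (the --visit flag and its pattern are invented for the example):

    import argparse
    import re

    class FormattedType:
        """Stand-in for _FormattedType: an argparse type that validates
        its argument against an anchored regular expression."""
        def __init__(self, fmt, msg='"%s" does not have the expected format.'):
            # Anchor the pattern so the whole value must match.
            fullFormat = fmt
            if not fullFormat.startswith('^'):
                fullFormat = '^' + fullFormat
            if not fullFormat.endswith('$'):
                fullFormat += '$'
            self._format = re.compile(fullFormat)
            self._message = msg

        def __call__(self, value):
            # Return the value unchanged on a match; otherwise tell argparse
            # the argument is invalid.
            if self._format.match(value):
                return value
            raise argparse.ArgumentTypeError(self._message % value)

    parser = argparse.ArgumentParser()
    parser.add_argument('--visit', type=FormattedType(r'\d+'))
    print(parser.parse_args(['--visit', '412']).visit)  # prints: 412

Second, _getOutputDir enforces an exclusive-or between --output and --rerun. A condensed restatement of the same selection rules, with hypothetical paths:

    import os

    def getOutputDir(inputDir, outputArg, rerunArg):
        # Exactly one of outputArg and rerunArg must be set.
        if outputArg and rerunArg:
            raise ValueError('Cannot provide both --output and --rerun.')
        if not outputArg and not rerunArg:
            raise ValueError('Must provide either --output or --rerun.')
        return outputArg if outputArg else os.path.join(inputDir, 'rerun', rerunArg)

    print(getOutputDir('/data/some_dataset', '/scratch/out', None))
    # /scratch/out
    print(getOutputDir('/data/some_dataset', None, 'my_rerun'))
    # /data/some_dataset/rerun/my_rerun
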
16 changes: 8 additions & 8 deletions python/lsst/ap/verify/config.py
@@ -37,30 +37,30 @@ class Config(object):
 
     def __init__(self):
         path = Policy.defaultPolicyFile('ap_verify', 'dataset_config.yaml', 'config')
-        self._all_info = Policy(path)
+        self._allInfo = Policy(path)
         self._validate()
 
     def _validate(self):
         """Tests that the loaded configuration is correct, and raises
         RuntimeError otherwise.
         """
         try:
-            dataset_map = self._all_info['datasets']
-            if not isinstance(dataset_map, Policy):
+            datasetMap = self._allInfo['datasets']
+            if not isinstance(datasetMap, Policy):
                 raise TypeError('`datasets` is not a dictionary')
         except (KeyError, TypeError) as e:
             raise_from(RuntimeError('Invalid config file.'), e)
 
         try:
-            measurement_map = self._all_info['measurements']
-            if not isinstance(measurement_map, Policy):
+            measurementMap = self._allInfo['measurements']
+            if not isinstance(measurementMap, Policy):
                 raise TypeError('`measurements` is not a dictionary')
-            timing_map = measurement_map['timing']
-            if not isinstance(timing_map, Policy):
+            timingMap = measurementMap['timing']
+            if not isinstance(timingMap, Policy):
                 raise TypeError('`measurements.timing` is not a dictionary')
         except (KeyError, TypeError) as e:
             raise_from(RuntimeError('Invalid config file.'), e)
 
 
 # Hack, but I don't know how else to make Config.instance act like a dictionary of config options
-Config.instance = Config()._all_info
+Config.instance = Config()._allInfo
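
Because Config.instance is exposed as a dictionary-like Policy, client code reads it by key. A sketch of the lookups the validator above guarantees to work (the import path follows this file's location and requires the LSST stack; the dataset ID is hypothetical):

    from lsst.ap.verify.config import Config

    # `datasets` maps dataset IDs to the EUPS packages that provide them.
    datasetMap = Config.instance['datasets']
    timingMap = Config.instance['measurements']['timing']

    # Dataset._getDatasetInfo() resolves a dataset ID with exactly this lookup:
    package = datasetMap['SomeDataset']  # hypothetical dataset ID
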
102 changes: 51 additions & 51 deletions python/lsst/ap/verify/dataset.py
@@ -72,30 +72,30 @@ class Dataset(object):
 
     Parameters
     ----------
-    dataset_id : `str`
+    datasetId: `str`
         A tag identifying the dataset.
 
     Raises
     ------
     `RuntimeError`:
-        `dataset_id` exists, but is not correctly organized or incomplete
+        `datasetId` exists, but is not correctly organized or incomplete
     `ValueError`:
-        `dataset_id` is not a recognized dataset. No side effects if this
+        `datasetId` is not a recognized dataset. No side effects if this
         exception is raised.
     """
 
-    def __init__(self, dataset_id):
+    def __init__(self, datasetId):
         try:
-            dataset_package = self._getDatasetInfo()[dataset_id]
+            datasetPackage = self._getDatasetInfo()[datasetId]
         except KeyError:
-            raise ValueError('Unsupported dataset: ' + dataset_id)
+            raise ValueError('Unsupported dataset: ' + datasetId)
 
-        self._data_root_dir = getPackageDir(dataset_package)
-        self._validate_package()
+        self._dataRootDir = getPackageDir(datasetPackage)
+        self._validatePackage()
 
-        self._init_package(dataset_package)
+        self._initPackage(datasetPackage)
 
-    def _init_package(self, name):
+    def _initPackage(self, name):
         """Load the package backing this dataset.
 
         Parameters
@@ -106,7 +106,7 @@ def _init_package(self, name):
         Eups().setup(name)
 
     @staticmethod
-    def get_supported_datasets():
+    def getSupportedDatasets():
         """The dataset IDs that can be passed to this class's constructor.
 
         Returns
@@ -137,119 +137,119 @@ def _getDatasetInfo():
         `RuntimeError`:
             the config file exists, but does not contain the expected data
         """
-        if not hasattr(Dataset, '_dataset_config'):
-            Dataset._dataset_config = Config.instance['datasets']
+        if not hasattr(Dataset, '_datasetConfig'):
+            Dataset._datasetConfig = Config.instance['datasets']
 
-        return Dataset._dataset_config
+        return Dataset._datasetConfig
 
     @property
-    def dataset_root(self):
+    def datasetRoot(self):
         """The parent directory containing everything related to the dataset.
 
         Returns
         -------
         a string giving the location of the base directory
         """
-        return self._data_root_dir
+        return self._dataRootDir
 
     @property
-    def data_location(self):
+    def rawLocation(self):
         """The directory containing the "raw" input data.
 
         Returns
         -------
         a string giving the location of the top-level directory for telescope output files
         """
-        return os.path.join(self.dataset_root, 'raw')
+        return os.path.join(self.datasetRoot, 'raw')
 
     @property
-    def calib_location(self):
+    def calibLocation(self):
         """The directory containing the calibration data.
 
         Returns
         -------
         a string giving the location of the top-level directory for master calibration files
         """
-        return os.path.join(self.dataset_root, 'calib')
+        return os.path.join(self.datasetRoot, 'calib')
 
     @property
-    def defect_location(self):
+    def defectLocation(self):
         """The directory containing defect files.
 
         Returns
         -------
         a string giving the location of the top-level directory for defect files
         """
-        return self.calib_location
+        return self.calibLocation
 
     @property
-    def refcats_location(self):
+    def refcatsLocation(self):
         """The directory containing external reference catalogs.
 
         Returns
         -------
         a string giving the location of the top-level directory for astrometric and photometric catalogs
         """
-        return os.path.join(self.dataset_root, 'refcats')
+        return os.path.join(self.datasetRoot, 'refcats')
 
     @property
-    def template_location(self):
+    def templateLocation(self):
         """The directory containing the image subtraction templates.
 
         Returns
         -------
         a string giving the location of the top-level directory for precomputed templates
         """
-        return os.path.join(self.dataset_root, 'templates')
+        return os.path.join(self.datasetRoot, 'templates')
 
     @property
-    def _stub_input_repo(self):
+    def _stubInputRepo(self):
         """The directory containing the data set's input stub.
 
         Returns
         -------
         a string giving the location of the stub input repo
         """
-        return os.path.join(self.dataset_root, 'repo')
+        return os.path.join(self.datasetRoot, 'repo')
 
-    def _validate_package(self):
+    def _validatePackage(self):
         """Confirm that the dataset directory satisfies all assumptions.
 
-        Requires that self._data_root_dir has been initialized.
+        Requires that self._dataRootDir has been initialized.
 
         Raises
         ------
         `RuntimeError`:
             if any problems are found with the package
         """
-        if not os.path.exists(self.dataset_root):
-            raise RuntimeError('Could not find dataset at ' + self.dataset_root)
-        if not os.path.exists(self.data_location):
-            raise RuntimeError('Dataset at ' + self.dataset_root + 'is missing data directory')
-        if not os.path.exists(self.calib_location):
-            raise RuntimeError('Dataset at ' + self.dataset_root + 'is missing calibration directory')
-        if not os.path.exists(self.defect_location):
-            raise RuntimeError('Dataset at ' + self.dataset_root + 'is missing defect directory')
-        # Template and refcat directories might not be subdirectories of self.dataset_root
-        if not os.path.exists(self.template_location):
-            raise RuntimeError('Dataset is missing template directory at ' + self.template_location)
-        if not os.path.exists(self.refcats_location):
-            raise RuntimeError('Dataset is missing reference catalog directory at ' + self.refcats_location)
-        if not os.path.exists(self._stub_input_repo):
-            raise RuntimeError('Dataset at ' + self.dataset_root + 'is missing stub repo')
-        if not os.path.exists(os.path.join(self._stub_input_repo, '_mapper')):
-            raise RuntimeError('Stub repo at ' + self._stub_input_repo + 'is missing mapper file')
+        if not os.path.exists(self.datasetRoot):
+            raise RuntimeError('Could not find dataset at ' + self.datasetRoot)
+        if not os.path.exists(self.rawLocation):
+            raise RuntimeError('Dataset at ' + self.datasetRoot + 'is missing data directory')
+        if not os.path.exists(self.calibLocation):
+            raise RuntimeError('Dataset at ' + self.datasetRoot + 'is missing calibration directory')
+        if not os.path.exists(self.defectLocation):
+            raise RuntimeError('Dataset at ' + self.datasetRoot + 'is missing defect directory')
+        # Template and refcat directories might not be subdirectories of self.datasetRoot
+        if not os.path.exists(self.templateLocation):
+            raise RuntimeError('Dataset is missing template directory at ' + self.templateLocation)
+        if not os.path.exists(self.refcatsLocation):
+            raise RuntimeError('Dataset is missing reference catalog directory at ' + self.refcatsLocation)
+        if not os.path.exists(self._stubInputRepo):
+            raise RuntimeError('Dataset at ' + self.datasetRoot + 'is missing stub repo')
+        if not os.path.exists(os.path.join(self._stubInputRepo, '_mapper')):
+            raise RuntimeError('Stub repo at ' + self._stubInputRepo + 'is missing mapper file')
 
-    def make_output_repo(self, output_dir):
+    def makeOutputRepo(self, outputDir):
         """Set up a directory as an output repository compatible with this dataset.
 
         If the directory already exists, any files required by the dataset will
         be added if absent; otherwise the directory will remain unchanged.
 
         Parameters
         ----------
-        output_dir: `str`
+        outputDir: `str`
             The directory where the output repository will be created.
         """
         # shutil.copytree has wrong behavior for existing destinations, do it by hand
-        _nicecopy(self._stub_input_repo, output_dir)
+        _nicecopy(self._stubInputRepo, outputDir)
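
Putting the renamed Dataset interface together, a usage sketch that mirrors what runApVerify does; the dataset ID and rerun name are hypothetical, and running it requires the LSST stack plus an installed dataset package:

    import os

    from lsst.ap.verify.dataset import Dataset

    print(Dataset.getSupportedDatasets())  # IDs accepted by the constructor

    dataset = Dataset('SomeDataset')       # hypothetical dataset ID
    print(dataset.rawLocation)             # <datasetRoot>/raw
    print(dataset.templateLocation)        # <datasetRoot>/templates

    # Create an output repository under the dataset's rerun directory,
    # as runApVerify does when --rerun is given.
    output = os.path.join(dataset.datasetRoot, 'rerun', 'my_rerun')
    dataset.makeOutputRepo(output)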
