PEP8 and Flake8 fixes
timj committed Aug 8, 2016
1 parent b2c5fdd commit 3cb2a7c
Showing 40 changed files with 496 additions and 448 deletions.
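Most of the hunks below are mechanical fixes of the kind pycodestyle/flake8 reports: a space after ':' in dict literals, a space before the parenthesis in %-formatting, two blank lines between top-level definitions, continuation lines aligned under the opening bracket, and long lines wrapped. A minimal standalone illustration of the before/after idioms (hypothetical names; the error codes given are the usual pycodestyle ones, cited from memory rather than from this commit):

storageCfg = 'posix'
alias = 'calexp'

# E231: add a space after ':' in dict literals, e.g.
# {'storageCfg':storageCfg} becomes:
cfg = {'storageCfg': storageCfg}

# E225/E228: add a space before '(' in %-formatting, e.g.
# "... %s" %(alias,) becomes:
msg = "Badly formatted alias string: %s" % (alias,)

# E128/E501: wrap long calls, aligning continuation lines
# under the opening parenthesis.
print("datasetExists() for non-file storage %s, dataset type=%s" %
      ('BoostStorage', alias))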
20 changes: 9 additions & 11 deletions python/lsst/daf/persistence/access.py
@@ -1,8 +1,4 @@
from future import standard_library
standard_library.install_aliases()
from builtins import object
#!/usr/bin/env python

#
# LSST Data Management System
# Copyright 2016 LSST Corporation.
@@ -24,27 +20,29 @@
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#

import pickle
import collections
import os
from future import standard_library
standard_library.install_aliases()
from builtins import object

from lsst.daf.persistence import Policy

import yaml


class AccessCfg(Policy, yaml.YAMLObject):
yaml_tag = u"!AccessCfg"

def __init__(self, cls, storageCfg):
super(AccessCfg, self).__init__({'storageCfg':storageCfg, 'cls':cls})
super(AccessCfg, self).__init__({'storageCfg': storageCfg, 'cls': cls})


class Access(object):
"""Implements an butler framework interface for Transport, Storage, and Registry
.. warning::
Access is 'wet paint' and very likely to change. Use of it in production code other than via the 'old butler'
API is strongly discouraged.
Access is 'wet paint' and very likely to change. Use of it in production
code other than via the 'old butler' API is strongly discouraged.
"""

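For readers unfamiliar with the yaml.YAMLObject pattern used by AccessCfg above: a class-level yaml_tag registers the class with PyYAML so that instances round-trip through dump/load under that tag. A minimal standalone sketch (a toy class, not the LSST Policy/AccessCfg machinery):

import yaml

class PointCfg(yaml.YAMLObject):
    """Toy stand-in for a cfg class; not part of lsst.daf.persistence."""
    yaml_tag = u"!PointCfg"
    yaml_loader = yaml.SafeLoader  # also register the constructor with safe_load

    def __init__(self, x, y):
        self.x = x
        self.y = y

text = yaml.dump(PointCfg(1, 2))   # serialized as a '!PointCfg' tagged mapping
restored = yaml.safe_load(text)    # reconstructed via the registered tag
assert (restored.x, restored.y) == (1, 2)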
54 changes: 23 additions & 31 deletions python/lsst/daf/persistence/butler.py
@@ -33,31 +33,31 @@

import collections
import copy
import pickle
import inspect
import itertools
import os

import yaml

import lsst.pex.logging as pexLog
import lsst.pex.policy as pexPolicy
from . import StorageList, LogicalLocation, ReadProxy, ButlerSubset, ButlerDataRef, Persistence, Repository, \
Access, Storage, Policy, NoResults, MultipleResults, Repository, DataId, RepositoryCfg, \
RepositoryArgs, listify, setify, sequencify, doImport
from . import LogicalLocation, ReadProxy, ButlerSubset, ButlerDataRef, Persistence, \
Storage, Policy, NoResults, Repository, DataId, RepositoryCfg, \
RepositoryArgs, listify, setify, sequencify, doImport


class ButlerCfg(Policy, yaml.YAMLObject):
"""Represents a Butler configuration.
.. warning::
cfg is 'wet paint' and very likely to change. Use of it in production code other than via the 'old butler'
API is strongly discouraged.
cfg is 'wet paint' and very likely to change. Use of it in production
code other than via the 'old butler' API is strongly discouraged.
"""
yaml_tag = u"!ButlerCfg"

def __init__(self, cls, repoCfg):
super(ButlerCfg, self).__init__({'repoCfg':repoCfg, 'cls':cls})
super(ButlerCfg, self).__init__({'repoCfg': repoCfg, 'cls': cls})


class RepoData(object):
"""Container object for repository data used by Butler"""
@@ -89,6 +89,7 @@ def __repr__(self):

class RepoDataContainer(object):
"""Container object for RepoData instances owned by a Butler instance."""

def __init__(self):
self.byRepoRoot = collections.OrderedDict() # {repo root, RepoData}
self.byCfgRoot = {} # {repo cfgRoot, RepoData}
@@ -225,8 +226,8 @@ def __init__(self, root=None, mapper=None, inputs=None, outputs=None, **mapperAr
:return:
"""
self._initArgs = {'root':root, 'mapper':mapper, 'inputs':inputs, 'outputs':outputs,
'mapperArgs':mapperArgs}
self._initArgs = {'root': root, 'mapper': mapper, 'inputs': inputs, 'outputs': outputs,
'mapperArgs': mapperArgs}

isLegacyRepository = inputs is None and outputs is None

@@ -358,8 +359,8 @@ def _addRepo(self, args, inout, defaultMapper=None, butlerIOParents=None, tags=N
"Existing output repository parents do not match butler's inputs.")
if not cfg.matchesArgs(args):
raise RuntimeError(
"Persisted RepositoryCfg and passed-in RepositoryArgs have conflicting parameters:\n" +
"\t%s\n\t%s", (cfg, args))
"Persisted RepositoryCfg and passed-in RepositoryArgs have"
" conflicting parameters:\n" + "\t%s\n\t%s", (cfg, args))
if args.mapperArgs is not None:
if cfg.mapperArgs is None:
cfg.mapperArgs = args.mapperArgs
@@ -386,7 +387,6 @@ def _addRepo(self, args, inout, defaultMapper=None, butlerIOParents=None, tags=N
args = RepositoryArgs(cfgRoot=parent, mode='r')
self._addRepo(args=args, inout='in', tags=tags)


def __repr__(self):
return 'Butler(datasetTypeAliasDict=%s, repos=%s, persistence=%s)' % (
self.datasetTypeAliasDict, self._repos, self.persistence)
@@ -424,7 +424,6 @@ def getMapperClass(root):
moved entirely into Butler Access, or made more dynamic, and the API will very likely change."""
return Storage.getMapperClass(root)


def defineAlias(self, alias, datasetType):
"""Register an alias that will be substituted in datasetTypes.
@@ -434,27 +433,26 @@ def defineAlias(self, alias, datasetType):
It may not contain '@'
"""

#verify formatting of alias:
# verify formatting of alias:
# it can have '@' as the first character (if not it's okay, we will add it) or not at all.
atLoc = alias.rfind('@')
if atLoc == -1:
alias = "@" + str(alias)
elif atLoc > 0:
raise RuntimeError("Badly formatted alias string: %s" %(alias,))
raise RuntimeError("Badly formatted alias string: %s" % (alias,))

# verify that datasetType does not contain '@'
if datasetType.count('@') != 0:
raise RuntimeError("Badly formatted type string: %s" %(datasetType))
raise RuntimeError("Badly formatted type string: %s" % (datasetType))

# verify that the alias keyword does not start with another alias keyword,
# and vice versa
for key in self.datasetTypeAliasDict:
if key.startswith(alias) or alias.startswith(key):
raise RuntimeError("Alias: %s overlaps with existing alias: %s" %(alias, key))
raise RuntimeError("Alias: %s overlaps with existing alias: %s" % (alias, key))

self.datasetTypeAliasDict[alias] = datasetType


def getKeys(self, datasetType=None, level=None, tag=None):
"""Returns a dict. The dict keys are the valid data id keys at or above the given level of hierarchy
for the dataset type or the entire collection if None. The dict values are the basic Python types
@@ -483,7 +481,6 @@ def getKeys(self, datasetType=None, level=None, tag=None):
break
return keys


def queryMetadata(self, datasetType, format=None, dataId={}, **rest):
"""Returns the valid values for one or more keys when given a partial
input collection data id.
@@ -528,7 +525,6 @@ def queryMetadata(self, datasetType, format=None, dataId={}, **rest):

return tuples


def datasetExists(self, datasetType, dataId={}, **rest):
"""Determines if a dataset file exists.
@@ -555,7 +551,7 @@ def datasetExists(self, datasetType, dataId={}, **rest):
additionalData = location.getAdditionalData()
storageName = location.getStorageName()
if storageName in ('BoostStorage', 'FitsStorage', 'PafStorage',
'PickleStorage', 'ConfigStorage', 'FitsCatalogStorage'):
'PickleStorage', 'ConfigStorage', 'FitsCatalogStorage'):
locations = location.getLocations()
for locationString in locations:
logLoc = LogicalLocation(locationString, additionalData).locString()
@@ -568,11 +564,10 @@ def datasetExists(self, datasetType, dataId={}, **rest):
return False
return True
self.log.log(pexLog.Log.WARN,
"datasetExists() for non-file storage %s, dataset type=%s, keys=%s" %
(storageName, datasetType, str(dataId)))
"datasetExists() for non-file storage %s, dataset type=%s, keys=%s" %
(storageName, datasetType, str(dataId)))
return True


def get(self, datasetType, dataId=None, immediate=False, **rest):
"""Retrieves a dataset given an input collection data id.
@@ -620,7 +615,6 @@ def get(self, datasetType, dataId=None, immediate=False, **rest):
return callback()
return ReadProxy(callback)


def put(self, obj, datasetType, dataId={}, doBackup=False, **rest):
"""Persists a dataset given an output collection data id.
@@ -674,7 +668,6 @@ def subset(self, datasetType, level=None, dataId={}, **rest):
dataId.update(**rest)
return ButlerSubset(self, datasetType, level, dataId)


def dataRef(self, datasetType, level=None, dataId={}, **rest):
"""Returns a single ButlerDataRef.
@@ -694,12 +687,11 @@ def dataRef(self, datasetType, level=None, dataId={}, **rest):
subset = self.subset(datasetType, level, dataId, **rest)
if len(subset) != 1:
raise RuntimeError("No unique dataset for: Dataset type:%s Level:%s Data ID:%s Keywords:%s" %
(str(datasetType), str(level), str(dataId), str(rest)))
(str(datasetType), str(level), str(dataId), str(rest)))
return ButlerDataRef(subset, subset.cache[0])


def _read(self, location):
trace = pexLog.BlockTimingLog(self.log, "read", pexLog.BlockTimingLog.INSTRUM+1)
trace = pexLog.BlockTimingLog(self.log, "read", pexLog.BlockTimingLog.INSTRUM + 1)
results = location.repository.read(location)
if len(results) == 1:
results = results[0]
@@ -724,7 +716,7 @@ def _resolveDatasetTypeAlias(self, datasetType):

# If an alias specifier can not be resolved then throw.
if datasetType.find('@') != -1:
raise RuntimeError("Unresolvable alias specifier in datasetType: %s" %(datasetType))
raise RuntimeError("Unresolvable alias specifier in datasetType: %s" % (datasetType))

return datasetType

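The alias handling touched in defineAlias and _resolveDatasetTypeAlias above enforces a small set of rules: an alias keyword starts with '@' (added if missing), a dataset type may not contain '@', and no registered alias may be a prefix of another. Condensed into a standalone sketch of the same checks (simplified from the diff; not the Butler API itself):

def defineAlias(aliasDict, alias, datasetType):
    # '@' may appear only as the first character of the alias
    atLoc = alias.rfind('@')
    if atLoc == -1:
        alias = '@' + str(alias)
    elif atLoc > 0:
        raise RuntimeError("Badly formatted alias string: %s" % (alias,))
    # the target dataset type may not contain '@' at all
    if datasetType.count('@') != 0:
        raise RuntimeError("Badly formatted type string: %s" % (datasetType,))
    # no alias keyword may start with another alias keyword
    for key in aliasDict:
        if key.startswith(alias) or alias.startswith(key):
            raise RuntimeError("Alias: %s overlaps with existing alias: %s" % (alias, key))
    aliasDict[alias] = datasetType

aliases = {}
defineAlias(aliases, 'raw', 'eimage')
assert aliases == {'@raw': 'eimage'}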
5 changes: 5 additions & 0 deletions python/lsst/daf/persistence/butlerExceptions.py
@@ -23,15 +23,20 @@
# see <http://www.lsstcorp.org/LegalNotices/>.
#


class NoMapperException(Exception):
pass


class NoResults(RuntimeError):

def __init__(self, message, datasetType, dataId):
message += ' datasetType:' + datasetType + ' dataId:' + str(dataId)
super(NoResults, self).__init__(message)


class MultipleResults(RuntimeError):

def __init__(self, message, datasetType, dataId, locations):
message += ' datasetType:' + datasetType + ' dataId:' + str(dataId) + ' locations:'
for location in locations:
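NoResults (and MultipleResults) simply fold the dataset type and data id into the exception message before delegating to RuntimeError. For example, with the class exactly as shown above:

class NoResults(RuntimeError):
    # as in the diff above
    def __init__(self, message, datasetType, dataId):
        message += ' datasetType:' + datasetType + ' dataId:' + str(dataId)
        super(NoResults, self).__init__(message)

try:
    raise NoResults('No locations for get:', 'calexp', {'visit': 1})
except NoResults as exc:
    print(exc)  # No locations for get: datasetType:calexp dataId:{'visit': 1}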
3 changes: 2 additions & 1 deletion python/lsst/daf/persistence/butlerFactory.py
@@ -27,7 +27,8 @@
"""This module defines the ButlerFactory class."""
from builtins import object

from lsst.daf.persistence import Butler, RepositoryArgs, PosixStorage
from lsst.daf.persistence import Butler


class ButlerFactory(object):
"""ButlerFactory creates data Butlers containing data mappers. Use of it
16 changes: 10 additions & 6 deletions python/lsst/daf/persistence/butlerLocation.py
@@ -45,8 +45,10 @@ class ButlerLocation(yaml.YAMLObject):

def __repr__(self):
return \
'ButlerLocation(pythonType=%r, cppType=%r, storageName=%r, locationList=%r, additionalData=%r, mapper=%r)' % \
(self.pythonType, self.cppType, self.storageName, self.locationList, self.additionalData, self.mapper)
'ButlerLocation(pythonType=%r, cppType=%r, storageName=%r, locationList=%r,' \
' additionalData=%r, mapper=%r)' % \
(self.pythonType, self.cppType, self.storageName, self.locationList,
self.additionalData, self.mapper)

def __init__(self, pythonType, cppType, storageName, locationList, dataId, mapper, storage=None):
self.pythonType = pythonType
@@ -58,11 +60,11 @@ def __init__(self, pythonType, cppType, storageName, locationList, dataId, mappe
self.additionalData = dafBase.PropertySet()
for k, v in dataId.items():
self.additionalData.set(k, v)
self.dataId=dataId
self.dataId = dataId

def __str__(self):
s = "%s at %s(%s)" % (self.pythonType, self.storageName,
", ".join(self.locationList))
", ".join(self.locationList))
return s

@staticmethod
Expand All @@ -73,8 +75,10 @@ def to_yaml(dumper, obj):
:return:
"""
return dumper.represent_mapping(ButlerLocation.yaml_tag,
{'pythonType':obj.pythonType, 'cppType':obj.cppType, 'storageName':obj.storageName,
'locationList':obj.locationList, 'mapper':obj.mapper, 'storage':obj.storage, 'dataId':obj.dataId})
{'pythonType': obj.pythonType, 'cppType': obj.cppType,
'storageName': obj.storageName,
'locationList': obj.locationList, 'mapper': obj.mapper,
'storage': obj.storage, 'dataId': obj.dataId})

@staticmethod
def from_yaml(loader, node):
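ButlerLocation overrides to_yaml/from_yaml rather than relying on the default YAMLObject behavior, which lets it control exactly which attributes are serialized via dumper.represent_mapping. A self-contained sketch of that pattern (a toy class with invented fields, not ButlerLocation itself):

import yaml

class Loc(yaml.YAMLObject):
    """Toy example of custom to_yaml/from_yaml; not ButlerLocation."""
    yaml_tag = u"!Loc"
    yaml_loader = yaml.SafeLoader

    def __init__(self, storageName, locationList):
        self.storageName = storageName
        self.locationList = locationList

    @classmethod
    def to_yaml(cls, dumper, obj):
        # serialize only the chosen fields, as a tagged mapping
        return dumper.represent_mapping(cls.yaml_tag,
                                        {'storageName': obj.storageName,
                                         'locationList': obj.locationList})

    @classmethod
    def from_yaml(cls, loader, node):
        d = loader.construct_mapping(node, deep=True)
        return cls(d['storageName'], d['locationList'])

loc = yaml.safe_load(yaml.dump(Loc('FitsStorage', ['a.fits'])))
assert loc.locationList == ['a.fits']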
27 changes: 15 additions & 12 deletions python/lsst/daf/persistence/butlerSubset.py
@@ -32,6 +32,7 @@

from . import DataId


class ButlerSubset(object):

"""ButlerSubset is a container for ButlerDataRefs. It represents a
@@ -124,6 +125,7 @@ def __iter__(self):

return ButlerSubsetIterator(self)


class ButlerSubsetIterator(object):
"""
An iterator over the ButlerDataRefs in a ButlerSubset.
@@ -139,6 +141,7 @@ def __iter__(self):
def __next__(self):
return ButlerDataRef(self.butlerSubset, next(self.iter))


class ButlerDataRef(object):
"""
A ButlerDataRef is a reference to a potential dataset or group of datasets
@@ -179,7 +182,7 @@ def __init__(self, butlerSubset, dataId):
self.dataId = dataId

def __repr__(self):
return 'ButlerDataRef(butlerSubset=%s, dataId=%s)' %(self.butlerSubset, self.dataId)
return 'ButlerDataRef(butlerSubset=%s, dataId=%s)' % (self.butlerSubset, self.dataId)

def get(self, datasetType=None, **rest):
"""
@@ -220,15 +223,15 @@ def subLevels(self):
@returns (iterable) list of strings with level keys."""

return set(
self.butlerSubset.butler.getKeys(
self.butlerSubset.datasetType,
tag=self.butlerSubset.dataId.tag).keys()
) - set(
self.butlerSubset.butler.getKeys(
self.butlerSubset.datasetType,
self.butlerSubset.level,
tag=self.butlerSubset.dataId.tag).keys()
)
self.butlerSubset.butler.getKeys(
self.butlerSubset.datasetType,
tag=self.butlerSubset.dataId.tag).keys()
) - set(
self.butlerSubset.butler.getKeys(
self.butlerSubset.datasetType,
self.butlerSubset.level,
tag=self.butlerSubset.dataId.tag).keys()
)

def subItems(self, level=None):
"""
Expand Down Expand Up @@ -256,7 +259,7 @@ def subItems(self, level=None):
if level is None:
return ()
return self.butlerSubset.butler.subset(self.butlerSubset.datasetType,
level, self.dataId)
level, self.dataId)

def datasetExists(self, datasetType=None, **rest):
"""
Expand All @@ -270,7 +273,7 @@ def datasetExists(self, datasetType=None, **rest):
if datasetType is None:
datasetType = self.butlerSubset.datasetType
return self.butlerSubset.butler.datasetExists(
datasetType, self.dataId, **rest)
datasetType, self.dataId, **rest)

def getButler(self):
"""
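The reindented subLevels above is just a set difference: the data id keys known for the dataset type across all levels, minus the keys at or above the ref's own level. Schematically (hypothetical key names):

allLevelKeys = {'visit': int, 'ccd': int, 'amp': int}  # getKeys() with no level
upperLevelKeys = {'visit': int, 'ccd': int}            # getKeys() at or above the ref's level
subLevels = set(allLevelKeys.keys()) - set(upperLevelKeys.keys())
assert subLevels == {'amp'}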
