Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

DM-7069: Port to Python 3 #19

Merged
merged 18 commits into from
Aug 9, 2016
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ doc/*.inc
doc/doxygen.conf
doc/xml
tests/.tests
tests/.cache
version.py
foo1.pickle
foo3.pickle
Expand Down
1 change: 0 additions & 1 deletion python/lsst/daf/persistence/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,4 +43,3 @@
from .butler import *
from .butlerFactory import *
from .version import *

19 changes: 10 additions & 9 deletions python/lsst/daf/persistence/access.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
#!/usr/bin/env python

#
# LSST Data Management System
# Copyright 2016 LSST Corporation.
Expand All @@ -21,27 +20,29 @@
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#

import cPickle
import collections
import os
from future import standard_library
standard_library.install_aliases()
from builtins import object

from lsst.daf.persistence import Policy

import yaml


class AccessCfg(Policy, yaml.YAMLObject):
    """Policy-backed, YAML-serializable configuration for an Access instance.

    Holds the class to instantiate (``cls``) and the configuration of its
    storage (``storageCfg``) as policy keys.
    """
    yaml_tag = u"!AccessCfg"

    def __init__(self, cls, storageCfg):
        # The diff residue repeated this call twice (old + new styling of the
        # same line); a single initialization is the correct reconstruction.
        super(AccessCfg, self).__init__({'storageCfg': storageCfg, 'cls': cls})


class Access:
class Access(object):
"""Implements an butler framework interface for Transport, Storage, and Registry

.. warning::

Access is 'wet paint' and very likely to change. Use of it in production code other than via the 'old butler'
API is strongly discouraged.
Access is 'wet paint' and very likely to change. Use of it in production
code other than via the 'old butler' API is strongly discouraged.

"""

Expand Down
148 changes: 72 additions & 76 deletions python/lsst/daf/persistence/butler.py

Large diffs are not rendered by default.

6 changes: 6 additions & 0 deletions python/lsst/daf/persistence/butlerExceptions.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
from builtins import str
#!/usr/bin/env python

#
Expand All @@ -22,15 +23,20 @@
# see <http://www.lsstcorp.org/LegalNotices/>.
#


class NoMapperException(Exception):
    """Exception indicating that no mapper is available."""


class NoResults(RuntimeError):
    """Error raised with the datasetType and dataId that produced no results.

    The datasetType and dataId are appended to the caller-supplied message.
    """

    def __init__(self, message, datasetType, dataId):
        details = ' datasetType:' + datasetType + ' dataId:' + str(dataId)
        super(NoResults, self).__init__(message + details)


class MultipleResults(RuntimeError):

def __init__(self, message, datasetType, dataId, locations):
message += ' datasetType:' + datasetType + ' dataId:' + str(dataId) + ' locations:'
for location in locations:
Expand Down
4 changes: 3 additions & 1 deletion python/lsst/daf/persistence/butlerFactory.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,10 @@
# -*- python -*-

"""This module defines the ButlerFactory class."""
from builtins import object

from lsst.daf.persistence import Butler

from lsst.daf.persistence import Butler, RepositoryArgs, PosixStorage

class ButlerFactory(object):
"""ButlerFactory creates data Butlers containing data mappers. Use of it
Expand Down
24 changes: 13 additions & 11 deletions python/lsst/daf/persistence/butlerLocation.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@

import yaml

from . import iterify


class ButlerLocation(yaml.YAMLObject):
Expand All @@ -44,27 +45,26 @@ class ButlerLocation(yaml.YAMLObject):

def __repr__(self):
    """Unambiguous, constructor-style representation of this location."""
    # The residue contained both the pre-wrap and post-wrap versions of this
    # return; a single (wrapped, post-diff) statement is the reconstruction.
    return \
        'ButlerLocation(pythonType=%r, cppType=%r, storageName=%r, locationList=%r,' \
        ' additionalData=%r, mapper=%r)' % \
        (self.pythonType, self.cppType, self.storageName, self.locationList,
         self.additionalData, self.mapper)

def __init__(self, pythonType, cppType, storageName, locationList, dataId, mapper, storage=None):
    """Initialize the location.

    Parameters
    ----------
    pythonType :
        Python type of the persisted object (exact kind not shown here).
    cppType :
        C++ type of the persisted object, if any.
    storageName :
        Name of the storage mechanism used for this location.
    locationList :
        A single location or an iterable of locations; normalized to a list.
    dataId : dict
        Key-value pairs; each pair is copied into ``additionalData``.
    mapper :
        The mapper associated with this location.
    storage :
        Storage interface, optional.
    """
    self.pythonType = pythonType
    self.cppType = cppType
    self.storageName = storageName
    self.mapper = mapper
    self.storage = storage
    # iterify replaces the old hasattr('__iter__') check: it wraps a scalar
    # into a list so locationList is always iterable.
    self.locationList = iterify(locationList)
    self.additionalData = dafBase.PropertySet()
    # dict.items() (not the Python-2-only iteritems) per the py3 port.
    for k, v in dataId.items():
        self.additionalData.set(k, v)
    self.dataId = dataId

def __str__(self):
    """Human-readable summary: "<pythonType> at <storageName>(<locations>)"."""
    # The residue duplicated the continuation line (old + re-indented new),
    # leaving an unbalanced parenthesis; one continuation is correct.
    s = "%s at %s(%s)" % (self.pythonType, self.storageName,
                          ", ".join(self.locationList))
    return s

@staticmethod
Expand All @@ -75,8 +75,10 @@ def to_yaml(dumper, obj):
:return:
"""
return dumper.represent_mapping(ButlerLocation.yaml_tag,
{'pythonType':obj.pythonType, 'cppType':obj.cppType, 'storageName':obj.storageName,
'locationList':obj.locationList, 'mapper':obj.mapper, 'storage':obj.storage, 'dataId':obj.dataId})
{'pythonType': obj.pythonType, 'cppType': obj.cppType,
'storageName': obj.storageName,
'locationList': obj.locationList, 'mapper': obj.mapper,
'storage': obj.storage, 'dataId': obj.dataId})

@staticmethod
def from_yaml(loader, node):
Expand Down
40 changes: 22 additions & 18 deletions python/lsst/daf/persistence/butlerSubset.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,11 +26,13 @@

"""This module defines the ButlerSubset class and the ButlerDataRefs contained
within it as well as an iterator over the subset."""

from __future__ import with_statement
from builtins import next
from builtins import range
from builtins import object

from . import DataId


class ButlerSubset(object):

"""ButlerSubset is a container for ButlerDataRefs. It represents a
Expand Down Expand Up @@ -79,7 +81,7 @@ def __init__(self, butler, datasetType, level, dataId):
keys = self.butler.getKeys(datasetType, level, tag=dataId.tag)
if keys is None:
return
fmt = list(keys.iterkeys())
fmt = list(keys.keys())

# Don't query if we already have a complete dataId
completeId = True
Expand All @@ -97,7 +99,7 @@ def __init__(self, butler, datasetType, level, dataId):
if len(fmt) == 1:
tempId[fmt[0]] = idTuple
else:
for i in xrange(len(fmt)):
for i in range(len(fmt)):
tempId[fmt[i]] = idTuple[i]
self.cache.append(tempId)

Expand All @@ -123,6 +125,7 @@ def __iter__(self):

return ButlerSubsetIterator(self)


class ButlerSubsetIterator(object):
"""
An iterator over the ButlerDataRefs in a ButlerSubset.
Expand All @@ -135,8 +138,9 @@ def __init__(self, butlerSubset):
def __iter__(self):
return self

def __next__(self):
    """Return the next ButlerDataRef, wrapping the next cached dataId."""
    return ButlerDataRef(self.butlerSubset, next(self.iter))

def next(self):
    # Python 2 iterator-protocol compatibility shim; the residue carried
    # both the old py2 body and the new __next__ — delegate instead of
    # duplicating the logic so both spellings keep working.
    return self.__next__()


class ButlerDataRef(object):
"""
Expand Down Expand Up @@ -178,7 +182,7 @@ def __init__(self, butlerSubset, dataId):
self.dataId = dataId

def __repr__(self):
    """Unambiguous representation naming the parent subset and this dataId."""
    # Residue held the old ("%(") and new ("% (") spellings of this return;
    # keep the single, PEP8-spaced statement.
    return 'ButlerDataRef(butlerSubset=%s, dataId=%s)' % (self.butlerSubset, self.dataId)

def get(self, datasetType=None, **rest):
"""
Expand Down Expand Up @@ -219,15 +223,15 @@ def subLevels(self):
@returns (iterable) list of strings with level keys."""

return set(
self.butlerSubset.butler.getKeys(
self.butlerSubset.datasetType,
tag=self.butlerSubset.dataId.tag).keys()
) - set(
self.butlerSubset.butler.getKeys(
self.butlerSubset.datasetType,
self.butlerSubset.level,
tag=self.butlerSubset.dataId.tag).keys()
)
self.butlerSubset.butler.getKeys(
self.butlerSubset.datasetType,
tag=self.butlerSubset.dataId.tag).keys()
) - set(
self.butlerSubset.butler.getKeys(
self.butlerSubset.datasetType,
self.butlerSubset.level,
tag=self.butlerSubset.dataId.tag).keys()
)

def subItems(self, level=None):
"""
Expand Down Expand Up @@ -255,7 +259,7 @@ def subItems(self, level=None):
if level is None:
return ()
return self.butlerSubset.butler.subset(self.butlerSubset.datasetType,
level, self.dataId)
level, self.dataId)

def datasetExists(self, datasetType=None, **rest):
"""
Expand All @@ -269,7 +273,7 @@ def datasetExists(self, datasetType=None, **rest):
if datasetType is None:
datasetType = self.butlerSubset.datasetType
return self.butlerSubset.butler.datasetExists(
datasetType, self.dataId, **rest)
datasetType, self.dataId, **rest)

def getButler(self):
"""
Expand Down
25 changes: 17 additions & 8 deletions python/lsst/daf/persistence/dataId.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,12 +21,21 @@
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
from past.builtins import basestring

# On Python 3 collections.UserDict is iterable but on Python 2
# we have to use UserDict.IterableUserDict. Since collections.UserDict
# exists on Python 2 we try the Python 2 variant first.
try:
from UserDict import IterableUserDict as UserDict
except ImportError:
from collections import UserDict

import copy
import UserDict

class DataId(UserDict.IterableUserDict):
"""DataId is used to pass scientifically meaningful key-value pairs. It may be tagged as applicable only

class DataId(UserDict):
"""DataId is used to pass scientifically meaningful key-value pairs. It may be tagged as applicable only
to repositories that are tagged with the same value"""

def __init__(self, initialdata=None, tag=None, **kwargs):
Expand All @@ -37,13 +46,13 @@ def __init__(self, initialdata=None, tag=None, **kwargs):
initialdata : dict or dataId
A dict of initial data for the DataId
tag : any type, or a container of any type
A value or container of values used to restrict the DataId to one or more repositories that
share that tag value. It will be stored in a set for comparison with the set of tags assigned to
A value or container of values used to restrict the DataId to one or more repositories that
share that tag value. It will be stored in a set for comparison with the set of tags assigned to
repositories.
kwargs : any values
key-value pairs to be used as part of the DataId's data.
"""
UserDict.UserDict.__init__(self, initialdata)
UserDict.__init__(self, initialdata)
try:
self.tag = copy.deepcopy(initialdata.tag)
except AttributeError:
Expand All @@ -57,8 +66,8 @@ def __init__(self, initialdata=None, tag=None, **kwargs):
self.tag.update(tag)
except TypeError:
self.tag.update([tag])

self.data.update(kwargs)

def __repr__(self):
    """Unambiguous representation showing the dict data and the tag set."""
    # Single return (residue duplicated old/new spacing); repr() is the
    # idiomatic equivalent of calling __repr__ directly.
    return "DataId(initialdata=%s, tag=%s)" % (repr(self.data), self.tag)