From 238ff91be2c6c20a886f76e58de46c436b079348 Mon Sep 17 00:00:00 2001 From: andamian Date: Tue, 24 Oct 2017 12:53:49 -0700 Subject: [PATCH] Flake (#96) * Enabled flake8 on vos and fixed the codestyle errors --- .travis.yml | 9 +- vofs/dev_requirements.txt | 3 +- vofs/setup.py | 2 +- vofs/vofs/CacheMetaData.py | 27 +- vofs/vofs/CadcCache.py | 307 ++++---- vofs/vofs/SharedLock.py | 5 +- vofs/vofs/__init__.py | 2 +- vofs/vofs/mountvofs.py | 109 +-- vofs/vofs/tests/__init__.py | 4 - vofs/vofs/tests/setup_package.py | 3 - vofs/vofs/tests/test_cache_metadata.py | 24 +- vofs/vofs/tests/test_cadc_cache.py | 208 +++--- vofs/vofs/tests/test_vofs.py | 182 ++--- vofs/vofs/utils.py | 6 +- vofs/vofs/vofs.py | 173 +++-- vos/dev_requirements.txt | 1 + vos/setup.py | 2 +- vos/vos/__init__.py | 22 +- vos/vos/commands/__init__.py | 4 +- vos/vos/commands/interrupt_exception.py | 8 +- vos/vos/commands/tests/data/help_vcat.txt | 23 + vos/vos/commands/tests/data/help_vchmod.txt | 39 + vos/vos/commands/tests/data/help_vcp.txt | 47 ++ vos/vos/commands/tests/data/help_vln.txt | 34 + vos/vos/commands/tests/data/help_vlock.txt | 24 + vos/vos/commands/tests/data/help_vls.txt | 28 + vos/vos/commands/tests/data/help_vmkdir.txt | 23 + vos/vos/commands/tests/data/help_vmv.txt | 23 + vos/vos/commands/tests/data/help_vrm.txt | 22 + vos/vos/commands/tests/data/help_vrmdir.txt | 24 + vos/vos/commands/tests/data/help_vsync.txt | 54 ++ vos/vos/commands/tests/data/help_vtag.txt | 37 + vos/vos/commands/tests/data/vcat.txt | 4 + vos/vos/commands/tests/data/vchmod.txt | 4 + vos/vos/commands/tests/data/vcp.txt | 5 + vos/vos/commands/tests/data/vln.txt | 4 + vos/vos/commands/tests/data/vlock.txt | 4 + vos/vos/commands/tests/data/vls.txt | 4 + vos/vos/commands/tests/data/vmkdir.txt | 4 + vos/vos/commands/tests/data/vmv.txt | 4 + vos/vos/commands/tests/data/vrm.txt | 4 + vos/vos/commands/tests/data/vrmdir.txt | 4 + vos/vos/commands/tests/data/vsync.txt | 7 + vos/vos/commands/tests/data/vtag.txt | 4 + vos/vos/commands/tests/test_cli.py | 64 ++ vos/vos/commands/tests/test_vsync.py | 35 - vos/vos/commands/vcat.py | 17 +- vos/vos/commands/vchmod.py | 55 +- vos/vos/commands/vcp.py | 187 +++-- vos/vos/commands/vln.py | 24 +- vos/vos/commands/vlock.py | 20 +- vos/vos/commands/vls.py | 54 +- vos/vos/commands/vmkdir.py | 19 +- vos/vos/commands/vmv.py | 10 +- vos/vos/commands/vrm.py | 12 +- vos/vos/commands/vrmdir.py | 16 +- vos/vos/commands/vsync.py | 190 +++-- vos/vos/commands/vtag.py | 36 +- vos/vos/commonparser.py | 48 +- vos/vos/logExceptions.py | 6 +- vos/vos/md5_cache.py | 67 +- vos/vos/node_cache.py | 44 +- vos/vos/tests/setup_package.py | 3 - vos/vos/tests/test_commonparser.py | 3 - vos/vos/tests/test_md5_cache.py | 38 +- vos/vos/tests/test_node_cache.py | 7 +- vos/vos/tests/test_vofile.py | 70 +- vos/vos/tests/test_vos.py | 237 +++--- vos/vos/vos.py | 754 +++++++++++++------- 69 files changed, 2320 insertions(+), 1227 deletions(-) delete mode 100644 vofs/vofs/tests/__init__.py delete mode 100644 vofs/vofs/tests/setup_package.py create mode 100644 vos/vos/commands/tests/data/help_vcat.txt create mode 100644 vos/vos/commands/tests/data/help_vchmod.txt create mode 100644 vos/vos/commands/tests/data/help_vcp.txt create mode 100644 vos/vos/commands/tests/data/help_vln.txt create mode 100644 vos/vos/commands/tests/data/help_vlock.txt create mode 100644 vos/vos/commands/tests/data/help_vls.txt create mode 100644 vos/vos/commands/tests/data/help_vmkdir.txt create mode 100644 vos/vos/commands/tests/data/help_vmv.txt create mode 100644 
vos/vos/commands/tests/data/help_vrm.txt create mode 100644 vos/vos/commands/tests/data/help_vrmdir.txt create mode 100644 vos/vos/commands/tests/data/help_vsync.txt create mode 100644 vos/vos/commands/tests/data/help_vtag.txt create mode 100644 vos/vos/commands/tests/data/vcat.txt create mode 100644 vos/vos/commands/tests/data/vchmod.txt create mode 100644 vos/vos/commands/tests/data/vcp.txt create mode 100644 vos/vos/commands/tests/data/vln.txt create mode 100644 vos/vos/commands/tests/data/vlock.txt create mode 100644 vos/vos/commands/tests/data/vls.txt create mode 100644 vos/vos/commands/tests/data/vmkdir.txt create mode 100644 vos/vos/commands/tests/data/vmv.txt create mode 100644 vos/vos/commands/tests/data/vrm.txt create mode 100644 vos/vos/commands/tests/data/vrmdir.txt create mode 100644 vos/vos/commands/tests/data/vsync.txt create mode 100644 vos/vos/commands/tests/data/vtag.txt create mode 100644 vos/vos/commands/tests/test_cli.py delete mode 100644 vos/vos/commands/tests/test_vsync.py delete mode 100644 vos/vos/tests/setup_package.py diff --git a/.travis.yml b/.travis.yml index 919ba54cb..825d2babb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,7 +17,14 @@ install: - pip install coveralls script: - - for i in $(ls -d */); do cd $i; pytest --cov $i || exit 1; cd ..; done + - for i in $(ls -d */); do + cd $i; + pytest --cov $i || exit 1; + if [[ $TRAVIS_PYTHON_VERSION == '3.5' ]]; then + flake8 -v $i || break -1; + fi; + cd ..; + done after_success: # If coveralls.io is set up for this package, uncomment the line diff --git a/vofs/dev_requirements.txt b/vofs/dev_requirements.txt index 9a64ec631..244cf1af7 100644 --- a/vofs/dev_requirements.txt +++ b/vofs/dev_requirements.txt @@ -2,6 +2,7 @@ -e . pytest>=3.0.5 pytest-cov>=2.5.1 +flake8>=3.4.1 mock==2.0.0 future==0.16.0 -unittest2==1.1.0 \ No newline at end of file +unittest2==1.1.0 diff --git a/vofs/setup.py b/vofs/setup.py index 1304cd44d..f3a271b97 100755 --- a/vofs/setup.py +++ b/vofs/setup.py @@ -42,7 +42,7 @@ def readme(): # generate the version file with open(os.path.join(PACKAGENAME, 'version.py'), 'w') as f: - f.write('version = \'{}\''.format(VERSION)) + f.write('version = \'{}\'\n'.format(VERSION)) # Treat everything in scripts except README.rst as a script to be installed scripts = [fname for fname in glob.glob(os.path.join('scripts', '*')) diff --git a/vofs/vofs/CacheMetaData.py b/vofs/vofs/CacheMetaData.py index 55b89c639..d96e4f80f 100644 --- a/vofs/vofs/CacheMetaData.py +++ b/vofs/vofs/CacheMetaData.py @@ -8,8 +8,8 @@ logger = logging.getLogger('cache') -class CacheMetaData(object): +class CacheMetaData(object): def __init__(self, metaDataFile, blocks, md5sum, size): """ Creates an instance of CacheMetaData for the given file. If the same @@ -31,7 +31,7 @@ def __init__(self, metaDataFile, blocks, md5sum, size): f = open(self.metaDataFile, 'rb') persisted = pickle.load(f) if self.md5sum is None or persisted.md5sum == self.md5sum: - #persisted bitmap still valid. Used that instead + # persisted bitmap still valid. 
Used that instead self.bitmap = persisted.bitmap self.size = persisted.size self.md5sum = persisted.md5sum @@ -42,16 +42,19 @@ def __init__(self, metaDataFile, blocks, md5sum, size): def __str__(self): """To create a print representation that is informative.""" - return "CacheMetaData: metaDataFile=%r bitmap=%r md5sum=%r size=%r" % (str(self.metaDataFile), - self.bitmap, - self.md5sum, - self.size) + return "CacheMetaData: metaDataFile=%r bitmap=%r md5sum=%r size=%r" % \ + (str(self.metaDataFile), + self.bitmap, + self.md5sum, + self.size) def __repr__(self): - return "CacheMetaData(metaDataFile=%r, blocks=%r, md5sum=%r, size=%r)" % (str(self.metaDataFile), - self.blocks, - self.md5sum, - self.size) + return ("CacheMetaData(metaDataFile=%r, blocks=%r, " + "md5sum=%r, size=%r)") % (str(self.metaDataFile), + self.blocks, + self.md5sum, + self.size) + def setReadBlocks(self, start, end): """ To mark several blocks as read (start and end inclusive). """ startBlock = start @@ -62,7 +65,7 @@ def setReadBlocks(self, start, end): endBlock = self.bitmap.length() + end if startBlock > endBlock: raise ValueError('''Incorrect interval, max is %d > %d''' % - (startBlock, endBlock)) + (startBlock, endBlock)) for i in range(startBlock, endBlock + 1): self.setReadBlock(i) @@ -103,7 +106,7 @@ def getRange(self, start, end): startBlock = i break if i == endBlock: - #all the blocks are cached already + # all the blocks are cached already return (None, None) for i in reversed(range(startBlock, endBlock + 1)): diff --git a/vofs/vofs/CadcCache.py b/vofs/vofs/CadcCache.py index 67b499963..0809589a9 100644 --- a/vofs/vofs/CadcCache.py +++ b/vofs/vofs/CadcCache.py @@ -12,12 +12,10 @@ from six.moves.reprlib import repr import traceback import errno -from errno import EACCES, EIO, ENOENT, EISDIR, ENOTDIR, ENOTEMPTY, EPERM, \ - EEXIST, ENODATA, ECONNREFUSED, EAGAIN, ENOTCONN +from errno import ENOENT, ENOTEMPTY, EPERM, EEXIST, EAGAIN import ctypes import ctypes.util import logging -import datetime from .SharedLock import SharedLock as SharedLock from .CacheMetaData import CacheMetaData as CacheMetaData @@ -69,7 +67,7 @@ def __enter__(self): def __exit__(self, a1, a2, a3): """To support the with construct. """ - #logger.debug("{}".format(kwargs)) + # logger.debug("{}".format(kwargs)) self.release() return @@ -88,8 +86,8 @@ def release(self): def wait(self): """Wait for the condition:""" - if (not hasattr(self.threadSpecificData, 'endTime') or - self.threadSpecificData.endTime is None): + if(not hasattr(self.threadSpecificData, 'endTime') or + self.threadSpecificData.endTime is None): self.myCondition.wait() else: time_left = self.threadSpecificData.endTime - time.time() @@ -107,19 +105,22 @@ class Cache(object): """ Manages the cache for the vofs. - The cache is the location where vofs will store the reads from VOSpace. Once a file is written into the - cache subsequent reads from vofs will deliver the cache content rather than reconnecting to the server. + The cache is the location where vofs will store the reads from VOSpace. + Once a file is written into the cache subsequent reads from vofs will + deliver the cache content rather than reconnecting to the server. - The cache maintains status of files stored in the cache area so that vofs can compare that information against - node info coming from the service. + The cache maintains status of files stored in the cache area so that + vofs can compare that information against node info coming from the + service. 
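For orientation, the read-through behaviour this docstring describes reduces to: serve bytes from the local cache when present, otherwise fetch them from the backing store and keep a copy. A minimal sketch of that idea, with hypothetical names and none of the locking, block bitmaps or CacheTimeout handling the real Cache layers on top:

    import os

    class ReadThroughCache(object):
        """Toy read-through cache: illustration only, not the patch's API."""

        def __init__(self, cache_dir):
            self.cache_dir = cache_dir  # assumed to exist

        def get(self, path, fetch_from_backing):
            # Map the VOSpace path to a flat local file name.
            local = os.path.join(self.cache_dir,
                                 path.strip('/').replace('/', '_'))
            if os.path.exists(local):        # hit: serve the cached copy
                with open(local, 'rb') as f:
                    return f.read()
            data = fetch_from_backing(path)  # miss: go to the backing store
            with open(local, 'wb') as f:     # and remember it for next time
                f.write(data)
            return data
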
- The vofs calls will want a response within vofs.cacheTimeOut seconds so cache should raise a CacheTimeout before - the filesystem times out. + The vofs calls will want a response within vofs.cacheTimeOut seconds so + cache should raise a CacheTimeout before the filesystem times out. """ IO_BLOCK_SIZE = 2 ** 14 - def __init__(self, cacheDir, maxCacheSize, read_only=False, timeout=60, maxFlushThreads=10): + def __init__(self, cacheDir, maxCacheSize, read_only=False, timeout=60, + maxFlushThreads=10): """Initialize the Cache Object Parameters: @@ -127,7 +128,8 @@ def __init__(self, cacheDir, maxCacheSize, read_only=False, timeout=60, maxFlush @param cacheDir: The directory for the cache. @param maxCacheSize: The maximum cache size in megabytes @param read_only: Is the cached data read-only - @param timeout: number of seconds to wait before timing-out a cache read. + @param timeout: number of seconds to wait before timing-out a cache + read. @param maxFlushThreads: Maximum number of nodes to flush simultaneously """ @@ -142,7 +144,6 @@ def __init__(self, cacheDir, maxCacheSize, read_only=False, timeout=60, maxFlush self.read_only = read_only self.fileHandleDict = {} - # When cache locks and file locks need to be held at the same time, # always acquire the cache lock first. self.cacheLock = threading.RLock() @@ -157,7 +158,8 @@ def __init__(self, cacheDir, maxCacheSize, read_only=False, timeout=60, maxFlush # ensure that the cache areas exist and have the desired permissions. utils.mkdir_p(self.dataDir, stat.S_IRWXU) utils.mkdir_p(self.metaDataDir, stat.S_IRWXU) - # logger.debug("Initialized data and meta data Cache areas: {0} {1}".format(self.dataDir, self.metaDataDir)) + # logger.debug("Initialized data and meta data Cache areas: {0} {1}". + # format(self.dataDir, self.metaDataDir)) def __enter__(self): """ @@ -174,7 +176,8 @@ def __exit__(self, type, value, traceback): pass def __str__(self): - return "DataCache: {0}, MetaDataCache: {1}, CacheSize: {2}".format(self.dataDir, self.metaDataDir, self.determineCacheSize()) + return "DataCache: {0}, MetaDataCache: {1}, CacheSize: {2}".format( + self.dataDir, self.metaDataDir, self.determineCacheSize()) @logExceptions() def open(self, path, isNew, mustExist, ioObject, trustMetaData): @@ -192,15 +195,18 @@ def open(self, path, isNew, mustExist, ioObject, trustMetaData): ioObject - the object that provides access to the backing store """ - # logger.debug("Getting filehandle for {0} {1} {2} {3} {4}".format(path, isNew, mustExist, ioObject, trustMetaData)) + # logger.debug("Getting filehandle for {0} {1} {2} {3} {4}". + # format(path, isNew, mustExist, ioObject, trustMetaData)) # logger.debug(str(self)) fileHandle = self.getFileHandle(path, isNew, ioObject) # logger.debug("Got filehandle: {0}".format(fileHandle)) with fileHandle.fileCondition: # logger.debug( - # "Opening file {0}: isnew {1}: id {2}: Fully Cached {3}: Must Exist {4}: Trust MetaData {5}:".format( - # path, isNew, id(fileHandle), fileHandle.fullyCached, mustExist, trustMetaData)) + # "Opening file {0}: isnew {1}: id {2}: Fully Cached {3}: + # Must Exist {4}: Trust MetaData {5}:".format( + # path, isNew, id(fileHandle), fileHandle.fullyCached, + # mustExist, trustMetaData)) # If this is a new file, initialize the cache state, otherwise # leave it alone. 
if not fileHandle.fullyCached: @@ -208,10 +214,13 @@ def open(self, path, isNew, mustExist, ioObject, trustMetaData): fileHandle.fileModified = True fileHandle.setHeader(0, ZERO_LENGTH_MD5) elif os.path.exists(fileHandle.cacheMetaDataFile): - fileHandle.metaData = CacheMetaData(fileHandle.cacheMetaDataFile, None, None, None) - if fileHandle.metaData.getNumReadBlocks() == len(fileHandle.metaData.bitmap): + fileHandle.metaData = CacheMetaData( + fileHandle.cacheMetaDataFile, None, None, None) + if fileHandle.metaData.getNumReadBlocks() == len( + fileHandle.metaData.bitmap): fileHandle.fullyCached = True - fileHandle.fileSize = os.path.getsize(fileHandle.cacheDataFile) + fileHandle.fileSize = os.path.getsize( + fileHandle.cacheDataFile) else: fileHandle.fullyCached = False fileHandle.fileSize = fileHandle.metaData.size @@ -226,33 +235,41 @@ def open(self, path, isNew, mustExist, ioObject, trustMetaData): fileHandle.fullyCached = False if (not fileHandle.fullyCached and - (fileHandle.metaData is None or fileHandle.metaData.getNumReadBlocks() == 0)): + (fileHandle.metaData is None or + fileHandle.metaData.getNumReadBlocks() == 0)): # If the cache file should be empty, empty it. with fileHandle.ioObject.cacheFileDescriptorLock: - os.ftruncate(fileHandle.ioObject.cacheFileDescriptor, 0) + os.ftruncate(fileHandle.ioObject.cacheFileDescriptor, + 0) os.fsync(fileHandle.ioObject.cacheFileDescriptor) fileHandle.fullyCached = True # For an existing file, start a data transfer to get the size and # md5sum unless the information is available and is trusted. - # logger.debug("RefCount: {0}, gotHeader: {1}, fileModified: {2}, trustMetaData: {3}".format( - # fileHandle.refCount, fileHandle.gotHeader, fileHandle.fileModified, trustMetaData + # logger.debug("RefCount: {0}, gotHeader: {1}, fileModified: {2}, + # trustMetaData: {3}".format( + # fileHandle.refCount, fileHandle.gotHeader, + # fileHandle.fileModified, trustMetaData # )) - if ((fileHandle.refCount == 1 or not fileHandle.gotHeader) and not fileHandle.fileModified and + if ((fileHandle.refCount == 1 or not fileHandle.gotHeader) and + not fileHandle.fileModified and (fileHandle.metaData is None or not trustMetaData)): # logger.debug("Doing a readData.") fileHandle.readData(0, 0, None) - while (not fileHandle.gotHeader and - fileHandle.readException is None): + while(not fileHandle.gotHeader and + fileHandle.readException is None): fileHandle.fileCondition.wait() if fileHandle.readException is not None: # If the file doesn't exist and is not required to exist, then # an ENOENT error is ok and not propegated. All other errors # are propegated. - if not (isinstance(fileHandle.readException[1], EnvironmentError) and - fileHandle.readException[1].errno == errno.ENOENT and not mustExist): + if not (isinstance(fileHandle.readException[1], + EnvironmentError) and + fileHandle.readException[1].errno == errno.ENOENT and + not mustExist): raise fileHandle.readException[0]('{} - {}'.format( - fileHandle.readException[1], fileHandle.readException[2])) + fileHandle.readException[1], + fileHandle.readException[2])) # The file didn't exist on the backing store but its ok fileHandle.fullyCached = True fileHandle.gotHeader = True @@ -260,7 +277,7 @@ def open(self, path, isNew, mustExist, ioObject, trustMetaData): try: self.checkCacheSpace() - except: + except Exception: pass return fileHandle @@ -269,13 +286,14 @@ def getFileHandle(self, path, createFile, ioObject): """Find an existing file handle, or create one if necessary. 
@param path: location of the file, relative to cache root. @param createFile: should the file be created if it doesn't exist - @param ioObject: the ioObject is used to read/write this file from the backing (VOSpace) - @rtype : FileHandle A file_like object that enables reading and writing to the cache object. + @param ioObject: the ioObject is used to read/write this file from + the backing (VOSpace) + @rtype : FileHandle A file_like object that enables reading and + writing to the cache object. """ - # logger.debug("Getting fileHandle for path:{0} createFile:{1} ioObject:{2}".format(path, - # createFile, - # ioObject)) + # logger.debug("Getting fileHandle for path:{0} createFile:{1} + # ioObject:{2}".format(path, createFile, ioObject)) if createFile and self.read_only: raise OSError(EPERM, 'Create denied, cache marked readonly.') @@ -311,7 +329,7 @@ def getFileHandle(self, path, createFile, ioObject): newFileHandle.refCount += 1 else: newFileHandle.refCount += 1 - # logger.debug("RefCount: {0}".format(newFileHandle.refCount)) + # logger.debug("RefCount: {0}".format(newFileHandle.refCount)) return newFileHandle @logExceptions() @@ -323,18 +341,23 @@ def checkCacheSpace(self): # multiple threads do this is bad. It should also be done on a # schedule to allow for files which grow. (oldest_file, cacheSize) = self.determineCacheSize() - while (cacheSize / 1024 / 1024 > self.maxCacheSize and oldest_file is not None): + while(cacheSize / 1024 / 1024 > self.maxCacheSize and + oldest_file is not None): with self.cacheLock: if oldest_file[len(self.dataDir):] not in self.fileHandleDict: - # logger.debug("Removing file %s from the local cache" % oldest_file) + # logger.debug("Removing file %s from the local cache" % + # oldest_file) try: os.unlink(oldest_file) - os.unlink(self.metaDataDir + oldest_file[len(self.dataDir):]) + os.unlink( + self.metaDataDir + oldest_file[len(self.dataDir):]) except OSError: pass self.removeEmptyDirs(os.path.dirname(oldest_file)) - self.removeEmptyDirs(os.path.dirname(self.metaDataDir + oldest_file[len(self.dataDir):])) - # TODO - Tricky - have to get a path to the meta data given the path to the data. + self.removeEmptyDirs(os.path.dirname( + self.metaDataDir + oldest_file[len(self.dataDir):])) + # TODO - Tricky - have to get a path to the meta data given the + # path to the data. (oldest_file, cacheSize) = self.determineCacheSize() def removeEmptyDirs(self, dirName): @@ -357,7 +380,8 @@ def removeEmptyDirs(self, dirName): def determineCacheSize(self): """Determine how much disk space is being used by the local cache""" - # TODO This needs to be cleaned up. There has to be a more efficient way to clean up the cache. + # TODO This needs to be cleaned up. There has to be a more efficient + # way to clean up the cache. start_path = self.dataDir total_size = 0 @@ -369,10 +393,11 @@ def determineCacheSize(self): for f in filenames: fp = os.path.join(dirpath, f) with self.cacheLock: - inFileHandleDict = fp[len(self.dataDir):] not in self.fileHandleDict + inFileHandleDict = fp[len( + self.dataDir):] not in self.fileHandleDict try: osStat = os.stat(fp) - except: + except Exception: continue if inFileHandleDict and oldest_time > osStat.st_atime: oldest_time = osStat.st_atime @@ -420,7 +445,8 @@ def renameFile(self, oldPath, newPath): if not os.path.isabs(newPath): raise ValueError("Path '%s' is not an absolute path." % newPath) if os.path.isdir(newPath): - raise ValueError("Cannot rename '%s' file to '%s' directory." 
% (oldPath, newPath)) + raise ValueError("Cannot rename '%s' file to '%s' directory." % ( + oldPath, newPath)) if oldPath == newPath: return newDataPath = self.dataDir + newPath @@ -474,7 +500,7 @@ def atomicRename(*renames): os.rename(pair[0], pair[1]) os.utime(pair[1], None) renamedList.append(pair) - except: + except Exception: for pair in renamedList: os.rename(pair[1], pair[0]) raise @@ -527,8 +553,9 @@ def renameDir(self, oldPath, newPath): # Change the data file name and meta data file name in # the file handle. start = len(oldDataPath) - fh.cacheDataFile = os.path.abspath(self.dataDir + - newPath + fh.cacheDataFile[start:]) + fh.cacheDataFile = \ + os.path.abspath(self.dataDir + newPath + + fh.cacheDataFile[start:]) start = len(oldMetaDataPath) fh.cacheMetaDataFile = os.path.abspath( self.metaDataDir + newPath + @@ -554,8 +581,9 @@ def getAttr(self, path): with fileHandle.fileLock: try: f = os.stat(fileHandle.cacheDataFile) - except Exception as e: - # error in accessing the cached version of the file. Remove from cache + except Exception: + # error in accessing the cached version of the file. + # Remove from cache self.unlinkFile(fileHandle.cacheDataFile) return None if fileHandle.fileModified: @@ -575,8 +603,8 @@ def pathExists(path): try: os.stat(path) except Exception as e: - if isinstance(e, OSError) and (e.errno == errno.EEXIST or - e.errno == errno.ENOENT): + if isinstance(e, OSError) and \ + (e.errno == errno.EEXIST or e.errno == errno.ENOENT): return False else: raise @@ -680,17 +708,19 @@ def writeToCache(self, buffer, offset): aborted. """ - if (self.currentWriteOffset is not None and - self.currentWriteOffset != offset): + if(self.currentWriteOffset is not None and + self.currentWriteOffset != offset): # Only allow seeks to block boundaries if (offset % self.cache.IO_BLOCK_SIZE != 0 or (self.currentWriteOffset % self.cache.IO_BLOCK_SIZE != 0 and self.currentWriteOffset != self.cacheFile.fileSize)): - raise CacheError("Only seeks to block boundaries are " - "permitted when writing to cache: %d %d %d %d" % (offset, - self.currentWriteOffset, - self.cache.IO_BLOCK_SIZE, - self.cacheFile.fileSize)) + raise CacheError( + "Only seeks to block boundaries are " + "permitted when writing to cache: %d %d %d %d" % ( + offset, + self.currentWriteOffset, + self.cache.IO_BLOCK_SIZE, + self.cacheFile.fileSize)) self.currentWriteOffset = offset if offset + len(buffer) > self.cacheFile.fileSize: @@ -712,20 +742,25 @@ def writeToCache(self, buffer, offset): # Set the mask bits corresponding to any completely read blocks. 
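The mask bookkeeping here, and the blockInfo() assertions in the test changes further down, are plain integer arithmetic on IO_BLOCK_SIZE (2 ** 14). A standalone sketch with a hypothetical helper name that reproduces the (firstBlock, numBlocks) values those tests expect:

    IO_BLOCK_SIZE = 2 ** 14  # same block size the Cache class uses

    def block_info(offset, size):
        """Return (first_block, num_blocks) covering [offset, offset + size).

        Hypothetical helper mirroring IOProxy.blockInfo's observable
        behaviour: an empty or unbounded range maps to (None, None).
        """
        if not size:  # size of 0 or None: nothing to mark
            return (None, None)
        first = offset // IO_BLOCK_SIZE
        last = (offset + size - 1) // IO_BLOCK_SIZE
        return (first, last - first + 1)

    # Spot checks against the expectations in test_blockInfo:
    assert block_info(0, 0) == (None, None)
    assert block_info(IO_BLOCK_SIZE - 1, 1) == (0, 1)
    assert block_info(0, IO_BLOCK_SIZE + 1) == (0, 2)
    assert block_info(100 + IO_BLOCK_SIZE * 2,
                      IO_BLOCK_SIZE * 3 + 100) == (2, 4)
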
lastCompleteByte = offset + len(buffer) if lastCompleteByte != self.cacheFile.fileSize: - lastCompleteByte = lastCompleteByte - (lastCompleteByte % - self.cache.IO_BLOCK_SIZE) + lastCompleteByte = \ + lastCompleteByte - \ + (lastCompleteByte % self.cache.IO_BLOCK_SIZE) firstBlock, numBlocks = self.blockInfo(offset, lastCompleteByte - offset) if numBlocks > 0: - self.cacheFile.metaData.setReadBlocks(firstBlock, - firstBlock + numBlocks - 1) + self.cacheFile.metaData.setReadBlocks( + firstBlock, firstBlock + numBlocks - 1) self.cacheFile.fileCondition.notify_all() self.currentWriteOffset = offset + len(buffer) - # Check to see if the current read has been aborted or the cache file removed while we weren't looking - # and if so, throw an exception - if self.cacheFile is None or (self.cacheFile.readThread.aborted and self.cacheFile.readThread.mandatoryEnd <= lastCompleteByte <= self.cacheFile.fileSize): + # Check to see if the current read has been aborted or the cache + # file removed while we weren't looking and if so, + # throw an exception + if self.cacheFile is None or\ + (self.cacheFile.readThread.aborted and + self.cacheFile.readThread.mandatoryEnd <= lastCompleteByte <= + self.cacheFile.fileSize): # logger.debug("reading to cache aborted for %s" % # self.cacheFile.path) raise CacheAborted("Read to cache aborted.") @@ -757,10 +792,12 @@ class FileHandle(object): def __init__(self, path, cache, ioObject): self.path = path self.cache = cache - # logger.debug("creating a new File Handle for {0} using cache: {1}".format(path, cache)) + # logger.debug("creating a new File Handle for {0} using cache: {1}". + # format(path, cache)) if not os.path.isabs(path): raise ValueError("Path '%s' is not an absolute path." % path) - # TODO this part of the code assumed the VOSpace path serpartor and the local FS are the same. FIXME + # TODO this part of the code assumed the VOSpace path serpartor and + # the local FS are the same. FIXME self.cacheDataFile = os.path.abspath(self.cache.dataDir + path) self.cacheMetaDataFile = os.path.abspath(self.cache.metaDataDir + path) self.metaData = None @@ -814,7 +851,8 @@ def fullyCached(self, fullyCached): import inspect curframe = inspect.currentframe() calframe = inspect.getouterframes(curframe, 2) - logger.debug("{} set to fullyCached to: {}".format(calframe[1][3], fullyCached)) + logger.debug( + "{} set to fullyCached to: {}".format(calframe[1][3], fullyCached)) self._fullyCached = fullyCached def __enter__(self): @@ -828,8 +866,9 @@ def setHeader(self, size, md5): # logger.debug("size: %s md5: %s" % (size, md5)) import inspect curframe = inspect.currentframe() - calframe = inspect.getouterframes(curframe, 2) - logger.debug("set header called with size: {} and md5: {}".format(size, md5)) + inspect.getouterframes(curframe, 2) + logger.debug( + "set header called with size: {} and md5: {}".format(size, md5)) if self.gotHeader: return @@ -850,9 +889,12 @@ def setHeader(self, size, md5): md5, size) self.fullyCached = False - # logger.debug("metaData: {0} fullCached: {1}".format(self.metaData, self.fullyCached)) - # mark the object as fully cached if there are 0 blocks to read and a read thread hasn't started. - if not self.fullyCached and self.metaData.getNumReadBlocks() == 0 and self.readThread is None: + # logger.debug("metaData: {0} fullCached: {1}".format(self.metaData, + # self.fullyCached)) + # mark the object as fully cached if there are 0 blocks to read and a + # read thread hasn't started. 
+ if not self.fullyCached and self.metaData.getNumReadBlocks() == 0 and\ + self.readThread is None: # If the cache file should be empty, empty it. with self.ioObject.cacheFileDescriptorLock: os.ftruncate(self.ioObject.cacheFileDescriptor, 0) @@ -876,7 +918,8 @@ def flush(self): if self.ioObject.exception is not None: raise self.ioObject.exception - # logger.debug("Flushing node %s: id %d: refCount %d: modified %s: obsolete: %s" % + # logger.debug("Flushing node %s: id %d: refCount %d: modified %s: + # obsolete: %s" % # (self.path, id(self), # self.refCount, # self.fileModified, @@ -885,8 +928,8 @@ def flush(self): # logger.debug("using the condition lock acquires the fileLock") with self.fileCondition: if self.refCount > 1: - raise OSError(errno.EBUSY, "File refcount indicates files handle busy.") - # logger.debug("Got the lock. Flushing: {0}".format(self.flushQueued)) + raise OSError(errno.EBUSY, + "File refcount indicates files handle busy.") try: # Tell any running write thread to abort. @@ -894,13 +937,13 @@ def flush(self): self.writeAborted = True # logger.debug("Waiting for queued flush to complete.") self.fileCondition.wait() - except Exception as e: + except Exception: pass # logger.debug("{0}".format(e)) try: self.writeAborted = False - except Exception as e: + except Exception: pass # logger.debug("{0}".format(e)) @@ -910,7 +953,8 @@ def flush(self): # If flushing is not already in progress, submit to the thread # queue. - if self.flushQueued is None and self.fileModified and not self.obsolete: + if self.flushQueued is None and self.fileModified and\ + not self.obsolete: self.refCount += 1 # Acquire the writer lock exclusively. This will prevent @@ -935,7 +979,7 @@ def flush(self): # % self.cache.flushNodeQueue.qsize()) while (self.flushQueued is not None or - self.readThread is not None): + self.readThread is not None): # Wait for the flush to complete. # logger.debug("flushQueued: %s, readThread: %s" % # (self.flushQueued, self.readThread)) @@ -944,8 +988,9 @@ def flush(self): # Look for write failures. 
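The failure being checked just below was recorded by a worker thread: flushNode() stores sys.exc_info() in flushException rather than raising inside the thread, and flush() re-raises it in the caller. A minimal, self-contained sketch of that hand-off pattern (names hypothetical):

    import sys
    import threading

    failure = [None]  # slot for the worker's sys.exc_info() triple

    def worker():
        try:
            raise OSError("backing store unavailable")  # stand-in failure
        except Exception:
            failure[0] = sys.exc_info()  # record it, do not raise here

    t = threading.Thread(target=worker)
    t.start()
    t.join()

    if failure[0] is not None:
        exc_type, exc_value, exc_tb = failure[0]
        # Re-raise in the calling thread, the same shape flush() uses
        # with flushException.
        raise exc_type('{} - {}'.format(exc_value, exc_tb))
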
if self.flushException is not None: - raise self.flushException[0]('{} - {}'.format(self.flushException[1], - self.flushException[2])) + raise self.flushException[0]( + '{} - {}'.format(self.flushException[1], + self.flushException[2])) return 0 @@ -961,8 +1006,10 @@ def release(self): def deref(self): # logger.debug("Getting locks so we can do a deref and close.") - with self.cache.cacheLock, self.fileLock, self.ioObject.cacheFileDescriptorLock: - # logger.debug("Lock acquired now doing the ref-reduction and close.") + with self.cache.cacheLock, self.fileLock,\ + self.ioObject.cacheFileDescriptorLock: + # logger.debug("Lock acquired now doing the ref-reduction + # and close.") self.refCount -= 1 if self.refCount == 0: # logger.debug("Closing the cache object now.") @@ -1017,7 +1064,8 @@ def flushNode(self): # Update the meta data md5 blocks, numBlocks = self.ioObject.blockInfo(0, size) - self.metaData = CacheMetaData(self.cacheMetaDataFile, numBlocks, md5, size) + self.metaData = CacheMetaData(self.cacheMetaDataFile, numBlocks, + md5, size) if numBlocks > 0: self.metaData.setReadBlocks(0, numBlocks - 1) @@ -1025,7 +1073,7 @@ def flushNode(self): self.metaData.persist() self.fileModified = False - except Exception as e: + except Exception: # logger.debug("Flush node failed") self.flushException = sys.exc_info() # logger.debug(str(self.flushException)) @@ -1033,11 +1081,12 @@ def flushNode(self): self.flushQueued = None try: self.writerLock.release() - except: + except Exception: pass self.deref() _flush_thread_count = _flush_thread_count - 1 - # logger.debug("finished flushing node %s, working thread count is %i " \ + # logger.debug("finished flushing node %s, working thread count + # is %i " \ # % (self.path, _flush_thread_count)) # Wake up any threads waiting for the flush to finish with self.fileCondition: @@ -1045,7 +1094,7 @@ def flushNode(self): try: self.cache.checkCacheSpace() - except: + except Exception: pass return @@ -1056,9 +1105,6 @@ def write(self, data, size, offset): than the timeout. """ - # logger.debug("writting %d bytes at %d to %d " % (size, offset, - # self.ioObject.cacheFileDescriptor)) - if self.ioObject.exception is not None: raise self.ioObject.exception @@ -1098,7 +1144,6 @@ def write(self, data, size, offset): return wroteBytes @logExceptions() - def read(self, size, offset, cbuffer=None): """Read data from the file. This method will raise a CacheRetry error if the response takes longer @@ -1126,19 +1171,8 @@ def read(self, size, offset, cbuffer=None): if retsize < 0: raise CacheError("Failed to read from cache file") - # if this read didn't work try again, some bytes will come if there is a readThread active. - # while retsize == 0 and self.readThread is not None: - # logger.debug("Sleeping while we wait for data to arrive due to {}.".format(self.readThread)) - # time.sleep(5) - # os.lseek(r, offset, os.SEEK_SET) - # retsize = libc.read(r, cbuffer, size) - # - # # and one more try as there could be a race on the final write into cache. - # if retsize == 0: - # os.lseek(r, offset, os.SEEK_SET) - # retsize = libc.read(r, cbuffer, size) - - logger.debug("got {} bytes from {} after all that.".format(retsize, os.lseek(r, 0, os.SEEK_CUR))) + logger.debug("got {} bytes from {} after all that.". + format(retsize, os.lseek(r, 0, os.SEEK_CUR))) return retsize @@ -1150,10 +1184,13 @@ def makeCached(self, offset, size): than the timeout. 
""" firstBlock, numBlock = self.ioObject.blockInfo(offset, size) - logger.debug("Looking for blocks in range {} to {}".format(firstBlock, firstBlock+numBlock)) + logger.debug("Looking for blocks in range {} to {}". + format(firstBlock, firstBlock + numBlock)) # If the whole file is cached, return if self.fullyCached or numBlock == 0: - logger.debug("fully cached: {}, numBlocks: {}".format(self.fullyCached, numBlock)) + logger.debug( + "fully cached: {}, numBlocks: {}".format(self.fullyCached, + numBlock)) return lastBlock = firstBlock + numBlock - 1 @@ -1168,7 +1205,8 @@ def makeCached(self, offset, size): # data, modify the mandatory read range of the read thread. # Acquiring self.fileCondition acquires self.fileLock - logger.debug("Waiting for lock to see if we should abort current read thread.") + logger.debug( + "Waiting for lock to see if we should abort current read thread.") with self.fileCondition: logger.debug("Checking if a new read thread is the best option.") if self.readThread is not None: @@ -1187,8 +1225,8 @@ def makeCached(self, offset, size): self.fileCondition.wait() else: while (self.metaData.getRange(firstBlock, lastBlock) != - (None, None) and - self.readThread is not None): + (None, None) and + self.readThread is not None): self.fileCondition.wait() if (self.metaData.getRange(firstBlock, lastBlock) == @@ -1220,7 +1258,7 @@ def makeCached(self, offset, size): # Wait for the data be be available. while (self.metaData.getRange(firstBlock, lastBlock) != - (None, None) and self.readThread is not None): + (None, None) and self.readThread is not None): self.fileCondition.wait() def fsync(self): @@ -1316,8 +1354,8 @@ def isNewReadBest(self, start, size): self.mandatoryEnd = self.optionEnd return True readRef = max(self.mandatoryEnd, self.currentByte) - if (start <= readRef) or ((start - readRef) - <= CacheReadThread.CONTINUE_MAX_SIZE): + if (start <= readRef) or ((start - readRef) <= + CacheReadThread.CONTINUE_MAX_SIZE): self.mandatoryEnd = max(self.mandatoryEnd, start + size) return False return True @@ -1329,14 +1367,16 @@ def execute(self): self.fileHandle.ioObject.readFromBacking(self.optionSize, self.startByte) with self.fileHandle.fileCondition: - logger.debug("setFullyCached? %s %s %s %s" % (self.aborted, - self.startByte, self.optionSize, - self.fileHandle.fileSize)) + logger.debug("setFullyCached? %s %s %s %s" % + (self.aborted, + self.startByte, + self.optionSize, + self.fileHandle.fileSize)) if self.aborted: return elif (self.startByte == 0 and - (self.optionSize is None or - self.optionSize == self.fileHandle.fileSize)): + (self.optionSize is None or + self.optionSize == self.fileHandle.fileSize)): self.fileHandle.fullyCached = True # logger.debug("setFullyCached") elif self.fileHandle.fileSize == 0: @@ -1349,14 +1389,15 @@ def execute(self): firstBlock, firstBlock + numBlocks - 1) if requiredRange == (None, None): self.fileHandle.fullyCached = True - # TODO - The file is fully cached, verify that the file - # matches the vospace content. Is that overly strict - the - # original VOFS did this, but it was subject to a much - # smaller read window. Also it is not invalid for the file - # to be replaced in vospace, and for this client to - # continue to serve the existing file. + # TODO - The file is fully cached, verify that the + # file matches the vospace content. Is that overly + # strict - the original VOFS did this, but it was + # subject to a much smaller read window. 
Also it is + # not invalid for the file to be replaced in + # vospace, and for this client to continue to + # serve the existing file. except Exception as e: - logger.error("Exception in thread started at:\n%s" % \ + logger.error("Exception in thread started at:\n%s" % ''.join(traceback.format_list(self.traceback))) logger.error(str(e)) logger.error(traceback.format_exc()) @@ -1392,8 +1433,8 @@ def __init__(self, maxFlushThreads=10): t.daemon = True t.start() - # logger.debug("started a FlushNodeQueue with %i workers" \ - # % self.maxFlushThreads) + # logger.debug("started a FlushNodeQueue with %i workers" \ + # % self.maxFlushThreads) def join(self): # logger.debug("FlushNodeQueue waiting until all work is done") diff --git a/vofs/vofs/SharedLock.py b/vofs/vofs/SharedLock.py index 4d7886266..6c2125a1b 100644 --- a/vofs/vofs/SharedLock.py +++ b/vofs/vofs/SharedLock.py @@ -11,6 +11,7 @@ def __init__(self, value): def __str__(self): return repr(self.value) + class RecursionError(Exception): def __init__(self, value): self.value = value @@ -18,6 +19,7 @@ def __init__(self, value): def __str__(self): return repr(self.value) + class StealError(Exception): def __init__(self, value): self.value = value @@ -91,7 +93,7 @@ def acquire(self, timeout=None, shared=True): waitTime = time.time() - waitStart if len(self.lockersList) > 0 and waitTime > timeout: raise TimeoutError( - "Timeout waiting for a shared lock") + "Timeout waiting for a shared lock") self.exclusiveLock = threading.current_thread() def steal(self): @@ -102,7 +104,6 @@ def steal(self): else: raise StealError("It is only possible to steal an exclusive lock") - def release(self): """Release a previously acquired lock. """ diff --git a/vofs/vofs/__init__.py b/vofs/vofs/__init__.py index 4d9965a20..c841a5f99 100644 --- a/vofs/vofs/__init__.py +++ b/vofs/vofs/__init__.py @@ -4,4 +4,4 @@ This application mounts a VOSpace service to the local file system using fuse. 
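Before the patch moves on to the mount script, the SharedLock hunks above show the visible surface of that class: acquire(timeout=None, shared=True), release(), steal(), and the TimeoutError, RecursionError and StealError exceptions. A hedged usage sketch, assuming the vofs package is importable; anything beyond that visible signature is an assumption:

    from vofs.SharedLock import SharedLock, TimeoutError

    lock = SharedLock()

    # Default acquisition is shared; pass a timeout so a stuck exclusive
    # holder cannot block the caller forever (mirrors the timeout= use in
    # the tests below).
    try:
        lock.acquire(timeout=5)
        # ... read the protected state ...
        lock.release()
    except TimeoutError:
        pass  # the lock was not released within 5 seconds

    # Re-acquiring in the same thread raises RecursionError, and steal()
    # is only permitted on an exclusive lock; both are exercised in
    # test_cadc_cache.py later in this patch.
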
""" -from vofs import * +from vofs import * # noqa diff --git a/vofs/vofs/mountvofs.py b/vofs/vofs/mountvofs.py index b78f614ec..b39f09bbc 100755 --- a/vofs/vofs/mountvofs.py +++ b/vofs/vofs/mountvofs.py @@ -12,11 +12,11 @@ from .vofs import VOFS from .vofs import MyFuse from vos.commonparser import CommonParser, set_logging_level_from_args + DAEMON_TIMEOUT = 60 def mountvofs(): - parser = CommonParser(description='mount vospace as a filesystem.') # mountvofs specific options @@ -24,37 +24,49 @@ def mountvofs(): parser.add_option("--mountpoint", help="the mountpoint on the local filesystem", default="/tmp/vospace") - parser.add_option("-f", "--foreground", action="store_true", - help="Mount the filesystem as a foreground opperation and " + - "produce copious amounts of debuging information") + parser.add_option( + "-f", "--foreground", action="store_true", + help="Mount the filesystem as a foreground opperation and " + + "produce copious amounts of debuging information") parser.add_option("--log", action="store", - help="File to store debug log to", default="/tmp/vos.err") - parser.add_option("--cache_limit", action="store", type=int, - help="upper limit on local diskspace to use for file caching (in MBytes)", - default=50 * 2 ** (10 + 10 + 10)) + help="File to store debug log to", + default="/tmp/vos.err") + parser.add_option( + "--cache_limit", action="store", type=int, + help=("upper limit on local diskspace to " + "use for file caching (in MBytes)"), + default=50 * 2 ** (10 + 10 + 10)) parser.add_option("--cache_dir", action="store", - help="local directory to use for file caching", default=None) + help="local directory to use for file caching", + default=None) parser.add_option("--readonly", action="store_true", help="mount vofs readonly", default=False) - parser.add_option("--cache_nodes", action="store_true", default=False, - help="cache dataNode properties, containerNodes are not cached") + parser.add_option( + "--cache_nodes", action="store_true", default=False, + help="cache dataNode properties, containerNodes are not cached") parser.add_option("--allow_other", action="store_true", default=False, help="Allow all users access to this mountpoint") parser.add_option("--max_flush_threads", action="store", type=int, help="upper limit on number of flush (upload) threads", default=10) - parser.add_option("--secure_get", action="store_true", default=False, - help="Ensure HTTPS instead of HTTP is used to retrieve data (slower)") - parser.add_option("--nothreads", help="Only run in a single thread, causes some blocking.", action="store_true") + parser.add_option( + "--secure_get", action="store_true", default=False, + help="Ensure HTTPS instead of HTTP is used to retrieve data (slower)") + parser.add_option( + "--nothreads", + help="Only run in a single thread, causes some blocking.", + action="store_true") opt = parser.parse_args() set_logging_level_from_args(opt) - log_format = ("%(asctime)s %(thread)d vos-"+str(version)+" %(module)s.%(funcName)s.%(lineno)d %(message)s") + log_format = ("%(asctime)s %(thread)d vos-" + str(version) + + " %(module)s.%(funcName)s.%(lineno)d %(message)s") - username = getpass.getuser() # not to be used for access control + username = getpass.getuser() # not to be used for access control lf = logging.Formatter(fmt=log_format) - fh = logging.FileHandler(filename=os.path.abspath('/tmp/vos.{}.exceptions'.format(username))) + fh = logging.FileHandler( + filename=os.path.abspath('/tmp/vos.{}.exceptions'.format(username))) fh.formatter = lf # send the 
'logException' statements to a seperate log file. @@ -77,7 +89,8 @@ def mountvofs(): logger.debug("Checking connection to VOSpace ") if not os.access(opt.certfile, os.F_OK): - # setting this to 'blank' instead of None since 'None' implies use the default. + # setting this to 'blank' instead of None since 'None' implies use + # the default. certfile = "" else: certfile = os.path.abspath(opt.certfile) @@ -101,33 +114,35 @@ def mountvofs(): if not os.access(mount, os.F_OK): os.makedirs(mount) try: - if platform == "darwin": - fuse = MyFuse(VOFS(root, opt.cache_dir, opt, conn=conn, - cache_limit=opt.cache_limit, cache_nodes=opt.cache_nodes, - cache_max_flush_threads=opt.max_flush_threads, - secure_get=opt.secure_get), - mount, - fsname=root, - volname=root, - nothreads=opt.nothreads, - defer_permissions=True, - daemon_timeout=DAEMON_TIMEOUT, - readonly=readonly, - user_allow_other=opt.allow_other, - noapplexattr=True, - noappledouble=True, - debug=opt.foreground, - foreground=opt.foreground) - else: - fuse = MyFuse(VOFS(root, opt.cache_dir, opt, conn=conn, - cache_limit=opt.cache_limit, cache_nodes=opt.cache_nodes, - cache_max_flush_threads=opt.max_flush_threads, - secure_get=opt.secure_get), - mount, - fsname=root, - nothreads=opt.nothreads, - readonly=readonly, - user_allow_other=opt.allow_other, - foreground=opt.foreground) + if platform == "darwin": + MyFuse(VOFS(root, opt.cache_dir, opt, conn=conn, + cache_limit=opt.cache_limit, + cache_nodes=opt.cache_nodes, + cache_max_flush_threads=opt.max_flush_threads, + secure_get=opt.secure_get), + mount, + fsname=root, + volname=root, + nothreads=opt.nothreads, + defer_permissions=True, + daemon_timeout=DAEMON_TIMEOUT, + readonly=readonly, + user_allow_other=opt.allow_other, + noapplexattr=True, + noappledouble=True, + debug=opt.foreground, + foreground=opt.foreground) + else: + MyFuse(VOFS(root, opt.cache_dir, opt, conn=conn, + cache_limit=opt.cache_limit, + cache_nodes=opt.cache_nodes, + cache_max_flush_threads=opt.max_flush_threads, + secure_get=opt.secure_get), + mount, + fsname=root, + nothreads=opt.nothreads, + readonly=readonly, + user_allow_other=opt.allow_other, + foreground=opt.foreground) except Exception as ex: - logger.error(str(ex)) + logger.error(str(ex)) diff --git a/vofs/vofs/tests/__init__.py b/vofs/vofs/tests/__init__.py deleted file mode 100644 index 2ff7e2a6b..000000000 --- a/vofs/vofs/tests/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Licensed under a 3-clause BSD style license - see LICENSE.rst -""" -This packages contains affiliated package tests. 
-""" diff --git a/vofs/vofs/tests/setup_package.py b/vofs/vofs/tests/setup_package.py deleted file mode 100644 index f2fd9ed48..000000000 --- a/vofs/vofs/tests/setup_package.py +++ /dev/null @@ -1,3 +0,0 @@ -def get_package_data(): - return { - _ASTROPY_PACKAGE_NAME_ + '.tests': ['coveragerc']} diff --git a/vofs/vofs/tests/test_cache_metadata.py b/vofs/vofs/tests/test_cache_metadata.py index 0d601ec2a..e2b9d21c5 100644 --- a/vofs/vofs/tests/test_cache_metadata.py +++ b/vofs/vofs/tests/test_cache_metadata.py @@ -17,7 +17,8 @@ def tearDown(self): self.cleanupCacheMetadataDir() def cleanupCacheMetadataDir(self): - for root, dirs, files in os.walk(TestCacheMetaData.TEST_CACHE_PATH, topdown=False): + for root, dirs, files in os.walk(TestCacheMetaData.TEST_CACHE_PATH, + topdown=False): for name in files: os.remove(os.path.join(root, name)) for name in dirs: @@ -125,10 +126,11 @@ def testRange(self): except ValueError: expected = 1 - file1_repr = str("CacheMetaData(metaDataFile='{}file1', " \ - "blocks=10, md5sum=9029, size=1025)".format( - TestCacheMetaData.TEST_CACHE_PATH)) - aRepr.maxother = len(file1_repr) # avoid repr limitting the size of the return representation + file1_repr = str("CacheMetaData(metaDataFile='{}file1', " + "blocks=10, md5sum=9029, size=1025)". + format(TestCacheMetaData.TEST_CACHE_PATH)) + # avoid repr limiting the size of the return representation + aRepr.maxother = len(file1_repr) self.assertEquals(str(file1_repr), repr(file1)) self.assertEquals(1, expected) @@ -139,15 +141,17 @@ def testRange(self): self.assertTrue(not os.path.exists( TestCacheMetaData.TEST_CACHE_PATH + "file1")) - file2 = CacheMetaData(TestCacheMetaData.TEST_CACHE_PATH + "/test/file1", - 10, 0x2345, 1024) + file2 = CacheMetaData( + TestCacheMetaData.TEST_CACHE_PATH + "/test/file1", + 10, 0x2345, 1024) self.assertEquals(TestCacheMetaData.TEST_CACHE_PATH + "/test/file1", file2.metaDataFile) self.assertEquals(10, file2.bitmap.length()) self.assertEquals(0x2345, file2.md5sum) file2.persist() - file3 = CacheMetaData(TestCacheMetaData.TEST_CACHE_PATH + "/test/file1", - 10, 0x2345, 1025) + file3 = CacheMetaData( + TestCacheMetaData.TEST_CACHE_PATH + "/test/file1", + 10, 0x2345, 1025) self.assertEquals(TestCacheMetaData.TEST_CACHE_PATH + "/test/file1", file3.metaDataFile) self.assertEquals(10, file3.bitmap.length()) @@ -158,9 +162,9 @@ def run(): suite = unittest.TestLoader().loadTestsFromTestCase(TestCacheMetaData) return unittest.TextTestRunner(verbosity=2).run(suite) + if __name__ == '__main__': run() - if __name__ == '__main__': unittest.main() diff --git a/vofs/vofs/tests/test_cadc_cache.py b/vofs/vofs/tests/test_cadc_cache.py index fbabf7f37..90419dd2d 100644 --- a/vofs/vofs/tests/test_cadc_cache.py +++ b/vofs/vofs/tests/test_cadc_cache.py @@ -1,7 +1,8 @@ from __future__ import (absolute_import, division, print_function, unicode_literals) -from future import standard_library -standard_library.install_aliases() +# from future import standard_library + +# standard_library.install_aliases() from builtins import str from builtins import object import copy @@ -18,10 +19,11 @@ import os import stat import six -#from six.moves import queue +# from six.moves import queue from mock import Mock, MagicMock, patch from vofs import CadcCache -from vofs.SharedLock import SharedLock, TimeoutError, RecursionError, StealError +from vofs.SharedLock import SharedLock, TimeoutError, RecursionError, \ + StealError # To run individual tests, set the value of skipTests to True, and comment # out the @unittest.skipIf line at 
the top of the test to be run. @@ -61,7 +63,8 @@ def readFromBacking(self, offset=None, size=None): class IOProxyFor100K(CadcCache.IOProxy): """ Subclass of the CadcCache.IOProxy class. Used for both testing the - IOProxy class and as an IOProxy object when testing the CadcCache.Cache class. + IOProxy class and as an IOProxy object when testing the CadcCache.Cache + class. """ def delNode(self, force=False): @@ -102,11 +105,12 @@ def tearDown(self): """ Test the IOProxy class. """ + @unittest.skipIf(skipTests, "Individual tests") def test_basic(self): """Test the IOProxy abstract methods """ - with CadcCache.Cache(self.testdir, 100, True) as testCache: + with CadcCache.Cache(self.testdir, 100, True): testIOProxy = CadcCache.IOProxy() with self.assertRaises(NotImplementedError): testIOProxy.get_md5() @@ -130,7 +134,8 @@ def test_writeToCache(self): testIOProxy.cacheFile = Object() testIOProxy.cacheFile.readThread = Object() testIOProxy.cacheFile.readThread.aborted = False - testIOProxy.cacheFile.readThread.mandatoryEnd = testCache.IO_BLOCK_SIZE + testIOProxy.cacheFile.readThread.mandatoryEnd = \ + testCache.IO_BLOCK_SIZE testIOProxy.cacheFile.metaData = Object() testIOProxy.cacheFile.metaData.setReadBlocks = Mock() testIOProxy.cacheFile.fileLock = threading.RLock() @@ -140,21 +145,23 @@ def test_writeToCache(self): testIOProxy.cacheFileDescriptor = 1 # Write to beginning of the file - with patch('os.lseek') as mock_lseek, patch('os.write') as mock_write, \ + with patch('os.lseek') as mock_lseek, patch( + 'os.write') as mock_write, \ patch('os.fsync') as mock_fsync: mock_lseek.return_value = 0 mock_write.return_value = 3 mock_fsync.return_value = 0 testIOProxy.cacheFile.fileSize = 3 self.assertEqual(3, testIOProxy.writeToCache("abc", 0)) - testIOProxy.cacheFile.metaData.setReadBlocks.assert_called_once_with( - 0, 0) + testIOProxy.cacheFile.metaData.setReadBlocks.\ + assert_called_once_with(0, 0) mock_lseek.assert_called_once_with(1, 0, os.SEEK_SET) mock_write.assert_called_once_with(1, "abc") mock_fsync.assert_called_once_with(1) # Write to after the beginning of the output - with patch('os.lseek') as mock_lseek, patch('os.write') as mock_write, \ + with patch('os.lseek') as mock_lseek, patch( + 'os.write') as mock_write, \ patch('os.fsync') as mock_fsync: mock_lseek.return_value = 0 mock_write.return_value = 3 @@ -173,7 +180,7 @@ def test_writeToCache(self): with patch('os.lseek'), patch('os.write') as mocks: mock_lseek = mocks[0] mock_lseek.return_value = 0 - with self.assertRaises(CadcCache.CacheError) as cm: + with self.assertRaises(CadcCache.CacheError): testIOProxy.writeToCache("abc", 3) # Test an attempt to write past the end of the file. @@ -182,16 +189,18 @@ def test_writeToCache(self): mock_lseek.return_value = 0 testIOProxy.cacheFile.fileSize = 3 testIOProxy.currentWriteOffset = 0 - with self.assertRaises(CadcCache.CacheError) as cm: + with self.assertRaises(CadcCache.CacheError): testIOProxy.writeToCache("abcdef", 0) # Write a partial block of data. 
- with patch('os.lseek') as mock_lseek, patch('os.write') as mock_write, \ + with patch('os.lseek') as mock_lseek, patch( + 'os.write') as mock_write, \ patch('os.fsync') as mock_fsync: mock_lseek.return_value = 0 mock_fsync.return_value = 1 testIOProxy.currentWriteOffset = 0 - testIOProxy.cacheFile.fileSize = testCache.IO_BLOCK_SIZE * 2 + 10 + testIOProxy.cacheFile.fileSize = \ + testCache.IO_BLOCK_SIZE * 2 + 10 testIOProxy.currentWriteOffset = 0 mock_write.return_value = 6 testIOProxy.cacheFile.metaData.setReadBlocks.call_count = 0 @@ -204,27 +213,30 @@ def test_writeToCache(self): # Write the second block, and the first 10 bytes of the third block # (to the nd of file). This should result in the second and third # blocks being marked complete. - with patch('os.lseek') as mock_lseek, patch('os.write') as mock_write, \ + with patch('os.lseek') as mock_lseek, patch( + 'os.write') as mock_write, \ patch('os.fsync') as mock_fsync: testIOProxy.currentWriteOffset = 0 - testIOProxy.cacheFile.fileSize = ( - testCache.IO_BLOCK_SIZE * 2) + 10 + testIOProxy.cacheFile.fileSize = \ + (testCache.IO_BLOCK_SIZE * 2) + 10 mock_lseek.return_value = 0 mock_write.return_value = testCache.IO_BLOCK_SIZE + 10 mock_fsync.return_value = 1 buffer = bytearray(testCache.IO_BLOCK_SIZE + 10) testIOProxy.cacheFile.metaData.setReadBlocks.call_count = 0 self.assertEqual(testCache.IO_BLOCK_SIZE + 10, - testIOProxy.writeToCache(buffer, testCache.IO_BLOCK_SIZE)) - testIOProxy.cacheFile.metaData.setReadBlocks.assert_called_once_with( - 1, 2) + testIOProxy.writeToCache( + buffer, testCache.IO_BLOCK_SIZE)) + testIOProxy.cacheFile.metaData.setReadBlocks.\ + assert_called_once_with(1, 2) # do a write which gets aborted testIOProxy.cacheFile.readThread.aborted = True - with patch('os.lseek') as mock_lseek, patch('os.write') as mock_write, \ + with patch('os.lseek') as mock_lseek, patch( + 'os.write') as mock_write, \ patch('os.fsync') as mock_fsync: - testIOProxy.cacheFile.fileSize = ( - testCache.IO_BLOCK_SIZE * 2) + 10 + testIOProxy.cacheFile.fileSize = \ + (testCache.IO_BLOCK_SIZE * 2) + 10 mock_lseek.return_value = 0 mock_write.return_value = testCache.IO_BLOCK_SIZE + 10 mock_fsync.return_value = 1 @@ -232,8 +244,8 @@ def test_writeToCache(self): testIOProxy.cacheFile.metaData.setReadBlocks.call_count = 0 with self.assertRaises(CadcCache.CacheAborted): testIOProxy.writeToCache(buffer, testCache.IO_BLOCK_SIZE) - testIOProxy.cacheFile.metaData.setReadBlocks.assert_called_once_with( - 1, 2) + testIOProxy.cacheFile.metaData.setReadBlocks.\ + assert_called_once_with(1, 2) @unittest.skipIf(skipTests, "Individual tests") def test_blockInfo(self): @@ -246,28 +258,29 @@ def test_blockInfo(self): self.assertEqual((0, 1), testIOProxy.blockInfo(0, 1)) self.assertEqual((0, 1), testIOProxy.blockInfo(1, 1)) self.assertEqual((0, 1), - testIOProxy.blockInfo(testCache.IO_BLOCK_SIZE - 1, 1)) + testIOProxy.blockInfo(testCache.IO_BLOCK_SIZE - 1, + 1)) self.assertEqual((1, 1), testIOProxy.blockInfo(testCache.IO_BLOCK_SIZE, 1)) self.assertEqual((0, 1), testIOProxy.blockInfo(0, testCache.IO_BLOCK_SIZE)) - self.assertEqual((0, 2), - testIOProxy.blockInfo(0, testCache.IO_BLOCK_SIZE + 1)) - self.assertEqual((0, 2), - testIOProxy.blockInfo(100, testCache.IO_BLOCK_SIZE)) - self.assertEqual((2, 3), - testIOProxy.blockInfo(testCache.IO_BLOCK_SIZE * 2, - testCache.IO_BLOCK_SIZE * 3)) + self.assertEqual((0, 2), testIOProxy.blockInfo( + 0, testCache.IO_BLOCK_SIZE + 1)) + self.assertEqual((0, 2), testIOProxy.blockInfo( + 100, testCache.IO_BLOCK_SIZE)) + 
self.assertEqual((2, 3), testIOProxy.blockInfo( + testCache.IO_BLOCK_SIZE * 2, testCache.IO_BLOCK_SIZE * 3)) self.assertEqual((2, 4), - testIOProxy.blockInfo(100 + testCache.IO_BLOCK_SIZE * 2, - testCache.IO_BLOCK_SIZE * 3 + 100)) + testIOProxy.blockInfo( + 100 + testCache.IO_BLOCK_SIZE * 2, + testCache.IO_BLOCK_SIZE * 3 + 100)) self.assertEqual((None, None), - testIOProxy.blockInfo(100 + testCache.IO_BLOCK_SIZE * 2, - None)) + testIOProxy.blockInfo( + 100 + testCache.IO_BLOCK_SIZE * 2, + None)) class TestCacheError(unittest.TestCase): - @unittest.skipIf(skipTests, "Individual tests") def test_str(self): e = CadcCache.CacheError(str("a string")) @@ -275,7 +288,6 @@ def test_str(self): class TestCacheRetry(unittest.TestCase): - @unittest.skipIf(skipTests, "Individual tests") def test_str(self): e = CadcCache.CacheRetry(str("a string")) @@ -283,7 +295,6 @@ def test_str(self): class TestCacheAborted(unittest.TestCase): - @unittest.skipIf(skipTests, "Individual tests") def test_str(self): e = CadcCache.CacheAborted(str("a string")) @@ -324,7 +335,7 @@ def test_simpleLock(self): self.assertTrue(lock.exclusiveLock is None) # Try to acquire a lock twice. Should fail. - with self.assertRaises(RecursionError) as e: + with self.assertRaises(RecursionError): lock.acquire(timeout=5) self.assertTrue(lock.exclusiveLock is None) lock.release() @@ -357,7 +368,6 @@ def test_simpleLock2(self, mock_current_thread): @unittest.skipIf(skipTests, "Individual tests") @patch('threading.current_thread') def test_exclusiveLock(self, mock_current_thread): - # Try to acquire an exclusive lock. mock_current_thread.return_value = 'thread1' lock = SharedLock() @@ -432,7 +442,7 @@ def test_steal(self, mock_current_thread): # Get a shared lock and then attempt to steal it. Should fail. lock = SharedLock() lock.acquire() - with self.assertRaises(StealError) as e: + with self.assertRaises(StealError): lock.steal() lock.release() @@ -472,7 +482,6 @@ def test_with(self): class TestCacheCondtion(unittest.TestCase): - @unittest.skipIf(skipTests, "Individual tests") def test_all(self): lock = threading.Lock() @@ -552,7 +561,8 @@ def test_04_unlink(self): testCache.unlinkFile("/dir1/dir2/nosuchfile") # Unlink a file which is open - with testCache.open("/dir1/dir2/file", True, False, testIOProxy, False): + with testCache.open("/dir1/dir2/file", True, False, testIOProxy, + False): with patch('os.remove') as mockedRemove: testCache.unlinkFile("/dir1/dir2/file") self.assertEqual(mockedRemove.call_count, 2) @@ -589,7 +599,8 @@ def test_04_renameFile(self): self.assertTrue(os.path.exists(newMetaDataFile)) # Rename an existing active file. - with testCache.open("/dir1/dir2/file2", True, False, testIOProxy, False) as fh: + with testCache.open("/dir1/dir2/file2", True, False, testIOProxy, + False) as fh: testCache.renameFile("/dir1/dir2/file2", "/dir1/dir3/file3") self.assertEqual("/dir1/dir3/file3", fh.path) self.assertEqual(fh.cacheDataFile, os.path.join( @@ -681,13 +692,12 @@ def test_04_renameFile(self): os.remove(dataFile) os.remove(metaDataFile) fh.release() - except: + except Exception: pass @unittest.skipIf(skipTests, "Individual tests") def test_04_renameFile2(self): - testIOProxy = IOProxyForTest() with CadcCache.Cache(self.testdir, 100, True) as testCache: # Rename an existing but inactive file. 
renaming the meta data file @@ -717,7 +727,6 @@ def test_04_renameFile2(self): with self.assertRaises(ValueError): testCache.renameFile("/dir1/dir2", "/") - @unittest.skipIf(skipTests, "Individual tests") def test_04_renameDir(self): """Rename a whole directory. @@ -733,9 +742,12 @@ def test_04_renameDir(self): with self.assertRaises(ValueError): testCache.renameDir("/adir", "anotherDir") - with testCache.open("/dir1/dir2/file1", True, False, testIOProxy1, False) as fh1: - with testCache.open("/dir1/dir2/file2", True, False, testIOProxy2, False) as fh2: - with testCache.open("/dir2/dir2/file1", True, False, testIOProxy3, False) as fh3: + with testCache.open("/dir1/dir2/file1", True, False, testIOProxy1, + False) as fh1: + with testCache.open("/dir1/dir2/file2", True, False, + testIOProxy2, False) as fh2: + with testCache.open("/dir2/dir2/file1", True, False, + testIOProxy3, False) as fh3: with self.assertRaises(ValueError): testCache.renameDir( "/dir1/dir2/file1", "/dir1/dir3") @@ -755,7 +767,8 @@ def test_04_renameDir(self): self.assertTrue(os.path.exists(fh1.cacheDataFile)) self.assertTrue(os.path.exists(fh2.cacheDataFile)) self.assertTrue(os.path.exists(fh3.cacheDataFile)) - with testCache.open("/dir1/dir2/file2", True, False, testIOProxy2, False) as fh2: + with testCache.open("/dir1/dir2/file2", True, False, testIOProxy2, + False) as fh2: with self.assertRaises(OSError): testCache.renameDir("/dir1/dir2", "/dir1/dir3") @@ -787,7 +800,8 @@ def test_04_getAttr(self): # Try to get the attribute of an existing, open and modified file. # This should return the cache file attributes. - with testCache.open("/dir1/dir2/file", True, False, testIOProxy, False) as fh: + with testCache.open("/dir1/dir2/file", True, False, testIOProxy, + False) as fh: self.assertTrue(fh.fileModified) self.assertTrue(os.path.exists( testCache.dataDir + "/dir1/dir2/file")) @@ -803,7 +817,8 @@ def test_04_getAttr(self): # Test when a file is opened but not modified. Should return none # since vospace is the better source for information. - with testCache.open("/dir1/dir2/file", False, False, testIOProxy, False) as fh: + with testCache.open("/dir1/dir2/file", False, False, testIOProxy, + False) as fh: self.assertFalse(fh.fileModified) self.assertEqual(testCache.getAttr("/dir1/dir2/file"), None) testCache.flushNodeQueue.join() @@ -911,7 +926,8 @@ def test_00_constructor3(self): @unittest.skipIf(skipTests, "Individual tests") def test_00_constructor4(self): - """ Constructor with a file where the cache data directory should be.""" + """ Constructor with a file where the cache data directory + should be.""" open(self.testdir + "/data", 'a').close() with self.assertRaises(OSError) as cm: @@ -921,7 +937,8 @@ def test_00_constructor4(self): @unittest.skipIf(skipTests, "Individual tests") def test_00_constructor5(self): - """ Constructor with a read-only directory where the cache data directory should be. + """ Constructor with a read-only directory where the cache data + directory should be. Constructor should reset the permissions on that directory.""" cache_dir = os.path.join(self.testdir, 'data') @@ -935,7 +952,7 @@ def test_00_constructor5(self): @unittest.skipIf(skipTests, "Individual tests") def test_00_constructor6(self): - """ Constructor with a file where the cache meta data directory + """ Constructor with a file where the cache meta data directory should be. 
""" open(self.testdir + "/metaData", 'a').close() @@ -965,7 +982,7 @@ def setUp_testdirectory(self): for dir in directories: os.mkdir("/".join([self.testdir, dir])) for f in files: - fh = open("/".join([self.testdir, dir, f]), 'a') + fh = open("/".join([self.testdir, dir, f]), 'a') fh.seek(1000) fh.write("a") fh.close() @@ -1066,7 +1083,7 @@ def test_03_release1(self): fh.fileModified = True fh.fileCondition.set_timeout = Mock( side_effect=Exception("failed")) - with self.assertRaises(Exception) as cm: + with self.assertRaises(Exception): fh.release() def makeTestFile(self, name, size): @@ -1082,7 +1099,6 @@ def makeTestFile(self, name, size): @unittest.skipIf(skipTests, "Individual tests") def test_03_release2(self): class IOProxy_writeToBacking_slow(IOProxyForTest): - def verifyMetaData(self, md5sum): """ test returns False """ return False @@ -1095,7 +1111,7 @@ def writeToBacking(self, fh, mtime): # Release a slow to write file. ioObject = IOProxy_writeToBacking_slow() _thread.start_new_thread(self.release2_sub1, - (testObject, ioObject)) + (testObject, ioObject)) time.sleep(1) ioObject2 = IOProxyForTest() @@ -1124,7 +1140,7 @@ def test_03_release3(self): self.makeTestFile(os.path.join(testObject.dataDir, "dir1/dir2/file"), self.testSize) fh.fileModified = True - info = os.stat(fh.cacheDataFile) + os.stat(fh.cacheDataFile) fh.release() ioObject.writeToBacking.assert_called_once_with() testObject.flushNodeQueue.join() @@ -1165,7 +1181,7 @@ def test_03_release5(self): self.makeTestFile(os.path.join(testObject.dataDir, "dir1/dir2/file"), self.testSize) fh.fileModified = True - info = os.stat(fh.cacheDataFile) + os.stat(fh.cacheDataFile) with self.assertRaises(Exception): fh.release() @@ -1193,14 +1209,14 @@ def test_03_release7(self): False) try: raise OSError() - except: + except Exception: errInfo = sys.exc_info() fh.flushException = errInfo with self.assertRaises(OSError): fh.release() @unittest.skipIf(skipTests, "Individual tests") - def test_03_release3(self): + def test_03_release8(self): """Successful write to backing after unlink""" with CadcCache.Cache(self.testdir, 100) as testObject: @@ -1213,7 +1229,6 @@ def test_03_release3(self): self.makeTestFile(os.path.join(testObject.dataDir, "dir1/dir2/file"), self.testSize) fh.fileModified = True - info = os.stat(fh.cacheDataFile) testObject.unlinkFile("/dir1/dir2/file") fh.obsolete = False fh.release() @@ -1245,8 +1260,9 @@ def test_03_flushNode2(self): ioObject.writeToBacking = MagicMock(side_effect=OSError( errno.ENOENT, "No such file *EXPECTED*")) fh.flushNode() - # turns out that OSError, based on teh errno argument, returns the specific subclass, in this - # case the FileNotFoundError. However, FileNotFoundError is not defined in Python2.7 + # turns out that OSError, based on teh errno argument, returns + # the specific subclass, in this case the FileNotFoundError. 
+ # However, FileNotFoundError is not defined in Python2.7 self.assertTrue(isinstance(fh.flushException[1], OSError)) @unittest.skipIf(skipTests, "Individual tests") @@ -1259,8 +1275,9 @@ def test_03_flushNode3(self): fh = testObject.open("/dir1/dir2/file", False, False, ioObject, False) fh.writerLock.acquire(shared=False) - testObject.checkCacheSpace = Mock(side_effect=OSError(errno.ENOENT, - "checkCacheSpaceError *EXPECTED*")) + testObject.checkCacheSpace = \ + Mock(side_effect=OSError(errno.ENOENT, + "checkCacheSpaceError *EXPECTED*")) fh.flushNode() @unittest.skipIf(skipTests, "Individual tests") @@ -1274,10 +1291,10 @@ def test_04_read1(self): False, ioObject, False) fh.fullyCached = False buf = ctypes.create_string_buffer(4) - retsize = fh.read(size=100, offset=0, cbuffer=buf) + fh.read(size=100, offset=0, cbuffer=buf) # Read beyond the end of the file. with self.assertRaises(ValueError): - data = fh.read(100, 1024 * 1024, buf) + fh.read(100, 1024 * 1024, buf) fh.release() @unittest.skipIf(skipTests, "Individual tests") @@ -1289,12 +1306,12 @@ def test_04_read2(self): fh = testCache.open("/dir1/dir2/file", False, False, ioObject, False) buf = ctypes.create_string_buffer(100) - retsize = fh.read(size=100, offset=0, cbuffer=buf) + fh.read(size=100, offset=0, cbuffer=buf) with patch('vofs.CadcCache.libc.read') as mockedRead: mockedRead.return_value = -1 with self.assertRaises(CadcCache.CacheError): - retsize = fh.read(0, 1024 * 1024, buf) + fh.read(0, 1024 * 1024, buf) fh.release() @unittest.skipIf(skipTests, "Individual tests") @@ -1308,7 +1325,7 @@ def test_04_read3(self): ioObject.exception = OSError() buf = ctypes.create_string_buffer(100) with self.assertRaises(OSError): - retsize = fh.read(100, 0, buf) + fh.read(100, 0, buf) ioObject.exception = None fh.release() @@ -1320,7 +1337,8 @@ def test_04_write1(self): with CadcCache.Cache(self.testdir, 100) as testCache: testCache.flushNodeQueue = CadcCache.FlushNodeQueue() ioObject = IOProxyFor100K() - with testCache.open("/dir1/dir2/file", True, False, ioObject, False) as fh: + with testCache.open("/dir1/dir2/file", True, False, ioObject, + False) as fh: data = b"abcd" buf = ctypes.create_string_buffer(len(data)) fh.write(data, len(data), 30000) @@ -1413,7 +1431,8 @@ def threadExecuteMock(thread): with CadcCache.Cache(self.testdir, 100, timeout=2) as testCache: ioObject = IOProxyFor100K() # Fully cached, makeCached does mostly nothing. - with testCache.open("/dir1/dir2/file", False, False, ioObject, False) as fh: + with testCache.open("/dir1/dir2/file", False, False, ioObject, + False) as fh: fh.fullyCached = True oldMetaData = copy.deepcopy(fh.metaData) fh.metaData.getRange = Mock() @@ -1442,7 +1461,8 @@ def threadExecuteMock(thread): # The required range is cached. The fn exists after calling # getRange. - with testCache.open("/dir1/dir2/file", False, False, ioObject, False) as fh: + with testCache.open("/dir1/dir2/file", False, False, ioObject, + False) as fh: fh.readThread = CadcCache.CacheReadThread(0, 0, 0, fh) oldMetaData = fh.metaData fh.metaData = copy.deepcopy(oldMetaData) @@ -1457,7 +1477,8 @@ def threadExecuteMock(thread): # Check that the block range correctly maps to bytes when # isNewReadBest is called. The call exits when data is available. 
- with testCache.open("/dir1/dir2/file", False, False, ioObject, False) as fh: + with testCache.open("/dir1/dir2/file", False, False, ioObject, + False) as fh: oldMetaData = copy.deepcopy(fh.metaData) fh.metaData.persist = Mock() fh.readThread = CadcCache.CacheReadThread(0, 0, 0, fh) @@ -1517,7 +1538,6 @@ def threadExecuteMock(thread): args=[fh.fileCondition, fh]) t1.start() with patch('vofs.CadcCache.CacheReadThread') as mockedClass: - realClass = mockedClass.return_value mockedClass.return_value = CacheReadThreadMock(fh) fh.metaData.setReadBlocks(6, 6) fh.metaData.md5sum = 12345 @@ -1674,7 +1694,8 @@ def test_04_truncate(self): with CadcCache.Cache(self.testdir, 100, timeout=2) as testCache: # Expand a new file testCache.flushNodeQueue = CadcCache.FlushNodeQueue() - with testCache.open("/dir1/dir2/file", True, False, testIOProxy, False) as testFile: + with testCache.open("/dir1/dir2/file", True, False, testIOProxy, + False) as testFile: testFile.truncate(10) self.assertTrue(testFile.fileModified) self.assertTrue(testFile.fullyCached) @@ -1687,7 +1708,8 @@ def test_04_truncate(self): testIOProxy.writeToBacking.reset_mock() testIOProxy.readFromBacking.reset_mock() self.assertEqual(os.path.getsize(testFile.cacheDataFile), 10) - with testCache.open("/dir1/dir2/file", False, False, testIOProxy, True) as testFile: + with testCache.open("/dir1/dir2/file", False, False, testIOProxy, + True) as testFile: testFile.truncate(10) self.assertFalse(testFile.fileModified) self.assertTrue(testFile.fullyCached) @@ -1698,7 +1720,8 @@ def test_04_truncate(self): # Expand a file from 10 bytes testIOProxy.writeToBacking.reset_mock() testIOProxy.writeToBacking = Mock(return_value='0x123456') - with testCache.open("/dir1/dir2/file", False, False, testIOProxy, False) as testFile: + with testCache.open("/dir1/dir2/file", False, False, testIOProxy, + False) as testFile: testIOProxy.readFromBacking.reset_mock() testFile.truncate(testCache.IO_BLOCK_SIZE * 2 + 20) self.assertTrue(testFile.fileModified) @@ -1714,9 +1737,11 @@ def test_04_truncate(self): # read from backing. 
testIOProxy.writeToBacking.reset_mock() testIOProxy.readFromBacking.reset_mock() - with testCache.open("/dir1/dir2/file", False, False, testIOProxy, False) as testFile: + with testCache.open("/dir1/dir2/file", False, False, testIOProxy, + False) as testFile: def clear_thread(): testFile.readThread = None + self.assertFalse(testFile.fileModified) testFile.readThread = Object() testFile.readThread.aborted = False @@ -1752,15 +1777,14 @@ def clear_thread(): class TestCadcCacheReadThread(unittest.TestCase): """Test the CadcCache.CacheTreadThread class """ - class MyIoObject(CadcCache.IOProxy): + class MyIoObject(CadcCache.IOProxy): def readFromBacking(self, size=None, offset=0, blockSize=CadcCache.Cache.IO_BLOCK_SIZE): self.cacheFile.setHeader(100, "1234") pass class MyFileHandle(CadcCache.FileHandle): - def __init__(self, path, cache, ioObject): CadcCache.FileHandle.__init__(self, path, cache, ioObject) self.ioObject = ioObject @@ -1776,7 +1800,7 @@ def tearDown(self): @unittest.skipIf(skipTests, "Individual tests") def test_constructor(self): - with CadcCache.Cache(self.testdir, 100, timeout=2) as testCache: + with CadcCache.Cache(self.testdir, 100, timeout=2): crt = CadcCache.CacheReadThread(1, 2, 3, 4) self.assertEqual(crt.startByte, 1) self.assertEqual(crt.mandatoryEnd, 1 + 2) @@ -1968,11 +1992,13 @@ def run(): suite4 = unittest.TestLoader().loadTestsFromTestCase(TestCacheRetry) suite5 = unittest.TestLoader().loadTestsFromTestCase(TestCacheAborted) suite6 = unittest.TestLoader().loadTestsFromTestCase(TestIOProxy) - suite7 = unittest.TestLoader().loadTestsFromTestCase(TestCadcCacheReadThread) + suite7 = unittest.TestLoader().loadTestsFromTestCase( + TestCadcCacheReadThread) suite8 = unittest.TestLoader().loadTestsFromTestCase(TestCadcCache) alltests = unittest.TestSuite([suite1, suite2, suite3, suite4, suite5, suite6, suite7, suite8]) - return(unittest.TextTestRunner(verbosity=2).run(alltests)) + return (unittest.TextTestRunner(verbosity=2).run(alltests)) + if __name__ == "__main__": run() diff --git a/vofs/vofs/tests/test_vofs.py b/vofs/vofs/tests/test_vofs.py index 965a9095e..4db4f6f2a 100644 --- a/vofs/vofs/tests/test_vofs.py +++ b/vofs/vofs/tests/test_vofs.py @@ -7,13 +7,14 @@ import shutil import stat import ctypes -from fuse import FUSE, FuseOSError, fuse_operations -from errno import EIO, EAGAIN, EPERM, ENOENT +from fuse import FuseOSError, fuse_operations +from errno import EIO, EPERM, ENOENT import unittest2 as unittest from mock import Mock, MagicMock, patch, ANY import vos from cadcutils import exceptions -from vofs.CadcCache import Cache, CacheRetry, CacheAborted, FileHandle, IOProxy, FlushNodeQueue +from vofs.CadcCache import Cache, CacheRetry, CacheAborted, FileHandle, \ + IOProxy, FlushNodeQueue from vos.node_cache import NodeCache from vofs.vofs import HandleWrapper, MyFuse, VOFS from vofs import vofs @@ -26,7 +27,6 @@ class Object(object): class MyFileHandle(FileHandle): - def __init__(self, path, cache, ioObject): anIOProxy = IOProxy() anIOProxy.writeToBacking = Mock() @@ -41,7 +41,6 @@ def readData(self, start, mandatory, optional): class MyFileHandle2(FileHandle): - def __init__(self, path, cache, ioObject): anIOProxy = IOProxy() anIOProxy.writeToBacking = Mock() @@ -145,7 +144,8 @@ def testWrite3(self): # fileHandle.cache_file_handle.write = Mock() # fileHandle.cache_file_handle.write.side_effect = CacheRetry("fake") # with self.assertRaises(FuseOSError) as e: - # testfs.write( "/dir1/dir2/file", "abcd", 4, 2048, fileHandle.get_id()) + # testfs.write( 
"/dir1/dir2/file", "abcd", 4, 2048, + # fileHandle.get_id()) # self.assertEqual(e.exception.errno, EAGAIN) @unittest.skipIf(skipTests, "Individual tests") @@ -156,7 +156,7 @@ def testWrite5(self): testfs.write("/dir1/dir2/file", "abcd", 4, 2048, -1) self.assertEqual(e.exception.errno, EIO) - #@unittest.skipIf(skipTests, "Individual tests") + # @unittest.skipIf(skipTests, "Individual tests") def testRead1(self): testfs = vofs.VOFS(self.testMountPoint, self.testCacheDir, opt) @@ -184,7 +184,8 @@ def testRead1(self): # Read from an invalid file handle. with self.assertRaises(FuseOSError) as e: - testfs.read(path="/dir1/dir2/file", buf=buf, size=4, offset=2048, file_id=-1) + testfs.read(path="/dir1/dir2/file", buf=buf, size=4, offset=2048, + file_id=-1) self.assertEqual(e.exception.errno, EIO) @unittest.skipIf(skipTests, "Individual tests") @@ -206,7 +207,8 @@ def test_open(self): wraps=mockFileHandle.return_value.readData) fh = myVofs.open(file, os.O_RDWR | os.O_CREAT, None) self.assertEqual(self.testCacheDir + "/data" + file, - HandleWrapper.file_handle(fh) .cache_file_handle.cacheDataFile) + HandleWrapper.file_handle( + fh).cache_file_handle.cacheDataFile) self.assertEqual(self.testCacheDir + "/metaData" + file, HandleWrapper.file_handle(fh).cache_file_handle. cacheMetaDataFile) @@ -227,10 +229,11 @@ def test_open(self): # test file in the cache already myVofs.cache.getAttr = Mock() myVofs.cache.getAttr.return_value = Mock() - #fhMock = Mock() - #myVofs.cache.open = Mock() - #myVofs.cache.open.return_value = fhMock - with patch('vofs.vofs.MyIOProxy') as myIOProxy, patch('vofs.CadcCache.FileHandle') as mockFileHandle: + # fhMock = Mock() + # myVofs.cache.open = Mock() + # myVofs.cache.open.return_value = fhMock + with patch('vofs.vofs.MyIOProxy') as myIOProxy, patch( + 'vofs.CadcCache.FileHandle') as mockFileHandle: mockFileHandle.return_value = MyFileHandle( file, myVofs.cache, None) mockFileHandle.return_value.readData = Mock( @@ -243,7 +246,8 @@ def test_open(self): self.assertFalse(HandleWrapper.file_handle( fh).cache_file_handle.fileModified) # TODO fullyCached = True or False? - # self.assertFalse(HandleWrapper.file_handle(fh).cache_file_handle.fullyCached) + # self.assertFalse( + # HandleWrapper.file_handle(fh).cache_file_handle.fullyCached) # test a read-only file mockFileHandle.return_value.readData.reset_mock() @@ -298,12 +302,12 @@ def test_create(self): ('/dir1/dir2/file',): node, ('/dir1/dir2',): parentNode}, name="testfs.getNode")) with patch('vos.vos.Client.create', Mock(return_value=node)): - with self.assertRaises(FuseOSError) as e: + with self.assertRaises(FuseOSError): testfs.create(file, os.O_RDWR) testfs.getNode = Mock(side_effect=FuseOSError(errno=5)) with patch('vos.vos.Client.create', Mock(return_value=node)): - with self.assertRaises(FuseOSError) as e: + with self.assertRaises(FuseOSError): testfs.create(file, os.O_RDWR) node.props.get = Mock(return_value=False) @@ -327,6 +331,7 @@ def mockRelease(): raise CacheRetry("Exception") else: return + mockRelease.callCount = 0 # Raise a timeout exception @@ -336,7 +341,7 @@ def mockRelease(): basefh.path = file fh = HandleWrapper(basefh, False) myVofs = vofs.VOFS("vos:", self.testCacheDir, opt) - with self.assertRaises(CacheRetry) as e: + with self.assertRaises(CacheRetry): myVofs.release(file, fh.get_id()) # Raise an IO error. 
@@ -344,14 +349,14 @@ def mockRelease(): basefh.fileModified = True basefh.path = file fh = HandleWrapper(basefh, False) - with self.assertRaises(Exception) as e: + with self.assertRaises(Exception): myVofs.release(file, fh.get_id()) # Release an invalid file descriptor # TODO should this throw a FuseOSError? - with self.assertRaises(KeyError) as e: + with self.assertRaises(KeyError): myVofs.release(file, -1) - # self.assertEqual(e.exception.errno, EIO) + # self.assertEqual(e.exception.errno, EIO) @unittest.skipIf(skipTests, "Individual tests") def test_getattr(self): @@ -637,8 +642,9 @@ def test_fsync2(self): Mock(wraps=HandleWrapper.file_handle(fh).cache_file_handle. fsync) testfs.fsync(file, False, fh) - self.assertEqual(HandleWrapper.file_handle(fh).cache_file_handle.fsync. - call_count, 0) + self.assertEqual( + HandleWrapper.file_handle(fh).cache_file_handle.fsync. + call_count, 0) testfs.release(file, fh) @unittest.skipIf(skipTests, "Individual tests") @@ -667,7 +673,7 @@ def test_rename(self): testfs.client.move.reset_mock() testfs.cache.renameFile.reset_mock() testfs.client.move.side_effect = Exception("str") - testfs.get_node = Mock(side_effect = exceptions.NotFoundException()) + testfs.get_node = Mock(side_effect=exceptions.NotFoundException()) with self.assertRaises(Exception): self.assertEqual(testfs.rename(src, dest), -1) testfs.client.move.assert_called_once_with(src, dest) @@ -678,7 +684,7 @@ def test_rename(self): testfs.cache.renameFile.reset_mock() testfs.client.move.side_effect = Exception( "the node is NodeLocked so won't work") - with self.assertRaises(OSError) as e: + with self.assertRaises(OSError): self.assertEqual(testfs.rename(src, dest), -1) testfs.client.move.assert_called_once_with(src, dest) self.assertEqual(testfs.cache.renameFile.call_count, 0) @@ -693,6 +699,7 @@ def mock_read(block_size): return "1234" else: return None + file = "/dir1/dir2/file" testfs = vofs.VOFS(self.testMountPoint, self.testCacheDir, opt) node = Mock(spec=vos.Node) @@ -723,7 +730,7 @@ def mock_read(block_size): origRelease = FileHandle.release origTruncate = FileHandle.truncate with patch('vofs.CadcCache.FileHandle.release') as mockRelease, \ - patch('vofs.CadcCache.FileHandle') as mockFileHandle: + patch('vofs.CadcCache.FileHandle') as mockFileHandle: mockFileHandle.return_value = MyFileHandle( file, testfs.cache, None) mockFileHandle.return_value.readData = \ @@ -758,7 +765,7 @@ def mock_read(block_size): # Truncate with an exception returned by the CadcCache truncate testfs.cache.open.reset_mock() with patch('vofs.CadcCache.FileHandle.release') as mockRelease, \ - patch('vofs.CadcCache.FileHandle.readData') as mockReadData: + patch('vofs.CadcCache.FileHandle.readData'): mockRelease.wraps = origRelease # TODO This doesn't really work, # release is not called and so open # files are being leaked @@ -772,14 +779,15 @@ def mock_read(block_size): # Truncate an already opened file given the file handle. 
with patch('vofs.CadcCache.FileHandle.release') as mockRelease, \ - patch('vofs.CadcCache.FileHandle.readData') as mockReadData: + patch('vofs.CadcCache.FileHandle.readData'): mockRelease.wraps = origRelease # TODO This doesn't really work, # release is not called and so open # files are being leaked try: fh = testfs.open(file, os.O_RDWR | os.O_CREAT, None) testfs.cache.open.reset_mock() - with patch('vofs.CadcCache.FileHandle.truncate') as mockTruncate: + with patch( + 'vofs.CadcCache.FileHandle.truncate') as mockTruncate: mockTruncate.wraps = origTruncate # TODO Same issue as the # mockRelease TODO above. testfs.truncate(file, 20, fh) @@ -796,12 +804,11 @@ def mock_read(block_size): testfs2 = vofs.VOFS(self.testMountPoint, self.testCacheDir, opt) testfs2.client = testfs.client - testfs = None testfs2.cache.open = Mock(wraps=testfs2.cache.open) # Truncate a read only file handle. with patch('vofs.CadcCache.FileHandle.release') as mockRelease, \ - patch('vofs.CadcCache.FileHandle') as mockFileHandle: + patch('vofs.CadcCache.FileHandle') as mockFileHandle: mockRelease.wraps = origRelease mockFileHandle.return_value = MyFileHandle(file, testfs2.cache, None) @@ -810,7 +817,8 @@ def mock_read(block_size): try: fh = testfs2.open(file, os.O_RDONLY, None) testfs2.cache.open.reset_mock() - with patch('vofs.CadcCache.FileHandle.truncate') as mockTruncate: + with patch( + 'vofs.CadcCache.FileHandle.truncate') as mockTruncate: mockTruncate.wraps = origTruncate with self.assertRaises(FuseOSError): testfs2.truncate(file, 20, fh) @@ -822,7 +830,7 @@ def mock_read(block_size): testfs2.release(file, fh) # Truncate with an invalid file descriptor. - with self.assertRaises(KeyError) as e: + with self.assertRaises(KeyError): testfs2.truncate(file, 20, -1) def test_getNode(self): @@ -871,13 +879,13 @@ def __call__(self, *args, **keywords): name = "" else: name = self.name - raise ValueError("Mock side effect " + name + " arguments not in Controller: " - + str(args) + ":" + str(keywords) + ": " + - str(self.controller) + "***") + raise ValueError( + "Mock side effect " + name + " arguments not in Controller: " + + str(args) + ":" + str(keywords) + ": " + + str(self.controller) + "***") class TestMyIOProxy(unittest.TestCase): - @unittest.skipIf(skipTests, "Individual tests") def testWriteToBacking(self): # Submit a write request for the whole file. 
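The __call__ method above belongs to a small side-effect controller: call arguments are looked up in a dictionary of canned return values, and anything unregistered raises a loud ValueError, which keeps the mocks honest about how they are invoked. A stripped-down sketch of the pattern, under the assumption that ArgController is only an illustrative name and not the class used in these tests:

    from unittest.mock import Mock

    class ArgController(object):
        """Return canned values keyed by positional arguments and fail
        loudly on any call that was not registered."""

        def __init__(self, controller, name=""):
            self.controller = controller
            self.name = name

        def __call__(self, *args, **keywords):
            if args in self.controller:
                return self.controller[args]
            raise ValueError(
                "{0}: arguments not in controller: {1} {2}".format(
                    self.name, args, keywords))

    get_node = Mock(side_effect=ArgController(
        {('/dir1/dir2/file',): 'file-node', ('/dir1/dir2',): 'parent-node'},
        name="get_node"))

    assert get_node('/dir1/dir2/file') == 'file-node'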
@@ -937,8 +945,9 @@ def mock_read(block_size): # Submit a request for the whole file testProxy.readFromBacking() - client.open.assert_called_once_with(path, mode=os.O_RDONLY, - view="data", size=None, range=None) + client.open.assert_called_once_with( + path, mode=os.O_RDONLY, view="data", size=None, + range=None) self.assertEqual(vos_VOFILE.close.call_count, 1) self.assertEqual(vos_VOFILE.read.call_count, 2) @@ -948,8 +957,8 @@ def mock_read(block_size): callCount[0] = 0 testProxy.readFromBacking(100, 200) self.assertEqual(client.open.call_count, 1) - vos_VOFILE.open.assert_called_once_with("url0", - bytes="bytes=200-299") + vos_VOFILE.open.assert_called_once_with( + "url0", bytes="bytes=200-299") self.assertEqual(vos_VOFILE.close.call_count, 1) self.assertEqual(vos_VOFILE.read.call_count, 2) @@ -962,8 +971,8 @@ def mock_read(block_size): "aborted") testProxy.readFromBacking(150, 200) self.assertEqual(client.open.call_count, 1) - vos_VOFILE.open.assert_called_once_with("url0", - bytes="bytes=200-349") + vos_VOFILE.open.assert_called_once_with( + "url0", bytes="bytes=200-349") self.assertEqual(vos_VOFILE.close.call_count, 1) self.assertEqual(vos_VOFILE.read.call_count, 1) @@ -1021,6 +1030,7 @@ def test_readFromBackingErrorHandling(self): def side_effect(*args, **kwds): return returns.pop(0) + vos_VOFILE.read = MagicMock(side_effect=side_effect) vos_VOFILE.close = Mock() client.open = Mock(return_value=vos_VOFILE) @@ -1061,46 +1071,43 @@ def test_getSize(self): testProxy.size = 27 self.assertEqual(testProxy.getSize(), 27) - - + @unittest.skipIf(skipTests, "Individual tests") @patch("fuse._libfuse") def test_MyFuse(self, mock_libfuse): - #Tests the ctor to make sure the arguments are passed - #correctly to the os + # Tests the ctor to make sure the arguments are passed + # correctly to the os conn = MagicMock() mock_libfuse.fuse_main_real.return_value = False fuseops = fuse_operations() - buf = ctypes.create_string_buffer(4) - fuse = MyFuse(VOFS(":vos", "/tmp/vos_", None, conn=conn, - cache_limit=100, - cache_max_flush_threads=3), - "/tmp/vospace", - fsname="vos:", - nothreads=5, - foreground=False) - #if it was easy to pick inside the args memory structures we would - #have checked the the real arguments passed to the fuse library - #instead of ANY - mock_libfuse.fuse_main_real.assert_called_with(5, ANY, ANY, - ctypes.sizeof(fuseops), + MyFuse(VOFS(":vos", "/tmp/vos_", None, conn=conn, + cache_limit=100, + cache_max_flush_threads=3), + "/tmp/vospace", + fsname="vos:", + nothreads=5, + foreground=False) + # if it was easy to pick inside the args memory structures we would + # have checked the the real arguments passed to the fuse library + # instead of ANY + mock_libfuse.fuse_main_real.assert_called_with(5, ANY, ANY, + ctypes.sizeof(fuseops), None) - - fuse = MyFuse(VOFS(":vos", "/tmp/vos_", None, conn=conn, - cache_limit=100, - cache_max_flush_threads=3), - "/tmp/vospace", - fsname="vos:", - nothreads=5, - readonly=True, - user_allow_other=True, - foreground=False) + + MyFuse(VOFS(":vos", "/tmp/vos_", None, conn=conn, + cache_limit=100, + cache_max_flush_threads=3), + "/tmp/vospace", + fsname="vos:", + nothreads=5, + readonly=True, + user_allow_other=True, + foreground=False) mock_libfuse.fuse_main_real.assert_called_with(6, ANY, ANY, ctypes.sizeof(fuseops), None) - @unittest.skipIf(skipTests, "Individual tests") @patch("vofs.vofs.FUSE.__init__") def test_readMyFuse(self, mock_fuse): @@ -1108,12 +1115,12 @@ def test_readMyFuse(self, mock_fuse): conn = MagicMock() buf = 
ctypes.create_string_buffer(4) fuse = MyFuse(VOFS("vos:/anode", "/tmp/vos_", None, conn=conn, - cache_limit=100, - cache_max_flush_threads=3), - "/tmp/vospace", - fsname="vos:", - nothreads=5, - foreground=False) + cache_limit=100, + cache_max_flush_threads=3), + "/tmp/vospace", + fsname="vos:", + nothreads=5, + foreground=False) fuse.raw_fi = True fuse.encoding = 'ascii' fuse.operations = Mock() @@ -1122,11 +1129,9 @@ def test_readMyFuse(self, mock_fuse): fip.contents = 'somevalue' retsize = fuse.read("/some/path".encode('utf-8'), buf, 10, 1, fip) fuse.operations.assert_called_once_with( - 'read', '/some/path', 10, 1, 'somevalue', buf) + 'read', '/some/path', 10, 1, 'somevalue', buf) self.assertEqual(3, retsize, "Wrong buffer size") - - @unittest.skipIf(skipTests, "Individual tests") @patch("vofs.vofs.FUSE.__init__") def test_writeMyFuse(self, mock_fuse): @@ -1134,12 +1139,12 @@ def test_writeMyFuse(self, mock_fuse): conn = MagicMock() buf = ctypes.create_string_buffer(4) fuse = MyFuse(VOFS("vos:/anode", "/tmp/vos_", None, conn=conn, - cache_limit=100, - cache_max_flush_threads=3), - "/tmp/vospace", - fsname="vos:", - nothreads=5, - foreground=False) + cache_limit=100, + cache_max_flush_threads=3), + "/tmp/vospace", + fsname="vos:", + nothreads=5, + foreground=False) fuse.raw_fi = True fuse.encoding = 'ascii' fuse.operations = Mock() @@ -1150,11 +1155,11 @@ def test_writeMyFuse(self, mock_fuse): fip.contents = mock_contents retsize = fuse.write("/some/path".encode('utf-8'), buf, 10, 1, fip) fuse.operations.assert_called_once_with( - 'write', '/some/path', buf, 10, 1, fh_mock) + 'write', '/some/path', buf, 10, 1, fh_mock) self.assertEqual(3, retsize, "Wrong buffer size") -class TestHandleWrapper(unittest.TestCase): +class TestHandleWrapper(unittest.TestCase): @unittest.skipIf(skipTests, "Individual tests") def testAll(self): # Get the hand wrapper's id @@ -1180,8 +1185,9 @@ def run(): suite1 = unittest.TestLoader().loadTestsFromTestCase(TestVOFS) suite2 = unittest.TestLoader().loadTestsFromTestCase(TestMyIOProxy) suite3 = unittest.TestLoader().loadTestsFromTestCase(TestHandleWrapper) - alltests = unittest.TestSuite([suite1, suite2, suite3]) + unittest.TestSuite([suite1, suite2, suite3]) return unittest.TextTestRunner(verbosity=2).run(suite1) + if __name__ == "__main__": run() diff --git a/vofs/vofs/utils.py b/vofs/vofs/utils.py index c87fe2766..0e0abe181 100644 --- a/vofs/vofs/utils.py +++ b/vofs/vofs/utils.py @@ -11,6 +11,8 @@ def mkdir_p(path, mode): if exc.errno == errno.EEXIST and os.path.isdir(path): os.chmod(path, mode) elif exc.errno == errno.EACCES: - raise OSError(errno.EACCES, "permission denied to create {0}".format(path)) + raise OSError(errno.EACCES, + "permission denied to create {0}".format(path)) else: - raise OSError(errno.ENOTDIR, "{0} exists and is not a directory".format(path)) + raise OSError(errno.ENOTDIR, + "{0} exists and is not a directory".format(path)) diff --git a/vofs/vofs/vofs.py b/vofs/vofs/vofs.py index 260b215f3..2faeaaf8d 100755 --- a/vofs/vofs/vofs.py +++ b/vofs/vofs/vofs.py @@ -20,9 +20,8 @@ import logging import six.moves - logger = logging.getLogger('vofs') -#logger.setLevel(logging.DEBUG) +# logger.setLevel(logging.DEBUG) if sys.version_info[1] > 6: logger.addHandler(logging.NullHandler()) @@ -41,7 +40,6 @@ def flag2mode(flags): class MyIOProxy(IOProxy): - def delNode(self, force=False): raise NotImplementedError("MyIOProxy.delNode") @@ -56,25 +54,28 @@ def __init__(self, vofs, path): self.condition = CacheCondition(None) def __str__(self): - return 
"Path:{0} Size:{1} MD5:{2} condition:{3}".format(self.path, - self.size, - self.md5, - self.condition) + return "Path:{0} Size:{1} MD5:{2} condition:{3}".\ + format(self.path, self.size, self.md5, self.condition) @logExceptions() def writeToBacking(self): """ Write a file in the cache to the remote file. """ - logger.debug("PUSHING %s to VOSpace @ %s" % (self.cacheFile.cacheDataFile, self.cacheFile.path)) - logger.debug("opening a new vo file for {0}".format(self.cacheFile.path)) + logger.debug("PUSHING %s to VOSpace @ %s" % + (self.cacheFile.cacheDataFile, self.cacheFile.path)) + logger.debug( + "opening a new vo file for {0}".format(self.cacheFile.path)) dest_uri = self.vofs.get_node(self.cacheFile.path).uri - foo = self.vofs.client.copy(self.cacheFile.cacheDataFile, dest_uri, send_md5=True) + foo = self.vofs.client.copy(self.cacheFile.cacheDataFile, dest_uri, + send_md5=True) return foo - # return self.vofs.client.copy(self.cacheFile.cacheDataFile, dest_uri, send_md5=True) + # return self.vofs.client.copy(self.cacheFile.cacheDataFile, dest_uri, + # send_md5=True) @logExceptions() - def readFromBacking(self, size=None, offset=0, block_size=Cache.IO_BLOCK_SIZE): + def readFromBacking(self, size=None, offset=0, + block_size=Cache.IO_BLOCK_SIZE): """ Read from VOSpace into cache """ @@ -91,16 +92,22 @@ def readFromBacking(self, size=None, offset=0, block_size=Cache.IO_BLOCK_SIZE): logger.debug("reading range: %s" % (str(byte_range))) if self.lastVOFile is None: - logger.debug("Opening a new vo file on {0}".format(self.cacheFile.path)) + logger.debug( + "Opening a new vo file on {0}".format(self.cacheFile.path)) self.lastVOFile = self.vofs.client.open(self.cacheFile.path, - mode=os.O_RDONLY, view="data", size=size, byte_range=byte_range, + mode=os.O_RDONLY, + view="data", size=size, + byte_range=byte_range, possible_partial_read=True) else: - logger.debug("Opening a existing vo file on {0}".format(self.lastVOFile.URLs[self.lastVOFile.urlIndex])) + logger.debug("Opening a existing vo file on {0}".format( + self.lastVOFile.URLs[self.lastVOFile.urlIndex])) self.lastVOFile.open( - self.lastVOFile.URLs[self.lastVOFile.urlIndex], byte_range=byte_range, possible_partial_read=True) + self.lastVOFile.URLs[self.lastVOFile.urlIndex], + byte_range=byte_range, possible_partial_read=True) try: - logger.debug("reading from {0}".format(self.lastVOFile.URLs[self.lastVOFile.urlIndex])) + logger.debug("reading from {0}".format( + self.lastVOFile.URLs[self.lastVOFile.urlIndex])) try: resp = self.lastVOFile.read(return_response=True) except OSError: @@ -112,7 +119,8 @@ def readFromBacking(self, size=None, offset=0, block_size=Cache.IO_BLOCK_SIZE): # to client self.lastVOFile = self.vofs.client.open( self.cacheFile.path, mode=os.O_RDONLY, view="data", - size=size, byte_range=byte_range, full_negotiation=True, possible_partial_read=True) + size=size, byte_range=byte_range, full_negotiation=True, + possible_partial_read=True) resp = self.lastVOFile.read(return_response=True) if not self.cacheFile.gotHeader: @@ -135,7 +143,8 @@ def readFromBacking(self, size=None, offset=0, block_size=Cache.IO_BLOCK_SIZE): self.lastVOFile.close() self.lastVOFile = None - logger.debug("Wrote: %d bytes to cache for %s" % (offset, self.cacheFile.path)) + logger.debug( + "Wrote: %d bytes to cache for %s" % (offset, self.cacheFile.path)) def get_md5(self): if self.md5 is None: @@ -215,11 +224,13 @@ class VOFS(Operations): removexattr = None def setxattr(self, path, name, value, options, position=0): - logger.warning("Extended attributes 
not supported: {0} {1} {2} {3} {4}".format(path, - name, - value, - options, - position)) + logger.warning( + "Extended attributes not supported: {0} {1} {2} {3} {4}".format( + path, + name, + value, + options, + position)) def __init__(self, root, cache_dir, options, conn=None, cache_limit=1024, cache_nodes=False, @@ -251,7 +262,8 @@ def __init__(self, root, cache_dir, options, conn=None, # connection. try: self.client = vos.Client(root_node=root, conn=conn, - transfer_shortcut=True, secure_get=secure_get) + transfer_shortcut=True, + secure_get=secure_get) except Exception as e: e = FuseOSError(getattr(e, 'errno', EIO)) e.filename = root @@ -287,7 +299,7 @@ def access(self, path, mode): logger.debug("Checking if -->{0}<-- is accessible".format(path)) try: self.getNode(path) - except: + except Exception: return -1 return 0 @@ -338,17 +350,20 @@ def chmod(self, path, mode): @logExceptions() def create(self, path, flags, fi=None): - """Create a node. Currently ignores the ownership mode and mapping to a filehandle (fi) is not supported. + """Create a node. Currently ignores the ownership mode and mapping to + a filehandle (fi) is not supported. :param path: the container/dataNode in VOSpace to be created :param flags: Read/Write settings (eg. 600) :param fi: integer handle to assign to created file. """ - logger.debug("Creating a node: {0} with flags {1}".format(path, str(flags))) + logger.debug( + "Creating a node: {0} with flags {1}".format(path, str(flags))) if fi is not None: - warnings.warn("setting fi on call to create is not supported", NotImplemented) + warnings.warn("setting fi on call to create is not supported", + NotImplemented) # Create is handle by the client. # This should fail if the base path doesn't exist @@ -401,12 +416,15 @@ def get_node(self, path, force=False, limit=0): @type limit: int or None @rtype : vos.Node @param path: the VOSpace node to get - @param force: force retrieval (true) or provide cached version if available (false)? - @param limit: Number of child nodes to retrieve per request, if limit is None then get max returned by service. + @param force: force retrieval (true) or provide cached version if + available (false)? + @param limit: Number of child nodes to retrieve per request, if limit + is None then get max returned by service. """ # Pull the node meta data from VOSpace. - logger.debug("requesting node {0} from VOSpace. Force: {1}".format(path, force)) + logger.debug( + "requesting node {0} from VOSpace. Force: {1}".format(path, force)) node = self.client.get_node(path, force=force, limit=limit) logger.debug("Got node {0}".format(node.name)) return node @@ -421,14 +439,17 @@ def getattr(self, path, file_id=None): # Try to get the attributes from the cache first. This will only return # a result if the files has been modified and not flushed to vospace. attr = self.cache.getAttr(path) - return attr is not None and attr or self.getNode(path, limit=0, force=False).attr + return attr is not None and attr or self.getNode(path, limit=0, + force=False).attr def init(self, path): """Called on filesystem initialization. (Path is always /) - Here is where we start the worker threads for the queue that flushes nodes. + Here is where we start the worker threads for the queue that flushes + nodes. 
""" - self.cache.flushNodeQueue = FlushNodeQueue(maxFlushThreads=self.cache.maxFlushThreads) + self.cache.flushNodeQueue = FlushNodeQueue( + maxFlushThreads=self.cache.maxFlushThreads) @logExceptions() def mkdir(self, path, mode): @@ -441,7 +462,7 @@ def mkdir(self, path, mode): if "read-only mode" in str(os_error): raise FuseOSError(EPERM) raise FuseOSError(getattr(os_error, 'errno', EFAULT)) - # self.chmod(path, mode) + # self.chmod(path, mode) # @logExceptions() def open(self, path, flags, *mode): @@ -489,7 +510,8 @@ def open(self, path, flags, *mode): if not read_only and node and not locked: if node.type == "vos:DataNode": - parent_node = self.get_node(os.path.dirname(path), force=False, limit=1) + parent_node = self.get_node(os.path.dirname(path), force=False, + limit=1) if parent_node.props.get('islocked', False): logger.debug("%s is locked by parent node." % path) locked = True @@ -497,17 +519,21 @@ def open(self, path, flags, *mode): try: # sometimes target_nodes aren't internal... so then not # locked - target_node = self.get_node(node.target, force=False, limit=1) + target_node = self.get_node(node.target, force=False, + limit=1) if target_node.props.get('islocked', False): logger.debug("{0} target node is locked.".format(path)) locked = True else: - target_parent_node = self.get_node(os.path.dirname(node.target), force=False, limit=1) + target_parent_node = self.get_node( + os.path.dirname(node.target), force=False, limit=1) if target_parent_node.props.get('islocked', False): - logger.debug("{0} parent node is locked.".format(path)) + logger.debug( + "{0} parent node is locked.".format(path)) locked = True except Exception as lock_exception: - logger.warn("Error while checking for lock: {0}".format(str(lock_exception))) + logger.warn("Error while checking for lock: {0}".format( + str(lock_exception))) pass if locked and not read_only: @@ -525,8 +551,10 @@ def open(self, path, flags, *mode): logger.debug("IO Proxy initialized:{0} in backing.".format(my_proxy)) - # new file in cache library or if no node information (node not in vospace). - handle = self.cache.open(path, flags & os.O_WRONLY != 0, must_exist, my_proxy, self.cache_nodes) + # new file in cache library or if no node information + # (node not in vospace). + handle = self.cache.open(path, flags & os.O_WRONLY != 0, must_exist, + my_proxy, self.cache_nodes) logger.debug("Creating file:{0} in backing.".format(path)) @@ -547,7 +575,8 @@ def read(self, path, size=0, offset=0, file_id=None, buf=None): if file_id is None: raise FuseOSError(EIO) - logger.debug("reading range: %s %d %d %d" % (path, size, offset, file_id)) + logger.debug( + "reading range: %s %d %d %d" % (path, size, offset, file_id)) while True: try: @@ -574,7 +603,8 @@ def readlink(self, path): Currently doesn't provide correct capabilty for VOSpace FS. """ - return self.get_node(path).name+"?link="+urllib.quote_plus(self.getNode(path).target) + return self.get_node(path).name + "?link=" + urllib.quote_plus( + self.getNode(path).target) @logExceptions() def readdir(self, path, file_id): @@ -583,15 +613,17 @@ def readdir(self, path, file_id): # reading from VOSpace can be slow, we'll do this in a thread with self.condition: if not self.loading_dir.get(path, False): - # TODO implement this as a multiprocess call instead of threading. + # TODO implement this as a multiprocess call instead of + # threading. 
self.loading_dir[path] = True - six.moves._thread.start_new_thread(self.load_dir, (path, )) + six.moves._thread.start_new_thread(self.load_dir, (path,)) while self.loading_dir.get(path, False): logger.debug("Waiting ... ") self.condition.wait() - return ['.', '..'] + [e.name for e in self.getNode(path, - force=False, - limit=None).node_list] + return ['.', '..'] + [e.name for e in + self.getNode(path, + force=False, + limit=None).node_list] @logExceptions() def load_dir(self, path): @@ -618,7 +650,9 @@ def flush(self, path, file_id): fh.cache_file_handle.flush() except CacheRetry as ce: logger.critical(str(ce)) - logger.critical("Push to VOSpace reached FUSE timeout, continuing VOSpace push in background.") + logger.critical( + "Push to VOSpace reached FUSE timeout, continuing VOSpace push" + "in background.") pass return 0 @@ -641,16 +675,16 @@ def release(self, path, file_id): @logExceptions() def rename(self, src, dest): """ - Rename a data node into a new container. This is called only when both src and dest - are part of the mount. + Rename a data node into a new container. This is called only when both + src and dest are part of the mount. """ logger.debug("Original %s -> %s" % (src, dest)) try: node = self.get_node(dest) if node.type == "vos:DataNode": - # destination is an existing data node. Must be deleted first from the server or - # otherwise remove will fail + # destination is an existing data node. Must be deleted first + # from the server or otherwise remove will fail self.unlink(dest) logger.debug('Deleted {}'.format(dest)) except exceptions.NotFoundException: @@ -691,16 +725,22 @@ def statfs(self, path): used = int(node.props.get('length', 2 ** 33)) free = n_bytes - used logger.debug("Got properties: {0}".format(node.props)) - sfs = {'f_bsize': block_size, 'f_frsize': block_size, 'f_blocks': int(n_bytes / block_size), - 'f_bfree': int(free / block_size), 'f_bavail': int(free / block_size), - 'f_files': len(node.node_list), 'f_ffree': 2 * 10, 'f_favail': 2 * 10, 'f_flags': 0, + sfs = {'f_bsize': block_size, 'f_frsize': block_size, + 'f_blocks': int(n_bytes / block_size), + 'f_bfree': int(free / block_size), + 'f_bavail': int(free / block_size), + 'f_files': len(node.node_list), 'f_ffree': 2 * 10, + 'f_favail': 2 * 10, 'f_flags': 0, 'f_namemax': 256} return sfs @logExceptions() def truncate(self, path, length, file_id=None): """Perform a file truncation to length bytes""" - logger.debug("Attempting to truncate {0}({1}) to length {2}".format(path, file_id, length)) + logger.debug( + "Attempting to truncate {0}({1}) to length {2}".format(path, + file_id, + length)) if file_id is None: # Ensure the file exists. @@ -751,27 +791,28 @@ def write(self, path, data, size, offset, file_id=None): logger.debug("%s -> %s" % (path, fh)) logger.debug("%d --> %d" % (offset, offset + size)) return fh.cache_file_handle.write(data, size, offset) - + class MyFuse(FUSE): - """ - Customization of FUSE class in order to speed up reads and writes by passing - the destination buffer from the file system directly to the read and write - operation to fill it in and return the actual size that was + """ + Customization of FUSE class in order to speed up reads and writes by + passing the destination buffer from the file system directly to the read + and write operation to fill it in and return the actual size that was filled in. 
""" # add the readonly options to the list of options in FUSE FUSE.OPTIONS = list(FUSE.OPTIONS) FUSE.OPTIONS.append(('readonly', '-r')) FUSE.OPTIONS = tuple(FUSE.OPTIONS) - + def read(self, path, buf, size, offset, fip): if self.raw_fi: fh = fip.contents else: fh = fip.contents.fh - retsize = self.operations('read', path.decode(self.encoding), size, offset, fh, buf) + retsize = self.operations('read', path.decode(self.encoding), size, + offset, fh, buf) assert retsize <= size, \ 'actual amount read %d greater than expected %d' % (retsize, size) diff --git a/vos/dev_requirements.txt b/vos/dev_requirements.txt index 0510268ec..8a264dacc 100644 --- a/vos/dev_requirements.txt +++ b/vos/dev_requirements.txt @@ -1,6 +1,7 @@ -e . pytest>=3.0.5 pytest-cov>=2.5.1 +flake8>=3.4.1 mock==2.0.0 future==0.16.0 unittest2==1.1.0 diff --git a/vos/setup.py b/vos/setup.py index 1304cd44d..f3a271b97 100755 --- a/vos/setup.py +++ b/vos/setup.py @@ -42,7 +42,7 @@ def readme(): # generate the version file with open(os.path.join(PACKAGENAME, 'version.py'), 'w') as f: - f.write('version = \'{}\''.format(VERSION)) + f.write('version = \'{}\'\n'.format(VERSION)) # Treat everything in scripts except README.rst as a script to be installed scripts = [fname for fname in glob.glob(os.path.join('scripts', '*')) diff --git a/vos/vos/__init__.py b/vos/vos/__init__.py index 16153f765..b77e96c8c 100644 --- a/vos/vos/__init__.py +++ b/vos/vos/__init__.py @@ -2,14 +2,16 @@ """ A Virtual Observatory Space (VOSpace) client library. - - The vos package includes a set of library classes that are useful for interacting with a VOSpace web service: - (http://ivoa.net/documents/VOSpace/). The libraries have been developed against the CADC - (http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/vospace) VOSpace implementation as used by the - CANFAR (http://www.canfar.net) project. - - The Client class is the most useful for the majority of interacations with the VOSpace service - - + + The vos package includes a set of library classes that are useful for + interacting with a VOSpace web service: + (http://ivoa.net/documents/VOSpace/). The libraries have been developed + against the CADC (http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/vospace) + VOSpace implementation as used by the CANFAR (http://www.canfar.net) project. + + The Client class is the most useful for the majority of interacations with + the VOSpace service + + """ -from .vos import Client, Connection, Node, VOFile +from .vos import Client, Connection, Node, VOFile # noqa diff --git a/vos/vos/commands/__init__.py b/vos/vos/commands/__init__.py index bc5a8d7ec..92bd32471 100644 --- a/vos/vos/commands/__init__.py +++ b/vos/vos/commands/__init__.py @@ -18,5 +18,5 @@ from .vsync import vsync from .vtag import vtag -__all__ = ['vcp', 'vcat', 'vchmod', 'vcp', 'vln', 'vlock', 'vls', 'vmkdir', 'vmv', 'vrm', 'vrmdir', 'vsync', 'vtag'] - +__all__ = ['vcp', 'vcat', 'vchmod', 'vln', 'vlock', 'vls', 'vmkdir', + 'vmv', 'vrm', 'vrmdir', 'vsync', 'vtag'] diff --git a/vos/vos/commands/interrupt_exception.py b/vos/vos/commands/interrupt_exception.py index 917450eac..f3ddc272f 100644 --- a/vos/vos/commands/interrupt_exception.py +++ b/vos/vos/commands/interrupt_exception.py @@ -1,15 +1,17 @@ """A utilities for dealing with CL signal handeling.""" import signal + def signal_handler(signum, frame): """ signal handler for keyboard interupt of cl interface. - :param signum: signal sent to CL tool. + :param signum: signal sent to CL tool. 
:param frame: frame where CL tool was running :raises KeyboardInterrupt """ - raise KeyboardInterrupt("SIGNAL {0} from {1} signal handler".format(signum, frame)) + raise KeyboardInterrupt( + "SIGNAL {0} from {1} signal handler".format(signum, frame)) -signal.signal(signal.SIGINT, signal_handler) +signal.signal(signal.SIGINT, signal_handler) diff --git a/vos/vos/commands/tests/data/help_vcat.txt b/vos/vos/commands/tests/data/help_vcat.txt new file mode 100644 index 000000000..c634b8664 --- /dev/null +++ b/vos/vos/commands/tests/data/help_vcat.txt @@ -0,0 +1,23 @@ +usage: vcat [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [-q] + source [source ...] + +Write the content of source (eg. vos:Node/filename) to stdout. + +Accepts cutout syntax for FITS files; see vcp --help for syntax details + +positional arguments: + source source to cat to stdout out. + +optional arguments: + -h, --help show this help message and exit + --certfile CERTFILE filename of your CADC X509 authentication certificate + --token TOKEN authentication token string (alternative to certfile) + --version show program's version number and exit + -d, --debug print on command debug messages. + --vos-debug Print on vos debug messages. + -v, --verbose print verbose messages + -w, --warning print warning messages only + -q run quietly, exit on error without message + +Default service settings in ~/.config/vos/vos-config. diff --git a/vos/vos/commands/tests/data/help_vchmod.txt b/vos/vos/commands/tests/data/help_vchmod.txt new file mode 100644 index 000000000..5451ea5b9 --- /dev/null +++ b/vos/vos/commands/tests/data/help_vchmod.txt @@ -0,0 +1,39 @@ +usage: vchmod [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [-R] + mode node [groups [groups ...]] + +Set the read and write permission on VOSpace nodes. +Permission string specifies the mode change to make. + +Changes to 'o' set the public permission, so only o+r and o-r are allowed. + +Changes to 'g' set the group permissions, g-r, g-w, g-rw to remove a group +permission setting (removes all groups) and g+r, g+w, g+rw to add a group +permission setting. If Adding group permission then the applicable group +must be included. + +e.g. vchmod g+r vos:RootNode/MyFile.txt "Group1 Group2" + +Set read access to groups Group1 and Group2 (upto 4 groups can be specified). + +Permission setting is recursive, if a GroupB is part of GroupA then permissions +given to members of GroupA are also provided to members of GroupB. + +positional arguments: + mode permission setting accepted modes: + (og|go|o|g)[+-=](rw|wr|r\w) + node node to set mode on, eg: vos:Root/Container/file.txt + groups name of group(s) to assign read/write permission to + +optional arguments: + -h, --help show this help message and exit + --certfile CERTFILE filename of your CADC X509 authentication certificate + --token TOKEN authentication token string (alternative to certfile) + --version show program's version number and exit + -d, --debug print on command debug messages. + --vos-debug Print on vos debug messages. + -v, --verbose print verbose messages + -w, --warning print warning messages only + -R, --recursive Recursive set read/write properties + +Default service settings in ~/.config/vos/vos-config. 
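The help_*.txt files added here record the expected --help output of each command, presumably so the accompanying CLI tests can diff live output against a stored reference. A minimal sketch of such a comparison is given below; compare_help and the build_vchmod_parser call are illustrative assumptions, not the project's actual test code:

    def compare_help(parser, reference_path):
        # argparse can render its help text as a string; compare it line by
        # line against the stored reference file.
        with open(reference_path) as ref:
            expected = ref.read()
        assert parser.format_help().splitlines() == expected.splitlines()

    # Hypothetical usage:
    # compare_help(build_vchmod_parser(),
    #              'vos/vos/commands/tests/data/help_vchmod.txt')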
diff --git a/vos/vos/commands/tests/data/help_vcp.txt b/vos/vos/commands/tests/data/help_vcp.txt new file mode 100644 index 000000000..cafccecfb --- /dev/null +++ b/vos/vos/commands/tests/data/help_vcp.txt @@ -0,0 +1,47 @@ +usage: vcp [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [--exclude EXCLUDE] [--include INCLUDE] + [-i] [--overwrite] [--quick] [-L] [--ignore] + source [source ...] destination + +Copy files to and from VOSpace. Always recursive. +VOSpace service associated to the requested container is discovered via +registry search. + +vcp can be used to cutout particular parts of a FITS file if the VOSpace +server supports the action. + +extensions and pixel locations accessed with [] brackets: +vcp vos:Node/filename.fits[3][1:100,1:100] ./ +or +RA/DEC regions accessed vcp vos:Node/filename.fits(RA, DEC, RAD) +where RA, DEC and RAD are all given in degrees + +Wildcards in the path or filename work also: +vcp vos:VOSPACE/foo/*.txt . + +If no X509 certificate given on commnad line then location specified by +default service settings will be used. + +positional arguments: + source file/directory/dataNode/containerNode to copy from. + destination file/directory/dataNode/containerNode to copy to + +optional arguments: + -h, --help show this help message and exit + --certfile CERTFILE filename of your CADC X509 authentication certificate + --token TOKEN authentication token string (alternative to certfile) + --version show program's version number and exit + -d, --debug print on command debug messages. + --vos-debug Print on vos debug messages. + -v, --verbose print verbose messages + -w, --warning print warning messages only + --exclude EXCLUDE skip files that match pattern (overrides include) + --include INCLUDE only copy files that match pattern + -i, --interrogate Ask before overwriting files + --overwrite DEPRECATED + --quick assuming CANFAR VOSpace, only comptible with CANFAR + VOSpace. + -L, --follow-links follow symbolic links. Default is to not follow links. + --ignore ignore errors and continue with recursive copy + +Default service settings in ~/.config/vos/vos-config. diff --git a/vos/vos/commands/tests/data/help_vln.txt b/vos/vos/commands/tests/data/help_vln.txt new file mode 100644 index 000000000..291087a02 --- /dev/null +++ b/vos/vos/commands/tests/data/help_vln.txt @@ -0,0 +1,34 @@ +usage: vln [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] + source target + +vln creates a new VOSpace entry (LinkNode, target) that has the same modes as +the source Node. It is useful for maintaining multiple copies of a Node in +many places at once without using up storage for the ''copies''; instead, a +link ''points'' to the original copy. + +Only symbolic links are supported. + +vln vos:VOSpaceSource vos:VOSpaceTarget + +examples: + + vln vos:vospace/junk.txt vos:vospace/linkToJunk.txt + vln vos:vospace/directory vos:vospace/linkToDirectory + vln http://external.data.source vos:vospace/linkToExternalDataSource + +positional arguments: + source location that link will point to. + target location of the LinkNode + +optional arguments: + -h, --help show this help message and exit + --certfile CERTFILE filename of your CADC X509 authentication certificate + --token TOKEN authentication token string (alternative to certfile) + --version show program's version number and exit + -d, --debug print on command debug messages. + --vos-debug Print on vos debug messages. 
+ -v, --verbose print verbose messages + -w, --warning print warning messages only + +Default service settings in ~/.config/vos/vos-config. diff --git a/vos/vos/commands/tests/data/help_vlock.txt b/vos/vos/commands/tests/data/help_vlock.txt new file mode 100644 index 000000000..a28379981 --- /dev/null +++ b/vos/vos/commands/tests/data/help_vlock.txt @@ -0,0 +1,24 @@ +usage: vlock [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [--lock | --unlock] + node + +Places/Removes a write lock on a VOSpace Node or reports lock +status if no action requested. + +positional arguments: + node node to request / view lock on. (eg. + vos:RootNode/File.txt + +optional arguments: + -h, --help show this help message and exit + --certfile CERTFILE filename of your CADC X509 authentication certificate + --token TOKEN authentication token string (alternative to certfile) + --version show program's version number and exit + -d, --debug print on command debug messages. + --vos-debug Print on vos debug messages. + -v, --verbose print verbose messages + -w, --warning print warning messages only + --lock Lock the node + --unlock unLock the node + +Default service settings in ~/.config/vos/vos-config. diff --git a/vos/vos/commands/tests/data/help_vls.txt b/vos/vos/commands/tests/data/help_vls.txt new file mode 100644 index 000000000..efa3d165a --- /dev/null +++ b/vos/vos/commands/tests/data/help_vls.txt @@ -0,0 +1,28 @@ +usage: vls [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [--help] [-l] [-g] [-h] [-S] [-r] [-t] + node [node ...] + +lists the contents of a VOSpace Node. + +Long listing provides the file size, ownership and read/write status of Node. + +positional arguments: + node VOSpace Node to list. + +optional arguments: + --certfile CERTFILE filename of your CADC X509 authentication certificate + --token TOKEN authentication token string (alternative to certfile) + --version show program's version number and exit + -d, --debug print on command debug messages. + --vos-debug Print on vos debug messages. + -v, --verbose print verbose messages + -w, --warning print warning messages only + --help show this help message and exit + -l, --long verbose listing sorted by name + -g, --group display group read/write information + -h, --human make sizes human readable + -S, --Size sort files by size + -r, --reverse reverse the sort order + -t, --time sort by time copied to VOSpace + +Default service settings in ~/.config/vos/vos-config. diff --git a/vos/vos/commands/tests/data/help_vmkdir.txt b/vos/vos/commands/tests/data/help_vmkdir.txt new file mode 100644 index 000000000..b37d2612e --- /dev/null +++ b/vos/vos/commands/tests/data/help_vmkdir.txt @@ -0,0 +1,23 @@ +usage: vmkdir [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [-p] + container_node + +creates a new VOSpace ContainerNode (aka directory). + +eg vmkdir vos:RootNode/NewContiner + +positional arguments: + container_node Name of the container node to craete. + +optional arguments: + -h, --help show this help message and exit + --certfile CERTFILE filename of your CADC X509 authentication certificate + --token TOKEN authentication token string (alternative to certfile) + --version show program's version number and exit + -d, --debug print on command debug messages. + --vos-debug Print on vos debug messages. + -v, --verbose print verbose messages + -w, --warning print warning messages only + -p Create intermediate directories as required. 
+ +Default service settings in ~/.config/vos/vos-config. diff --git a/vos/vos/commands/tests/data/help_vmv.txt b/vos/vos/commands/tests/data/help_vmv.txt new file mode 100644 index 000000000..69ea407d5 --- /dev/null +++ b/vos/vos/commands/tests/data/help_vmv.txt @@ -0,0 +1,23 @@ +usage: vmv [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] + source destination + +move node to newNode, if newNode is a container then move node into newNode. + +e.g. vmv vos:/root/node vos:/root/newNode -- + +positional arguments: + source The name of the node to move. + destination VOSpace destination to move source to. + +optional arguments: + -h, --help show this help message and exit + --certfile CERTFILE filename of your CADC X509 authentication certificate + --token TOKEN authentication token string (alternative to certfile) + --version show program's version number and exit + -d, --debug print on command debug messages. + --vos-debug Print on vos debug messages. + -v, --verbose print verbose messages + -w, --warning print warning messages only + +Default service settings in ~/.config/vos/vos-config. diff --git a/vos/vos/commands/tests/data/help_vrm.txt b/vos/vos/commands/tests/data/help_vrm.txt new file mode 100644 index 000000000..4de83b175 --- /dev/null +++ b/vos/vos/commands/tests/data/help_vrm.txt @@ -0,0 +1,22 @@ +usage: vrm [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] + node [node ...] + +remove a vospace data node; fails if container node or node is locked. + +eg. vrm vos:/root/node -- deletes a data node + +positional arguments: + node dataNode or linkNode to delete from VOSpace + +optional arguments: + -h, --help show this help message and exit + --certfile CERTFILE filename of your CADC X509 authentication certificate + --token TOKEN authentication token string (alternative to certfile) + --version show program's version number and exit + -d, --debug print on command debug messages. + --vos-debug Print on vos debug messages. + -v, --verbose print verbose messages + -w, --warning print warning messages only + +Default service settings in ~/.config/vos/vos-config. diff --git a/vos/vos/commands/tests/data/help_vrmdir.txt b/vos/vos/commands/tests/data/help_vrmdir.txt new file mode 100644 index 000000000..c659adfad --- /dev/null +++ b/vos/vos/commands/tests/data/help_vrmdir.txt @@ -0,0 +1,24 @@ +usage: vrmdir [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] + nodes [nodes ...] + +deletes a VOSpace container node (aka directory) + +e.g. vrmdir vos:Root/MyContainer + +CAUTION: The container need not be empty. + +positional arguments: + nodes Container nodes to delete from VOSpace + +optional arguments: + -h, --help show this help message and exit + --certfile CERTFILE filename of your CADC X509 authentication certificate + --token TOKEN authentication token string (alternative to certfile) + --version show program's version number and exit + -d, --debug print on command debug messages. + --vos-debug Print on vos debug messages. + -v, --verbose print verbose messages + -w, --warning print warning messages only + +Default service settings in ~/.config/vos/vos-config. 
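All of the expected help files above share the same leading option block (--certfile, --token, --version, -d/--debug, --vos-debug, -v/--verbose and -w/--warning) because every command builds its argument parser from the shared CommonParser in vos.commonparser. A minimal sketch of that idea using plain argparse (the real CommonParser is not shown in this patch, so treat the function name and defaults here as assumptions):

    import argparse

    def make_common_parser(description=None):
        """Sketch: a parser that yields the common option block above."""
        parser = argparse.ArgumentParser(description=description)
        parser.add_argument("--certfile",
                            help="filename of your CADC X509 authentication "
                                 "certificate")
        parser.add_argument("--token",
                            help="authentication token string (alternative "
                                 "to certfile)")
        parser.add_argument("--version", action="version", version="0.0.0")
        parser.add_argument("-d", "--debug", action="store_true",
                            help="print on command debug messages.")
        parser.add_argument("--vos-debug", action="store_true",
                            help="Print on vos debug messages.")
        parser.add_argument("-v", "--verbose", action="store_true",
                            help="print verbose messages")
        parser.add_argument("-w", "--warning", action="store_true",
                            help="print warning messages only")
        return parser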
diff --git a/vos/vos/commands/tests/data/help_vsync.txt b/vos/vos/commands/tests/data/help_vsync.txt new file mode 100644 index 000000000..495634229 --- /dev/null +++ b/vos/vos/commands/tests/data/help_vsync.txt @@ -0,0 +1,54 @@ +usage: vsync [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [--ignore-checksum] [--cache_nodes] + [--cache_filename CACHE_FILENAME] [--recursive] + [--nstreams NSTREAMS] [--exclude EXCLUDE] [--include INCLUDE] + [--overwrite] [--load_test] + files [files ...] destination + +A script for sending files to VOSpace via multiple connection +streams. + +The list of files is given on the command line have their MD5s generated and +then compared to the contents of VOSpace. Files that do not exist in the +destination VOSpace area or files that have different MD5 sums are then queued +to be copied to VOSpace. vsync launches mutlple threads that listen to the +queue and transfer files independently to VOSpace and report success if the +file successfully copies to VOSpace. + +At the completion of vsync an error report indicates if there were failures. +Run vsync repeatedly until no errors are reported. + +eg: + vsync --cache_nodes --recursive --verbose ./local_dir vos:VOSPACE/remote_dir + +Using cache_nodes option will greatly improve the speed of repeated calls but +does result in a cache database file: ${HOME}/.config/vos/node_cache.db + +positional arguments: + files Files to copy to VOSpace + destination VOSpace location to sync files to + +optional arguments: + -h, --help show this help message and exit + --certfile CERTFILE filename of your CADC X509 authentication certificate + --token TOKEN authentication token string (alternative to certfile) + --version show program's version number and exit + -d, --debug print on command debug messages. + --vos-debug Print on vos debug messages. + -v, --verbose print verbose messages + -w, --warning print warning messages only + --ignore-checksum dont check MD5 sum, use size and time instead + --cache_nodes cache node MD5 sum in an sqllite db + --cache_filename CACHE_FILENAME + Name of file to use for node cache + --recursive, -r Do a recursive sync + --nstreams NSTREAMS, -n NSTREAMS + Number of streams to run (MAX: 30) + --exclude EXCLUDE ignore directories or files containing this pattern + --include INCLUDE only include files matching this pattern + --overwrite overwrite copy on server regardless of + modification/size/md5 checks + --load_test Used to stress test the VOServer, also set --nstreams + to a large value + +Default service settings in ~/.config/vos/vos-config. diff --git a/vos/vos/commands/tests/data/help_vtag.txt b/vos/vos/commands/tests/data/help_vtag.txt new file mode 100644 index 000000000..d0162b34c --- /dev/null +++ b/vos/vos/commands/tests/data/help_vtag.txt @@ -0,0 +1,37 @@ +usage: vtag [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [--remove] + node property [property ...] + +provides set/read/(list) functions for property(ies) of a +node. + +Properties are attributes on the node. There can be users attributes or +system attributes. + +Only user attributes can be set. 
+ +examples: + +set at property: vtag vos:RootNode/MyImage.fits quality=good +read a property: vtag vos:RootNode/MyImage.fits quality +delete a property: vtag vos:RootNode/MyImage.fits quality= + or + vtag vos:RootNode/MyImage.fits quality --remove +list all property values: vtag vos:RootNode/MyImage.fits + +positional arguments: + node Node to set property (tag/attribute) on + property Property whose value will be read, set or deleted + +optional arguments: + -h, --help show this help message and exit + --certfile CERTFILE filename of your CADC X509 authentication certificate + --token TOKEN authentication token string (alternative to certfile) + --version show program's version number and exit + -d, --debug print on command debug messages. + --vos-debug Print on vos debug messages. + -v, --verbose print verbose messages + -w, --warning print warning messages only + --remove remove the listed property + +Default service settings in ~/.config/vos/vos-config. diff --git a/vos/vos/commands/tests/data/vcat.txt b/vos/vos/commands/tests/data/vcat.txt new file mode 100644 index 000000000..616e5f6af --- /dev/null +++ b/vos/vos/commands/tests/data/vcat.txt @@ -0,0 +1,4 @@ +usage: vcat [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [-q] + source [source ...] +vcat: error: diff --git a/vos/vos/commands/tests/data/vchmod.txt b/vos/vos/commands/tests/data/vchmod.txt new file mode 100644 index 000000000..f0fa3234c --- /dev/null +++ b/vos/vos/commands/tests/data/vchmod.txt @@ -0,0 +1,4 @@ +usage: vchmod [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [-R] + mode node [groups [groups ...]] +vchmod: error: diff --git a/vos/vos/commands/tests/data/vcp.txt b/vos/vos/commands/tests/data/vcp.txt new file mode 100644 index 000000000..89db09404 --- /dev/null +++ b/vos/vos/commands/tests/data/vcp.txt @@ -0,0 +1,5 @@ +usage: vcp [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [--exclude EXCLUDE] [--include INCLUDE] + [-i] [--overwrite] [--quick] [-L] [--ignore] + source [source ...] destination +vcp: error: diff --git a/vos/vos/commands/tests/data/vln.txt b/vos/vos/commands/tests/data/vln.txt new file mode 100644 index 000000000..f179c3af0 --- /dev/null +++ b/vos/vos/commands/tests/data/vln.txt @@ -0,0 +1,4 @@ +usage: vln [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] + source target +vln: error: diff --git a/vos/vos/commands/tests/data/vlock.txt b/vos/vos/commands/tests/data/vlock.txt new file mode 100644 index 000000000..6d38d9dc6 --- /dev/null +++ b/vos/vos/commands/tests/data/vlock.txt @@ -0,0 +1,4 @@ +usage: vlock [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [--lock | --unlock] + node +vlock: error: diff --git a/vos/vos/commands/tests/data/vls.txt b/vos/vos/commands/tests/data/vls.txt new file mode 100644 index 000000000..c364309d9 --- /dev/null +++ b/vos/vos/commands/tests/data/vls.txt @@ -0,0 +1,4 @@ +usage: vls [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [--help] [-l] [-g] [-h] [-S] [-r] [-t] + node [node ...] 
+vls: error: diff --git a/vos/vos/commands/tests/data/vmkdir.txt b/vos/vos/commands/tests/data/vmkdir.txt new file mode 100644 index 000000000..d71f02c69 --- /dev/null +++ b/vos/vos/commands/tests/data/vmkdir.txt @@ -0,0 +1,4 @@ +usage: vmkdir [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [-p] + container_node +vmkdir: error: diff --git a/vos/vos/commands/tests/data/vmv.txt b/vos/vos/commands/tests/data/vmv.txt new file mode 100644 index 000000000..a77845b62 --- /dev/null +++ b/vos/vos/commands/tests/data/vmv.txt @@ -0,0 +1,4 @@ +usage: vmv [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] + source destination +vmv: error: diff --git a/vos/vos/commands/tests/data/vrm.txt b/vos/vos/commands/tests/data/vrm.txt new file mode 100644 index 000000000..e241eaa5c --- /dev/null +++ b/vos/vos/commands/tests/data/vrm.txt @@ -0,0 +1,4 @@ +usage: vrm [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] + node [node ...] +vrm: error: diff --git a/vos/vos/commands/tests/data/vrmdir.txt b/vos/vos/commands/tests/data/vrmdir.txt new file mode 100644 index 000000000..72447ed2d --- /dev/null +++ b/vos/vos/commands/tests/data/vrmdir.txt @@ -0,0 +1,4 @@ +usage: vrmdir [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] + nodes [nodes ...] +vrmdir: error: diff --git a/vos/vos/commands/tests/data/vsync.txt b/vos/vos/commands/tests/data/vsync.txt new file mode 100644 index 000000000..5be9d6857 --- /dev/null +++ b/vos/vos/commands/tests/data/vsync.txt @@ -0,0 +1,7 @@ +usage: vsync [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [--ignore-checksum] [--cache_nodes] + [--cache_filename CACHE_FILENAME] [--recursive] + [--nstreams NSTREAMS] [--exclude EXCLUDE] [--include INCLUDE] + [--overwrite] [--load_test] + files [files ...] destination +vsync: error: diff --git a/vos/vos/commands/tests/data/vtag.txt b/vos/vos/commands/tests/data/vtag.txt new file mode 100644 index 000000000..fb782eeb7 --- /dev/null +++ b/vos/vos/commands/tests/data/vtag.txt @@ -0,0 +1,4 @@ +usage: vtag [-h] [--certfile CERTFILE] [--token TOKEN] [--version] [-d] + [--vos-debug] [-v] [-w] [--remove] + node property [property ...] +vtag: error: diff --git a/vos/vos/commands/tests/test_cli.py b/vos/vos/commands/tests/test_cli.py new file mode 100644 index 000000000..9e2e6fdd7 --- /dev/null +++ b/vos/vos/commands/tests/test_cli.py @@ -0,0 +1,64 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import sys +import os +import unittest +from six import StringIO +from mock import Mock, patch +from vos import commands as cmds + +THIS_DIR = os.path.dirname(os.path.realpath(__file__)) +TESTDATA_DIR = os.path.join(THIS_DIR, 'data') + + +class MyExitError(Exception): + + def __init__(self): + self.message = "MyExitError" + + +# to capture the output of executing a command, sys.exit is patched to +# throw an MyExitError exception. The number of such exceptions is based +# on the number of commands and the number of times they are invoked +outputs = [MyExitError] * (len(cmds.__all__) + 3) + + +class TestCli(unittest.TestCase): + """ + Basic tests of the command line interface for various vos commands. + For each command it tests the invocation of the command without arguments + and with the --help flag against a known output. 
+ """ + + @patch('sys.exit', Mock(side_effect=outputs)) + def test_cli_noargs(self): + """Test the invocation of a command without arguments""" + + # get a list of all available commands + for cmd in cmds.__all__: + with patch('sys.stdout', new_callable=StringIO) as stdout_mock: + with open(os.path.join(TESTDATA_DIR, '{}.txt'.format(cmd)), + 'r') as f: + usage = f.read() + sys.argv = '{}'.format(cmd).split() + with self.assertRaises(MyExitError): + cmd_attr = getattr(cmds, cmd) + cmd_attr() + self.assertTrue(usage in stdout_mock.getvalue()) + + @patch('sys.exit', Mock(side_effect=outputs)) + def test_cli_help_arg(self): + """Test the invocation of a command with --help argument""" + + # get a list of all available commands + for cmd in cmds.__all__: + with patch('sys.stdout', new_callable=StringIO) as stdout_mock: + with open(os.path.join( + TESTDATA_DIR, 'help_{}.txt'.format(cmd)), 'r') as f: + usage = f.read() + sys.argv = '{} --help'.format(cmd).split() + with self.assertRaises(MyExitError): + cmd_attr = getattr(cmds, cmd) + cmd_attr() + self.assertEqual(usage, stdout_mock.getvalue()) diff --git a/vos/vos/commands/tests/test_vsync.py b/vos/vos/commands/tests/test_vsync.py deleted file mode 100644 index 4750caf9e..000000000 --- a/vos/vos/commands/tests/test_vsync.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import sys -import unittest -from six import StringIO -from mock import patch, Mock, call -from vos.commands import vsync - -# The following is a temporary workaround for Python issue 25532 (https://bugs.python.org/issue25532) -call.__wrapped__ = None - - -class MyExitError(Exception): - - def __init__(self): - self.message = "MyExitError" - -class TestVsync(unittest.TestCase): - """Test the vsync script - """ - - @patch('sys.exit', Mock(side_effect=[MyExitError, MyExitError, MyExitError, - MyExitError, MyExitError, MyExitError])) - def test_all(self): - """Test basic operation of the vsync""" - - with patch('sys.stdout', new_callable=StringIO) as stdout_mock: - usage = "vsync usage" - sys.argv = 'vsync --help'.split() - with self.assertRaises(MyExitError): - vsync() - #self.assertEqual(usage, stdout_mock.getvalue()) TODO - - #TODO ad: add complete set off tests when vsync is reworked diff --git a/vos/vos/commands/vcat.py b/vos/vos/commands/vcat.py index d0ae4bad5..2040cd888 100755 --- a/vos/vos/commands/vcat.py +++ b/vos/vos/commands/vcat.py @@ -4,12 +4,13 @@ import sys import logging from ..vos import Client -from ..commonparser import CommonParser, set_logging_level_from_args, exit_on_exception +from ..commonparser import CommonParser, set_logging_level_from_args, \ + exit_on_exception def _cat(uri, cert_filename=None): """Cat out the given uri stored in VOSpace. - + :param uri: the VOSpace URI that will be piped to stdout. :type uri: basestring :param cert_filename: filename of the PEM certificate used to gain access. @@ -28,16 +29,19 @@ def _cat(uri, cert_filename=None): if fh: fh.close() + DESCRIPTION = """Write the content of source (eg. vos:Node/filename) to stdout.
Accepts cutout syntax for FITS files; see vcp --help for syntax details""" def vcat(): - parser = CommonParser(description=DESCRIPTION) - parser.add_argument("source", help="source to cat to stdout out.", nargs="+") - parser.add_argument("-q", help="run quietly, exit on error without message", action="store_true") + parser.add_argument("source", help="source to cat to stdout.", + nargs="+") + parser.add_argument("-q", + help="run quietly, exit on error without message", + action="store_true") args = parser.parse_args() set_logging_level_from_args(args) @@ -59,4 +63,5 @@ def vcat(): sys.exit(exit_code) -vcat.__doc__ = DESCRIPTION \ No newline at end of file + +vcat.__doc__ = DESCRIPTION diff --git a/vos/vos/commands/vchmod.py b/vos/vos/commands/vchmod.py index ee170e189..c33b808d3 100755 --- a/vos/vos/commands/vchmod.py +++ b/vos/vos/commands/vchmod.py @@ -6,7 +6,8 @@ unicode_literals) from ..vos import Client from ..vos import CADC_GMS_PREFIX -from ..commonparser import CommonParser, set_logging_level_from_args, exit_on_exception +from ..commonparser import CommonParser, set_logging_level_from_args +from ..commonparser import exit_on_exception import logging import sys import re @@ -20,11 +21,13 @@ def __mode__(mode): :return: mode dictionary :rtype: re.groupdict """ - _mode = re.match(r"(?P<who>og|go|o|g)(?P<op>[+\-=])(?P<what>rw|wr|r|w)", mode) + _mode = re.match(r"(?P<who>og|go|o|g)(?P<op>[+\-=])(?P<what>rw|wr|r|w)", + mode) if _mode is None: raise ArgumentError(_mode, 'Invalid mode: {}'.format(mode)) return _mode.groupdict() + DESCRIPTION = """Set the read and write permission on VOSpace nodes. Permission string specifies the mode change to make. @@ -39,21 +42,27 @@ def __mode__(mode): Set read access to groups Group1 and Group2 (upto 4 groups can be specified). -Permission setting is recursive, if a GroupB is part of GroupA then permissions given -to members of GroupA are also provided to members of GroupB. +Permission setting is recursive, if a GroupB is part of GroupA then permissions +given to members of GroupA are also provided to members of GroupB. """ def vchmod(): - # TODO: seperate the sys.argv parsing from the actual command.
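The __mode__ helper above relies on named regular-expression groups (who, op, what) to turn a permission string such as "og+rw" into the dictionary that the rest of vchmod inspects. A standalone illustration of that parsing step, reusing the same pattern (not part of the patch):

    import re

    MODE_PATTERN = re.compile(
        r"(?P<who>og|go|o|g)(?P<op>[+\-=])(?P<what>rw|wr|r|w)")

    match = MODE_PATTERN.match("og+rw")
    print(match.groupdict())
    # -> {'who': 'og', 'op': '+', 'what': 'rw'}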
parser = CommonParser(description=DESCRIPTION) - parser.add_argument('mode', type=__mode__, help='permission setting accepted modes: (og|go|o|g)[+-=](rw|wr|r\w)') - parser.add_argument("node", help="node to set mode on, eg: vos:Root/Container/file.txt") - parser.add_argument('groups', nargs="*", help="name of group(s) to assign read/write permission to") - parser.add_option("-R", "--recursive", action='store_const', const=True, - help="Recursive set read/write properties") + parser.add_argument( + 'mode', type=__mode__, + help='permission setting accepted modes: (og|go|o|g)[+-=](rw|wr|r\w)') + parser.add_argument( + "node", + help="node to set mode on, eg: vos:Root/Container/file.txt") + parser.add_argument( + 'groups', nargs="*", + help="name of group(s) to assign read/write permission to") + parser.add_option( + "-R", "--recursive", action='store_const', const=True, + help="Recursive set read/write properties") opt = parser.parse_args() @@ -73,7 +82,9 @@ def vchmod(): if 'g' in mode['who']: if '-' == mode['op']: if not len(group_names) == 0: - raise ArgumentError(opt.groups, "Names of groups not valid with remove permission") + raise ArgumentError( + opt.groups, + "Names of groups not valid with remove permission") if 'r' in mode['what']: props['readgroup'] = None if "w" in mode['what']: @@ -82,17 +93,22 @@ def vchmod(): if not len(group_names) == len(mode['what']): name = len(mode['what']) > 1 and "names" or "name" raise ArgumentError(None, - "{} group {} required for {}".format(len(mode['what']), name, - mode['what'])) + "{} group {} required for {}".format( + len(mode['what']), name, + mode['what'])) if mode['what'].find('r') > -1: # remove duplicate whitespaces - read_groups = " ".join(group_names[mode['what'].find('r')].split()) - props['readgroup'] = (CADC_GMS_PREFIX + - read_groups.replace(" ", " " + CADC_GMS_PREFIX)) + read_groups = " ".join( + group_names[mode['what'].find('r')].split()) + props['readgroup'] = \ + (CADC_GMS_PREFIX + + read_groups.replace(" ", " " + CADC_GMS_PREFIX)) if mode['what'].find('w') > -1: - wgroups = " ".join(group_names[mode['what'].find('w')].split()) - props['writegroup'] = (CADC_GMS_PREFIX + - wgroups.replace(" ", " " + CADC_GMS_PREFIX)) + wgroups = " ".join( + group_names[mode['what'].find('w')].split()) + props['writegroup'] = \ + (CADC_GMS_PREFIX + + wgroups.replace(" ", " " + CADC_GMS_PREFIX)) except ArgumentError as er: parser.print_usage() logging.error(str(er)) @@ -119,4 +135,5 @@ def vchmod(): except Exception as ex: exit_on_exception(ex) + vchmod.__doc__ = DESCRIPTION diff --git a/vos/vos/commands/vcp.py b/vos/vos/commands/vcp.py index 5b57b893a..91633c67a 100755 --- a/vos/vos/commands/vcp.py +++ b/vos/vos/commands/vcp.py @@ -5,6 +5,7 @@ from .. import md5_cache from .. import vos from ..commonparser import CommonParser, set_logging_level_from_args + try: from xml.etree.ElementTree import ParseError except ImportError: @@ -23,9 +24,11 @@ __all__ = ['vcp'] DESCRIPTION = """Copy files to and from VOSpace. Always recursive. -VOSpace service associated to the requested container is discovered via registry search. +VOSpace service associated to the requested container is discovered via +registry search. -vcp can be used to cutout particular parts of a FITS file if the VOSpace server supports the action. +vcp can be used to cutout particular parts of a FITS file if the VOSpace +server supports the action. 
extensions and pixel locations accessed with [] brackets: vcp vos:Node/filename.fits[3][1:100,1:100] ./ @@ -36,31 +39,45 @@ Wildcards in the path or filename work also: vcp vos:VOSPACE/foo/*.txt . -If no X509 certificate given on commnad line then location specified by default service settings will be used. +If no X509 certificate given on commnad line then location specified by +default service settings will be used. """ def vcp(): - # TODO split this into main and methods parser = CommonParser(description=DESCRIPTION) - parser.add_argument("source", nargs="+", help="file/directory/dataNode/containerNode to copy from.") - parser.add_argument("destination", help="file/directory/dataNode/containerNode to copy to") - parser.add_argument("--exclude", default=None, help="skip files that match pattern (overrides include)") - parser.add_argument("--include", default=None, help="only copy files that match pattern") - parser.add_argument("-i", "--interrogate", action="store_true", help="Ask before overwriting files") - parser.add_argument("--overwrite", action="store_true", - help="DEPRECATED") - parser.add_argument("--quick", action="store_true", - help="assuming CANFAR VOSpace, only comptible with CANFAR VOSpace.", - default=False) - parser.add_argument("-L", "--follow-links", - help="follow symbolic links. Default is to not follow links.", - action="store_true", - default=False) - parser.add_argument("--ignore", action="store_true", default=False, - help="ignore errors and continue with recursive copy") + parser.add_argument( + "source", nargs="+", + help="file/directory/dataNode/containerNode to copy from.") + parser.add_argument( + "destination", + help="file/directory/dataNode/containerNode to copy to") + parser.add_argument( + "--exclude", default=None, + help="skip files that match pattern (overrides include)") + parser.add_argument( + "--include", default=None, + help="only copy files that match pattern") + parser.add_argument( + "-i", "--interrogate", action="store_true", + help="Ask before overwriting files") + parser.add_argument( + "--overwrite", action="store_true", + help="DEPRECATED") + parser.add_argument( + "--quick", action="store_true", + help="assuming CANFAR VOSpace, only comptible with CANFAR VOSpace.", + default=False) + parser.add_argument( + "-L", "--follow-links", + help="follow symbolic links. Default is to not follow links.", + action="store_true", + default=False) + parser.add_argument( + "--ignore", action="store_true", default=False, + help="ignore errors and continue with recursive copy") args = parser.parse_args() @@ -81,8 +98,9 @@ def vcp(): exit_code = 0 - cutout_pattern = re.compile(r'(.*?)(?P<cutout>(\[[\-+]?[\d*]+(:[\-+]?[\d*]+)?' - r'(,[\-+]?[\d*]+(:[\-+]?[\d*]+)?)?\])+)$') + cutout_pattern = re.compile( + r'(.*?)(?P<cutout>(\[[\-+]?[\d*]+(:[\-+]?[\d*]+)?' + r'(,[\-+]?[\d*]+(:[\-+]?[\d*]+)?)?\])+)$') ra_dec_cutout_pattern = re.compile("([^()]*?)" "(?P<cutout>\(" @@ -94,7 +112,8 @@ # vcp destination specified with a trailing '/' implies ContainerNode # # If destination has trailing '/' and exists but is a DataNode then - # error message is returned: "Invalid Argument (target node is not a DataNode)" + # error message is returned: "Invalid Argument (target node is not a + # DataNode)" # # vcp currently only works on the CADC VOSpace server.
# Version: %s """ % (version.version) @@ -105,7 +124,8 @@ def get_node(filename, limit=None): """Get node, from cache if possible""" return client.get_node(filename, limit=limit) - # here are a series of methods that choose between calling the system version or the vos version of various + # here are a series of methods that choose between calling the system + # version or the vos version of various # function, based on pattern matching. # TODO: Put these function in a separate module. @@ -138,7 +158,8 @@ def access(filename, mode): try: node = get_node(filename, limit=0) return node is not None - except (exceptions.NotFoundException, exceptions.ForbiddenException, + except ( + exceptions.NotFoundException, exceptions.ForbiddenException, exceptions.UnauthorizedException): return False else: @@ -176,15 +197,21 @@ def lglob(pathname): def copy(source_name, destination_name, exclude=None, include=None, interrogate=False, overwrite=False, ignore=False): """ - Send source_name to destination, possibly looping over contents if source_name points to a directory. + Send source_name to destination, possibly looping over contents if + source_name points to a directory. - source_name can specify cutout parameters if source is in VOSpace. Cutout parameters are passed to vos.Client - vos.Client supports (RA,DEC,RAD) [in degrees] and [x1:x2,y1:y2] (in pixels) + source_name can specify cutout parameters if source is in VOSpace. + Cutout parameters are passed to vos.Client + vos.Client supports (RA,DEC,RAD) [in degrees] and [x1:x2,y1:y2] + (in pixels) - :param source_name: filename of the source to copy, can be a container or data node or directory or filename + :param source_name: filename of the source to copy, can be a container + or data node or directory or filename :param destination_name: where to copy the source to. - :param exclude: pattern match against source names that will be excluded from recursive copy. - :param include: only pattern match against source names that will be copied. + :param exclude: pattern match against source names that will be + excluded from recursive copy. + :param include: only pattern match against source names that will be + copied. :param interrogate: prompt before overwrite. :param overwrite: Should we overwrite existing destination? :param ignore: ignore errors during recursive copy, just continue. @@ -192,35 +219,46 @@ def copy(source_name, destination_name, exclude=None, include=None, :raise e: """ global exit_code - # determine if this is a directory we are copying so need to be recursive + # determine if this is a directory we are copying so need to be + # recursive try: if not args.follow_links and islink(source_name): - logging.info("{}: Skipping (symbolic link)".format(source_name)) + logging.info( + "{}: Skipping (symbolic link)".format(source_name)) return if isdir(source_name): # make sure the destination exists... if not isdir(destination_name): mkdir(destination_name) - # for all files in the current source directory copy them to the destination directory + # for all files in the current source directory copy them to + # the destination directory for filename in listdir(source_name): logging.debug("%s -> %s" % (filename, source_name)) - copy(os.path.join(source_name, filename), os.path.join(destination_name, filename), + copy(os.path.join(source_name, filename), + os.path.join(destination_name, filename), exclude, include, interrogate, overwrite, ignore) else: if interrogate: if access(destination_name, os.F_OK): - sys.stderr.write("File %s exists. 
Overwrite? (y/n): " % destination_name) + sys.stderr.write( + "File %s exists. Overwrite? (y/n): " % + destination_name) ans = sys.stdin.readline().strip() if ans != 'y': raise Exception("File exists") if not access(os.path.dirname(destination_name), os.F_OK): raise OSError(errno.EEXIST, - "vcp: ContainerNode %s does not exist" % os.path.dirname(destination_name)) + "vcp: ContainerNode %s does not exist" % + os.path.dirname( + destination_name)) - if not isdir(os.path.dirname(destination_name)) and not islink(os.path.dirname(destination_name)): + if not isdir(os.path.dirname(destination_name)) and not islink( + os.path.dirname(destination_name)): raise OSError(errno.ENOTDIR, - "vcp: %s is not a ContainerNode or LinkNode" % os.path.dirname(destination_name)) + "vcp: %s is not a ContainerNode or LinkNode" + % os.path.dirname( + destination_name)) skip = False if exclude is not None: @@ -242,25 +280,32 @@ def copy(source_name, destination_name, exclude=None, include=None, while not skip: try: logging.debug("Starting call to copy") - client.copy(source_name, destination_name, send_md5=True) + client.copy(source_name, destination_name, + send_md5=True) logging.debug("Call to copy returned") break except Exception as client_exception: logging.debug("{}".format(client_exception)) if getattr(client_exception, 'errno', -1) == 104: - # 104 is connection reset by peer. Try again on this error + # 104 is connection reset by peer. + # Try again on this error logging.warning(str(client_exception)) exit_code = getattr(client_exception, 'errno', -1) - elif getattr(client_exception, 'errno', -1) == errno.EIO: + elif getattr(client_exception, 'errno', + -1) == errno.EIO: # retry on IO errors - logging.warning("{0}: Retrying".format(client_exception)) + logging.warning( + "{0}: Retrying".format(client_exception)) pass elif ignore: if niters > 100: - logging.error("%s (skipping after %d attempts)" % (str(client_exception), niters)) + logging.error( + "%s (skipping after %d attempts)" % ( + str(client_exception), niters)) skip = True else: - logging.error("%s (retrying)" % str(client_exception)) + logging.error( + "%s (retrying)" % str(client_exception)) time.sleep(5) niters += 1 else: @@ -275,14 +320,16 @@ def copy(source_name, destination_name, exclude=None, include=None, raise os_exception # main loop - # Set source to the initial value of args so that if we have any issues in the try before source gets defined - # at least we know where we were starting. + # Set source to the initial value of args so that if we have any issues + # in the try before source gets defined at least we know where we were + # starting. source = args.source[0] try: for source_pattern in args.source: - # define this empty cutout string. Then we strip possible cutout strings off the end of the - # pattern before matching. This allows cutouts on the vos service. - # The shell does pattern matching for local files, so don't run glob on local files. + # define this empty cutout string. Then we strip possible cutout + # strings off the end of the pattern before matching. This allows + # cutouts on the vos service. The shell does pattern matching for + # local files, so don't run glob on local files. if source_pattern[0:4] != "vos:": sources = [source_pattern] else: @@ -299,7 +346,7 @@ def copy(source_name, destination_name, exclude=None, include=None, sources = lglob(source_pattern) if cutout is not None: # stick back on the cutout pattern if there was one. 
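As the comment above notes, vcp strips any trailing cutout specification off a VOSpace source pattern before globbing and then re-attaches it to every matched name. A simplified sketch of that split, reusing the cutout_pattern regular expression defined earlier in vcp() (illustrative only, with a made-up file name):

    import re

    cutout_pattern = re.compile(
        r'(.*?)(?P<cutout>(\[[\-+]?[\d*]+(:[\-+]?[\d*]+)?'
        r'(,[\-+]?[\d*]+(:[\-+]?[\d*]+)?)?\])+)$')

    source_pattern = "vos:VOSPACE/foo/image.fits[3][1:100,1:100]"
    match = cutout_pattern.match(source_pattern)
    if match:
        base, cutout = match.group(1), match.group('cutout')
    else:
        base, cutout = source_pattern, None
    # base   -> "vos:VOSPACE/foo/image.fits"  (this part gets globbed)
    # cutout -> "[3][1:100,1:100]"            (re-appended to each glob result)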
- sources = [s+cutout for s in sources] + sources = [s + cutout for s in sources] for source in sources: if source[0:4] != "vos:": source = os.path.abspath(source) @@ -313,7 +360,8 @@ def copy(source_name, destination_name, exclude=None, include=None, # copying inside VOSpace not yet implemented if source[0:4] == 'vos:' and dest[0:4] == 'vos:': - raise Exception("Can not (yet) copy from VOSpace to VOSpace.") + raise Exception( + "Can not (yet) copy from VOSpace to VOSpace.") this_destination = dest if isdir(source): @@ -328,29 +376,43 @@ def copy(source_name, destination_name, exclude=None, include=None, # given as a source and the copy is recursive. if access(dest, os.F_OK): if not isdir(dest): - raise Exception("Can't write a directory (%s) to a file (%s)" % (source, dest)) - # directory exists so we append the end of source to that (UNIX behaviour) - this_destination = os.path.normpath(os.path.join(dest, os.path.basename(source))) + raise Exception( + "Can't write a directory (%s) to a file (%s)" % + (source, dest)) + # directory exists so we append the end of source to + # that (UNIX behaviour) + this_destination = os.path.normpath( + os.path.join(dest, os.path.basename(source))) elif len(args.source) > 1: - raise Exception("vcp can not copy multiple things into a non-existent location (%s)" % dest) + raise Exception( + ("vcp can not copy multiple things into a" + "non-existent location (%s)") % dest) elif dest[-1] == '/' or isdir(dest): # we're copying into a directory - this_destination = os.path.join(dest, os.path.basename(source)) - copy(source, this_destination, exclude=args.exclude, include=args.include, - interrogate=args.interrogate, overwrite=args.overwrite, ignore=args.ignore) + this_destination = os.path.join(dest, + os.path.basename(source)) + copy(source, this_destination, exclude=args.exclude, + include=args.include, + interrogate=args.interrogate, overwrite=args.overwrite, + ignore=args.ignore) except KeyboardInterrupt as ke: logging.info("Received keyboard interrupt. Execution aborted...\n") exit_code = getattr(ke, 'errno', -1) except ParseError: exit_code = errno.EREMOTE - logging.error("Failure at server while copying {0} -> {1}".format(source, dest)) + logging.error( + "Failure at server while copying {0} -> {1}".format(source, dest)) except Exception as e: message = str(e) if re.search('NodeLocked', str(e)) is not None: - logging.error("Use vlock to unlock the node before copying to %s." % this_destination) + logging.error( + "Use vlock to unlock the node before copying to %s." % + this_destination) elif getattr(e, 'errno', -1) == errno.EREMOTE: - logging.error("Failure at remote server while copying {0} -> {1}".format(source, dest)) + logging.error( + "Failure at remote server while copying {0} -> {1}".format( + source, dest)) else: logging.debug("Exception throw: %s %s" % (type(e), str(e))) logging.debug(traceback.format_exc()) @@ -359,4 +421,5 @@ def copy(source_name, destination_name, exclude=None, include=None, sys.exit(exit_code) + vcp.__doc__ = DESCRIPTION diff --git a/vos/vos/commands/vln.py b/vos/vos/commands/vln.py index e8e01b885..18c3328c7 100755 --- a/vos/vos/commands/vln.py +++ b/vos/vos/commands/vln.py @@ -1,14 +1,16 @@ """link one VOSpace Node to another.""" from __future__ import (absolute_import, division, print_function, unicode_literals) -from ..commonparser import CommonParser, set_logging_level_from_args, exit_on_exception +from ..commonparser import CommonParser, set_logging_level_from_args, \ + exit_on_exception from .. 
import vos from argparse import ArgumentError DESCRIPTION = """ -vln creates a new VOSpace entry (LinkNode, target) that has the same modes as the source Node. -It is useful for maintaining multiple copies of a Node in many places at once without using -up storage for the ''copies''; instead, a link ''points'' to the original copy. +vln creates a new VOSpace entry (LinkNode, target) that has the same modes as +the source Node. It is useful for maintaining multiple copies of a Node in +many places at once without using up storage for the ''copies''; instead, a +link ''points'' to the original copy. Only symbolic links are supported. @@ -25,7 +27,6 @@ def vln(): - parser = CommonParser(description=DESCRIPTION) parser.add_argument('source', help="location that link will point to.") parser.add_argument('target', help="location of the LinkNode") @@ -34,10 +35,14 @@ def vln(): opt = parser.parse_args() set_logging_level_from_args(opt) - if not (opt.source.startswith('vos:') or opt.source.startswith('http:')) or not opt.target.startswith('vos:'): - raise ArgumentError(None, "source must be vos node or http url, target must be vos node") + if not (opt.source.startswith('vos:') or opt.source.startswith( + 'http:')) or not opt.target.startswith('vos:'): + raise ArgumentError( + None, + "source must be vos node or http url, target must be vos node") - client = vos.Client(vospace_certfile=opt.certfile, vospace_token=opt.token) + client = vos.Client(vospace_certfile=opt.certfile, + vospace_token=opt.token) client.link(opt.source, opt.target) except ArgumentError as ex: parser.print_usage() @@ -45,4 +50,5 @@ def vln(): except Exception as ex: exit_on_exception(ex) -vln.__doc__ = DESCRIPTION \ No newline at end of file + +vln.__doc__ = DESCRIPTION diff --git a/vos/vos/commands/vlock.py b/vos/vos/commands/vlock.py index 9432fee8e..f5fa9f8d5 100755 --- a/vos/vos/commands/vlock.py +++ b/vos/vos/commands/vlock.py @@ -8,24 +8,29 @@ import logging import sys from .. import vos -from ..commonparser import CommonParser, exit_on_exception, set_logging_level_from_args +from ..commonparser import CommonParser, exit_on_exception, \ + set_logging_level_from_args -DESCRIPTION = """Places/Removes a write lock on a VOSpace Node or reports lock status if no action requested.""" +DESCRIPTION = """Places/Removes a write lock on a VOSpace Node or reports lock +status if no action requested.""" def vlock(): - parser = CommonParser(description=DESCRIPTION) - parser.add_argument('node', help="node to request / view lock on. (eg. vos:RootNode/File.txt") + parser.add_argument( + 'node', + help="node to request / view lock on. (eg. 
vos:RootNode/File.txt") action = parser.add_mutually_exclusive_group() action.add_argument("--lock", action="store_true", help="Lock the node") - action.add_argument("--unlock", action="store_true", help="unLock the node") + action.add_argument("--unlock", action="store_true", + help="unLock the node") try: opt = parser.parse_args() set_logging_level_from_args(opt) - client = vos.Client(vospace_certfile=opt.certfile, vospace_token=opt.token) + client = vos.Client(vospace_certfile=opt.certfile, + vospace_token=opt.token) node = client.get_node(opt.node) if opt.lock or opt.unlock: lock = not opt.unlock and opt.lock @@ -38,4 +43,5 @@ def vlock(): except Exception as ex: exit_on_exception(ex) -vlock.__doc__ = DESCRIPTION \ No newline at end of file + +vlock.__doc__ = DESCRIPTION diff --git a/vos/vos/commands/vls.py b/vos/vos/commands/vls.py index 598e6f6e2..faf404716 100755 --- a/vos/vos/commands/vls.py +++ b/vos/vos/commands/vls.py @@ -1,9 +1,11 @@ -"""Lists information about a VOSpace DataNode or the contents of a ContainerNode.""" +"""Lists information about a VOSpace DataNode or the contents of a +ContainerNode.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import logging import math -from ..commonparser import CommonParser, set_logging_level_from_args, exit_on_exception +from ..commonparser import CommonParser, set_logging_level_from_args, \ + exit_on_exception import sys import time from .. import vos @@ -32,7 +34,7 @@ def size_format(size): length = float(size) scale = int(math.log(length) / math.log(1024)) length = "%.0f%s" % (length / (1024.0 ** scale), size_unit[scale]) - except: + except Exception: length = str(int(size)) else: length = str(int(size)) @@ -50,9 +52,12 @@ def date_format(epoch): __LIST_FORMATS__ = {'permissions': lambda value: "{:<11}".format(value), 'creator': lambda value: " {:<20}".format(value), - 'readGroup': lambda value: " {:<15}".format(value.replace(vos.CADC_GMS_PREFIX, "")), - 'writeGroup': lambda value: " {:<15}".format(value.replace(vos.CADC_GMS_PREFIX, "")), - 'isLocked': lambda value: " {:<8}".format("", "LOCKED")[value == "true"], + 'readGroup': lambda value: " {:<15}".format( + value.replace(vos.CADC_GMS_PREFIX, "")), + 'writeGroup': lambda value: " {:<15}".format( + value.replace(vos.CADC_GMS_PREFIX, "")), + 'isLocked': lambda value: " {:<8}".format("", "LOCKED")[ + value == "true"], 'size': size_format, 'date': date_format} @@ -64,16 +69,22 @@ def date_format(epoch): def vls(): - parser = CommonParser(description=DESCRIPTION, add_help=False) parser.add_argument('node', nargs="+", help="VOSpace Node to list.") - parser.add_option("--help", action="help", default='==SUPPRESS==', help='show this help message and exit') - parser.add_option("-l", "--long", action="store_true", help="verbose listing sorted by name") - parser.add_option("-g", "--group", action="store_true", help="display group read/write information") - parser.add_option("-h", "--human", action="store_true", help="make sizes human readable", default=False) - parser.add_option("-S", "--Size", action="store_true", help="sort files by size", default=False) - parser.add_option("-r", "--reverse", action="store_true", help="reverse the sort order", default=False) - parser.add_option("-t", "--time", action="store_true", help="sort by time copied to VOSpace") + parser.add_option("--help", action="help", default='==SUPPRESS==', + help='show this help message and exit') + parser.add_option("-l", "--long", action="store_true", + help="verbose listing sorted by 
name") + parser.add_option("-g", "--group", action="store_true", + help="display group read/write information") + parser.add_option("-h", "--human", action="store_true", + help="make sizes human readable", default=False) + parser.add_option("-S", "--Size", action="store_true", + help="sort files by size", default=False) + parser.add_option("-r", "--reverse", action="store_true", + help="reverse the sort order", default=False) + parser.add_option("-t", "--time", action="store_true", + help="sort by time copied to VOSpace") try: opt = parser.parse_args() @@ -87,7 +98,8 @@ def vls(): columns = ['permissions'] if opt.long: columns.extend(['creator']) - columns.extend(['readGroup', 'writeGroup', 'isLocked', 'size', 'date']) + columns.extend( + ['readGroup', 'writeGroup', 'isLocked', 'size', 'date']) # determine if their is a sorting order sort_key = (opt.time and "date") or (opt.Size and "size") or False @@ -98,15 +110,18 @@ def vls(): for node in opt.node: if not node.startswith('vos:'): - raise ArgumentError(opt.node, "Invalid node name: {}".format(node)) + raise ArgumentError(opt.node, + "Invalid node name: {}".format(node)) logging.debug("getting listing of: %s" % str(node)) info_list = client.get_info_list(node) if sort_key: # noinspection PyBroadException try: - sorted_list = sorted(info_list, key=lambda name: name[1][sort_key], reverse=not opt.reverse) - except: + sorted_list = sorted(info_list, + key=lambda name: name[1][sort_key], + reverse=not opt.reverse) + except Exception: sorted_list = info_list finally: info_list = sorted_list @@ -119,7 +134,8 @@ def vls(): if col in __LIST_FORMATS__: sys.stdout.write(__LIST_FORMATS__[col](value)) if item[1]["permissions"][0] == 'l': - name_string = "%s -> %s" % (name_string, item[1]['target']) + name_string = "%s -> %s" % ( + name_string, item[1]['target']) sys.stdout.write("%s\n" % name_string) except Exception as ex: exit_on_exception(ex) diff --git a/vos/vos/commands/vmkdir.py b/vos/vos/commands/vmkdir.py index 06979ec1f..b71ec9843 100755 --- a/vos/vos/commands/vmkdir.py +++ b/vos/vos/commands/vmkdir.py @@ -3,29 +3,33 @@ import os import logging -from ..commonparser import CommonParser, set_logging_level_from_args, exit_on_exception +from ..commonparser import CommonParser, set_logging_level_from_args, \ + exit_on_exception from .. import vos DESCRIPTION = """creates a new VOSpace ContainerNode (aka directory). 
-eg vmkdir vos:RootNode/NewContiner""" +eg vmkdir vos:RootNode/NewContainer""" def vmkdir(): - parser = CommonParser(description=DESCRIPTION) - parser.add_argument('container_node', action='store', help='Name of the container node to craete.') - parser.add_argument("-p", action="store_true", help="Create intermediate directories as required.") + parser.add_argument('container_node', action='store', + help='Name of the container node to create.') + parser.add_argument("-p", action="store_true", + help="Create intermediate directories as required.") args = parser.parse_args() set_logging_level_from_args(args) - logging.info("Creating ContainerNode (directory) {}".format(args.container_node)) + logging.info( + "Creating ContainerNode (directory) {}".format(args.container_node)) try: - client = vos.Client(vospace_certfile=args.certfile, vospace_token=args.token) + client = vos.Client(vospace_certfile=args.certfile, + vospace_token=args.token) dir_names = [] this_dir = args.container_node @@ -42,4 +46,5 @@ def vmkdir(): except Exception as ex: exit_on_exception(ex) + vmkdir.__doc__ = DESCRIPTION diff --git a/vos/vos/commands/vmv.py b/vos/vos/commands/vmv.py index e0015b131..5627d2f0d 100755 --- a/vos/vos/commands/vmv.py +++ b/vos/vos/commands/vmv.py @@ -3,7 +3,8 @@ unicode_literals) from .. import vos import logging -from ..commonparser import CommonParser, exit_on_exception, set_logging_level_from_args +from ..commonparser import CommonParser, exit_on_exception, \ + set_logging_level_from_args DESCRIPTION = """ move node to newNode, if newNode is a container then move node into newNode. @@ -14,10 +15,11 @@ def vmv(): - parser = CommonParser(description=DESCRIPTION) - parser.add_argument("source", help="The name of the node to move.", action='store') - parser.add_argument("destination", help="VOSpace destination to move source to.") + parser.add_argument("source", help="The name of the node to move.", + action='store') + parser.add_argument("destination", + help="VOSpace destination to move source to.") args = parser.parse_args() set_logging_level_from_args(args) diff --git a/vos/vos/commands/vrm.py b/vos/vos/commands/vrm.py index 9cf6dacbd..746b63bfa 100755 --- a/vos/vos/commands/vrm.py +++ b/vos/vos/commands/vrm.py @@ -1,7 +1,8 @@ #!python """remove a vospace data node, fails if container node or node is locked.""" import logging -from ..commonparser import set_logging_level_from_args, exit_on_exception, CommonParser +from ..commonparser import set_logging_level_from_args, exit_on_exception, \ + CommonParser from .. import vos DESCRIPTION = """remove a vospace data node; fails if container node or node is locked.
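Stepping back to vmkdir above: with -p it must create any missing parent containers before the leaf container, much like mkdir -p, and the patch only shows the start of that loop (dir_names, this_dir). A rough sketch of the walk, under the assumption that Client.isdir returns False for a missing node and that Client.mkdir creates a single container (make_dirs is a hypothetical helper, not code from this patch):

    from vos import vos

    def make_dirs(client, container_node):
        """Create container_node and any missing parent containers."""
        missing = []
        this_dir = container_node
        # walk upwards until an existing container (or the root name) is found
        while not client.isdir(this_dir):
            missing.append(this_dir)
            if '/' not in this_dir:
                break
            this_dir = this_dir.rsplit('/', 1)[0]
        # create the missing containers from the top down
        for node in reversed(missing):
            client.mkdir(node)

    # client = vos.Client(vospace_certfile="cadcproxy.pem")
    # make_dirs(client, "vos:RootNode/a/b/c")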
@@ -10,15 +11,17 @@ def vrm(): - parser = CommonParser(description=DESCRIPTION) - parser.add_argument('node', help='dataNode or linkNode to delete from VOSpace', nargs='+') + parser.add_argument('node', + help='dataNode or linkNode to delete from VOSpace', + nargs='+') args = parser.parse_args() set_logging_level_from_args(args) try: - client = vos.Client(vospace_certfile=args.certfile, vospace_token=args.token) + client = vos.Client(vospace_certfile=args.certfile, + vospace_token=args.token) for node in args.node: if not node.startswith('vos:'): logging.error("%s is not a valid VOSpace handle".format(node)) @@ -33,4 +36,5 @@ def vrm(): except Exception as ex: exit_on_exception(ex) + vrm.__doc__ = DESCRIPTION diff --git a/vos/vos/commands/vrmdir.py b/vos/vos/commands/vrmdir.py index 4b59e6dd4..6f8f5abd2 100755 --- a/vos/vos/commands/vrmdir.py +++ b/vos/vos/commands/vrmdir.py @@ -1,5 +1,6 @@ """Delete a VOSpace ContainerNode (aka directory)""" -from ..commonparser import CommonParser, set_logging_level_from_args, exit_on_exception +from ..commonparser import CommonParser, set_logging_level_from_args, \ + exit_on_exception import logging from vos import vos @@ -11,24 +12,27 @@ def vrmdir(): - parser = CommonParser(description=DESCRIPTION) - parser.add_argument('nodes', help="Container nodes to delete from VOSpace", nargs='+') + parser.add_argument('nodes', help="Container nodes to delete from VOSpace", + nargs='+') args = parser.parse_args() set_logging_level_from_args(args) try: - client = vos.Client(vospace_certfile=args.certfile, vospace_token=args.token) + client = vos.Client(vospace_certfile=args.certfile, + vospace_token=args.token) for container_node in args.nodes: if not container_node.startswith("vos:"): - raise ValueError("{} is not a valid VOSpace handle".format(container_node)) + raise ValueError( + "{} is not a valid VOSpace handle".format(container_node)) if client.isdir(container_node): logging.info("deleting {}".format(container_node)) client.delete(container_node) else: - raise ValueError("{} is a not a container node".format(container_node)) + raise ValueError( + "{} is a not a container node".format(container_node)) except Exception as ex: exit_on_exception(ex) diff --git a/vos/vos/commands/vsync.py b/vos/vos/commands/vsync.py index d912d74f7..75eb1e3ee 100755 --- a/vos/vos/commands/vsync.py +++ b/vos/vos/commands/vsync.py @@ -12,27 +12,32 @@ from cadcutils import exceptions as transfer_exceptions from .. import md5_cache -DESCRIPTION = """A script for sending files to VOSpace via multiple connection streams. +DESCRIPTION = """A script for sending files to VOSpace via multiple connection +streams. -The list of files is given on the command line have their MD5s generated and then compared to -the contents of VOSpace. Files that do not exist in the destination VOSpace area or files that -have different MD5 sums are then queued to be copied to VOSpace. vsync launches mutlple threads -that listen to the queue and transfer files independently to VOSpace and report success if the -file successfully copies to VOSpace. +The list of files is given on the command line have their MD5s generated and +then compared to the contents of VOSpace. Files that do not exist in the +destination VOSpace area or files that have different MD5 sums are then queued +to be copied to VOSpace. vsync launches mutlple threads that listen to the +queue and transfer files independently to VOSpace and report success if the +file successfully copies to VOSpace. 
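The paragraph above describes the per-file decision vsync makes: compare the MD5 of the local file against the MD5 property of the corresponding VOSpace node and only queue the file when they differ, or when the node does not exist yet. A condensed sketch of that check, built from the md5_cache and get_node calls used later in this file; the real worker thread below also honours --ignore-checksum and the optional node cache:

    from vos import md5_cache, vos

    def needs_upload(client, local_path, remote_uri):
        """Return True if local_path should be queued for copying."""
        local_md5 = md5_cache.MD5Cache.compute_md5(local_path,
                                                   block_size=2 ** 19)
        try:
            node = client.get_node(remote_uri, limit=None)
        except Exception:
            return True  # nothing at the destination yet, so copy it
        remote_md5 = node.props.get('MD5',
                                    'd41d8cd98f00b204e9800998ecf8427e')
        return local_md5 != remote_md5

    # client = vos.Client(vospace_certfile="cadcproxy.pem")
    # needs_upload(client, "./local_dir/image.fits",
    #              "vos:VOSPACE/remote_dir/image.fits")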
-At the completion of vsync an error report indicates if there were failures. Run vsync repeatedly -until no errors are reported. +At the completion of vsync an error report indicates if there were failures. +Run vsync repeatedly until no errors are reported. -eg. vsync --cache_nodes --recursive --verbose ./local_dir vos:VOSPACE/remote_dir +eg: + vsync --cache_nodes --recursive --verbose ./local_dir vos:VOSPACE/remote_dir -Using cache_nodes option will greatly improve the speed of repeated calls but does result in a -cache database file: ${HOME}/.config/vos/node_cache.db +Using cache_nodes option will greatly improve the speed of repeated calls but +does result in a cache database file: ${HOME}/.config/vos/node_cache.db """ HOME = os.getenv("HOME", "./") + def vsync(): global_md5_cache = None + def signal_handler(h_stream, h_frame): logging.debug("{} {}".format(h_stream, h_frame)) logging.critical("Interrupt\n") @@ -45,22 +50,33 @@ def signal_handler(h_stream, h_frame): parser = CommonParser(description=DESCRIPTION) parser.add_option('files', nargs='+', help='Files to copy to VOSpace') parser.add_option('destination', help='VOSpace location to sync files to') - parser.add_option('--ignore-checksum', action="store_true", help='dont check MD5 sum, use size and time instead') - parser.add_option('--cache_nodes', action='store_true', help='cache node MD5 sum in an sqllite db') - parser.add_option('--cache_filename', help="Name of file to use for node cache", + parser.add_option('--ignore-checksum', action="store_true", + help='dont check MD5 sum, use size and time instead') + parser.add_option('--cache_nodes', action='store_true', + help='cache node MD5 sum in an sqllite db') + parser.add_option('--cache_filename', + help="Name of file to use for node cache", default="{}/.config/vos/node_cache.db".format(HOME)) - parser.add_option('--recursive', '-r', help="Do a recursive sync", action="store_true") - parser.add_option('--nstreams', '-n', type=int, help="Number of streams to run (MAX: 30)", default=5) - parser.add_option('--exclude', help="ignore directories or files containing this pattern", default=None) - parser.add_option('--include', help="only include files matching this pattern", default=None) - parser.add_option('--overwrite', help="overwrite copy on server regardless of modification/size/md5 checks", + parser.add_option('--recursive', '-r', help="Do a recursive sync", action="store_true") - parser.add_option('--load_test', action="store_true", - help="Used to stress test the VOServer, also set --nstreams to a large value") - - if len(sys.argv) == 1: - parser.print_help() - sys.exit() + parser.add_option('--nstreams', '-n', type=int, + help="Number of streams to run (MAX: 30)", default=5) + parser.add_option( + '--exclude', + help="ignore directories or files containing this pattern", + default=None) + parser.add_option('--include', + help="only include files matching this pattern", + default=None) + parser.add_option( + '--overwrite', + help=("overwrite copy on server regardless of modification/size/md5 " + "checks"), + action="store_true") + parser.add_option( + '--load_test', action="store_true", + help=("Used to stress test the VOServer, also set --nstreams to a " + "large value")) opt = parser.parse_args() set_logging_level_from_args(opt) @@ -88,11 +104,13 @@ def compute_md5(this_filename, block_size=None): """ Read through a file and compute that files MD5 checksum. 
:param this_filename: name of the file on disk - :param block_size: number of bytes to read into memory, defaults to 2**19 bytes + :param block_size: number of bytes to read into memory, + defaults to 2**19 bytes :return: md5 as a hexadecimal string """ - block_size = block_size is None and 2**19 or block_size - return md5_cache.MD5Cache.compute_md5(this_filename, block_size=block_size) + block_size = block_size is None and 2 ** 19 or block_size + return md5_cache.MD5Cache.compute_md5(this_filename, + block_size=block_size) def file_md5(this_filename): import os @@ -103,7 +121,8 @@ def file_md5(this_filename): md5 = compute_md5(this_filename) if global_md5_cache is not None: stat = os.stat(this_filename) - global_md5_cache.update(this_filename, md5, stat.st_size, stat.st_mtime) + global_md5_cache.update(this_filename, md5, stat.st_size, + stat.st_mtime) else: md5 = md5[0] return md5 @@ -132,54 +151,72 @@ def run(self): try: node_info = None if opt.cache_nodes: - node_info = global_md5_cache.get(current_destination) + node_info = global_md5_cache.get( + current_destination) if node_info is None: logging.debug("Getting node info from VOSpace") logging.debug(str(node_dict.keys())) logging.debug(str(current_destination)) - node = self.client.get_node(current_destination, limit=None) - current_destination_md5 = node.props.get('MD5', 'd41d8cd98f00b204e9800998ecf8427e') + node = self.client.get_node(current_destination, + limit=None) + current_destination_md5 = node.props.get( + 'MD5', 'd41d8cd98f00b204e9800998ecf8427e') current_destination_length = node.attr['st_size'] current_destination_time = node.attr['st_ctime'] if opt.cache_nodes: - global_md5_cache.update(current_destination, current_destination_md5, - current_destination_length, current_destination_time) + global_md5_cache.update( + current_destination, + current_destination_md5, + current_destination_length, + current_destination_time) else: current_destination_md5 = node_info[0] current_destination_length = node_info[1] current_destination_time = node_info[2] - logging.debug("Destination MD5: {}".format(current_destination_md5)) - if ((not opt.ignore_checksum and src_md5 == current_destination_md5) or + logging.debug("Destination MD5: {}".format( + current_destination_md5)) + if ((not opt.ignore_checksum and src_md5 == + current_destination_md5) or (opt.ignore_checksum and - current_destination_time >= stat.st_mtime and - current_destination_length == stat.st_size)): - logging.info("skipping: %s matches %s" % (current_source, current_destination)) + current_destination_time >= stat.st_mtime and + current_destination_length == stat.st_size)): + logging.info("skipping: %s matches %s" % ( + current_source, current_destination)) self.filesSkipped += 1 self.bytesSkipped += current_destination_length self.queue.task_done() continue - except (transfer_exceptions.AlreadyExistsException, transfer_exceptions.NotFoundException): + except (transfer_exceptions.AlreadyExistsException, + transfer_exceptions.NotFoundException): pass - logging.info("%s -> %s" % (current_source, current_destination)) + logging.info( + "%s -> %s" % (current_source, current_destination)) try: - self.client.copy(current_source, current_destination, send_md5=True) - node = self.client.get_node(current_destination, limit=None) - current_destination_md5 = node.props.get('MD5', 'd41d8cd98f00b204e9800998ecf8427e') + self.client.copy(current_source, current_destination, + send_md5=True) + node = self.client.get_node(current_destination, + limit=None) + current_destination_md5 = 
node.props.get( + 'MD5', 'd41d8cd98f00b204e9800998ecf8427e') current_destination_length = node.attr['st_size'] current_destination_time = node.attr['st_ctime'] if opt.cache_nodes: global_md5_cache.update(current_destination, - current_destination_md5, - current_destination_length, - current_destination_time) + current_destination_md5, + current_destination_length, + current_destination_time) self.filesSent += 1 self.bytesSent += stat.st_size except (IOError, OSError) as exc: - logging.error("Error writing {} to server, skipping".format(current_source)) + logging.error( + "Error writing {} to server, skipping".format( + current_source)) logging.error(str(exc)) import re if re.search('NodeLocked', str(exc)) is not None: - logging.error("Use vlock to unlock the node before syncing to {}".format(current_destination)) + logging.error( + ("Use vlock to unlock the node before syncing " + "to {}").format(current_destination)) try: if exc.errno == 104: self.queue.put(requeue) @@ -194,7 +231,8 @@ def run(self): def mkdirs(directory): """Recursively make all nodes in the path to directory. - :param directory: str, vospace location of ContainerNode (directory) to make + :param directory: str, vospace location of ContainerNode (directory) + to make :return: """ @@ -223,7 +261,8 @@ def copy(current_source, current_destination): Copy current_source from local file system to current_destination. :param current_source: name of local file - :param current_destination: name of localtion on VOSpace to copy file to (includes filename part) + :param current_destination: name of localtion on VOSpace to copy file + to (includes filename part) :return: None """ # strip down current_destination until we find a part that exists @@ -232,16 +271,20 @@ def copy(current_source, current_destination): logging.error("{} is a link, skipping".format(current_source)) return if not os.access(current_source, os.R_OK): - logging.error("Failed to open file {}, skipping".format(current_source)) + logging.error( + "Failed to open file {}, skipping".format(current_source)) return import re if re.match('^[A-Za-z0-9._\-();:&*$@!+=/]*$', current_source) is None: - logging.error("filename %s contains illegal characters, skipping" % current_source) + logging.error( + "filename %s contains illegal characters, skipping" % + current_source) return dirname = os.path.dirname(current_destination) mkdirs(dirname) - if opt.include is not None and not re.search(opt.include, current_source): + if opt.include is not None and not re.search(opt.include, + current_source): return queue.put((current_source, current_destination), timeout=3600) @@ -255,7 +298,8 @@ def start_streams(no_streams, vospace_client): list_of_streams.append(t) return list_of_streams - def build_file_list(base_path, destination_root='', recursive=False, ignore=None): + def build_file_list(base_path, destination_root='', recursive=False, + ignore=None): """Build a list of files that should be copied into VOSpace""" spinner = ['-', '\\', '|', '/', '-', '\\', '|', '/'] @@ -276,10 +320,12 @@ def build_file_list(base_path, destination_root='', recursive=False, ignore=None if skip: continue cprefix = os.path.commonprefix((base_path, this_dirname)) - this_dirname = os.path.normpath(destination_root + "/" + this_dirname[len(cprefix):]) + this_dirname = os.path.normpath( + destination_root + "/" + this_dirname[len(cprefix):]) mkdirs(this_dirname) for thisfilename in filenames: - srcfilename = os.path.normpath(os.path.join(root, thisfilename)) + srcfilename = os.path.normpath( + 
os.path.join(root, thisfilename)) skip = False if ignore is not None: for thisIgnore in ignore.split(','): @@ -290,13 +336,16 @@ def build_file_list(base_path, destination_root='', recursive=False, ignore=None if skip: continue cprefix = os.path.commonprefix((base_path, srcfilename)) - destfilename = os.path.normpath(destination_root + "/" + srcfilename[len(cprefix):]) + destfilename = os.path.normpath( + destination_root + "/" + srcfilename[len(cprefix):]) this_dirname = os.path.dirname(destfilename) mkdirs(this_dirname) count += 1 if opt.verbose: - sys.stderr.write("Building list of files to transfer %s\r" % (spinner[count % len(spinner)])) + sys.stderr.write( + "Building list of files to transfer %s\r" % ( + spinner[count % len(spinner)])) copy(srcfilename, destfilename) if not recursive: return @@ -311,22 +360,27 @@ def build_file_list(base_path, destination_root='', recursive=False, ignore=None if os.path.isdir(filename): if filename[-1] != "/": if os.path.basename(filename) != os.path.basename(destination): - this_root = os.path.join(destination, os.path.basename(filename)) + this_root = os.path.join(destination, + os.path.basename(filename)) mkdirs(this_root) node_dict[this_root] = client.get_node(this_root, limit=None) try: - build_file_list(filename, destination_root=this_root, recursive=opt.recursive, ignore=opt.exclude) + build_file_list(filename, destination_root=this_root, + recursive=opt.recursive, ignore=opt.exclude) except Exception as e: logging.error(str(e)) logging.error("ignoring error") elif os.path.isfile(filename): if dest_is_dir: - this_root = os.path.join(destination, os.path.basename(filename)) + this_root = os.path.join(destination, + os.path.basename(filename)) copy(filename, this_root) else: logging.error("%s: No such file or directory." % filename) - logging.info("Waiting for transfers to complete ******** CTRL-\ to interrupt ********") + logging.info( + ("Waiting for transfers to complete " + "******** CTRL-\ to interrupt ********")) queue.join() end_time = time.time() @@ -346,14 +400,18 @@ def build_file_list(base_path, destination_root='', recursive=False, ignore=None if bytes_sent > 0: rate = bytes_sent / (end_time - start_time) / 1024.0 - logging.info("Sent %d files (%8.1f kbytes @ %8.3f kBytes/s)" % (files_sent, bytes_sent / 1024.0, rate)) + logging.info("Sent %d files (%8.1f kbytes @ %8.3f kBytes/s)" % ( + files_sent, bytes_sent / 1024.0, rate)) speed_up = (bytes_skipped + bytes_sent) / bytes_sent - logging.info("Speedup: %f (skipped %d files)" % (speed_up, files_skipped)) + logging.info( + "Speedup: %f (skipped %d files)" % (speed_up, files_skipped)) if bytes_sent == 0: logging.info("No files needed sending ") if files_erred > 0: - logging.info("Error transferring %d files, please try again" % files_erred) + logging.info( + "Error transferring %d files, please try again" % files_erred) + vsync.__doc__ = DESCRIPTION diff --git a/vos/vos/commands/vtag.py b/vos/vos/commands/vtag.py index 850195483..248362431 100755 --- a/vos/vos/commands/vtag.py +++ b/vos/vos/commands/vtag.py @@ -1,16 +1,20 @@ -"""Tags are annotations on VOSpace Nodes. This module provides set/read/(list) functions for property(ies) of a node. +"""Tags are annotations on VOSpace Nodes. This module provides set/read/(list) +functions for property(ies) of a node. -The tag system is meant to allow tags, in addition to the standard node properties. """ +The tag system is meant to allow tags, in addition to the standard node +properties. 
""" from __future__ import (absolute_import, division, print_function, unicode_literals) import pprint -from ..commonparser import CommonParser, set_logging_level_from_args, exit_on_exception +from ..commonparser import CommonParser, set_logging_level_from_args, \ + exit_on_exception from .. import vos +DESCRIPTION = """provides set/read/(list) functions for property(ies) of a +node. -DESCRIPTION = """provides set/read/(list) functions for property(ies) of a node. - -Properties are attributes on the node. There can be users attributes or system attributes. +Properties are attributes on the node. There can be users attributes or +system attributes. Only user attributes can be set. @@ -27,17 +31,21 @@ def vtag(): - parser = CommonParser(description=DESCRIPTION) parser.add_argument('node', help='Node to set property (tag/attribute) on') - parser.add_argument('property', help="Property whose value will be read, set or deleted", nargs="+") - parser.add_option('--remove', action="store_true", help='remove the listed property') + parser.add_argument( + 'property', + help="Property whose value will be read, set or deleted", + nargs="+") + parser.add_option('--remove', action="store_true", + help='remove the listed property') opt = parser.parse_args() args = opt set_logging_level_from_args(args) - # the node should be the first argument, the rest should contain the key/val pairs + # the node should be the first argument, the rest should contain + # the key/val pairs node = args.node props = [] @@ -51,7 +59,8 @@ def vtag(): props = args.property try: - client = vos.Client(vospace_certfile=opt.certfile, vospace_token=opt.token) + client = vos.Client(vospace_certfile=opt.certfile, + vospace_token=opt.token) node = client.get_node(node) if len(props) == 0: # print all properties @@ -72,11 +81,14 @@ def vtag(): node.props[key] = value changed = True else: - raise ValueError("Illegal keyword of value character ('=') used: %s" % ('='.join(prop))) + raise ValueError( + "Illegal keyword of value character ('=') used: %s" % ( + '='.join(prop))) if changed: client.add_props(node) except Exception as ex: exit_on_exception(ex) + vtag.__doc__ = DESCRIPTION diff --git a/vos/vos/commonparser.py b/vos/vos/commonparser.py index 90917067c..831ecf3fc 100644 --- a/vos/vos/commonparser.py +++ b/vos/vos/commonparser.py @@ -11,19 +11,23 @@ # handle interrupts nicely def signal_handler(signum, frame): - raise KeyboardInterrupt("SIGINT signal handler. {0} {1}".format(signum, frame)) + raise KeyboardInterrupt( + "SIGINT signal handler. {0} {1}".format(signum, frame)) + signal.signal(signal.SIGINT, signal_handler) def exit_on_exception(ex): """ - Exit program due to an exception, print the exception and exit with error code. + Exit program due to an exception, print the exception and exit with error + code. 
:param ex: :return: """ logging.error(str(ex)) - sys.exit(getattr(ex, 'errno', -1)) if getattr(ex, 'errno', -1) else sys.exit(-1) + sys.exit(getattr(ex, 'errno', -1)) if getattr(ex, 'errno', + -1) else sys.exit(-1) def set_logging_level_from_args(args): @@ -41,8 +45,9 @@ def set_logging_level_from_args(args): log_format = "%(levelname)s %(module)s %(message)s" if args.log_level < logging.INFO: - log_format = ("%(levelname)s %(asctime)s %(thread)d vos-" + str(version) + - " %(module)s.%(funcName)s.%(lineno)d %(message)s") + log_format = ( + "%(levelname)s %(asctime)s %(thread)d vos-" + str(version) + + " %(module)s.%(funcName)s.%(lineno)d %(message)s") logging.basicConfig(format=log_format, level=args.log_level) logger = logging.getLogger('root') logger.setLevel(args.log_level) @@ -62,28 +67,33 @@ class CommonParser(argparse.ArgumentParser): def __init__(self, *args, **kwargs): # call the parent constructor - super(CommonParser, self).__init__(*args, formatter_class=argparse.RawDescriptionHelpFormatter, - epilog="""Default service settings in ~/.config/vos/vos-config. - """, - **kwargs) + super(CommonParser, self).__init__( + *args, + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog="Default service settings in ~/.config/vos/vos-config.", + **kwargs) # inherit the VOS client version self.version = version self.log_level = logging.ERROR # now add on the common parameters - self.add_argument("--certfile", - help="filename of your CADC X509 authentication certificate", - default=os.path.join(os.getenv("HOME", "."), - ".ssl/cadcproxy.pem")) - self.add_argument("--token", - help="authentication token string (alternative to certfile)", - default=None) + self.add_argument( + "--certfile", + help="filename of your CADC X509 authentication certificate", + default=os.path.join(os.getenv("HOME", "."), ".ssl/cadcproxy.pem")) + self.add_argument( + "--token", + help="authentication token string (alternative to certfile)", + default=None) self.add_argument("--version", action="version", version=version) self.add_argument("-d", "--debug", action="store_true", default=False, help="print on command debug messages.") - self.add_argument("--vos-debug", action="store_true", help="Print on vos debug messages.") - self.add_argument("-v", "--verbose", action="store_true", default=False, + self.add_argument("--vos-debug", action="store_true", + help="Print on vos debug messages.") + self.add_argument("-v", "--verbose", action="store_true", + default=False, help="print verbose messages") - self.add_argument("-w", "--warning", action="store_true", default=False, + self.add_argument("-w", "--warning", action="store_true", + default=False, help="print warning messages only") diff --git a/vos/vos/logExceptions.py b/vos/vos/logExceptions.py index 2f21b6acd..541133f13 100644 --- a/vos/vos/logExceptions.py +++ b/vos/vos/logExceptions.py @@ -1,9 +1,11 @@ """ -Many of the vos methods are decorated with this expection logger so that exception messages are displayed. +Many of the vos methods are decorated with this expection logger so that +exception messages are displayed. """ import logging import traceback + def logExceptions(): """ A decorator which catches and logs exceptions. 
@@ -18,5 +20,7 @@ def wrapper(*args, **kwds): logger.error("Exception throw: %s %s" % (type(e), str(e))) logger.error(traceback.format_exc()) raise + return wrapper + return decorator diff --git a/vos/vos/md5_cache.py b/vos/vos/md5_cache.py index df0eef916..fb6039945 100644 --- a/vos/vos/md5_cache.py +++ b/vos/vos/md5_cache.py @@ -1,10 +1,12 @@ """ A cache of MD5 meta data associated with VOSpace transfers. - - When transferring large numbers of files to and from VOSpace there is a substantial expectation of network failures - which will require that a transfer be re-attempted. This module provides a class that keeps track of the MD5 values - associated files on disk and in VOSpace allowing the caller to choose to skip files that match (MD5 wise) between the - two locations. + + When transferring large numbers of files to and from VOSpace there is a + substantial expectation of network failures which will require that a + transfer be re-attempted. This module provides a class that keeps track + of the MD5 values associated files on disk and in VOSpace allowing the + caller to choose to skip files that match (MD5 wise) between the + two locations. """ import sqlite3 import logging @@ -15,13 +17,13 @@ class MD5Cache: - def __init__(self, cache_db=None): """Setup the sqlDB that will contain the cache table. - - The slqDB can then be used to lookup MD5 values rather than recompute them at each restart of a transfer. - - :param cache_db: The path and filename where the SQL db will be stored. + + The slqDB can then be used to lookup MD5 values rather than + recompute them at each restart of a transfer. + + :param cache_db: The path and filename where the SQL db will be stored. """ if cache_db is None: self.cache_obj = tempfile.NamedTemporaryFile() @@ -33,15 +35,19 @@ def __init__(self, cache_db=None): sql_conn = sqlite3.connect(self.cache_db) with sql_conn: # build cache lookup if doesn't already exists - sql_conn.execute(("create table if not exists " - "md5_cache (filename text PRIMARY KEY NOT NULL , md5 text, st_size int, st_mtime int)")) + sql_conn.execute( + ("create table if not exists " + "md5_cache (filename text PRIMARY KEY NOT NULL , " + "md5 text, st_size int, st_mtime int)")) @staticmethod def compute_md5(filename, block_size=READ_BUFFER_SIZE): """ - A convenience routine that computes and returns the MD5 of the supplied filename. + A convenience routine that computes and returns the MD5 of the + supplied filename. :param filename: Name of the file to compute the MD5 checksum for. - :param block_size: Loop through the file with this number of bytes per call. + :param block_size: Loop through the file with this number of + bytes per call. :return: the MD5 hexdigest of the file. :rtype: str """ @@ -55,15 +61,18 @@ def compute_md5(filename, block_size=READ_BUFFER_SIZE): return md5.hexdigest() def get(self, filename): - """Get the MD5 for filename. - + """Get the MD5 for filename. + First look in MD5 cache databse and then compute if needbe. - + :param filename: name of the file you want the MD5 sum for. """ slq_conn = sqlite3.connect(self.cache_db) with slq_conn: - cursor = slq_conn.execute("SELECT md5, st_size, st_mtime FROM md5_cache WHERE filename = ? ", (filename,)) + cursor = slq_conn.execute( + "SELECT md5, st_size, " + "st_mtime FROM md5_cache WHERE filename = ? ", + (filename,)) md5_row = cursor.fetchone() if md5_row is not None: return md5_row @@ -72,29 +81,35 @@ def get(self, filename): def delete(self, filename): """Delete a record from the cache MD5 database. 
- - :param filename: Name of the file whose md5 record to be deleted from the cache database + + :param filename: Name of the file whose md5 record to be deleted + from the cache database """ sql_conn = sqlite3.connect(self.cache_db) with sql_conn: - sql_conn.execute("DELETE from md5_cache WHERE filename = ?", (filename,)) + sql_conn.execute("DELETE from md5_cache WHERE filename = ?", + (filename,)) def update(self, filename, md5, st_size, st_mtime): """Update the MD5 value stored in the cache db - + :param filename: Name of the file to update the MD5 value for. :param md5: the MD5 hexdigest to be stored to the databse. :param st_size: size of the file being updated (stored to database) - :param st_mtime: last modified time of the file being stored to database. + :param st_mtime: last modified time of the file being stored to + database. """ # UPDATE the MD5 database sql_connection = sqlite3.connect(self.cache_db) try: with sql_connection: - sql_connection.execute("DELETE from md5_cache WHERE filename = ?", (filename,)) - sql_connection.execute(("INSERT INTO md5_cache (filename, md5, st_size, st_mtime)" - " VALUES ( ?, ?, ?, ?)"), (filename, md5, st_size, st_mtime)) + sql_connection.execute( + "DELETE from md5_cache WHERE filename = ?", (filename,)) + sql_connection.execute( + ("INSERT INTO md5_cache (filename, md5, st_size, st_mtime)" + " VALUES ( ?, ?, ?, ?)"), + (filename, md5, st_size, st_mtime)) except Exception as e: logging.error(e) return md5 diff --git a/vos/vos/node_cache.py b/vos/vos/node_cache.py index d5b00dcf6..bf0b66409 100644 --- a/vos/vos/node_cache.py +++ b/vos/vos/node_cache.py @@ -1,13 +1,17 @@ -""" keep track of vospace nodes that have been already been accessed during the current session.""" +""" keep track of vospace nodes that have been already been accessed during +the current session.""" import threading import logging logger = logging.getLogger('vos') -#logger.setLevel(logging.ERROR) + + +# logger.setLevel(logging.ERROR) class NodeCache(dict): - """ A dictionary like object that provides the ability to look up a VOSpace nodes metadata. - + """ A dictionary like object that provides the ability to look up a + VOSpace nodes metadata. + usage: # Create a node cache: nodeCache = NodeCache() @@ -33,14 +37,14 @@ def __init__(self, *args): def watch(self, uri): """Factory that returns a watch 2015 09 09.39169 for the given uri. - + :param uri: the VOSpace uri to watch """ return self.Watch(self, uri.rstrip('/')) def volatile(self, uri): """Factory for volatile objects. - + :param uri: the VOSpace uri to tag as volatile """ return self.Volatile(self, uri.rstrip('/')) @@ -48,15 +52,15 @@ def volatile(self, uri): def __missing__(self, key): """Attempting to access a non-cached node returns None rather than raising an exception. - - :param key: the key in the dict being accessed (i.e. uri) + + :param key: the key in the dict being accessed (i.e. uri) """ return None def __setitem__(self, key, value): """If an node is directly inserted into the cache, automatically create a watch. - + :param key: the uri of the node being inserted into the dictionary. :param value: the actual node object that will be stored. """ @@ -75,10 +79,11 @@ class Volatile(object): def __init__(self, node_cache, uri): """ - - :param node_cache: the NOdeCache object this Volatile object is in. + + :param node_cache: the NOdeCache object this Volatile object is in. 
:type node_cache: NodeCache - :param uri: the VOSpace uri that references this node in the NodeCache + :param uri: the VOSpace uri that references this node in the + NodeCache :type uri: str """ self.node_cache = node_cache @@ -91,7 +96,8 @@ def __enter__(self): """ with self.node_cache.lock: - # Add this volatile object to a list of all active volatile objects. + # Add this volatile object to a list of all active volatile + # objects. self.node_cache.volatile_nodes.append(self) # Remove any cached nodes in the volatile sub-tree. @@ -105,7 +111,8 @@ def __enter__(self): # Mark any watched nodes in the volatile sub-tree dirty for watchedNode in self.node_cache.watched_nodes: - if watchedNode.uri.startswith(self.uri) or (watchedNode.uri == parent): + if watchedNode.uri.startswith(self.uri) or\ + (watchedNode.uri == parent): watchedNode.dirty = True return self @@ -123,12 +130,12 @@ class Watch(object): def __init__(self, node_cache, uri): """ - - :param node_cache: the NodeCache object containing the uri to watch. + + :param node_cache: the NodeCache object containing the uri to + watch. :type node_cache: NodeCache :param uri: the uri in the NodeCache that will be watched. :type uri: str - """ self.node_cache = node_cache self.uri = uri @@ -152,7 +159,8 @@ def __exit__(self, exc_type, exc_value, traceback): self.node_cache.watched_nodes.remove(self) def insert(self, value): - """ Insert an value, likely node object, into the cache, but only if the watch is not dirty.""" + """ Insert an value, likely node object, into the cache, but only + if the watch is not dirty.""" if not self.dirty: # noinspection PyCallByClass dict.__setitem__(self.node_cache, self.uri, value) diff --git a/vos/vos/tests/setup_package.py b/vos/vos/tests/setup_package.py deleted file mode 100644 index f2fd9ed48..000000000 --- a/vos/vos/tests/setup_package.py +++ /dev/null @@ -1,3 +0,0 @@ -def get_package_data(): - return { - _ASTROPY_PACKAGE_NAME_ + '.tests': ['coveragerc']} diff --git a/vos/vos/tests/test_commonparser.py b/vos/vos/tests/test_commonparser.py index 9f935cd64..88eeb1d37 100644 --- a/vos/vos/tests/test_commonparser.py +++ b/vos/vos/tests/test_commonparser.py @@ -42,6 +42,3 @@ def run(): suite1 = unittest.TestLoader().loadTestsFromTestCase(TestCommonParser) all_tests = unittest.TestSuite([suite1]) return unittest.TextTestRunner(verbosity=2).run(all_tests) - -if __name__ == "__main__": - run() diff --git a/vos/vos/tests/test_md5_cache.py b/vos/vos/tests/test_md5_cache.py index 7bfa6ffc4..2cdd1db16 100644 --- a/vos/vos/tests/test_md5_cache.py +++ b/vos/vos/tests/test_md5_cache.py @@ -4,9 +4,10 @@ import hashlib from vos.md5_cache import MD5Cache -from mock import patch, MagicMock, Mock, call, mock_open +from mock import patch, MagicMock, call, mock_open -# The following is a temporary workaround for Python issue 25532 (https://bugs.python.org/issue25532) +# The following is a temporary workaround for Python issue 25532 +# (https://bugs.python.org/issue25532) call.__wrapped__ = None @@ -17,40 +18,43 @@ class TestMD5Cache(unittest.TestCase): @patch('vos.md5_cache.sqlite3.connect') def test_sqlite3(self, mock_sqlite3): """ tests interactions with sqlite3 db""" - + # test constructor sql_conn_mock = MagicMock() mock_sqlite3.return_value = sql_conn_mock - + md5_cache = MD5Cache() sql_conn_mock.execute.assert_called_once_with( - 'create table if not exists md5_cache (filename text' +\ + 'create table if not exists md5_cache (filename text' ' PRIMARY KEY NOT NULL , md5 text, st_size int, st_mtime int)') - # test 
delete sql_conn_mock.reset_mock() md5_cache.delete('somefile') sql_conn_mock.execute.assert_called_once_with( 'DELETE from md5_cache WHERE filename = ?', ('somefile',)) - + # test update sql_conn_mock.reset_mock() - self.assertEquals(0x00123, md5_cache.update('somefile', 0x00123, 23, 'Jan 01 2001')) + self.assertEquals(0x00123, md5_cache.update('somefile', 0x00123, 23, + 'Jan 01 2001')) call1 = call('DELETE from md5_cache WHERE filename = ?', ('somefile',)) - call2 = call('INSERT INTO md5_cache (filename, md5, st_size, st_mtime) VALUES ( ?, ?, ?, ?)', ('somefile', 291, 23, 'Jan 01 2001')) + call2 = call( + 'INSERT INTO md5_cache (filename, md5, st_size, st_mtime) ' + 'VALUES ( ?, ?, ?, ?)', ('somefile', 291, 23, 'Jan 01 2001')) calls = [call1, call2] sql_conn_mock.execute.assert_has_calls(calls) - + # test get sql_conn_mock.reset_mock() cursor_mock = MagicMock() cursor_mock.fetchone.return_value = ['0x0023', '23', 'Jan 01 2000'] sql_conn_mock.execute.return_value = cursor_mock self.assertEquals(cursor_mock.fetchone.return_value, - md5_cache.get('somefile')) + md5_cache.get('somefile')) sql_conn_mock.execute.assert_called_once_with( - 'SELECT md5, st_size, st_mtime FROM md5_cache WHERE filename = ? ', ('somefile',)) + 'SELECT md5, st_size, st_mtime FROM md5_cache WHERE filename = ? ', + ('somefile',)) def test_compute_md5(self): file_mock = MagicMock() @@ -62,13 +66,13 @@ def test_compute_md5(self): expect_md5.update(b) cahce_file = tempfile.NamedTemporaryFile() cache = MD5Cache(cahce_file.name) - with patch('six.moves.builtins.open', mock_open(read_data=b''.join(buffer))): - self.assertEquals(expect_md5.hexdigest(), cache.compute_md5('fakefile', 4)) + with patch('six.moves.builtins.open', + mock_open(read_data=b''.join(buffer))): + self.assertEquals(expect_md5.hexdigest(), + cache.compute_md5('fakefile', 4)) + def run(): suite1 = unittest.TestLoader().loadTestsFromTestCase(TestMD5Cache) allTests = unittest.TestSuite([suite1]) return unittest.TextTestRunner(verbosity=2).run(allTests) - -if __name__ == "__main__": - run() diff --git a/vos/vos/tests/test_node_cache.py b/vos/vos/tests/test_node_cache.py index 62a694d57..4bcde484e 100644 --- a/vos/vos/tests/test_node_cache.py +++ b/vos/vos/tests/test_node_cache.py @@ -116,8 +116,8 @@ def test_02_watchnvolatile(self): w.insert('d') self.assertFalse('/a/b/c' in node_cache) - # Set up a watch and then make a parent node volatile. Caching should be - # disabled on the watched tree. + # Set up a watch and then make a parent node volatile. Caching should + # be disabled on the watched tree. 
with node_cache.watch('/a/b/c') as w: self.assertFalse('/a/b/c' in node_cache) w.insert('d') @@ -157,6 +157,3 @@ def run(): suite1 = unittest.TestLoader().loadTestsFromTestCase(TestNodeCache) all_tests = unittest.TestSuite([suite1]) return unittest.TextTestRunner(verbosity=2).run(all_tests) - -if __name__ == "__main__": - run() \ No newline at end of file diff --git a/vos/vos/tests/test_vofile.py b/vos/vos/tests/test_vofile.py index 6ce7ce7cf..ff62016e2 100644 --- a/vos/vos/tests/test_vofile.py +++ b/vos/vos/tests/test_vofile.py @@ -29,8 +29,8 @@ def __init__(self, *args, **kwargs): @patch.object(Connection, 'get_connection') @unittest.skipIf(skipTests, "Individual tests") def test_retry_successfull(self, mock_get_connection): - # this tests the read function when first HTTP request returns a 503 but the second one - # is successfull and returns a 200 + # this tests the read function when first HTTP request returns a + # 503 but the second one is successfull and returns a 200 # mock the 503 response mockHttpResponse503 = Mock(name="HttpResponse503") @@ -54,16 +54,19 @@ def test_retry_successfull(self, mock_get_connection): iterator = iter([mockHttpResponse503, mockHttpResponse200]) generator.__iter__.return_value = iterator conn.get_connection.return_value = mockHttpRequest - # vofile = vos.VOFile(["Some URL"], mockConn, "GET") - vofile = vos.VOFile(["Some URL"], conn, "GET") + vos.VOFile(["Some URL"], conn, "GET") # check the response - # TODO self.assertEqual("Testing", vofile.read(), "Incorrect returned value from read") + # TODO self.assertEqual("Testing", vofile.read(), + # "Incorrect returned value from read") # mockHttpResponse503.getheader.assert_called_with("Retry-After", 5) - # 1 retry -> getheader in HttpResponse503 was called 2 times in the order shown below. - # TODO call is only available in mock 1.0. Uncomment when this version available + # 1 retry -> getheader in HttpResponse503 was called 2 times in the + # order shown below. + # TODO call is only available in mock 1.0. Uncomment when this + # version available # expected = [call('Content-Length', 0), call('Retry-After', 5)] - # self.assertEquals( expected, mockHttpResponse503.getheader.call_args_list) + # self.assertEquals( expected, + # mockHttpResponse503.getheader.call_args_list) @unittest.skipIf(skipTests, "Individual tests") def test_fail_max_retry(self): @@ -94,19 +97,23 @@ def getheader(name, default): # set number of retries to 1 and check the OSError was thrown vofile.maxRetries = 1 - with self.assertRaises(OSError) as cm: + with self.assertRaises(OSError): vofile.read() mock_resp.headers.get.assert_called_with('Retry-After', 5) - # 1 retry -> getheader in HttpResponse was called 4 times in the order shown below. - # TODO call is only available in mock 1.0. Uncomment when this version available + # 1 retry -> getheader in HttpResponse was called 4 times in the + # order shown below. + # TODO call is only available in mock 1.0. 
Uncomment when this version + # available # expected = [call('Content-Length', 10), call('Retry-After', 3)] - # self.assertEquals( expected, mock_resp.mock_headers.get.call_args_list) + # self.assertEquals( expected, + # mock_resp.mock_headers.get.call_args_list) @patch.object(Connection, 'get_connection') @unittest.skipIf(skipTests, "Individual tests") def test_retry_412_successfull(self, mock_get_connection): - # this tests the read function when first HTTP request returns a 412 but the second one + # this tests the read function when first HTTP request returns a + # 412 but the second one # is successful and returns a 200 # mock the 412 response @@ -131,11 +138,14 @@ def test_retry_412_successfull(self, mock_get_connection): vofile.currentRetryDelay = 2 # check the response - #self.assertEqual("Testing", vofile.read(), "Incorrect returned value from read") + # self.assertEqual("Testing", vofile.read(), "Incorrect returned value + # from read") # 1 retry -> getheader in HttpResponse412 was called once as follows. - # TODO call is only available in mock 1.0. Uncomment when this version available + # TODO call is only available in mock 1.0. Uncomment when this version + # available # expected = [call('Content-Length', 0)] - # self.assertEquals( expected, mockHttpResponse412.getheader.call_args_list) + # self.assertEquals( expected, + # mockHttpResponse412.getheader.call_args_list) @unittest.skipIf(skipTests, "Individual tests") def test_multiple_urls(self): @@ -156,14 +166,16 @@ def test_multiple_urls(self): # mock the 503 response mock_resp_503 = requests.Response() mock_resp_503.status_code = 503 - mock_resp_503.headers = {'Content-Length': 10, 'Content-MD5': 12345, 'Retry-After':1} + mock_resp_503.headers = {'Content-Length': 10, 'Content-MD5': 12345, + 'Retry-After': 1} conn = Connection() # test successful - use first url self.responses = [mock_resp_200] vofile = vos.VOFile(transfer_urls, conn, "GET") - with patch('vos.vos.net.ws.Session.send', Mock(side_effect=self.responses)): + with patch('vos.vos.net.ws.Session.send', + Mock(side_effect=self.responses)): vofile.read() assert(vofile.url == transfer_urls[0]) assert(vofile.urlIndex == 0) @@ -172,18 +184,21 @@ def test_multiple_urls(self): # test first url busy self.responses = [mock_resp_503, mock_resp_200] vofile = vos.VOFile(transfer_urls, conn, "GET") - with patch('vos.vos.net.ws.Session.send', Mock(side_effect=self.responses)): + with patch('vos.vos.net.ws.Session.send', + Mock(side_effect=self.responses)): vofile.read() assert(vofile.url == transfer_urls[1]) assert(vofile.urlIndex == 1) assert(len(vofile.URLs) == 3) - # #test first url error - ignored internally, second url busy, third url works + # test first url error - ignored internally, second url busy, + # third url works # test 404 which raises OSError self.responses = [mock_resp_404, mock_resp_503, mock_resp_200] vofile = vos.VOFile(transfer_urls, conn, "GET") - #with self.assertRaises(exceptions.NotFoundException) as ex: - with patch('vos.vos.net.ws.requests.Session.send', Mock(side_effect=self.responses)): + # with self.assertRaises(exceptions.NotFoundException) as ex: + with patch('vos.vos.net.ws.requests.Session.send', + Mock(side_effect=self.responses)): vofile.read() assert(vofile.url == transfer_urls[2]) assert(vofile.urlIndex == 2) @@ -193,7 +208,8 @@ def test_multiple_urls(self): self.responses = [mock_resp_503, mock_resp_503, mock_resp_503, mock_resp_200] vofile = vos.VOFile(transfer_urls, conn, "GET") - with patch('vos.vos.net.ws.requests.Session.send', 
Mock(side_effect=self.responses)): + with patch('vos.vos.net.ws.requests.Session.send', + Mock(side_effect=self.responses)): vofile.read() assert(vofile.url == transfer_urls[2]) assert(vofile.urlIndex == 2) @@ -209,21 +225,20 @@ def test_checkstatus(self, mock_get_connection): mock_resp = Object mock_resp.status_code = 200 mock_resp.headers = { - 'Content-MD5': 12345, 'Content-Length': 10, 'X-CADC-Content-Length': 10 + 'Content-MD5': 12345, 'Content-Length': 10, + 'X-CADC-Content-Length': 10 } vofile.resp = mock_resp self.assertTrue(vofile.checkstatus()) self.assertEqual(vofile.get_file_info(), (10, 12345)) - def side_effect(self, foo, stream=True, verify=False): # removes first in the list # mock the 200 response response = self.responses.pop(0) return response - def get_headers(self, arg): return self.headers[arg] @@ -231,6 +246,3 @@ def get_headers(self, arg): def run(): suite = unittest.TestLoader().loadTestsFromTestCase(TestVOFile) return unittest.TextTestRunner(verbosity=2).run(suite) - -if __name__ == "__main__": - run() diff --git a/vos/vos/tests/test_vos.py b/vos/vos/tests/test_vos.py index 0e0be68cf..2322422e0 100644 --- a/vos/vos/tests/test_vos.py +++ b/vos/vos/tests/test_vos.py @@ -1,14 +1,15 @@ # Test the vos Client class - + import os import unittest import requests from xml.etree import ElementTree -from mock import Mock, patch, MagicMock, call, mock_open +from mock import Mock, patch, MagicMock, call from vos import Client, Connection, Node, VOFile from vos import vos as vos -# The following is a temporary workaround for Python issue 25532 (https://bugs.python.org/issue25532) +# The following is a temporary workaround for Python issue 25532 +# (https://bugs.python.org/issue25532) call.__wrapped__ = None NODE_XML = """ @@ -28,6 +29,7 @@ """ + class Object(object): pass @@ -38,7 +40,8 @@ class TestClient(unittest.TestCase): def off_quota(self): """ - Test that a 413 raised by the server gets a reasonable error to the user. + Test that a 413 raised by the server gets a reasonable error to the + user. 
@return: """ with patch('vos.vos.VOFile') as mockVOFile: @@ -79,7 +82,8 @@ def test_init_client(self): @patch('vos.vos.net.ws.WsCapabilities.get_access_url', Mock(return_value='http://foo.com/vospace')) - @patch('vos.vos.net.ws.RetrySession.send', Mock(return_value=Mock(spec=requests.Response, status_code=404))) + @patch('vos.vos.net.ws.RetrySession.send', + Mock(return_value=Mock(spec=requests.Response, status_code=404))) def test_open(self): # Invalid mode raises OSError with self.assertRaises(OSError): @@ -97,61 +101,62 @@ def test_open(self): client.get_node_url = Mock(return_value=mock_vofile) vofile = client.open(None, url=None) self.assertEquals(vofile.url.URLs[0], 'http://foo.com/bar') - - - @patch('vos.vos.Node.get_info', Mock(return_value={'name':'aa'})) + + @patch('vos.vos.Node.get_info', Mock(return_value={'name': 'aa'})) def test_get_info_list(self): # list tuples of a LinkNode mock_node = MagicMock(type='vos:DataNode') mock_node.return_value = mock_node mock_node.name = 'testnode' - mock_node.get_info.return_value = {'name':'aa'} + mock_node.get_info.return_value = {'name': 'aa'} mock_link_node = Mock(type='vos:LinkNode') mock_link_node.target = 'vos:/somefile' client = Client() client.get_node = MagicMock(side_effect=[mock_link_node, mock_node]) - self.assertEquals({'testnode':mock_node.get_info.return_value}.items(), - client.get_info_list('vos:/somenode')) - - + self.assertEquals( + {'testnode': mock_node.get_info.return_value}.items(), + client.get_info_list('vos:/somenode')) def test_nodetype(self): mock_node = MagicMock(id=333) mock_node.type = 'vos:ContainerNode' client = Client() client.get_node = Mock(return_value=mock_node) - self.assertEquals('vos:ContainerNode', client._node_type('vos:/somenode')) + self.assertEquals('vos:ContainerNode', + client._node_type('vos:/somenode')) self.assertTrue(client.isdir('vos:/somenode')) - + mock_node.type = 'vos:DataNode' self.assertEquals('vos:DataNode', client._node_type('vos:/somenode')) self.assertTrue(client.isfile('vos:/somenode')) - + # through a link mock_node.type = 'vos:ContainerNode' mock_link_node = Mock(type='vos:LinkNode') mock_link_node.target = 'vos:/somefile' - client.get_node = Mock(side_effect= - [mock_link_node, mock_node, mock_link_node, mock_node]) - self.assertEquals('vos:ContainerNode', client._node_type('vos:/somenode')) + client.get_node = Mock(side_effect=[mock_link_node, mock_node, + mock_link_node, mock_node]) + self.assertEquals('vos:ContainerNode', + client._node_type('vos:/somenode')) self.assertTrue(client.isdir('vos:/somenode')) - - # through an external link - not sure why the type is DataNode in this case??? + + # through an external link - not sure why the type is DataNode in + # this case??? mock_link_node.target = '/somefile' client.get_node = Mock(side_effect=[mock_link_node, mock_link_node]) self.assertEquals('vos:DataNode', client._node_type('vos:/somenode')) self.assertTrue(client.isfile('vos:/somenode')) - patch('vos.EndPoints.nodes', Mock()) + @patch('vos.vos.net.ws.WsCapabilities.get_access_url', Mock(return_value='http://foo.com/vospace')) def test_glob(self): # test the pattern matches in directories and file names - + # simple test for listing of directory, no wild cards - - # NOTE: Mock class also has a 'name' attribute so we cannot + + # NOTE: Mock class also has a 'name' attribute so we cannot # instantiate a mock node with Mock(name='blah'). 
There are # two other wasy to do it as seen below mock_node = MagicMock(type='vos:ContainerNode') @@ -159,18 +164,18 @@ def test_glob(self): client = Client() client.get_node = Mock(return_value=mock_node) self.assertEquals(['vos:/anode/'], client.glob('vos:/anode/')) - + # simple test for file listing of file mock_node = MagicMock(type='vos:DataNode') mock_node.configure_mock(name='afile') client = Client() client.get_node = Mock(return_value=mock_node) self.assertEquals(['vos:/afile'], client.glob('vos:/afile')) - + # create a mock directory structure on the form - # /anode/abc /anode/def - > anode/a* should return + # /anode/abc /anode/def - > anode/a* should return # /anode/adc - + mock_node = MagicMock(type='vos:ContainerNode') mock_node.configure_mock(name='anode') mock_child_node1 = Mock(type='vos:DataNode') @@ -182,24 +187,23 @@ def test_glob(self): mock_base_node = Mock(type='vos:ContainerNode') mock_base_node.name = 'vos:' mock_base_node.node_list = [mock_node] - mock_node.node_list = [mock_base_node, mock_child_node1, mock_child_node2] + mock_node.node_list = [mock_base_node, mock_child_node1, + mock_child_node2] client = Client() - client.get_node = Mock(side_effect=[mock_node, mock_base_node, mock_node]) + client.get_node = Mock( + side_effect=[mock_node, mock_base_node, mock_node]) self.assertEquals(['vos:/anode/abc'], client.glob('vos:/anode/a*')) self.assertEquals(['vos:/anode/abc'], client.glob('vos:/*node/abc')) - + # test nodes: # /anode/.test1 /bnode/sometests /bnode/blah # /[a,c]node/*test* should return /bnode/somtests (.test1 is filtered # out as a special file) - - mock_child_node1 = Mock(type='vos:DataNode') - mock_child_node1.name = '.test1' mock_node1 = MagicMock(type='vos:ContainerNode') mock_node1.configure_mock(name='anode') mock_node1.node_list = [mock_child_node1] - + mock_child_node2 = Mock(type='vos:DataNode') mock_child_node2.name = 'sometests' mock_child_node3 = Mock(type='vos:DataNode') @@ -207,18 +211,18 @@ def test_glob(self): mock_node2 = MagicMock(type='vos:ContainerNode') mock_node2.configure_mock(name='bnode') mock_node2.node_list = [mock_child_node2, mock_child_node3] - + # because we use wild characters in the root node, # we need to create a corresponding node for the base node mock_base_node = Mock(type='vos:DataNode') mock_base_node.name = 'vos:' mock_base_node.node_list = [mock_node1, mock_node2] client = Client() - client.get_node = Mock(side_effect=[mock_base_node, mock_node1, mock_node2]) - self.assertEquals(['vos:/bnode/sometests'], + client.get_node = Mock( + side_effect=[mock_base_node, mock_node1, mock_node2]) + self.assertEquals(['vos:/bnode/sometests'], client.glob('vos:/[a,b]node/*test*')) - - + @patch('vos.vos.md5_cache.MD5Cache.compute_md5') @patch('__main__.open', MagicMock(), create=True) def test_copy(self, computed_md5_mock): @@ -226,15 +230,14 @@ def test_copy(self, computed_md5_mock): md5sum = 'd41d8cd98f00b204e9800998ecf84eee' # patch the compute_md5 function in vos to return the above value computed_md5_mock.return_value = md5sum - - #mock the props of the corresponding node + + # mock the props of the corresponding node props = MagicMock() props.get.return_value = md5sum - #add props to the mocked node + # add props to the mocked node node = MagicMock(spec=Node) node.props = props - # mock one by one the chain of connection.session.response.headers conn = MagicMock(spec=Connection) session = MagicMock() @@ -244,29 +247,31 @@ def test_copy(self, computed_md5_mock): response.headers = headers session.get.return_value = 
response conn.session = session - + test_client = Client() # use the mocked connection instead of the real one test_client.conn = conn - get_node_url_mock = Mock(return_value=['http://cadc.ca/test', 'http://cadc.ca/test']) + get_node_url_mock = Mock( + return_value=['http://cadc.ca/test', 'http://cadc.ca/test']) test_client.get_node_url = get_node_url_mock mock_update = Mock() test_client.update = mock_update - - #patch Client.get_node to return our mocked node + + # patch Client.get_node to return our mocked node get_node_mock = Mock(return_value=node) test_client.get_node = get_node_mock - + # time to test... vospaceLocation = 'vos://test/foo' osLocation = '/tmp/foo' # copy from vospace test_client.copy(vospaceLocation, osLocation) - get_node_url_mock.assert_called_once_with(vospaceLocation, method='GET', - cutout=None, view='data') + get_node_url_mock.assert_called_once_with(vospaceLocation, + method='GET', + cutout=None, view='data') computed_md5_mock.assert_called_once_with(osLocation) get_node_mock.assert_called_once_with(vospaceLocation) - + # copy to vospace when md5 sums are the same -> only update occurs get_node_url_mock.reset_mock() computed_md5_mock.reset_mock() @@ -278,13 +283,14 @@ def test_copy(self, computed_md5_mock): get_node_url_mock.reset_mock() computed_md5_mock.reset_mock() mock_update.reset_mock() - props.get.side_effect= ['d00223344', md5sum] + props.get.side_effect = ['d00223344', md5sum] test_client.copy(osLocation, vospaceLocation) assert not mock_update.called get_node_url_mock.assert_called_once_with(vospaceLocation, 'PUT') computed_md5_mock.assert_called_once_with(osLocation) - # copy 0 size file -> delete and create on client but no bytes transferred + # copy 0 size file -> delete and create on client but no bytes + # transferred get_node_url_mock.reset_mock() computed_md5_mock.reset_mock() computed_md5_mock.return_value = vos.ZERO_MD5 @@ -319,10 +325,10 @@ def test_copy(self, computed_md5_mock): computed_md5_mock.return_value = '000bad000' with self.assertRaises(OSError): test_client.copy(vospaceLocation, osLocation) - + with self.assertRaises(OSError): test_client.copy(osLocation, vospaceLocation) - + # patch sleep to stop the test from sleeping and slowing down execution @patch('vos.vos.time.sleep', MagicMock(), create=True) @patch('vos.vos.VOFile') @@ -332,7 +338,7 @@ def test_transfer_error(self, mock_vofile): vospace_url = 'https://somevospace.server/vospace' session = Mock() - session.get.side_effect = [Mock(content='COMPLETED')] + session.get.side_effect = [Mock(content='COMPLETED')] conn = Mock(spec=Connection) conn.session = session @@ -340,47 +346,51 @@ def test_transfer_error(self, mock_vofile): # use the mocked connection instead of the real one test_client.conn = conn - + # job successfully completed vofile.read.side_effect = [b'QUEUED', b'COMPLETED'] self.assertFalse(test_client.get_transfer_error( - vospace_url +'/results/transferDetails', 'vos://vospace')) - session.get.assert_called_once_with(vospace_url + '/phase', allow_redirects=False) - + vospace_url + '/results/transferDetails', 'vos://vospace')) + session.get.assert_called_once_with(vospace_url + '/phase', + allow_redirects=False) + # job suspended session.reset_mock() session.get.side_effect = [Mock(content=b'COMPLETED')] vofile.read.side_effect = [b'QUEUED', b'SUSPENDED'] with self.assertRaises(OSError): test_client.get_transfer_error( - vospace_url +'/results/transferDetails', 'vos://vospace') - #check arguments for session.get calls - self.assertEquals([call(vospace_url + 
'/phase', allow_redirects=False)], - session.get.call_args_list ) + vospace_url + '/results/transferDetails', 'vos://vospace') + # check arguments for session.get calls + self.assertEquals( + [call(vospace_url + '/phase', allow_redirects=False)], + session.get.call_args_list) # job encountered an internal error session.reset_mock() vofile.read.side_effect = Mock(side_effect=[b'QUEUED', b'ERROR']) - session.get.side_effect = [Mock(content=b'COMPLETED'), Mock(text='InternalFault')] + session.get.side_effect = [Mock(content=b'COMPLETED'), + Mock(text='InternalFault')] with self.assertRaises(OSError): test_client.get_transfer_error( - vospace_url +'/results/transferDetails', 'vos://vospace') + vospace_url + '/results/transferDetails', 'vos://vospace') self.assertEquals([call(vospace_url + '/phase', allow_redirects=False), - call(vospace_url + '/error')], - session.get.call_args_list ) + call(vospace_url + '/error')], + session.get.call_args_list) # job encountered an unsupported link error session.reset_mock() link_file = 'testlink.fits' vofile.read.side_effect = Mock(side_effect=[b'QUEUED', b'ERROR']) session.get.side_effect = [Mock(content=b'COMPLETED'), - Mock(text="Unsupported link target: " + link_file)] + Mock( + text="Unsupported link target: " + + link_file)] self.assertEquals(link_file, test_client.get_transfer_error( - vospace_url +'/results/transferDetails', 'vos://vospace')) + vospace_url + '/results/transferDetails', 'vos://vospace')) self.assertEquals([call(vospace_url + '/phase', allow_redirects=False), - call(vospace_url + '/error')], - session.get.call_args_list ) - + call(vospace_url + '/error')], + session.get.call_args_list) def test_add_props(self): old_node = Node(ElementTree.fromstring(NODE_XML)) @@ -399,8 +409,8 @@ def test_add_props(self): with patch('vos.Client', client) as mock: mock.add_props(new_node) mock.conn.session.post.assert_called_with('http://foo.com/bar', - headers=headers, data=data) - + headers=headers, + data=data) @patch('vos.vos.net.ws.WsCapabilities.get_access_url', Mock(return_value='http://www.canfar.phys.uvic.ca/vospace/nodes')) @@ -414,7 +424,7 @@ def test_create(self): headers = {'size': str(len(data))} client = Client() - #client.get_node_url = Mock(return_value='http://foo.com/bar') + # client.get_node_url = Mock(return_value='http://foo.com/bar') session_mock = MagicMock() client.conn = Mock() client.conn.session = session_mock @@ -422,19 +432,22 @@ def test_create(self): result = client.create(uri) self.assertEquals(node, result) - session_mock.put.assert_called_with('http://www.canfar.phys.uvic.ca/vospace/nodes/bar', - headers=headers, data=data) + session_mock.put.assert_called_with( + 'http://www.canfar.phys.uvic.ca/vospace/nodes/bar', + headers=headers, data=data) def test_update(self): node = Node(ElementTree.fromstring(NODE_XML)) resp = Mock() - resp.headers.get = Mock(return_value="https://www.canfar.phys.uvic.ca/vospace") + resp.headers.get = Mock( + return_value="https://www.canfar.phys.uvic.ca/vospace") conn = Mock(spec=vos.Connection) conn.session.post = Mock(return_value=resp) client = Client(conn=conn) - client.get_node_url = Mock(return_value='https://www.canfar.phys.uvic.ca/vospace') + client.get_node_url = Mock( + return_value='https://www.canfar.phys.uvic.ca/vospace') client.get_transfer_error = Mock() client.protocol = 'https' @@ -445,12 +458,14 @@ def test_update(self): client.get_endpoints = Mock(return_value=endpoints_mock) result = client.update(node, False) self.assertEqual(result, 0) - 
client.conn.session.post.assert_called_with('https://www.canfar.phys.uvic.ca/vospace', - data=data, allow_redirects=False) + client.conn.session.post.assert_called_with( + 'https://www.canfar.phys.uvic.ca/vospace', + data=data, allow_redirects=False) call1 = call(property_url, allow_redirects=False, data=data, headers={'Content-type': 'text/xml'}) - call2 = call('https://www.canfar.phys.uvic.ca/vospace/phase', allow_redirects=False, data="PHASE=RUN", + call2 = call('https://www.canfar.phys.uvic.ca/vospace/phase', + allow_redirects=False, data="PHASE=RUN", headers={'Content-type': "text/text"}) calls = [call1, call2] @@ -467,31 +482,36 @@ def test_getNode(self): """ uri = "vos://foo.com!vospace/bar" - nodes = """ - - - - 2016-05-10T09:52:13 - - - - - 2016-05-19T09:52:14 - - - - """ + nodes = (' \n' + '\n' + ' \n' + ' 2016-05-10T09:52:13' + '\n' + ' \n' + '\n' + '\n' + ' \n' + ' ' + '2016-05-19T09:52:14\n' + ' \n' + '\n' + '\n') mock_vofile = Mock() client = Client() client.open = Mock(return_value=mock_vofile) - mock_vofile.read = Mock(return_value=NODE_XML.format(uri, '').encode('UTF-8')) + mock_vofile.read = Mock( + return_value=NODE_XML.format(uri, '').encode('UTF-8')) my_node = client.get_node(uri, limit=0, force=False) self.assertEqual(uri, my_node.uri) self.assertEqual(len(my_node.node_list), 0) - mock_vofile.read = Mock(return_value=NODE_XML.format(uri, nodes).encode('UTF-8')) + mock_vofile.read = Mock( + return_value=NODE_XML.format(uri, nodes).encode('UTF-8')) my_node = client.get_node(uri, limit=2, force=True) self.assertEqual(uri, my_node.uri) @@ -515,7 +535,8 @@ def test_move(self): with self.assertRaises(OSError): client.move(uri1, uri2) - @patch('vos.vos.net.ws.WsCapabilities.get_access_url', Mock(return_value='https://www.canfar.phys.uvic.ca/vospace/nodes')) + @patch('vos.vos.net.ws.WsCapabilities.get_access_url', + Mock(return_value='https://www.canfar.phys.uvic.ca/vospace/nodes')) def test_delete(self): certfile = '/tmp/SomeCert.pem' open(certfile, 'w+') @@ -534,7 +555,7 @@ class TestNode(unittest.TestCase): def test_compute_md5(self): pass - #from vos import vos + # from vos import vos # mock_file = MagicMock(spec=file, wraps=StringIO('a')) # foo = mock_file.open() # self.assertEqual(foo.read, 'a') @@ -565,7 +586,7 @@ def test_compute_md5(self): def test_node_eq(self): # None node raises LoookupError with self.assertRaises(LookupError): - node = Node(None) + Node(None) # Node equality node1 = Node(ElementTree.Element(Node.NODE)) @@ -628,9 +649,9 @@ def test_change_prop(self): self.assertEqual('2000', quota.text) # Delete a property - #node.change_prop('quota', None) - #quota = TestVos.get_node_property(node, 'quota') - #self.assertIsNone(quota) + # node.change_prop('quota', None) + # quota = TestVos.get_node_property(node, 'quota') + # self.assertIsNone(quota) def test_clear_properties(self): # Add a new property @@ -682,5 +703,3 @@ def test_seek(self): self.assertEquals(5, vofile._fpos) vofile.seek(10, os.SEEK_END) self.assertEquals(15, vofile._fpos) - - diff --git a/vos/vos/vos.py b/vos/vos/vos.py index 3bae05ddf..504813e24 100644 --- a/vos/vos/vos.py +++ b/vos/vos/vos.py @@ -12,6 +12,7 @@ import errno from datetime import datetime import fnmatch + try: from cStringIO import StringIO except ImportError: @@ -31,16 +32,19 @@ from xml.etree import ElementTree from copy import deepcopy from .node_cache import NodeCache + try: from .version import version except ImportError: version = "unknown" from cadcutils import net, exceptions, util from . 
import md5_cache + try: from urllib import splittag except ImportError: import six.moves.urllib.parse + splittag = six.moves.urllib.parse.splittag urlparse = six.moves.urllib.parse.urlparse @@ -56,7 +60,8 @@ BUFSIZE = 8388608 # Size of read/write buffer MAX_RETRY_DELAY = 128 # maximum delay between retries -DEFAULT_RETRY_DELAY = 30 # start delay between retries when Try_After not sent by server. +# start delay between retries when Try_After not sent by server. +DEFAULT_RETRY_DELAY = 30 MAX_RETRY_TIME = 900 # maximum time for retries before giving up... VOSPACE_ARCHIVE = os.getenv("VOSPACE_ARCHIVE", "vospace") @@ -66,7 +71,6 @@ CADC_GMS_PREFIX = "ivo://cadc.nrc.ca/gms#" - VO_PROPERTY_URI_ISLOCKED = 'ivo://cadc.nrc.ca/vospace/core#islocked' VO_VIEW_DEFAULT = 'ivo://ivoa.net/vospace/core#defaultview' # CADC specific views @@ -84,18 +88,19 @@ _DEFAULT_CONFIG_PATH = os.path.join(_ROOT, 'data', 'default-vos-config') _CONFIG_PATH = os.path.expanduser("~") + '/.config/vos/vos-config' - # Pattern matching in filenames to extract out the RA/DEC/RADIUS part -FILENAME_PATTERN_MAGIC = re.compile(r'^(?P[/_\-=+!,;:@&*$.\w~]*)' # legal filename string - r'(?P' # Look for a cutout part - r'(?P(\[\d*:?\d*\])?(\[[+-]?\*?\d*:?[+-]?\d*,?[+-]?\*?\d*:?[+-]?\d*\]))' # pixel - r'|' # OR - r'(?P' # possible wcs cutout - r'\((?P[+]?\d*(\.\d*)?),' # ra part - r'(?P[\-+]?\d*(\.\d*)?),' # dec part - r'(?P\d*(\.\d*)?)\))' # radius of cutout - r')?$' - ) +FILENAME_PATTERN_MAGIC = re.compile( + r'^(?P[/_\-=+!,;:@&*$.\w~]*)' # legal filename string + r'(?P' # Look for a cutout part + r'(?P(\[\d*:?\d*\])?' + r'(\[[+-]?\*?\d*:?[+-]?\d*,?[+-]?\*?\d*:?[+-]?\d*\]))' # pixel + r'|' # OR + r'(?P' # possible wcs cutout + r'\((?P[+]?\d*(\.\d*)?),' # ra part + r'(?P[\-+]?\d*(\.\d*)?),' # dec part + r'(?P\d*(\.\d*)?)\))' # radius of cutout + r')?$' + ) MAGIC_GLOB_CHECK = re.compile('[*?[]') try: @@ -130,14 +135,18 @@ class URLParser(object): There is a difference between the 2.5 and 2.7 version of the urlparse.urlparse command, so here I roll my own... """ - # TODO - ad: since 2.5 is no longer supported maybe it's time to get rid of this + + # TODO - ad: since 2.5 is no longer supported maybe it's time to get + # rid of this def __init__(self, url): self.scheme = None self.netloc = None self.args = None self.path = None - m = re.match("(^(?P[a-zA-Z]*):)?(//(?P(?P[^!~]*)[!~](?P[^/]*)))?" - "(?P/?[^?]*)?(?P\?.*)?", url) + m = re.match( + "(^(?P[a-zA-Z]*):)?" + "(//(?P(?P[^!~]*)[!~](?P[^/]*)))?" + "(?P/?[^?]*)?(?P\?.*)?", url) self.scheme = m.group('scheme') self.netloc = m.group('netloc') self.server = m.group('server') @@ -153,7 +162,8 @@ def __str__(self): class Connection(object): """Class to hold and act on the X509 certificate""" - def __init__(self, vospace_certfile=None, vospace_token=None, http_debug=False, + def __init__(self, vospace_certfile=None, vospace_token=None, + http_debug=False, resource_id=vos_config.get('vos', 'resourceID')): """Setup the Certificate for later usage @@ -161,14 +171,18 @@ def __init__(self, vospace_certfile=None, vospace_token=None, http_debug=False, ${HOME}/.ssl or a temporary filename vospace_token -- token string (alternative to vospace_certfile) http_debug -- set True to generate debug statements (Deprecated) - resource_id -- The resource ID of the vospace service. Defaults to CADC vos. + resource_id -- The resource ID of the vospace service. Defaults to + CADC vos. - If the user supplies an empty vospace_certificate, the connection will be 'anonymous'. 
- If no certificate or token are provided, and attempt to find user/password combination - in the .netrc file is made before the connection is downgraded to 'anonymous' + If the user supplies an empty vospace_certificate, the connection + will be 'anonymous'. If no certificate or token are provided, and + attempt to find user/password combination in the .netrc file is made + before the connection is downgraded to 'anonymous' """ if http_debug is not False: - warnings.warn("Connection object no longer uses http_debug setting.", DeprecationWarning) + warnings.warn( + "Connection object no longer uses http_debug setting.", + DeprecationWarning) self.vo_token = None session_headers = None self.resource_id = resource_id @@ -187,18 +201,26 @@ def __init__(self, vospace_certfile=None, vospace_token=None, http_debug=False, "Could not access certificate at {0}.".format(cert)) cert = None else: - logger.debug('Authenticate with cert {}'.format(vospace_certfile)) + logger.debug( + 'Authenticate with cert {}'.format(vospace_certfile)) self.subject = net.Subject(certificate=vospace_certfile) if cert is None: - if os.access(os.path.join(os.environ['HOME'], ".netrc"), os.F_OK): - logger.debug('Authenticate with user/password from $HOME/.netrc file') + if os.access(os.path.join(os.environ['HOME'], ".netrc"), + os.F_OK): + logger.debug( + ('Authenticate with user/password ' + 'from $HOME/.netrc file')) self.subject = net.Subject(netrc=True) else: - logger.warning('No valid authentication found. Reverting to anonymous.') + logger.warning( + ('No valid authentication found. ' + 'Reverting to anonymous.')) self.subject = net.Subject() - self.ws_client = net.BaseWsClient(resource_id, self.subject, 'vos/' + version, - host=os.getenv('VOSPACE_WEBSERVICE', None), + self.ws_client = net.BaseWsClient(resource_id, self.subject, + 'vos/' + version, + host=os.getenv('VOSPACE_WEBSERVICE', + None), session_headers=session_headers) EndPoints.subject = self.subject @@ -213,7 +235,8 @@ def get_connection(self, url=None): :param url: a VOSpace uri """ if url is not None: - raise OSError(errno.ENOSYS, "Connections are no longer set per URL.") + raise OSError(errno.ENOSYS, + "Connections are no longer set per URL.") return self.ws_client @@ -243,7 +266,8 @@ def __init__(self, node, node_type=None, properties=None, subnodes=None): if node is a string then create a node named node of nodeType with properties - :param node: the name of the node to create or a string representing that node + :param node: the name of the node to create or a string representing + that node """ self.uri = None self.name = None @@ -302,7 +326,9 @@ def update(self): self.is_public = False if self.props.get('ispublic', 'false') == 'true': self.is_public = True - logger.debug("{0} {1} -> {2}".format(self.uri, VO_PROPERTY_URI_ISLOCKED, self.props)) + logger.debug( + "{0} {1} -> {2}".format(self.uri, VO_PROPERTY_URI_ISLOCKED, + self.props)) self.groupwrite = self.props.get('groupwrite', '') self.groupread = self.props.get('groupread', '') logger.debug("Setting file attributes via setattr") @@ -319,7 +345,8 @@ def set_property(self, key, value): properties = self.node.find(Node.PROPERTIES) uri = "%s#%s" % (Node.IVOAURL, key) ElementTree.SubElement(properties, Node.PROPERTY, - attrib={'uri': uri, 'readOnly': 'false'}).text = value + attrib={'uri': uri, + 'readOnly': 'false'}).text = value def __str__(self): """Convert the Node to a string representation of the Node""" @@ -354,7 +381,8 @@ def setattr(self, attr=None): else: # mktime is expecting a localtime 
but we're sending a UT date, so # some correction will be needed - modified_time = convert_vospace_time_to_seconds(self.props.get('date')) + modified_time = convert_vospace_time_to_seconds( + self.props.get('date')) self.attr['st_ctime'] = attr.get('st_ctime', modified_time) self.attr['st_mtime'] = attr.get('st_mtime', modified_time) @@ -366,14 +394,16 @@ def setattr(self, attr=None): if self.type == 'vos:ContainerNode': st_mode |= stat.S_IFDIR st_nlink = max(2, len(self.get_info_list()) + 2) - # if getInfoList length is < 0 we have a problem elsewhere, so above hack solves that problem. + # if getInfoList length is < 0 we have a problem elsewhere, so + # above hack solves that problem. elif self.type == 'vos:LinkNode': st_mode |= stat.S_IFLNK else: st_mode |= stat.S_IFREG self.attr['st_nlink'] = st_nlink - # Set the OWNER permissions: all vospace Nodes have read/write/execute by owner + # Set the OWNER permissions: all vospace Nodes have read/write/execute + # by owner st_mode |= stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR # Set the GROUP permissions @@ -391,8 +421,9 @@ def setattr(self, attr=None): self.attr['st_mode'] = attr.get('st_mode', st_mode) - # We set the owner and group bits to be those of the currently running process. - # This is a hack since we don't have an easy way to figure these out. + # We set the owner and group bits to be those of the currently running + # process. This is a hack since we don't have an easy way to figure + # these out. # TODO Come up with a better approach to uid setting self.attr['st_uid'] = attr.get('st_uid', os.getuid()) self.attr['st_gid'] = attr.get('st_uid', os.getgid()) @@ -403,12 +434,16 @@ def setattr(self, attr=None): self.attr['st_blocks'] = self.attr['st_size'] // 512 def setxattr(self, attrs=None): - """Initialize the extended attributes using the Node properties that are not part of the core set. + """Initialize the extended attributes using the Node properties that + are not part of the core set. - :param attrs: An input list of attributes being sent from an external source, not supported. + :param attrs: An input list of attributes being sent from an external + source, not supported. """ if attrs is not None: - raise OSError(errno.ENOSYS, "No externally set extended Attributes for vofs yet.") + raise OSError( + errno.ENOSYS, + "No externally set extended Attributes for vofs yet.") for key in self.props: if key in Client.vosProperties: @@ -425,7 +460,8 @@ def chwgrp(self, group): """ logger.debug("Setting groups to: {0}".format(group)) if group is not None and len(group.split()) > 3: - raise AttributeError("Exceeded max of 4 write groups: {0}<-".format(group.split())) + raise AttributeError( + "Exceeded max of 4 write groups: {0}<-".format(group.split())) self.groupwrite = group return self.change_prop('groupwrite', group) @@ -436,7 +472,8 @@ def chrgrp(self, group): :type group: unicode """ if group is not None and len(group.split()) > 3: - raise AttributeError("Exceeded max of 4 read groups: {0}<-".format(group)) + raise AttributeError( + "Exceeded max of 4 read groups: {0}<-".format(group)) self.groupread = group return self.change_prop('groupread', group) @@ -453,7 +490,8 @@ def set_public(self, value): def fix_prop(prop): """Check if prop is a well formed uri and if not then make into one - :param prop: the property to expand into a IVOA uri value for a property. + :param prop: the property to expand into a IVOA uri value for a + property. 
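For readers following the setattr changes above, here is a minimal standalone sketch of how a VOSpace node's type and sharing flags can be folded into POSIX mode bits. It is illustrative only, not the exact logic in vos.py; in particular the group and public bits are assumptions based on the comments in this hunk.

import stat

def sketch_mode(node_type, is_public, groupread, groupwrite):
    # ContainerNode -> directory, LinkNode -> symlink, anything else -> regular file
    if node_type == 'vos:ContainerNode':
        st_mode = stat.S_IFDIR
    elif node_type == 'vos:LinkNode':
        st_mode = stat.S_IFLNK
    else:
        st_mode = stat.S_IFREG
    # every VOSpace node is read/write/execute by its owner
    st_mode |= stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
    # group/other bits below are assumptions for illustration only
    if groupread:
        st_mode |= stat.S_IRGRP
    if groupwrite:
        st_mode |= stat.S_IWGRP
    if is_public:
        st_mode |= stat.S_IROTH
    return st_mode

# e.g. oct(sketch_mode('vos:ContainerNode', True, 'somegroup', '')) -> a drwxr-... style mode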
:rtype unicode """ (url, tag) = splittag(prop) @@ -569,16 +607,21 @@ def chmod(self, mode): # logger.debug("%d -> %s" % (changed, changed>0)) return changed > 0 - def create(self, uri, node_type="vos:DataNode", properties=None, subnodes=None): - """Build the XML needed to represent a VOSpace node returns an ElementTree representation of the XML + def create(self, uri, node_type="vos:DataNode", properties=None, + subnodes=None): + """Build the XML needed to represent a VOSpace node returns an + ElementTree representation of the XML :param uri: The URI for this node. :type uri: str - :param node_type: the type of VOSpace node, likely one of vos:DataNode, vos:ContainerNode, vos:LinkNode + :param node_type: the type of VOSpace node, likely one of + vos:DataNode, vos:ContainerNode, vos:LinkNode :type node_type: unicode - :param properties: a dictionary of the node properties, keys should be single words from the IVOA list + :param properties: a dictionary of the node properties, keys should + be single words from the IVOA list :type properties: dict - :param subnodes: Any children to attach to this node, only valid for vos:ContainerNode + :param subnodes: Any children to attach to this node, only valid + for vos:ContainerNode :type subnodes: [Node] """ if not subnodes: @@ -601,11 +644,13 @@ def create(self, uri, node_type="vos:DataNode", properties=None, subnodes=None): properties['type'] = mimetypes.guess_type(uri)[0] properties_node = ElementTree.SubElement(node, Node.PROPERTIES) for prop in properties.keys(): - property_node = ElementTree.SubElement(properties_node, Node.PROPERTY) + property_node = ElementTree.SubElement(properties_node, + Node.PROPERTY) property_node.attrib['readOnly'] = "false" property_node.attrib["uri"] = self.fix_prop(prop) if properties[prop] is None: - # Setting the property value to None indicates that this is actually a delete + # Setting the property value to None indicates that this is + # actually a delete property_node.attrib['xsi:nil'] = 'true' property_node.attrib["xmlns:xsi"] = Node.XSINS property_node.text = "" @@ -622,12 +667,15 @@ def create(self, uri, node_type="vos:DataNode", properties=None, subnodes=None): ElementTree.SubElement(accepts, "view").attrib['uri'] = VO_VIEW_DEFAULT provides = ElementTree.SubElement(node, Node.PROVIDES) - ElementTree.SubElement(provides, "view").attrib['uri'] = VO_VIEW_DEFAULT - ElementTree.SubElement(provides, "view").attrib['uri'] = CADC_VO_VIEWS['rss'] + ElementTree.SubElement(provides, "view").attrib[ + 'uri'] = VO_VIEW_DEFAULT + ElementTree.SubElement(provides, "view").attrib['uri'] = CADC_VO_VIEWS[ + 'rss'] # Only DataNode can have a dataview... if node_type == "vos:DataNode": - ElementTree.SubElement(provides, "view").attrib['uri'] = CADC_VO_VIEWS['data'] + ElementTree.SubElement(provides, "view").attrib['uri'] = \ + CADC_VO_VIEWS['data'] # if this is a container node then add directory contents if node_type == "vos:ContainerNode": @@ -739,7 +787,8 @@ def clear_properties(self): def get_info_list(self): """ :rtype [(Node, dict)] - :return a list of tuples containing the (NodeName, Info) about the node and its childern + :return a list of tuples containing the (NodeName, Info) about the + node and its childern """ info = {} for node in self.node_list: @@ -749,17 +798,20 @@ def get_info_list(self): return info.items() def set_props(self, props): - """Set the SubElement Node PROPERTY values of the given xmlx ELEMENT provided using the Nodes props dictionary. 
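The create/set_property code above builds node XML by hand with ElementTree, expanding property names to a "uri#tag" form and marking a None value with xsi:nil to request deletion. A small self-contained sketch of that pattern follows; the URI constants and plain tag names here are placeholders, not necessarily the namespaced values vos.py uses.

import xml.etree.ElementTree as ElementTree

IVOAURL = "ivo://ivoa.net/vospace/core"  # placeholder base URI
XSINS = "http://www.w3.org/2001/XMLSchema-instance"

def sketch_properties_element(props):
    properties = ElementTree.Element("properties")
    for key, value in props.items():
        prop = ElementTree.SubElement(
            properties, "property",
            attrib={"uri": "{0}#{1}".format(IVOAURL, key),
                    "readOnly": "false"})
        if value is None:
            # a None value means "delete this property" on the server side
            prop.attrib["xsi:nil"] = "true"
            prop.attrib["xmlns:xsi"] = XSINS
            prop.text = ""
        else:
            prop.text = value
    return properties

# ElementTree.tostring(sketch_properties_element({"ispublic": "true", "description": None}))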
+ """Set the SubElement Node PROPERTY values of the given xmlx ELEMENT + provided using the Nodes props dictionary. :param props: the xmlx element to set the Node PROPERTY of. """ for property_node in props.findall(Node.PROPERTY): - self.props[self.get_prop_name(property_node.get('uri'))] = self.get_prop_value(property_node) + self.props[self.get_prop_name( + property_node.get('uri'))] = self.get_prop_value(property_node) return @staticmethod def get_prop_name(prop): - """parse the property uri and get the name of the property (strips off the url and just returns the tag) + """parse the property uri and get the name of the property (strips off + the url and just returns the tag) if this is an IVOA property, otherwise sends back the entry uri. :param prop: the uri of the property to get the name of. @@ -799,7 +851,8 @@ class VOFile(object): retryCodes = (503, 408, 504, 412) def __init__(self, url_list, connector, method, size=None, - follow_redirect=True, byte_range=None, possible_partial_read=False): + follow_redirect=True, byte_range=None, + possible_partial_read=False): self.closed = True assert isinstance(connector, Connection) self.connector = connector @@ -839,7 +892,8 @@ def __init__(self, url_list, connector, method, size=None, self.trans_encode = None # open the connection self._fobj = None - self.open(self.URLs[self.urlIndex], method, byte_range=byte_range, possible_partial_read=possible_partial_read) + self.open(self.URLs[self.urlIndex], method, byte_range=byte_range, + possible_partial_read=possible_partial_read) def tell(self): return self._fpos @@ -879,10 +933,12 @@ def close(self): def checkstatus(self, codes=(200, 201, 202, 206, 302, 303, 503, 404, 416, 416, 402, 408, 412, 504)): - """check the response status. If the status code doesn't match a value from the codes list then + """check the response status. If the status code doesn't match a + value from the codes list then raise an Exception. - :param codes: a list of http status_codes that are NOT failures but require some additional action. + :param codes: a list of http status_codes that are NOT failures but + require some additional action. 
""" if self.resp is None: return @@ -892,23 +948,28 @@ def checkstatus(self, codes=(200, 201, 202, 206, 302, 303, 503, 404, 416, 409: "Conflict", 423: "Locked", 408: "Connection Timeout"} - logger.debug("status %d for URL %s" % (self.resp.status_code, self.url)) + logger.debug( + "status %d for URL %s" % (self.resp.status_code, self.url)) if self.resp.status_code not in codes: logger.debug("Got status code: %s for %s" % (self.resp.status_code, self.url)) msg = self.resp.text if msg is not None: - msg = html2text.html2text(msg, self.url).strip().replace('\n', ' ') + msg = html2text.html2text(msg, self.url).strip().replace('\n', + ' ') logger.debug("Error message: {0}".format(msg)) - if self.resp.status_code in VOFile.errnos.keys() or (msg is not None and "Node is busy" in msg): - if msg is None or len(msg) == 0 and self.resp.status_code in msgs: + if self.resp.status_code in VOFile.errnos.keys() or ( + msg is not None and "Node is busy" in msg): + if msg is None or len( + msg) == 0 and self.resp.status_code in msgs: msg = msgs[self.resp.status_code] if (self.resp.status_code == 401 and self.connector.subject.anon and self.connector.vo_token is None): msg += " using anonymous access " - exception = OSError(VOFile.errnos.get(self.resp.status_code, self.resp.status_code), msg) + exception = OSError(VOFile.errnos.get(self.resp.status_code, + self.resp.status_code), msg) if self.resp.status_code == 500 and "read-only" in msg: exception = OSError(errno.EPERM, "VOSpace in read-only mode.") raise exception @@ -917,7 +978,10 @@ def checkstatus(self, codes=(200, 201, 202, 206, 302, 303, 503, 404, 416, # fallback to work around a server-side Java bug that limits # 'Content-Length' to a signed 32-bit integer (~2 gig files) try: - self.size = int(self.resp.headers.get("Content-Length", self.resp.headers.get(HEADER_CONTENT_LENGTH, 0))) + self.size = int(self.resp.headers.get("Content-Length", + self.resp.headers.get( + HEADER_CONTENT_LENGTH, + 0))) except ValueError: self.size = 0 @@ -927,15 +991,18 @@ def checkstatus(self, codes=(200, 201, 202, 206, 302, 303, 503, 404, 416, return True - def open(self, url, method="GET", byte_range=None, possible_partial_read=False): + def open(self, url, method="GET", byte_range=None, + possible_partial_read=False): """Open a connection to the given URL :param url: The URL to be opened :type url: unicode :param method: HTTP Method to use on open (PUT/GET/POST) :type method: unicode - :param byte_range: The range of byte_range to read, This is in open so we can set the header parameter. + :param byte_range: The range of byte_range to read, This is in open so + we can set the header parameter. :type byte_range: unicode - :param possible_partial_read: Sometimes we kill during read, this tells the server that isn't an error. + :param possible_partial_read: Sometimes we kill during read, this + tells the server that isn't an error. :type possible_partial_read: bool """ logger.debug("Opening %s (%s)" % (url, method)) @@ -993,7 +1060,8 @@ def get_file_info(self): def read(self, size=None, return_response=False): """return size bytes from the connection response - :param return_response: should we return the response object or the bytes read? + :param return_response: should we return the response object or the + bytes read? :param size: number of bytes to read from the file. 
""" @@ -1001,11 +1069,15 @@ def read(self, size=None, return_response=False): # this is original retry flag of the session orig_retry_flag = self.connector.session.retry try: - if (len(self.URLs) > 1) and (self.urlIndex < len(self.URLs) - 1): - # there is more urls to try so don't bother retrying on transient errors + if (len(self.URLs) > 1) and\ + (self.urlIndex < len(self.URLs) - 1): + # there is more urls to try so don't bother retrying on + # transient errors # return instead and try the next url self.connector.session.retry = False - self.resp = self.connector.session.send(self.request, stream=True, verify=False) + self.resp = self.connector.session.send(self.request, + stream=True, + verify=False) except exceptions.HttpException as http_exception: # this is the path for all status_codes between 400 and 600 if http_exception.orig_exception is not None: @@ -1016,9 +1088,12 @@ def read(self, size=None, return_response=False): self.checkstatus() - if isinstance(http_exception, exceptions.UnauthorizedException) or\ - isinstance(http_exception, exceptions.BadRequestException) or\ - isinstance(http_exception, exceptions.ForbiddenException): + if isinstance(http_exception, + exceptions.UnauthorizedException) or \ + isinstance(http_exception, + exceptions.BadRequestException) or \ + isinstance(http_exception, + exceptions.ForbiddenException): raise # Note: 404 (File Not Found) might be returned when: @@ -1046,7 +1121,10 @@ def read(self, size=None, return_response=False): # fallback to work around a server-side Java bug that limits # 'Content-Length' to a signed 32-bit integer (~2 gig files) try: - self.size = int(self.resp.headers.get("Content-Length", self.resp.headers.get(HEADER_CONTENT_LENGTH, 0))) + self.size = int(self.resp.headers.get("Content-Length", + self.resp.headers.get( + HEADER_CONTENT_LENGTH, + 0))) except Exception: self.size = 0 @@ -1063,7 +1141,8 @@ def read(self, size=None, return_response=False): return self.resp else: buff = self.resp.raw.read(size) - size = size is not None and size < len(buff) and size or len(buff) + size = size is not None and size < len(buff) and size or len( + buff) # logger.debug("Sending back {0} bytes".format(size)) return buff[:size] elif self.resp.status_code == 303 or self.resp.status_code == 302: @@ -1071,14 +1150,18 @@ def read(self, size=None, return_response=False): logger.debug("Got redirect URL: {0}".format(url)) self.url = url if not url: - raise OSError(errno.ENOENT, - "Got 303 on {0} but no Location value in header? [{1}]".format(self.url, - self.resp.content), - self.url) + raise OSError( + errno.ENOENT, + "Got 303 on {0} but no Location value in header? [{1}]". + format(self.url, self.resp.content), + self.url) if self.followRedirect: - # We open this new URL without the byte range and partial read as we are following a service - # redirect and that service redirect is to the object that satisfies the original request. - # TODO seperate out making the transfer reqest and reading the response content. + # We open this new URL without the byte range and partial + # read as we are following a service redirect and that service + # redirect is to the object that satisfies the original + # request. + # TODO seperate out making the transfer reqest and reading + # the response content. 
self.open(url, "GET") # logger.debug("Following redirected URL: %s" % (URL)) return self.read(size) @@ -1089,7 +1172,8 @@ def read(self, size=None, return_response=False): # start from top of URLs with a delay self.urlIndex = 0 - logger.error("Servers busy {0} for {1}".format(self.resp.status_code, self.URLs)) + logger.error("Servers busy {0} for {1}".format(self.resp.status_code, + self.URLs)) msg = self.resp.text if msg is not None: msg = html2text.html2text(msg, self.url).strip() @@ -1116,10 +1200,11 @@ def read(self, size=None, return_response=False): self.resp = None return self.read(size) else: - raise OSError(self.resp.status_code, - "failed to connect to server after multiple attempts {0} {1}".format(self.resp.reason, - self.resp.status_code), - self.url) + raise OSError( + self.resp.status_code, + "failed to connect to server after multiple attempts {0} {1}". + format(self.resp.reason, self.resp.status_code), + self.url) @staticmethod def write(buf): @@ -1127,7 +1212,10 @@ def write(buf): :param buf: string to write to the file. """ - raise OSError(errno.ENOSYS, "Direct write to a VOSpaceFile is not supported, use copy instead.") + raise OSError( + errno.ENOSYS, + "Direct write to a VOSpaceFile is not supported, use " + "copy instead.") class EndPoints(object): @@ -1150,7 +1238,8 @@ def __init__(self, resource_id_uri): :type resource_id_uri: unicode """ self.resource_id = resource_id_uri - self.service = net.BaseWsClient(self.resource_id, EndPoints.subject, 'vos/' + version, + self.service = net.BaseWsClient(self.resource_id, EndPoints.subject, + 'vos/' + version, host=self.VOSPACE_WEBSERVICE) @property @@ -1164,8 +1253,9 @@ def uri(self): @property def server(self): """ - Returns the server where the __nodes__ capability is deployed. Most of the time all the capabilities - are deployed on the same server but sometimes they might not be. + Returns the server where the __nodes__ capability is deployed. Most + of the time all the capabilities are deployed on the same server but + sometimes they might not be. :return: The network location of the VOSpace server. """ return urlparse(self.nodes).netloc @@ -1197,7 +1287,8 @@ class Client(object): DWS = '/data/pub/' VO_TRANSFER_PROTOCOLS = ['http', 'https'] - # reserved vospace properties, not to be used for extended property setting + # reserved vospace properties, not to be used for extended property + # setting vosProperties = ["description", "type", "encoding", "MD5", "length", "creator", "date", "groupread", "groupwrite", "ispublic"] @@ -1215,7 +1306,8 @@ def __init__(self, vospace_certfile=None, root_node=None, conn=None, secure_get=False, vospace_token=None): """This could/should be expanded to set various defaults - :param vospace_certfile: x509 proxy certificate file location. Overrides certfile in conn. + :param vospace_certfile: x509 proxy certificate file location. + Overrides certfile in conn. :type vospace_certfile: unicode :param vospace_token: token string (alternative to vospace_certfile) :type vospace_token: unicode @@ -1223,24 +1315,29 @@ def __init__(self, vospace_certfile=None, root_node=None, conn=None, :type root_node: unicode :param conn: a connection pool object for this Client :type conn: Session - :param transfer_shortcut: if True then just assumed data web service urls + :param transfer_shortcut: if True then just assumed data web service + urls :type transfer_shortcut: bool :param http_debug: turn on http debugging. :type http_debug: bool - :param secure_get: Use HTTPS: ie. 
transfer contents of files using SSL encryption. + :param secure_get: Use HTTPS: ie. transfer contents of files using + SSL encryption. :type secure_get: bool """ if not isinstance(conn, Connection): - vospace_certfile = vospace_certfile is None and Client.VOSPACE_CERTFILE or vospace_certfile + vospace_certfile = vospace_certfile is None and\ + Client.VOSPACE_CERTFILE or vospace_certfile conn = Connection(vospace_certfile=vospace_certfile, vospace_token=vospace_token, http_debug=http_debug) self.protocol = vos_config.get('transfer', 'protocol') if self.protocol not in self.VO_TRANSFER_PROTOCOLS: - raise SystemError('Unsupported protocol {}. Valid protocols: {}. Update {}'.format( - self.protocol, self.VO_TRANSFER_PROTOCOLS, _CONFIG_PATH)) + raise SystemError( + 'Unsupported protocol {}. Valid protocols: {}. Update {}'. + format(self.protocol, self.VO_TRANSFER_PROTOCOLS, + _CONFIG_PATH)) self.conn = conn self.rootNode = root_node @@ -1265,10 +1362,13 @@ def glob(self, pathname): return list(self.iglob(pathname)) def iglob(self, pathname): - """Return an iterator which yields the paths matching a pathname pattern. + """Return an iterator which yields the paths matching a pathname + pattern. - The pattern may contain simple shell-style wildcards a la fnmatch. However, unlike fnmatch, filenames - starting with a dot are special cases that are not matched by '*' and '?' patterns. + The pattern may contain simple shell-style wildcards a la fnmatch. + However, unlike fnmatch, filenames + starting with a dot are special cases that are not matched by '*' + and '?' patterns. :param pathname: path to run glob against. :type pathname: unicode @@ -1288,8 +1388,8 @@ def iglob(self, pathname): yield name return # `os.path.split()` returns the argument itself as a dirname if it is a - # drive or UNC path. Prevent an infinite recursion if a drive or UNC path - # contains magic characters (i.e. r'\\?\C:'). + # drive or UNC path. Prevent an infinite recursion if a drive or UNC + # path contains magic characters (i.e. r'\\?\C:'). if dirname != pathname and self.has_magic(dirname): dirs = self.iglob(dirname) else: @@ -1317,8 +1417,10 @@ def glob1(self, dirname, pattern): """ if not dirname: dirname = self.rootNode - if isinstance(pattern, six.string_types) and not isinstance(dirname, six.string_types): - dirname = str(dirname).encode(sys.getfilesystemencoding() or sys.getdefaultencoding()) + if isinstance(pattern, six.string_types) and\ + not isinstance(dirname, six.string_types): + dirname = str(dirname).encode( + sys.getfilesystemencoding() or sys.getdefaultencoding()) try: names = self.listdir(dirname, force=True) except os.error: @@ -1329,24 +1431,28 @@ def glob1(self, dirname, pattern): def glob0(self, dirname, basename): if basename == '': - # `os.path.split()` returns an empty basename for paths ending with a - # directory separator. 'q*x/' should match only directories. + # `os.path.split()` returns an empty basename for paths ending + # with a directory separator. 'q*x/' should match only + # directories. 
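The read() path above cycles through candidate URLs and, when every server reports busy (503), sleeps and restarts the list until a retry limit is reached. A generic sketch of that retry-with-backoff pattern using requests; the delays, limit, and error handling here are arbitrary choices, not the values vos.py uses.

import time
import requests

def sketch_get_with_retries(urls, max_retries=5, delay=2):
    """Try each URL in turn; back off and restart the list on 503."""
    attempts = 0
    while attempts < max_retries:
        for url in urls:
            resp = requests.get(url, stream=True)
            if resp.status_code == 503:
                continue  # this replica is busy, try the next one
            resp.raise_for_status()
            return resp
        attempts += 1
        time.sleep(delay * attempts)  # simple linear backoff between passes
    raise OSError("all servers busy after {0} attempts".format(max_retries))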
if self.isdir(dirname): return [basename] else: if self.access(os.path.join(dirname, basename)): return [basename] else: - raise OSError(errno.EACCES, "Permission denied: {0}".format(os.path.join(dirname, basename))) + raise OSError(errno.EACCES, "Permission denied: {0}".format( + os.path.join(dirname, basename))) return [] def get_endpoints(self, uri): """ Returns the end points or a vospace service corresponding to an uri - The main purpose of this method is to cache the EndPoints for used services for performance reasons. + The main purpose of this method is to cache the EndPoints for used + services for performance reasons. - :param uri: uri of and entry or of a resource id for which the end points are seek + :param uri: uri of and entry or of a resource id for which the end + points are seek :return: corresponding EndPoint object """ @@ -1356,7 +1462,8 @@ def get_endpoints(self, uri): if uri_parts.netloc is None: resource_id = vos_config.get('vos', 'resourceID') else: - resource_id = 'ivo://{0}'.format(uri_parts.netloc).replace("!", "/").replace("~", "/") + resource_id = 'ivo://{0}'.format(uri_parts.netloc).replace( + "!", "/").replace("~", "/") elif uri_parts.scheme.startswith('ivo'): resource_id = uri else: @@ -1377,19 +1484,25 @@ def has_magic(s): def copy(self, source, destination, send_md5=False, disposition=False): """copy from source to destination. - One of source or destination must be a vospace location and the other must be a local location. + One of source or destination must be a vospace location and the other + must be a local location. - :param source: The source file to send to VOSpace or the VOSpace node to retrieve + :param source: The source file to send to VOSpace or the VOSpace node + to retrieve :type source: unicode - :param destination: The VOSpace location to put the file to or the local destination. + :param destination: The VOSpace location to put the file to or the + local destination. :type destination: unicode - :param send_md5: Should copy send back the md5 of the destination file or just the size? + :param send_md5: Should copy send back the md5 of the destination + file or just the size? :type send_md5: bool - :param disposition: Should the filename from content disposition be returned instead of size or MD5? + :param disposition: Should the filename from content disposition be + returned instead of size or MD5? :type disposition: bool - :raises When a network problem occurs, it raises one of the HttpException exceptions declared in the + :raises When a network problem occurs, it raises one of the + HttpException exceptions declared in the cadcutils.exceptions module - + """ # TODO: handle vospace to vospace copies. @@ -1403,10 +1516,12 @@ def copy(self, source, destination, send_md5=False, disposition=False): if source[0:4] == "vos:": if destination is None: - # Set the destination, initially, to the same directory as the source (strip the vos:) - destination = os.path.dirname(source)[4:] + # Set the destination, initially, to the same directory as + # the source (strip the vos:) + destination = os.path.dirname(source)[4:] if os.path.isdir(destination): - # We can't write to a directory so take file name from content-disposition or + # We can't write to a directory so take file name from + # content-disposition or # from filename part of source. 
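glob1/glob0 above match remote directory listings against shell-style patterns. A compact sketch of the same matching rule, including the convention that dot files are only matched by patterns that themselves start with a dot (the standard fnmatch-based behaviour of Python's own glob module):

import fnmatch

def sketch_glob1(names, pattern):
    """Filter a listing the way glob does: '*' never matches a leading dot."""
    if not pattern.startswith('.'):
        names = [n for n in names if not n.startswith('.')]
    return fnmatch.filter(names, pattern)

# sketch_glob1(['a.fits', '.hidden', 'b.txt'], '*') -> ['a.fits', 'b.txt']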
disposition = True check_md5 = False @@ -1416,9 +1531,10 @@ def copy(self, source, destination, send_md5=False, disposition=False): if cutout_match.group('pix'): cutout = cutout_match.group('pix') elif cutout_match.group('wcs') is not None: - cutout = "CIRCLE ICRS {} {} {}".format(cutout_match.group('ra'), - cutout_match.group('dec'), - cutout_match.group('rad')) + cutout = "CIRCLE ICRS {} {} {}".format( + cutout_match.group('ra'), + cutout_match.group('dec'), + cutout_match.group('rad')) else: raise ValueError("Bad source name: ".format(source)) source = cutout_match.group('filename') @@ -1428,13 +1544,15 @@ def copy(self, source, destination, send_md5=False, disposition=False): check_md5 = True source_md5 = self.get_node(source).props.get('MD5', ZERO_MD5) - - get_urls = self.get_node_url(source, method='GET', cutout=cutout, view=view) + get_urls = self.get_node_url(source, method='GET', cutout=cutout, + view=view) while not success: - # If there are no urls available, drop through to full negotiation if that wasn't already tried + # If there are no urls available, drop through to full + # negotiation if that wasn't already tried if len(get_urls) == 0: if self.transfer_shortcut and not get_node_url_retried: - get_urls = self.get_node_url(source, method='GET', cutout=cutout, view=view, + get_urls = self.get_node_url(source, method='GET', + cutout=cutout, view=view, full_negotiation=True) # remove the first one as we already tried that one. get_urls.pop(0) @@ -1443,28 +1561,38 @@ def copy(self, source, destination, send_md5=False, disposition=False): break get_url = get_urls.pop(0) try: - response = self.conn.session.get(get_url, timeout=(2, 5), stream=True) + response = self.conn.session.get(get_url, timeout=(2, 5), + stream=True) if disposition: - # Build the destination location from the content-disposition value, or source name. - content_disposition = response.headers.get('content-disposition', destination) - content_disposition = re.search('.*filename=(\S*).*', content_disposition) + # Build the destination location from the + # content-disposition value, or source name. 
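get_endpoints above derives an IVOA resource ID from the authority part of a vos URI, translating the '!' and '~' separators back into '/'. A one-function sketch of that translation; the example URI is made up for illustration.

def sketch_resource_id(vos_uri):
    """e.g. 'vos://cadc.nrc.ca!vospace/myfile' -> 'ivo://cadc.nrc.ca/vospace'"""
    netloc = vos_uri.split('://', 1)[1].split('/', 1)[0]
    return 'ivo://{0}'.format(netloc).replace('!', '/').replace('~', '/')

# sketch_resource_id('vos://cadc.nrc.ca!vospace/myfile') == 'ivo://cadc.nrc.ca/vospace'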
+ content_disposition = response.headers.get( + 'content-disposition', destination) + content_disposition = re.search('.*filename=(\S*).*', + content_disposition) if content_disposition is not None: - content_disposition = content_disposition.group(1).strip() + content_disposition = content_disposition.group( + 1).strip() else: content_disposition = os.path.split(source)[-1] if os.path.isdir(destination): - destination = os.path.join(destination, content_disposition) - source_md5 = response.headers.get('Content-MD5', source_md5) + destination = os.path.join(destination, + content_disposition) + source_md5 = response.headers.get('Content-MD5', + source_md5) response.raise_for_status() with open(destination, 'wb') as fout: - for chunk in response.iter_content(chunk_size=512 * 1024): + for chunk in response.iter_content( + chunk_size=512 * 1024): if chunk: fout.write(chunk) fout.flush() destination_size = os.stat(destination).st_size if check_md5: - destination_md5 = md5_cache.MD5Cache.compute_md5(destination) - logger.debug("{0} {1}".format(source_md5, destination_md5)) + destination_md5 = md5_cache.MD5Cache.compute_md5( + destination) + logger.debug( + "{0} {1}".format(source_md5, destination_md5)) assert destination_md5 == source_md5 success = True except Exception as ex: @@ -1481,14 +1609,17 @@ def copy(self, source, destination, send_md5=False, disposition=False): destination_md5 = None source_md5 = md5_cache.MD5Cache.compute_md5(source) if source_md5 == destination_md5: - logger.info('copy: src and dest are already the same -> update node metadata') + logger.info( + 'copy: src and dest are already the same -> ' + 'update node metadata') # post the node so that the modify time is updated self.update(destination_node) destination_size = os.stat(source).st_size elif source_md5 == ZERO_MD5: logger.info("destination: size is 0") destination_size = 0 - # TODO delete and recreate the node. Is there a better way to delete just the content of the node? + # TODO delete and recreate the node. Is there a better way + # to delete just the content of the node? if destination_md5: self.delete(destination) self.create(destination) @@ -1499,8 +1630,11 @@ def copy(self, source, destination, send_md5=False, disposition=False): while not success: if len(put_urls) == 0: if self.transfer_shortcut and not get_node_url_retried: - put_urls = self.get_node_url(destination, method='PUT', full_negotiation=True) - # remove the first one as we already tried that one. + put_urls = self.get_node_url(destination, + method='PUT', + full_negotiation=True) + # remove the first one as we already tried + # that one. put_urls.pop(0) get_node_url_retried = True else: @@ -1521,8 +1655,9 @@ def copy(self, source, destination, send_md5=False, disposition=False): break if not success: - raise OSError(errno.EFAULT, "Failed copying {0} -> {1}\n{2}".format(source, destination, - copy_failed_message)) + raise OSError(errno.EFAULT, + "Failed copying {0} -> {1}\n{2}". + format(source, destination, copy_failed_message)) if disposition: return content_disposition if send_md5: @@ -1530,9 +1665,11 @@ def copy(self, source, destination, send_md5=False, disposition=False): return destination_size def fix_uri(self, uri): - """given a uri check if the authority part is there and if it isn't then add the VOSpace authority + """given a uri check if the authority part is there and if it isn't + then add the VOSpace authority - :param uri: The string that should be parsed into a proper URI, if possible. 
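The download branch above streams the response in 512 KiB chunks and then compares the local file's MD5 against the Content-MD5 the service advertised. The same pattern in isolation, using requests and hashlib, with the chunk size and header name taken from the hunk above:

import hashlib
import requests

def sketch_download(url, destination):
    response = requests.get(url, stream=True, timeout=(2, 5))
    response.raise_for_status()
    source_md5 = response.headers.get('Content-MD5')
    md5 = hashlib.md5()
    with open(destination, 'wb') as fout:
        for chunk in response.iter_content(chunk_size=512 * 1024):
            if chunk:
                fout.write(chunk)
                md5.update(chunk)
    if source_md5 is not None and md5.hexdigest() != source_md5:
        raise OSError("MD5 mismatch for {0}".format(destination))
    return destination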
+ :param uri: The string that should be parsed into a proper URI, if + possible. """ parts = URLParser(uri) @@ -1552,7 +1689,9 @@ def fix_uri(self, uri): # Check that path name compiles with the standard logger.debug("Got value of args: {0}".format(parts.args)) if parts.args is not None and parts.args != "": - uri = urlparse.parse_qs(urlparse.urlparse(parts.args).query).get('link', None)[0] + uri = \ + urlparse.parse_qs( + urlparse.urlparse(parts.args).query).get('link', None)[0] logger.debug("Got uri: {0}".format(uri)) if uri is not None: return self.fix_uri(uri) @@ -1560,7 +1699,8 @@ def fix_uri(self, uri): # Check for filename values. path = FILENAME_PATTERN_MAGIC.match(os.path.normpath(parts.path)) if path is None or path.group('filename') is None: - raise OSError(errno.EINVAL, "Illegal vospace container name", parts.path) + raise OSError(errno.EINVAL, "Illegal vospace container name", + parts.path) logger.debug("Match : {}".format(path.groupdict())) filename = path.group('filename') @@ -1569,7 +1709,8 @@ def fix_uri(self, uri): host = parts.netloc if not host or host == '': # default host corresponds to the resource ID of the client - host = self.conn.resource_id.replace('ivo://', '').replace('/', '!') + host = self.conn.resource_id.replace('ivo://', '').replace('/', + '!') path = os.path.normpath(filename).strip('/') uri = "{0}://{1}/{2}{3}".format(parts.scheme, host, path, parts.args) @@ -1583,7 +1724,8 @@ def get_node(self, uri, limit=0, force=False): :type uri: unicode :param limit: -- load children nodes in batches of limit :type limit: int, None - :param force: force getting the node from the service, rather than returning a cached version. + :param force: force getting the node from the service, rather than + returning a cached version. 
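From a caller's point of view, all of the URL negotiation and retry logic above is hidden behind Client.copy. A hedged usage sketch, assuming the package exposes Client as vos.Client (adjust the import to wherever Client lives in your install) and using made-up node names:

from vos import Client  # assumed import path

client = Client()

# download: copy returns the size, or the MD5 string when send_md5=True
md5sum = client.copy('vos:someuser/somefile.fits',
                     '/tmp/somefile.fits', send_md5=True)

# upload: local path as source, vos URI as destination
size = client.copy('/tmp/somefile.fits', 'vos:someuser/copy_of_somefile.fits')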
:return: The VOSpace Node :rtype: Node @@ -1608,19 +1750,25 @@ def get_node(self, uri, limit=0, force=False): dom = ElementTree.parse(xml_file) node = Node(dom.getroot()) elif uri.startswith('http'): - header = self.open(None, url=uri, mode=os.O_RDONLY, head=True) + header = self.open(None, url=uri, mode=os.O_RDONLY, + head=True) header.read() - logger.debug("Got http headers: {0}".format(header.resp.headers)) - properties = {'type': header.resp.headers.get('Content-Type', 'txt'), - 'date': time.strftime( - '%Y-%m-%dT%H:%M:%S GMT', - time.strptime(header.resp.headers.get('Date', None), - '%a, %d %b %Y %H:%M:%S GMT')), - 'groupwrite': None, - 'groupread': None, - 'ispublic': URLParser(uri).scheme == 'https' and 'true' or 'false', - 'length': header.resp.headers.get('Content-Length', 0)} - node = Node(node=uri, node_type=Node.DATA_NODE, properties=properties) + logger.debug( + "Got http headers: {0}".format(header.resp.headers)) + properties = { + 'type': header.resp.headers.get('Content-Type', 'txt'), + 'date': time.strftime( + '%Y-%m-%dT%H:%M:%S GMT', + time.strptime( + header.resp.headers.get('Date', None), + '%a, %d %b %Y %H:%M:%S GMT')), + 'groupwrite': None, + 'groupread': None, + 'ispublic': URLParser( + uri).scheme == 'https' and 'true' or 'false', + 'length': header.resp.headers.get('Content-Length', 0)} + node = Node(node=uri, node_type=Node.DATA_NODE, + properties=properties) logger.debug(str(node)) else: raise OSError(2, "Bad URI {0}".format(uri)) @@ -1633,11 +1781,13 @@ def get_node(self, uri, limit=0, force=False): next_uri = None while next_uri != node.node_list[-1].uri: next_uri = node.node_list[-1].uri - xml_file = StringIO(self.open(uri, os.O_RDONLY, next_uri=next_uri, - limit=limit).read().decode('UTF-8')) + xml_file = StringIO( + self.open(uri, os.O_RDONLY, next_uri=next_uri, + limit=limit).read().decode('UTF-8')) xml_file.seek(0) next_page = Node(ElementTree.parse(xml_file).getroot()) - if len(next_page.node_list) > 0 and next_uri == next_page.node_list[0].uri: + if len(next_page.node_list) > 0 and next_uri == \ + next_page.node_list[0].uri: next_page.node_list.pop(0) node.node_list.extend(next_page.node_list) for childNode in node.node_list: @@ -1645,26 +1795,35 @@ def get_node(self, uri, limit=0, force=False): childWatch.insert(childNode) return node - def get_node_url(self, uri, method='GET', view=None, limit=None, next_uri=None, cutout=None, full_negotiation=None): - """Split apart the node string into parts and return the correct URL for this node. + def get_node_url(self, uri, method='GET', view=None, limit=None, + next_uri=None, cutout=None, full_negotiation=None): + """Split apart the node string into parts and return the correct URL + for this node. :param uri: The VOSpace uri to get an associated url for. :type uri: unicode - :param method: What will this URL be used to do: 'GET' the node, 'PUT' or 'POST' to the node or 'DELETE' it + :param method: What will this URL be used to do: 'GET' the node, + 'PUT' or 'POST' to the node or 'DELETE' it :type method: unicode - :param view: If this is a 'GET' which view of the node should the URL provide. + :param view: If this is a 'GET' which view of the node should the + URL provide. :type view: unicode - :param limit: If this is a container how many of the children should be returned? (None - Unlimited) + :param limit: If this is a container how many of the children should + be returned? 
(None - Unlimited) :type limit: int, None - :param next_uri: When getting a container we make repeated calls until all 'limit' children returned. next_uri - tells the service what was the last child uri retrieved in the previous call. + :param next_uri: When getting a container we make repeated calls + until all 'limit' children returned. next_uri tells the service what + was the last child uri retrieved in the previous call. :type next_uri: unicode - :param cutout: The cutout pattern to apply to the file at the service end: applies to view='cutout' only. + :param cutout: The cutout pattern to apply to the file at the service + end: applies to view='cutout' only. :type cutout: str, None - :param full_negotiation: Should we use the transfer UWS or do a GET and follow the redirect. + :param full_negotiation: Should we use the transfer UWS or do a GET + and follow the redirect. :type full_negotiation: bool - - :raises When a network problem occurs, it raises one of the HttpException exceptions declared in the + + :raises When a network problem occurs, it raises one of the + HttpException exceptions declared in the cadcutils.exceptions module """ uri = self.fix_uri(uri) @@ -1678,7 +1837,8 @@ def get_node_url(self, uri, method='GET', view=None, limit=None, next_uri=None, raise OSError(errno.ENOENT, "No target for link") parts = URLParser(target) if parts.scheme != "vos": - # This is not a link to another VOSpace node so lets just return the target as the url + # This is not a link to another VOSpace node so lets just + # return the target as the url url = target if cutout is not None: url = "{0}?cutout={1}".format(target, cutout) @@ -1686,7 +1846,8 @@ def get_node_url(self, uri, method='GET', view=None, limit=None, next_uri=None, logger.debug("Returning URL: {0}".format(url)) return [url] logger.debug("Getting URLs for: {0}".format(target)) - return self.get_node_url(target, method=method, view=view, limit=limit, next_uri=next_uri, + return self.get_node_url(target, method=method, view=view, + limit=limit, next_uri=next_uri, cutout=cutout, full_negotiation=full_negotiation) @@ -1698,7 +1859,8 @@ def get_node_url(self, uri, method='GET', view=None, limit=None, next_uri=None, endpoints = self.get_endpoints(uri) - # full_negotiation is an override, so it can be used to force either shortcut (false) or full negotiation (true) + # full_negotiation is an override, so it can be used to force either + # shortcut (false) or full negotiation (true) if full_negotiation is not None: do_shortcut = not full_negotiation else: @@ -1710,11 +1872,15 @@ def get_node_url(self, uri, method='GET', view=None, limit=None, next_uri=None, if not do_shortcut and method == 'PUT': return self._put(uri) - if (view == "cutout" and cutout is None) or (cutout is not None and view != "cutout"): - raise ValueError("For cutout, must specify a view=cutout and for view=cutout must specify cutout") + if (view == "cutout" and cutout is None) or ( + cutout is not None and view != "cutout"): + raise ValueError( + "For cutout, must specify a view=cutout and for view=cutout" + "must specify cutout") if method == 'GET' and view not in ['data', 'cutout']: - # This is a request for the URL of the Node, which returns an XML document that describes the node. + # This is a request for the URL of the Node, which returns an XML + # document that describes the node. 
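get_node above transparently pages through a container's children using limit/next_uri until the listing stops growing. A usage-level sketch of reading a container listing through that interface; the node name is made up and the Client import path is assumed as before.

from vos import Client  # assumed import path

client = Client()
node = client.get_node('vos:someuser/somedir', limit=500, force=True)
for name, info in node.get_info_list():
    # each entry pairs a child node name with its collected metadata
    print(name, info)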
fields = {} if limit is not None: fields['limit'] = limit @@ -1730,18 +1896,22 @@ def get_node_url(self, uri, method='GET', view=None, limit=None, next_uri=None, logger.debug('URL: {} ({})'.format(url, method)) return url - # This is the shortcut. We do a GET request on the service with the parameters sent as arguments. + # This is the shortcut. We do a GET request on the service with the + # parameters sent as arguments. direction = {'GET': 'pullFromVoSpace', 'PUT': 'pushToVoSpace'} - # On GET override the protocol to be http (faster) unless a secure_get is requested. + # On GET override the protocol to be http (faster) unless a + # secure_get is requested. protocol = { - 'GET': {'https': (self.secure_get and Client.VO_HTTPSGET_PROTOCOL) or Client.VO_HTTPGET_PROTOCOL, + 'GET': {'https': ((self.secure_get and Client.VO_HTTPSGET_PROTOCOL) + or Client.VO_HTTPGET_PROTOCOL), 'http': Client.VO_HTTPGET_PROTOCOL}, 'PUT': {'https': Client.VO_HTTPSPUT_PROTOCOL, 'http': Client.VO_HTTPPUT_PROTOCOL}} - # build the url for that will request the url that provides access to the node. + # build the url for that will request the url that provides access to + # the node. url = endpoints.transfer args = { @@ -1755,7 +1925,9 @@ def get_node_url(self, uri, method='GET', view=None, limit=None, next_uri=None, headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"} - response = self.conn.session.get(endpoints.transfer, params=args, headers=headers, allow_redirects=False) + response = self.conn.session.get(endpoints.transfer, params=args, + headers=headers, + allow_redirects=False) assert isinstance(response, requests.Response) logging.debug("Transfer Server said: {0}".format(response.content)) @@ -1785,22 +1957,26 @@ def get_node_url(self, uri, method='GET', view=None, limit=None, next_uri=None, def link(self, src_uri, link_uri): """Make link_uri point to src_uri. - :param src_uri: the existing resource, either a vospace uri or a http url + :param src_uri: the existing resource, either a vospace uri or a http + url :type src_uri: unicode - :param link_uri: the vospace node to create that will be a link to src_uri + :param link_uri: the vospace node to create that will be a link to + src_uri :type link_uri: unicode - - :raises When a network problem occurs, it raises one of the HttpException exceptions declared in the - cadcutils.exceptions module + + :raises When a network problem occurs, it raises one of the + HttpException exceptions declared in the cadcutils.exceptions module """ link_uri = self.fix_uri(link_uri) src_uri = self.fix_uri(src_uri) - # if the link_uri points at an existing directory then we try and make a link into that directory + # if the link_uri points at an existing directory then we try and + # make a link into that directory if self.isdir(link_uri): link_uri = os.path.join(link_uri, os.path.basename(src_uri)) - with self.nodeCache.volatile(src_uri), self.nodeCache.volatile(link_uri): + with self.nodeCache.volatile(src_uri), self.nodeCache.volatile( + link_uri): link_node = Node(link_uri, node_type="vos:LinkNode") ElementTree.SubElement(link_node.node, "target").text = src_uri data = str(link_node) @@ -1811,7 +1987,8 @@ def link(self, src_uri, link_uri): self.conn.session.put(url, data=data, headers={'size': str(size)}) def move(self, src_uri, destination_uri): - """Move src_uri to destination_uri. If destination_uri is a containerNode then move src_uri into destination_uri + """Move src_uri to destination_uri. 
If destination_uri is a + containerNode then move src_uri into destination_uri :param src_uri: the VOSpace node to be moved. :type src_uri: unicode @@ -1822,7 +1999,8 @@ def move(self, src_uri, destination_uri): """ src_uri = self.fix_uri(src_uri) destination_uri = self.fix_uri(destination_uri) - with self.nodeCache.volatile(src_uri), self.nodeCache.volatile(destination_uri): + with self.nodeCache.volatile(src_uri), self.nodeCache.volatile( + destination_uri): return self.transfer(src_uri, destination_uri, view='move') def _get(self, uri, view="defaultview", cutout=None): @@ -1837,10 +2015,13 @@ def transfer(self, uri, direction, view=None, cutout=None): """Build the transfer XML document :param direction: is this a pushToVoSpace or a pullFromVoSpace ? :param uri: the uri to transfer from or to VOSpace. - :param view: which view of the node (data/default/cutout/etc.) is being transferred - :param cutout: a special parameter added to the 'cutout' view request. e.g. '[0][1:10,1:10]' - - :raises When a network problem occurs, it raises one of the HttpException exceptions declared in the + :param view: which view of the node (data/default/cutout/etc.) is + being transferred + :param cutout: a special parameter added to the 'cutout' view + request. e.g. '[0][1:10,1:10]' + + :raises When a network problem occurs, it raises one of the + HttpException exceptions declared in the cadcutils.exceptions module """ endpoints = self.get_endpoints(uri) @@ -1853,7 +2034,8 @@ def transfer(self, uri, direction, view=None, cutout=None): ElementTree.SubElement(transfer_xml, "vos:direction").text = direction if view == 'move': - ElementTree.SubElement(transfer_xml, "vos:keepBytes").text = "false" + ElementTree.SubElement(transfer_xml, + "vos:keepBytes").text = "false" else: if view == 'defaultview': ElementTree.SubElement(transfer_xml, "vos:view").attrib[ @@ -1865,8 +2047,11 @@ def transfer(self, uri, direction, view=None, cutout=None): param = ElementTree.SubElement(vos_view, "vos:param") param.attrib['uri'] = CADC_VO_VIEWS[view] param.text = cutout - protocol_element = ElementTree.SubElement(transfer_xml, "vos:protocol") - protocol_element.attrib['uri'] = "{0}#{1}".format(Node.IVOAURL, protocol[direction]) + protocol_element = ElementTree.SubElement(transfer_xml, + "vos:protocol") + protocol_element.attrib['uri'] = "{0}#{1}".format(Node.IVOAURL, + protocol[ + direction]) logging.debug(ElementTree.tostring(transfer_xml)) logging.debug("Sending to : {}".format(endpoints.transfer)) @@ -1880,7 +2065,8 @@ def transfer(self, uri, direction, view=None, cutout=None): logging.debug("{0}".format(resp)) logging.debug("{0}".format(resp.text)) if resp.status_code != 303: - raise OSError(resp.status_code, "Failed to get transfer service response.") + raise OSError(resp.status_code, + "Failed to get transfer service response.") transfer_url = resp.headers.get('Location', None) if self.conn.session.auth is not None and "auth" not in transfer_url: @@ -1900,7 +2086,8 @@ def transfer(self, uri, direction, view=None, cutout=None): xml_string = self.conn.session.get(xfer_url).text logging.debug("Transfer Document: %s" % xml_string) transfer_document = ElementTree.fromstring(xml_string) - logging.debug("XML version: {0}".format(ElementTree.tostring(transfer_document))) + logging.debug( + "XML version: {0}".format(ElementTree.tostring(transfer_document))) all_protocols = transfer_document.findall(Node.PROTOCOL) if all_protocols is None or not len(all_protocols) > 0: return self.get_transfer_error(transfer_url, uri) @@ -1919,8 
+2106,9 @@ def get_transfer_error(self, url, uri): """Follow a transfer URL to the Error message :param url: The URL of the transfer request that had the error. :param uri: The uri that we were trying to transfer (get or put). - - :raises When a network problem occurs, it raises one of the HttpException exceptions declared in the + + :raises When a network problem occurs, it raises one of the + HttpException exceptions declared in the cadcutils.exceptions module """ error_codes = {'NodeNotFound': errno.ENOENT, @@ -1954,8 +2142,9 @@ def get_transfer_error(self, url, uri): slept = 0 if logger.getEffectiveLevel() == logging.INFO: while slept < sleep_time: - sys.stdout.write("\r%s %s" % (phase, - roller[total_time_slept % len(roller)])) + sys.stdout.write( + "\r%s %s" % (phase, roller[total_time_slept % + len(roller)])) sys.stdout.flush() slept += 1 total_time_slept += 1 @@ -1963,8 +2152,10 @@ def get_transfer_error(self, url, uri): sys.stdout.write("\r \n") else: time.sleep(sleep_time) - phase = self.conn.session.get(phase_url, allow_redirects=False).text - logging.debug("Async transfer Phase for url %s: %s " % (url, phase)) + phase = self.conn.session.get(phase_url, + allow_redirects=False).text + logging.debug( + "Async transfer Phase for url %s: %s " % (url, phase)) except KeyboardInterrupt: # abort the job when receiving a Ctrl-C/Interrupt from the client logging.error("Received keyboard interrupt") @@ -1984,41 +2175,53 @@ def get_transfer_error(self, url, uri): raise OSError("UWS status: {0}".format(status), errno.EFAULT) error_url = job_url + "/error" error_message = self.conn.session.get(error_url).text - logger.debug("Got transfer error {0} on URI {1}".format(error_message, uri)) - # Check if the error was that the link type is unsupported and try and follow that link. - target = re.search("Unsupported link target:(?P .*)$", error_message) + logger.debug( + "Got transfer error {0} on URI {1}".format(error_message, uri)) + # Check if the error was that the link type is unsupported and try and + # follow that link. + target = re.search("Unsupported link target:(?P .*)$", + error_message) if target is not None: return target.group('target').strip() raise OSError(error_codes.get(error_message, errno.EFAULT), "{0}: {1}".format(uri, error_message)) def open(self, uri, mode=os.O_RDONLY, view=None, head=False, url=None, - limit=None, next_uri=None, size=None, cutout=None, byte_range=None, + limit=None, next_uri=None, size=None, cutout=None, + byte_range=None, full_negotiation=False, possible_partial_read=False): """Create a VOFile connection to the specified uri or url. :rtype : VOFile - :param uri: The uri of the VOSpace resource to create a connection to, override by specifying url + :param uri: The uri of the VOSpace resource to create a connection to, + override by specifying url :type uri: unicode, None - :param mode: The mode os.O_RDONLY or os.O_WRONLY to open the connection with. + :param mode: The mode os.O_RDONLY or os.O_WRONLY to open the + connection with. :type mode: bit - :param view: The view of the VOSpace resource, one of: default, data, cutout + :param view: The view of the VOSpace resource, one of: default, data, + cutout :type view: unicode, None :param head: Just return the http header of this request. 
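The job-monitoring loop above polls the UWS phase endpoint, sleeping between polls (and spinning a progress roller at INFO level) until the job leaves the executing state. A bare-bones sketch of that polling pattern with requests; the URL layout follows the UWS convention used in this file, and the terminal phase set is an assumption.

import time
import requests

def sketch_wait_for_job(job_url, poll_interval=5):
    """Poll <job_url>/phase until the UWS job reaches a terminal phase."""
    phase_url = job_url + "/phase"
    while True:
        phase = requests.get(phase_url, allow_redirects=False).text.strip()
        if phase in ('COMPLETED', 'ERROR', 'ABORTED'):  # assumed terminal set
            return phase
        time.sleep(poll_interval)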
:type head: bool - :param url: Ignore the uri (ie don't look up the url using get_node_url) and just connect to this url + :param url: Ignore the uri (ie don't look up the url using + get_node_url) and just connect to this url :type url: unicode, None - :param limit: limit response from vospace to this many child nodes. relevant for containerNode type + :param limit: limit response from vospace to this many child nodes. + relevant for containerNode type :type limit: int, None - :param next_uri: The uri of the last child node returned by a previous request on a containerNode + :param next_uri: The uri of the last child node returned by a + previous request on a containerNode :type next_uri: unicode, None :param size: The size of file to expect or be put to VOSpace :type size: int, None :param cutout: The cutout pattern to use during a get :type cutout: unicode, None - :param byte_range: The range of bytes to request, rather than getting the entire file. + :param byte_range: The range of bytes to request, rather than getting + the entire file. :type byte_range: unicode, None - :param full_negotiation: force this interaction to use the full UWS interaction to get the url for the resource + :param full_negotiation: force this interaction to use the full UWS + interaction to get the url for the resource :type full_negotiation: bool :param possible_partial_read: """ @@ -2058,20 +2261,25 @@ def open(self, uri, mode=os.O_RDONLY, view=None, head=False, url=None, else: parts = URLParser(target) if parts.scheme == 'vos': - # This is a link to another VOSpace node so lets open that instead. - return self.open(target, mode, view, head, url, limit, - next_uri, size, cutout, byte_range) + # This is a link to another VOSpace node so lets + # open that instead. + return self.open(target, mode, view, head, url, + limit, + next_uri, size, cutout, + byte_range) else: # A target external link # TODO Need a way of passing along authentication. if cutout is not None: - target = "{0}?cutout={1}".format(target, cutout) - return VOFile([target], - self.conn, - method=method, - size=size, - byte_range=byte_range, - possible_partial_read=possible_partial_read) + target = "{0}?cutout={1}".format(target, + cutout) + return VOFile( + [target], + self.conn, + method=method, + size=size, + byte_range=byte_range, + possible_partial_read=possible_partial_read) except OSError as ose: if ose.errno in [2, 404]: pass @@ -2080,30 +2288,35 @@ def open(self, uri, mode=os.O_RDONLY, view=None, head=False, url=None, if url is None: url = self.get_node_url(uri, method=method, view=view, - limit=limit, next_uri=next_uri, cutout=cutout, + limit=limit, next_uri=next_uri, + cutout=cutout, full_negotiation=full_negotiation) if url is None: raise OSError(errno.EREMOTE) - return VOFile(url, self.conn, method=method, size=size, byte_range=byte_range, + return VOFile(url, self.conn, method=method, size=size, + byte_range=byte_range, possible_partial_read=possible_partial_read) def add_props(self, node): """Given a node structure do a POST of the XML to the VOSpace to update the node properties - Makes a new copy of current local state, then gets a copy of what's on the server and + Makes a new copy of current local state, then gets a copy of + what's on the server and then updates server with differences. :param node: the Node object to add some properties to. 
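open() above hands back a VOFile wired to whichever URL the negotiation produced. A minimal read-only usage sketch; the import path and node name are assumptions, as earlier.

import os
from vos import Client  # assumed import path

client = Client()
fh = client.open('vos:someuser/somefile.txt', mode=os.O_RDONLY, view='data')
try:
    content = fh.read()  # bytes of the whole file
finally:
    fh.close()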
- - :raises When a network problem occurs, it raises one of the HttpException exceptions declared in the + + :raises When a network problem occurs, it raises one of the + HttpException exceptions declared in the cadcutils.exceptions module """ new_props = copy.deepcopy(node.props) old_props = self.get_node(node.uri, force=True).props for prop in old_props: - if prop in new_props and old_props[prop] == new_props[prop] and old_props[prop] is not None: + if prop in new_props and old_props[prop] == new_props[prop] and \ + old_props[prop] is not None: del (new_props[prop]) node.node = node.create(node.uri, node_type=node.type, properties=new_props) @@ -2119,8 +2332,9 @@ def create(self, uri): :param uri: the Node that we are going to create on the server. :type uri: vos.Node - - :raises When a network problem occurs, it raises one of the HttpException exceptions declared in the + + :raises When a network problem occurs, it raises one of the + HttpException exceptions declared in the cadcutils.exceptions module """ fixed_uri = self.fix_uri(uri) @@ -2129,7 +2343,8 @@ def create(self, uri): url = '{}{}'.format(self.get_endpoints(fixed_uri).nodes, path) data = str(node) size = len(data) - return Node(self.conn.session.put(url, data=data, headers={'size': str(size)}).content) + return Node(self.conn.session.put(url, data=data, + headers={'size': str(size)}).content) def update(self, node, recursive=False): """Updates the node properties on the server. For non-recursive @@ -2138,9 +2353,11 @@ def update(self, node, recursive=False): be changed in the node itself as well as all its children. :param node: the node to update. - :param recursive: should this update be applied to all children? (True/False) - - :raises When a network problem occurs, it raises one of the HttpException exceptions declared in the + :param recursive: should this update be applied to all children? + (True/False) + + :raises When a network problem occurs, it raises one of the + HttpException exceptions declared in the cadcutils.exceptions module """ # Let's do this update using the async transfer method @@ -2153,7 +2370,8 @@ def update(self, node, recursive=False): resp = self.conn.session.post(property_url, allow_redirects=False, data=str(node), - headers={'Content-type': 'text/xml'}) + headers={ + 'Content-type': 'text/xml'}) except Exception as ex: logger.error(str(ex)) raise ex @@ -2162,7 +2380,8 @@ def update(self, node, recursive=False): logger.debug("Got prop-update response: {0}".format(resp.content)) transfer_url = resp.headers.get('Location', None) logger.debug("Got job status redirect: {0}".format(transfer_url)) - # logger.debug("Got back %s from $Client.VOPropertiesEndPoint " % (con)) + # logger.debug( + # "Got back %s from $Client.VOPropertiesEndPoint " % (con)) # Start the job self.conn.session.post(transfer_url + "/phase", allow_redirects=False, @@ -2178,12 +2397,14 @@ def update(self, node, recursive=False): def mkdir(self, uri): """ - Create a ContainerNode on the service. Raise OSError(EEXIST) if the container exists. + Create a ContainerNode on the service. Raise OSError(EEXIST) if the + container exists. :param uri: The URI of the ContainerNode to create on the service. 
:type uri: unicode - - :raises When a network problem occurs, it raises one of the HttpException exceptions declared in the + + :raises When a network problem occurs, it raises one of the + HttpException exceptions declared in the cadcutils.exceptions module """ uri = self.fix_uri(uri) @@ -2196,13 +2417,15 @@ def mkdir(self, uri): if http_error.response.status_code != 409: raise http_error else: - raise OSError(errno.EEXIST, 'ContainerNode {0} already exists'.format(uri)) + raise OSError(errno.EEXIST, + 'ContainerNode {0} already exists'.format(uri)) def delete(self, uri): """Delete the node :param uri: The (Container/Link/Data)Node to delete from the service. - - :raises When a network problem occurs, it raises one of the HttpException exceptions declared in the + + :raises When a network problem occurs, it raises one of the + HttpException exceptions declared in the cadcutils.exceptions module """ uri = self.fix_uri(uri) @@ -2318,9 +2541,11 @@ def access(self, uri, mode=os.O_RDONLY): if mode == os.O_RDONLY: try: self.get_node(uri, limit=0, force=True) - except (exceptions.NotFoundException, exceptions.AlreadyExistsException, - exceptions.UnauthorizedException, exceptions.ForbiddenException): - return False + except (exceptions.NotFoundException, + exceptions.AlreadyExistsException, + exceptions.UnauthorizedException, + exceptions.ForbiddenException): + return False return isinstance(self.open(uri, mode=mode), VOFile) @@ -2329,11 +2554,13 @@ def status(self, uri, code=None): This is done by checking the view=data header and seeing if you get an error. - :param uri: the VOSpace (Container/Link/Data)Node to check access status on. + :param uri: the VOSpace (Container/Link/Data)Node to check access + status on. :param code: NOT SUPPORTED. """ if not code: - raise OSError(errno.ENOSYS, "Use of 'code' option values no longer supported.") + raise OSError(errno.ENOSYS, + "Use of 'code' option values no longer supported.") self.get_node(uri) return True @@ -2342,4 +2569,5 @@ def get_job_status(self, url): :param url: the URL of the UWS job to get status of. :rtype: unicode """ - return VOFile(url, self.conn, method="GET", follow_redirect=False).read() + return VOFile(url, self.conn, method="GET", + follow_redirect=False).read()
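Putting the housekeeping calls together, a short usage sketch of the container operations defined above; the import path is assumed as before, and the error handling mirrors the EEXIST behaviour documented for mkdir.

import errno
import os
from vos import Client  # assumed import path

client = Client()
try:
    client.mkdir('vos:someuser/newdir')
except OSError as ex:
    if ex.errno != errno.EEXIST:
        raise  # anything other than "already exists" is a real failure

if client.access('vos:someuser/newdir', mode=os.O_RDONLY):
    client.delete('vos:someuser/newdir')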