diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..a38fcad5 --- /dev/null +++ b/.gitignore @@ -0,0 +1,36 @@ +#**************************************************************** +# Copyright (c) 2016 Larence Livermore National Security (LLNS) +# All rights reserved. +#**************************************************************** +# +# Organization: Lawrence Livermore National Lab (LLNL) +# Matrix: Atmospheric, Earth and Energy Division +# Project: Earth Systems Grid +# Author: Denis Nadeau +# +# Description: +# +# Files that git should ignore... +#**************************************************************** + +TAGS +*~ +*swp +*.cache +*.DS_Store +*.a +*.o +*.class +*.pyc +*.project +*.cproject +*.pydevproject +*.egg +*#* +*.md5 +semantic.cache* +build/ +Makefile +.coverage +covhtml +git.py diff --git a/Lib/__init__.py b/Lib/__init__.py index ff556f8a..cd131167 100644 --- a/Lib/__init__.py +++ b/Lib/__init__.py @@ -1,15 +1,16 @@ """ CDMS module-level API """ + import cdat_info -cdat_info.pingPCMDIdb("cdat","cdms2") +cdat_info.pingPCMDIdb("cdat", "cdms2") from . 
import git -__all__ = ["cdmsobj", "axis", "coord", "grid", "hgrid", "avariable", \ -"sliceut", "error", "variable", "fvariable", "tvariable", "dataset", \ -"database", "cache", "selectors", "MV2", "convention", "bindex", \ -"auxcoord", "gengrid", "gsHost", "gsStaticVariable", "gsTimeVariable", \ -"mvBaseWriter", "mvSphereMesh", "mvVsWriter", "mvCdmsRegrid"] +__all__ = ["cdmsobj", "axis", "coord", "grid", "hgrid", "avariable", + "sliceut", "error", "variable", "fvariable", "tvariable", "dataset", + "database", "cache", "selectors", "MV2", "convention", "bindex", + "auxcoord", "gengrid", "gsHost", "gsStaticVariable", "gsTimeVariable", + "mvBaseWriter", "mvSphereMesh", "mvVsWriter", "mvCdmsRegrid"] # Errors from error import CDMSError @@ -29,23 +30,23 @@ # Dataset functions from dataset import createDataset, openDataset, useNetcdf3, \ - getNetcdfClassicFlag, getNetcdfShuffleFlag, getNetcdfDeflateFlag, getNetcdfDeflateLevelFlag,\ - setNetcdfClassicFlag, setNetcdfShuffleFlag, setNetcdfDeflateFlag, setNetcdfDeflateLevelFlag,\ - setNetcdfUseNCSwitchModeFlag,getNetcdfUseNCSwitchModeFlag,\ - setCompressionWarnings,\ - setNetcdf4Flag, getNetcdf4Flag,\ - setNetcdfUseParallelFlag, getNetcdfUseParallelFlag, \ - getMpiRank, getMpiSize + getNetcdfClassicFlag, getNetcdfShuffleFlag, getNetcdfDeflateFlag, getNetcdfDeflateLevelFlag,\ + setNetcdfClassicFlag, setNetcdfShuffleFlag, setNetcdfDeflateFlag, setNetcdfDeflateLevelFlag,\ + setNetcdfUseNCSwitchModeFlag, getNetcdfUseNCSwitchModeFlag,\ + setCompressionWarnings,\ + setNetcdf4Flag, getNetcdf4Flag,\ + setNetcdfUseParallelFlag, getNetcdfUseParallelFlag, \ + getMpiRank, getMpiSize open = openDataset # Database functions from database import connect, Base, Onelevel, Subtree -#Selectors +# Selectors import selectors from selectors import longitude, latitude, time, level, required, \ - longitudeslice, latitudeslice, levelslice, timeslice + longitudeslice, latitudeslice, levelslice, timeslice from avariable import order2index, orderparse, 
setNumericCompatibility, getNumericCompatibility # TV @@ -65,6 +66,7 @@ except: pass -from restApi import esgfConnection,esgfDataset,FacetConnection +from restApi import esgfConnection, esgfDataset, FacetConnection MV = MV2 + diff --git a/Lib/auxcoord.py b/Lib/auxcoord.py index 24a97e8c..876c0d13 100644 --- a/Lib/auxcoord.py +++ b/Lib/auxcoord.py @@ -94,3 +94,4 @@ def __init__(self, data, typecode=None, copy=0, savespace=0, mask=None, fill_val ## internattr.initialize_internal_attributes(TransientAuxAxis1D) # Copy internal attrs from parents + diff --git a/Lib/avariable.py b/Lib/avariable.py index f15d7a06..b6a24036 100644 --- a/Lib/avariable.py +++ b/Lib/avariable.py @@ -27,7 +27,6 @@ InvalidRegion = "Invalid region: " OutOfRange = "Coordinate interval is out of range or intersection has no data: " NotImplemented = "Child of AbstractVariable failed to implement: " - _numeric_compatibility = False # Backward compatibility with numpy behavior # False: return scalars from 0-D slices # MV axis=None by default @@ -116,7 +115,11 @@ def setNumericCompatibility(mode): def getNumericCompatibility(): return _numeric_compatibility + class AbstractVariable(CdmsObj, Slab): + def info(self, flag=None, device=None): + Slab.info(self, flag, device) + def __init__ (self, parent=None, variableNode=None): """Not to be called by users. variableNode is the variable tree node, if any. 
@@ -1174,13 +1177,19 @@ def _process_specs (self, specs, keys): """ myrank = self.rank() nsupplied = len(specs) - if Ellipsis in specs: + # numpy will broadcast if we have a new axis in specs + # --------------------------------------------------- + if (numpy.newaxis in specs): + nnewaxis = 1 + else: + nnewaxis = 0 + + if (Ellipsis in specs): nellipses = 1 - elif numpy.newaxis in specs: - raise CDMSError, 'Sorry, you cannot use NewAxis in this context ' + str(specs) else: nellipses = 0 - if nsupplied-nellipses > myrank: + + if nsupplied-nellipses-nnewaxis > myrank: raise CDMSError, InvalidRegion + \ "too many dimensions: %d, for variable %s"%(len(specs),self.id) @@ -1188,7 +1197,7 @@ def _process_specs (self, specs, keys): i = 0 j = 0 while i < nsupplied: - if specs[i] is Ellipsis: + if (specs[i] is Ellipsis) or (specs[i] is numpy.newaxis): j = myrank - (nsupplied - (i+1)) else: speclist[j] = specs[i] diff --git a/Lib/dataset.py b/Lib/dataset.py index 977b1d8b..92e7a241 100644 --- a/Lib/dataset.py +++ b/Lib/dataset.py @@ -1,17 +1,17 @@ -## Automatically adapted for numpy.oldnumeric Aug 01, 2007 by foo -## Further modified to be pure new numpy June 24th 2008 +# Automatically adapted for numpy.oldnumeric Aug 01, 2007 by foo +# Further modified to be pure new numpy June 24th 2008 """ CDMS dataset and file objects""" from error import CDMSError import Cdunif import numpy import cdmsNode -import os, sys +import os +import sys import string import urllib import cdmsURLopener # Import after urllib, to handle errors import urlparse -## import internattr import cdmsobj import re from CDMLParser import CDMLParser @@ -32,7 +32,7 @@ # Default is serial mode until setNetcdfUseParallelFlag(1) is called rk = 0 sz = 1 -Cdunif.CdunifSetNCFLAGS("use_parallel",0) +Cdunif.CdunifSetNCFLAGS("use_parallel", 0) CdMpi = False try: @@ -76,8 +76,32 @@ class DuplicateAxisError(CDMSError): _FilePath = r"([^\s\]\',]+)" # Two file map patterns, _IndexList4 is the original one, _IndexList5 
supports # forecast data too... -_IndexList4 = re.compile(_ListStartPat+_IndexPat+_ListSepPat+_IndexPat+_ListSepPat+_IndexPat+_ListSepPat+_IndexPat+_ListSepPat+_FilePath+_ListEndPat) -_IndexList5 = re.compile(_ListStartPat+_IndexPat+_ListSepPat+_IndexPat+_ListSepPat+_IndexPat+_ListSepPat+_IndexPat+_ListSepPat+_IndexPat+_ListSepPat+_FilePath+_ListEndPat) +_IndexList4 = re.compile( + _ListStartPat + + _IndexPat + + _ListSepPat + + _IndexPat + + _ListSepPat + + _IndexPat + + _ListSepPat + + _IndexPat + + _ListSepPat + + _FilePath + + _ListEndPat) +_IndexList5 = re.compile( + _ListStartPat + + _IndexPat + + _ListSepPat + + _IndexPat + + _ListSepPat + + _IndexPat + + _ListSepPat + + _IndexPat + + _ListSepPat + + _IndexPat + + _ListSepPat + + _FilePath + + _ListEndPat) _NPRINT = 20 _showCompressWarnings = True @@ -94,48 +118,50 @@ def setCompressionWarnings(value=None): global _showCompressWarnings if value is None: value = not _showCompressWarnings - if isinstance(value,str): - if not value.slower() in ['y','n','yes','no']: + if isinstance(value, str): + if not value.slower() in ['y', 'n', 'yes', 'no']: raise CDMSError("setCompressionWarnings flags must be yes/no or 1/0, or None to invert it") - if value.lower()[0]=='y': + if value.lower()[0] == 'y': value = 1 else: value = 0 - if not isinstance(value, (int,bool)): + if not isinstance(value, (int, bool)): raise CDMSError("setCompressionWarnings flags must be yes/no or 1/0, or None to invert it") - if value in [1,True]: + if value in [1, True]: _showCompressWarnings = True - elif value in [0,False]: + elif value in [0, False]: _showCompressWarnings = False else: raise CDMSError("setCompressionWarnings flags must be yes\/no or 1\/0, or None to invert it") return _showCompressWarnings + def setNetcdfUseNCSwitchModeFlag(value): """ Tells cdms2 to switch constantly between netcdf define/write modes""" - if value not in [True,False,0,1]: + if value not in [True, False, 0, 1]: raise CDMSError("Error UseNCSwitchMode flag must 
be 1(can use)/0(do not use) or true/False") - if value in [0,False]: - Cdunif.CdunifSetNCFLAGS("use_define_mode",0) + if value in [0, False]: + Cdunif.CdunifSetNCFLAGS("use_define_mode", 0) else: - Cdunif.CdunifSetNCFLAGS("use_define_mode",1) + Cdunif.CdunifSetNCFLAGS("use_define_mode", 1) + def setNetcdfUseParallelFlag(value): """ Sets NetCDF classic flag value""" global CdMpi - if value not in [True,False,0,1]: + if value not in [True, False, 0, 1]: raise CDMSError("Error UseParallel flag must be 1(can use)/0(do not use) or true/False") - if value in [0,False]: - Cdunif.CdunifSetNCFLAGS("use_parallel",0) + if value in [0, False]: + Cdunif.CdunifSetNCFLAGS("use_parallel", 0) else: - Cdunif.CdunifSetNCFLAGS("use_parallel",1) + Cdunif.CdunifSetNCFLAGS("use_parallel", 1) CdMpi = True if not MPI.Is_initialized(): MPI.Init() - rk = MPI.COMM_WORLD.Get_rank() + def getMpiRank(): ''' Return number of processor available ''' @@ -145,6 +171,7 @@ def getMpiRank(): else: return 0 + def getMpiSize(): if CdMpi: sz = MPI.COMM_WORLD.Get_size() @@ -152,77 +179,91 @@ def getMpiSize(): else: return 1 + def setNetcdf4Flag(value): """ Sets NetCDF classic flag value""" - if value not in [True,False,0,1]: + if value not in [True, False, 0, 1]: raise CDMSError("Error NetCDF4 flag must be 1/0 or true/False") - if value in [0,False]: - Cdunif.CdunifSetNCFLAGS("netcdf4",0) + if value in [0, False]: + Cdunif.CdunifSetNCFLAGS("netcdf4", 0) else: - Cdunif.CdunifSetNCFLAGS("netcdf4",1) + Cdunif.CdunifSetNCFLAGS("netcdf4", 1) + -def setNetcdfClassicFlag(value): +def setNetcdfClassicFlag(value): """ Sets NetCDF classic flag value""" - if value not in [True,False,0,1]: + if value not in [True, False, 0, 1]: raise CDMSError("Error NetCDF Classic flag must be 1/0 or true/False") - if value in [0,False]: - Cdunif.CdunifSetNCFLAGS("classic",0) + if value in [0, False]: + Cdunif.CdunifSetNCFLAGS("classic", 0) else: - Cdunif.CdunifSetNCFLAGS("classic",1) + Cdunif.CdunifSetNCFLAGS("classic", 1) + -def 
setNetcdfShuffleFlag(value): +def setNetcdfShuffleFlag(value): """ Sets NetCDF shuffle flag value""" - if value not in [True,False,0,1]: + if value not in [True, False, 0, 1]: raise CDMSError("Error NetCDF Shuffle flag must be 1/0 or true/False") - if value in [0,False]: - Cdunif.CdunifSetNCFLAGS("shuffle",0) + if value in [0, False]: + Cdunif.CdunifSetNCFLAGS("shuffle", 0) else: - Cdunif.CdunifSetNCFLAGS("shuffle",1) + Cdunif.CdunifSetNCFLAGS("shuffle", 1) + def setNetcdfDeflateFlag(value): """ Sets NetCDF deflate flag value""" - if value not in [True,False,0,1]: + if value not in [True, False, 0, 1]: raise CDMSError("Error NetCDF deflate flag must be 1/0 or true/False") - if value in [0,False]: - Cdunif.CdunifSetNCFLAGS("deflate",0) + if value in [0, False]: + Cdunif.CdunifSetNCFLAGS("deflate", 0) else: - Cdunif.CdunifSetNCFLAGS("deflate",1) - + Cdunif.CdunifSetNCFLAGS("deflate", 1) + + def setNetcdfDeflateLevelFlag(value): """ Sets NetCDF deflate level flag value""" - if value not in [0,1,2,3,4,5,6,7,8,9]: + if value not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]: raise CDMSError("Error NetCDF deflate_level flag must be an integer < 10") - Cdunif.CdunifSetNCFLAGS("deflate_level",value) + Cdunif.CdunifSetNCFLAGS("deflate_level", value) + def getNetcdfUseNCSwitchModeFlag(): """ Returns NetCDF UseParallel flag value""" return Cdunif.CdunifGetNCFLAGS("use_define_mode") + def getNetcdfUseParallelFlag(): """ Returns NetCDF UseParallel flag value""" return Cdunif.CdunifGetNCFLAGS("use_parallel") + def getNetcdf4Flag(): """ Returns NetCDF4 flag value""" return Cdunif.CdunifGetNCFLAGS("netcdf4") + def getNetcdfClassicFlag(): """ Returns NetCDF classic flag value""" return Cdunif.CdunifGetNCFLAGS("classic") + def getNetcdfShuffleFlag(): """ Returns NetCDF shuffle flag value""" return Cdunif.CdunifGetNCFLAGS("shuffle") + def getNetcdfDeflateFlag(): """ Returns NetCDF deflate flag value""" return Cdunif.CdunifGetNCFLAGS("deflate") + def getNetcdfDeflateLevelFlag(): """ Returns 
NetCDF deflate level flag value""" return Cdunif.CdunifGetNCFLAGS("deflate_level") + + def useNetcdf3(): - """ Turns off (0) NetCDF flags for shuffle/defalte/defaltelevel + """ Turns off (0) NetCDF flags for shuffle/cuDa/deflatelevel Output files are generated as NetCDF3 Classic after that """ setNetcdfShuffleFlag(0) @@ -231,11 +272,13 @@ def useNetcdf3(): # Create a tree from a file path. # Returns the parse tree root node. + + def load(path): fd = open(path) text = fd.read() fd.close() - p=CDMLParser() + p = CDMLParser() p.feed(text) p.close() return p.getRoot() @@ -244,13 +287,15 @@ def load(path): # URI is of the form scheme://netloc/path;parameters?query#fragment # where fragment may be an XPointer. # Returns the parse tree root node. + + def loadURI(uri): - (scheme,netloc,path,parameters,query,fragment)=urlparse.urlparse(uri) - uripath = urlparse.urlunparse((scheme,netloc,path,'','','')) + (scheme, netloc, path, parameters, query, fragment) = urlparse.urlparse(uri) + uripath = urlparse.urlunparse((scheme, netloc, path, '', '', '')) fd = urllib.urlopen(uripath) text = fd.read() fd.close() - p=CDMLParser() + p = CDMLParser() p.feed(text) p.close() return p.getRoot() @@ -258,14 +303,18 @@ def loadURI(uri): # Create a dataset # 'path' is the XML file name, or netCDF filename for simple file create # 'template' is a string template for the datafile(s), for dataset creation -def createDataset(path,template=None): - return openDataset(path,'w',template) + + +def createDataset(path, template=None): + return openDataset(path, 'w', template) # Open an existing dataset # 'uri' is a Uniform Resource Identifier, referring to a cdunif file, XML file, # or LDAP URL of a catalog dataset entry. 
# 'mode' is 'r', 'r+', 'a', or 'w' -def openDataset(uri,mode='r',template=None,dods=1,dpath=None, hostObj=None): + + +def openDataset(uri, mode='r', template=None, dods=1, dpath=None, hostObj=None): """ Options::: mode :: (str) ('r') mode to open the file in read/write/append @@ -281,41 +330,42 @@ def openDataset(uri,mode='r',template=None,dods=1,dpath=None, hostObj=None): ::: """ uri = string.strip(uri) - (scheme,netloc,path,parameters,query,fragment)=urlparse.urlparse(uri) - if scheme in ('','file'): + (scheme, netloc, path, parameters, query, fragment) = urlparse.urlparse(uri) + if scheme in ('', 'file'): if netloc: # In case of relative path... path = netloc + path path = os.path.expanduser(path) path = os.path.normpath(os.path.join(os.getcwd(), path)) - root,ext = os.path.splitext(path) - if ext in ['.xml','.cdml']: - if mode!='r': raise ModeNotSupported(mode) + root, ext = os.path.splitext(path) + if ext in ['.xml', '.cdml']: + if mode != 'r': + raise ModeNotSupported(mode) datanode = load(path) else: # If the doesn't exist allow it to be created - ##Ok mpi has issues with bellow we need to test this only with 1 rank + # Ok mpi has issues with bellow we need to test this only with 1 rank if not os.path.exists(path): - return CdmsFile(path,mode,mpiBarrier=CdMpi) - elif mode=="w": + return CdmsFile(path, mode, mpiBarrier=CdMpi) + elif mode == "w": try: os.remove(path) except: pass - return CdmsFile(path,mode,mpiBarrier=CdMpi) - + return CdmsFile(path, mode, mpiBarrier=CdMpi) + # The file exists - file1 = CdmsFile(path,"r") + file1 = CdmsFile(path, "r") if libcf is not None: if hasattr(file1, libcf.CF_FILETYPE): if getattr(file1, libcf.CF_FILETYPE) == libcf.CF_GLATT_FILETYPE_HOST: file = gsHost.open(path, mode) - elif mode=='r' and hostObj is None: + elif mode == 'r' and hostObj is None: # helps performance on machines where file open (in CdmsFile) is costly file = file1 else: - file = CdmsFile(path, mode, hostObj = hostObj) + file = CdmsFile(path, mode, 
hostObj=hostObj) file1.close() else: file1.close() @@ -325,17 +375,18 @@ def openDataset(uri,mode='r',template=None,dods=1,dpath=None, hostObj=None): file1.close() return CdmsFile(path, mode) elif scheme in ['http', 'gridftp']: - + if (dods): - if mode!='r': raise ModeNotSupported(mode) + if mode != 'r': + raise ModeNotSupported(mode) # DODS file? try: - file = CdmsFile(uri,mode) + file = CdmsFile(uri, mode) return file - except Exception,err: - msg = "Error in DODS open of: "+uri - if os.path.exists(os.path.join(os.path.expanduser("~"),".dodsrc")): - msg+="\nYou have a .dodsrc in your HOME directory, try to remove it" + except Exception: + msg = "Error in DODS open of: " + uri + if os.path.exists(os.path.join(os.path.expanduser("~"), ".dodsrc")): + msg += "\nYou have a .dodsrc in your HOME directory, try to remove it" raise CDMSError(msg) else: try: @@ -343,8 +394,8 @@ def openDataset(uri,mode='r',template=None,dods=1,dpath=None, hostObj=None): return datanode except: datanode = loadURI(uri) - raise CDMSError("Error in loadURI of: "+uri) - + raise CDMSError("Error in loadURI of: " + uri) + else: raise SchemeNotSupported(scheme) @@ -358,13 +409,13 @@ def openDataset(uri,mode='r',template=None,dods=1,dpath=None, hostObj=None): # Note: In general, dset.datapath is relative to the URL of the # enclosing database, but here the database is null, so the # datapath should be absolute. - if dpath==None: + if dpath is None: direc = datanode.getExternalAttr('directory') head = os.path.dirname(path) if direc and os.path.isabs(direc): dpath = direc elif direc: - dpath = os.path.join(head,direc) + dpath = os.path.join(head, direc) else: dpath = head @@ -372,6 +423,8 @@ def openDataset(uri,mode='r',template=None,dods=1,dpath=None, hostObj=None): return dataset # Functions for parsing the file map. + + def parselist(text, f): """Parse a string of the form [A, A, ...]. 
f is a function which parses A and returns (A, nconsumed) @@ -380,13 +433,13 @@ def parselist(text, f): n = 0 m = _ListStart.match(text) if m is None: - raise CDMSError("Parsing cdms_filemap near "+text[0:_NPRINT]) + raise CDMSError("Parsing cdms_filemap near " + text[0:_NPRINT]) result = [] n += m.end() s, nconsume = f(text[n:]) result.append(s) n += nconsume - while 1: + while True: m = _ListSep.match(text[n:]) if m is None: break @@ -397,10 +450,11 @@ def parselist(text, f): n += nconsume m = _ListEnd.match(text[n:]) if m is None: - raise CDMSError("Parsing cdms_filemap near "+text[n:n+_NPRINT]) + raise CDMSError("Parsing cdms_filemap near " + text[n:n + _NPRINT]) n += m.end() return result, n + def parseIndexList(text): """Parse a string of the form [i,j,k,l,...,path] where i,j,k,l,... are indices or '-', and path is a filename. @@ -412,45 +466,48 @@ def parseIndexList(text): m = _IndexList5.match(text) nindices = 5 if m is None: - raise CDMSError("Parsing cdms_filemap near "+text[0:_NPRINT]) - result = [None]*(nindices+1) + raise CDMSError("Parsing cdms_filemap near " + text[0:_NPRINT]) + result = [None] * (nindices + 1) for i in range(nindices): - s = m.group(i+1) - if s!='-': + s = m.group(i + 1) + if s != '-': result[i] = string.atoi(s) - result[nindices] = m.group(nindices+1) + result[nindices] = m.group(nindices + 1) return result, m.end() + def parseName(text): m = _Name.match(text) if m is None: - raise CDMSError("Parsing cdms_filemap near "+text[0:_NPRINT]) + raise CDMSError("Parsing cdms_filemap near " + text[0:_NPRINT]) return m.group(), m.end() + def parseVarMap(text): """Parse a string of the form [ namelist, slicelist ]""" n = 0 m = _ListStart.match(text) if m is None: - raise CDMSError("Parsing cdms_filemap near "+text[0:_NPRINT]) + raise CDMSError("Parsing cdms_filemap near " + text[0:_NPRINT]) result = [] n += m.end() - s, nconsume = parselist(text[n:],parseName) + s, nconsume = parselist(text[n:], parseName) result.append(s) n += nconsume m = 
_ListSep.match(text[n:]) if m is None: - raise CDMSError("Parsing cdms_filemap near "+text[n:n+_NPRINT]) + raise CDMSError("Parsing cdms_filemap near " + text[n:n + _NPRINT]) n += m.end() s, nconsume = parselist(text[n:], parseIndexList) result.append(s) n += nconsume m = _ListEnd.match(text[n:]) if m is None: - raise CDMSError("Parsing cdms_filemap near "+text[n:n+_NPRINT]) + raise CDMSError("Parsing cdms_filemap near " + text[n:n + _NPRINT]) n += m.end() return result, n + def parseFileMap(text): """Parse a CDMS filemap. having the form: filemap :== [ varmap, varmap, ...] @@ -460,19 +517,21 @@ def parseFileMap(text): indexlist :== [i,j,k,l,path] """ result, n = parselist(text, parseVarMap) - if n grid - (scheme,netloc,xmlpath,parameters,query,fragment)=urlparse.urlparse(uri) + # Gridmap:(latname,lonname,order,maskname,gridclass) => grid + (scheme, netloc, xmlpath, parameters, query, fragment) = urlparse.urlparse(uri) self._xmlpath_ = xmlpath # Dictionary of dictionaries, keyed on node tags - self.dictdict = {'variable':self.variables, - 'axis':self.axes, - 'rectGrid':self.grids, - 'curveGrid':self.grids, - 'genericGrid':self.grids, - 'xlink':self.xlinks - } + self.dictdict = {'variable': self.variables, + 'axis': self.axes, + 'rectGrid': self.grids, + 'curveGrid': self.grids, + 'genericGrid': self.grids, + 'xlink': self.xlinks + } # Dataset IDs are external, so may not have been defined yet. 
- if not hasattr(self,'id'): - self.id='' + if not hasattr(self, 'id'): + self.id = '' self._status_ = 'open' self._convention_ = convention.getDatasetConvention(self) @@ -520,7 +579,7 @@ def __init__(self, uri, mode, datasetNode=None, parent=None, datapath=None): coordsaux = self._convention_.getDsetnodeAuxAxisIds(datasetNode) for node in datasetNode.getIdDict().values(): - if node.tag=='variable': + if node.tag == 'variable': if node.id in coordsaux: if node.getDomain().getChildCount() == 1: obj = DatasetAuxAxis1D(self, node.id, node) @@ -528,22 +587,22 @@ def __init__(self, uri, mode, datasetNode=None, parent=None, datapath=None): obj = DatasetAxis2D(self, node.id, node) else: obj = DatasetVariable(self, node.id, node) - self.variables[node.id]=obj - elif node.tag=='axis': - obj = Axis(self,node) - self.axes[node.id]=obj - elif node.tag=='rectGrid': - obj = RectGrid(self,node) - self.grids[node.id]=obj - elif node.tag=='xlink': + self.variables[node.id] = obj + elif node.tag == 'axis': + obj = Axis(self, node) + self.axes[node.id] = obj + elif node.tag == 'rectGrid': + obj = RectGrid(self, node) + self.grids[node.id] = obj + elif node.tag == 'xlink': obj = Xlink(node) - self.xlinks[node.id]=obj + self.xlinks[node.id] = obj else: dict = self.dictdict.get(node.tag) if dict is not None: - dict[node.id]=node + dict[node.id] = node else: - self.dictdict[node.tag] = {node.id:node} + self.dictdict[node.tag] = {node.id: node} # Initialize grid domains for grid in self.grids.values(): @@ -574,31 +633,31 @@ def __init__(self, uri, mode, datasetNode=None, parent=None, datapath=None): else: grid = self._gridmap_.get(gridkey) if grid is None: - if hasattr(var,'grid_type'): + if hasattr(var, 'grid_type'): gridtype = var.grid_type else: gridtype = "generic" candidateBasename = None if gridkey[4] == 'rectGrid': - gridshape = (len(lat),len(lon)) + gridshape = (len(lat), len(lon)) elif gridkey[4] == 'curveGrid': gridshape = lat.shape elif gridkey[4] == 'genericGrid': gridshape = 
lat.shape - candidateBasename = 'grid_%d'%gridshape + candidateBasename = 'grid_%d' % gridshape else: - gridshape = (len(lat),len(lon)) + gridshape = (len(lat), len(lon)) if candidateBasename is None: - candidateBasename = 'grid_%dx%d'%gridshape - if not self.grids.has_key(candidateBasename): + candidateBasename = 'grid_%dx%d' % gridshape + if candidateBasename not in self.grids: gridname = candidateBasename else: foundname = 0 - for i in range(97,123): # Lower-case letters - candidateName = candidateBasename+'_'+chr(i) - if not self.grids.has_key(candidateName): + for i in range(97, 123): # Lower-case letters + candidateName = candidateBasename + '_' + chr(i) + if candidateName not in self.grids: gridname = candidateName foundname = 1 break @@ -606,11 +665,11 @@ def __init__(self, uri, mode, datasetNode=None, parent=None, datapath=None): if not foundname: print 'Warning: cannot generate a grid for variable', var.id continue - + # Create the grid if gridkey[4] == 'rectGrid': node = cdmsNode.RectGridNode(gridname, lat.id, lon.id, gridtype, gridkey[2]) - grid = RectGrid(self,node) + grid = RectGrid(self, node) grid.initDomain(self.axes, self.variables) elif gridkey[4] == 'curveGrid': grid = DatasetCurveGrid(lat, lon, gridname, self) @@ -651,34 +710,34 @@ def __init__(self, uri, mode, datasetNode=None, parent=None, datapath=None): # but now there _may_ be an additional item before path... 
for varm1 in varmap: tstart, tend, levstart, levend = varm1[0:4] - if (len(varm1)>=6): + if (len(varm1) >= 6): forecast = varm1[4] else: forecast = None path = varm1[-1] self._filemap_[(varname, tstart, levstart, forecast)] = path if tstart is not None: - timemap[(tstart, tend)] = 1 # Collect unique (tstart, tend) tuples + timemap[(tstart, tend)] = 1 # Collect unique (tstart, tend) tuples if levstart is not None: levmap[(levstart, levend)] = 1 if forecast is not None: - fcmap[(forecast,forecast)] = 1 + fcmap[(forecast, forecast)] = 1 tkeys = timemap.keys() - if len(tkeys)>0: + if len(tkeys) > 0: tkeys.sort() tpart = map(lambda x: list(x), tkeys) else: tpart = None levkeys = levmap.keys() - if len(levkeys)>0: + if len(levkeys) > 0: levkeys.sort() levpart = map(lambda x: list(x), levkeys) else: levpart = None fckeys = fcmap.keys() - if len(fckeys)>0: + if len(fckeys) > 0: fckeys.sort() - if self.variables.has_key(varname): + if varname in self.variables: self.variables[varname]._varpart_ = [tpart, levpart] def getConvention(self): @@ -686,7 +745,7 @@ def getConvention(self): return self._convention_ # Get a dictionary of objects with the given tag - def getDictionary(self,tag): + def getDictionary(self, tag): return self.dictdict[tag] # Synchronize writes with data/metadata files @@ -707,24 +766,24 @@ def close(self): self.parent = None self._status_ = 'closed' -## Note: Removed to allow garbage collection of reference cycles -## def __del__(self): -## if cdmsobj._debug==1: -## print 'Deleting dataset',self.id -## self.close() - +# Note: Removed to allow garbage collection of reference cycles +# def __del__(self): +# if cdmsobj._debug==1: +# print 'Deleting dataset',self.id +# self.close() + # Create an axis # 'name' is the string name of the Axis # 'ar' is the 1-D data array, or None for an unlimited axis # Return an axis object. - def createAxis(self,name,ar): + def createAxis(self, name, ar): pass # Create an implicit rectilinear grid. 
lat, lon, and mask are objects. # order and type are strings - def createRectGrid(self,id, lat, lon, order, type="generic", mask=None): + def createRectGrid(self, id, lat, lon, order, type="generic", mask=None): node = cdmsNode.RectGridNode(id, lat.id, lon.id, type, order, mask.id) - grid = RectGrid(self,node) + grid = RectGrid(self, node) grid.initDomain(self.axes, self.variables) self.grids[grid.id] = grid # self._gridmap_[gridkey] = grid @@ -734,31 +793,31 @@ def createRectGrid(self,id, lat, lon, order, type="generic", mask=None): # 'datatype' is a CDMS datatype # 'axisnames' is a list of axes or grids # Return a variable object. - def createVariable(self,name,datatype,axisnames): + def createVariable(self, name, datatype, axisnames): pass # Search for a pattern in a string-valued attribute. If attribute is None, # search all string attributes. If tag is 'dataset', just check the dataset, # else check all nodes in the dataset of class type matching the tag. If tag # is None, search the dataset and all objects contained in it. - def searchPattern(self,pattern,attribute,tag): + def searchPattern(self, pattern, attribute, tag): resultlist = [] if tag is not None: tag = string.lower(tag) - if tag in ('dataset',None): - if self.searchone(pattern,attribute)==1: + if tag in ('dataset', None): + if self.searchone(pattern, attribute) == 1: resultlist = [self] else: resultlist = [] if tag is None: for dict in self.dictdict.values(): for obj in dict.values(): - if obj.searchone(pattern,attribute): + if obj.searchone(pattern, attribute): resultlist.append(obj) - elif tag!='dataset': + elif tag != 'dataset': dict = self.dictdict[tag] for obj in dict.values(): - if obj.searchone(pattern,attribute): + if obj.searchone(pattern, attribute): resultlist.append(obj) return resultlist @@ -766,24 +825,24 @@ def searchPattern(self,pattern,attribute,tag): # search all string attributes. 
If tag is 'dataset', just check the dataset, # else check all nodes in the dataset of class type matching the tag. If tag # is None, search the dataset and all objects contained in it. - def matchPattern(self,pattern,attribute,tag): + def matchPattern(self, pattern, attribute, tag): resultlist = [] if tag is not None: tag = string.lower(tag) - if tag in ('dataset',None): - if self.matchone(pattern,attribute)==1: + if tag in ('dataset', None): + if self.matchone(pattern, attribute) == 1: resultlist = [self] else: resultlist = [] if tag is None: for dict in self.dictdict.values(): for obj in dict.values(): - if obj.matchone(pattern,attribute): + if obj.matchone(pattern, attribute): resultlist.append(obj) - elif tag!='dataset': + elif tag != 'dataset': dict = self.dictdict[tag] for obj in dict.values(): - if obj.matchone(pattern,attribute): + if obj.matchone(pattern, attribute): resultlist.append(obj) return resultlist @@ -794,13 +853,13 @@ def matchPattern(self,pattern,attribute,tag): # If 'variable', 'axis', etc., it is applied only to that type of object # in the dataset. If None, it is applied to all objects, including # the dataset itself. 
- def searchPredicate(self,predicate,tag): + def searchPredicate(self, predicate, tag): resultlist = [] if tag is not None: tag = string.lower(tag) - if tag in ('dataset',None): + if tag in ('dataset', None): try: - if apply(predicate,(self,))==1: + if predicate(*(self,)) == 1: resultlist.append(self) except AttributeError: pass @@ -808,15 +867,15 @@ def searchPredicate(self,predicate,tag): for dict in self.dictdict.values(): for obj in dict.values(): try: - if apply(predicate,(obj,))==1: + if predicate(*(obj,)) == 1: resultlist.append(obj) except AttributeError: pass - elif tag!="dataset": + elif tag != "dataset": dict = self.dictdict[tag] for obj in dict.values(): try: - if apply(predicate,(obj,))==1: + if predicate(*(obj,)) == 1: resultlist.append(obj) except: pass @@ -828,8 +887,7 @@ def getPaths(self): for var in self.variables.values(): for path, stuple in var.getPaths(): pathdict[path] = 1 - result = pathdict.keys() - result.sort() + result = sorted(pathdict.keys()) return result # Open a data file associated with this dataset. @@ -839,33 +897,35 @@ def openFile(self, filename, mode): # Opened via a local XML file? 
if self.parent is None: - path = os.path.join(self.datapath,filename) - if cdmsobj._debug==1: - sys.stdout.write(path+'\n'); sys.stdout.flush() - f = Cdunif.CdunifFile(path,mode) + path = os.path.join(self.datapath, filename) + if cdmsobj._debug == 1: + sys.stdout.write(path + '\n') + sys.stdout.flush() + f = Cdunif.CdunifFile(path, mode) return f # Opened via a database else: dburls = self.parent.url - if type(dburls)!=type([]): + if not isinstance(dburls, type([])): dburls = [dburls] # Try first to open as a local file for dburl in dburls: if os.path.isabs(self.directory): - fileurl = os.path.join(self.directory,filename) + fileurl = os.path.join(self.directory, filename) else: try: - fileurl = os.path.join(dburl,self.datapath,filename) + fileurl = os.path.join(dburl, self.datapath, filename) except: - print 'Error joining',`dburl`,self.datapath,filename + print 'Error joining', repr(dburl), self.datapath, filename raise - (scheme,netloc,path,parameters,query,fragment)=urlparse.urlparse(fileurl) - if scheme in ['file',''] and os.path.isfile(path): - if cdmsobj._debug==1: - sys.stdout.write(fileurl+'\n'); sys.stdout.flush() - f = Cdunif.CdunifFile(path,mode) + (scheme, netloc, path, parameters, query, fragment) = urlparse.urlparse(fileurl) + if scheme in ['file', ''] and os.path.isfile(path): + if cdmsobj._debug == 1: + sys.stdout.write(fileurl + '\n') + sys.stdout.flush() + f = Cdunif.CdunifFile(path, mode) return f # See if request manager is being used for file transfer @@ -879,28 +939,28 @@ def openFile(self, filename, mode): fileDN = (self.uri, filename) path = cache.getFile(filename, fileDN, lcpath=lcpath, userid=db.userid, useReplica=db.useReplica) try: - f = Cdunif.CdunifFile(path,mode) + f = Cdunif.CdunifFile(path, mode) except: # Try again, in case another process clobbered this file - path = cache.getFile(fileurl,fileDN) - f = Cdunif.CdunifFile(path,mode) + path = cache.getFile(fileurl, fileDN) + f = Cdunif.CdunifFile(path, mode) return f # Try to read 
via FTP: for dburl in dburls: - fileurl = os.path.join(dburl,self.datapath,filename) - (scheme,netloc,path,parameters,query,fragment)=urlparse.urlparse(fileurl) - if scheme=='ftp': + fileurl = os.path.join(dburl, self.datapath, filename) + (scheme, netloc, path, parameters, query, fragment) = urlparse.urlparse(fileurl) + if scheme == 'ftp': cache = self.parent.enableCache() - fileDN = (self.uri, filename) # Global file name - path = cache.getFile(fileurl,fileDN) + fileDN = (self.uri, filename) # Global file name + path = cache.getFile(fileurl, fileDN) try: - f = Cdunif.CdunifFile(path,mode) + f = Cdunif.CdunifFile(path, mode) except: # Try again, in case another process clobbered this file - path = cache.getFile(fileurl,fileDN) - f = Cdunif.CdunifFile(path,mode) + path = cache.getFile(fileurl, fileDN) + f = Cdunif.CdunifFile(path, mode) return f # File not found @@ -913,9 +973,9 @@ def getLogicalCollectionDN(self, base=None): if hasattr(self, "lc"): dn = self.lc else: - dn = "lc=%s"%self.id + dn = "lc=%s" % self.id if base is not None: - dn = "%s,%s"%(dn,base) + dn = "%s,%s" % (dn, base) return dn def getVariable(self, id): @@ -927,7 +987,14 @@ def getVariables(self, spatial=0): axes defined on latitude or longitude, excluding weights and bounds.""" retval = self.variables.values() if spatial: - retval = filter(lambda x: x.id[0:7]!="bounds_" and x.id[0:8]!="weights_" and ((x.getLatitude() is not None) or (x.getLongitude() is not None) or (x.getLevel() is not None)), retval) + retval = filter( + lambda x: x.id[ + 0:7] != "bounds_" and x.id[ + 0:8] != "weights_" and ( + (x.getLatitude() is not None) or ( + x.getLongitude() is not None) or ( + x.getLevel() is not None)), + retval) return retval def getAxis(self, id): @@ -939,38 +1006,40 @@ def getGrid(self, id): return self.grids.get(id) def __repr__(self): - return ""%(self.id, self.uri, self.mode, self._status_) - -## internattr.add_internal_attribute (Dataset, 'datapath', -## 'variables', -## 'axes', -## 'grids', 
-## 'xlinks', -## 'dictdict', -## 'default_variable_name', -## 'parent', -## 'uri', -## 'mode') + return "" % (self.id, self.uri, self.mode, self._status_) + +# internattr.add_internal_attribute (Dataset, 'datapath', +# 'variables', +# 'axes', +# 'grids', +# 'xlinks', +# 'dictdict', +# 'default_variable_name', +# 'parent', +# 'uri', +# 'mode') + class CdmsFile(CdmsObj, cuDataset): - def __init__(self, path, mode, hostObj = None, mpiBarrier=False): + + def __init__(self, path, mode, hostObj=None, mpiBarrier=False): if mpiBarrier: MPI.COMM_WORLD.Barrier() CdmsObj.__init__(self, None) cuDataset.__init__(self) - value = self.__cdms_internals__+['datapath', - 'variables', - 'axes', - 'grids', - 'xlinks', - 'dictdict', - 'default_variable_name', - 'id', - 'uri', - 'parent', - 'mode'] + value = self.__cdms_internals__ + ['datapath', + 'variables', + 'axes', + 'grids', + 'xlinks', + 'dictdict', + 'default_variable_name', + 'id', + 'uri', + 'parent', + 'mode'] self.___cdms_internals__ = value self.id = path if "://" in path: @@ -979,14 +1048,14 @@ def __init__(self, path, mode, hostObj = None, mpiBarrier=False): self.uri = "file://" + os.path.abspath(os.path.expanduser(path)) self._mode_ = mode try: - if mode[0].lower()=="w": + if mode[0].lower() == "w": try: os.remove(path) except: pass - _fileobj_ = Cdunif.CdunifFile (path, mode) - except Exception,err: - raise CDMSError('Cannot open file %s (%s)'%(path,err)) + _fileobj_ = Cdunif.CdunifFile(path, mode) + except Exception as err: + raise CDMSError('Cannot open file %s (%s)' % (path, err)) self._file_ = _fileobj_ # Cdunif file object self.variables = {} self.axes = {} @@ -994,30 +1063,30 @@ def __init__(self, path, mode, hostObj = None, mpiBarrier=False): self.xlinks = {} self._gridmap_ = {} - # self.attributes returns the Cdunif file dictionary. 
-## self.replace_external_attributes(self._file_.__dict__) - for att in self._file_.__dict__.keys(): - self.__dict__.__setitem__(att,self._file_.__dict__[att]) - self.attributes[att]=self._file_.__dict__[att] + # self.attributes returns the Cdunif file dictionary. +# self.replace_external_attributes(self._file_.__dict__) + for att in self._file_.__dict__.keys(): + self.__dict__.__setitem__(att, self._file_.__dict__[att]) + self.attributes[att] = self._file_.__dict__[att] self._boundAxis_ = None # Boundary axis for cell vertices - if self._mode_=='w': + if self._mode_ == 'w': self.Conventions = convention.CFConvention.current self._status_ = 'open' self._convention_ = convention.getDatasetConvention(self) try: - + # A mosaic variable with coordinates attached, but the coordinate variables reside in a # different file. Add the coordinate variables to the mosaic variables list. - if not hostObj is None: + if hostObj is not None: for name in self._file_.variables.keys(): if 'coordinates' in dir(self._file_.variables[name]): coords = self._file_.variables[name].coordinates.split() for coord in coords: - if not coord in self._file_.variables.keys(): + if coord not in self._file_.variables.keys(): cdunifvar = Cdunif.CdunifFile(hostObj.gridVars[coord][0], mode) self._file_.variables[coord] = cdunifvar.variables[coord] - + # Get lists of 1D and auxiliary coordinate axes coords1d = self._convention_.getAxisIds(self._file_.variables) coordsaux = self._convention_.getAxisAuxIds(self._file_.variables, coords1d) @@ -1029,12 +1098,12 @@ def __init__(self, path, mode, hostObj = None, mpiBarrier=False): if name in coordsaux: # Put auxiliary coordinate axes with variables, since there may be # a dimension with the same name. 
- if len(cdunifvar.shape)==2: + if len(cdunifvar.shape) == 2: self.variables[name] = FileAxis2D(self, name, cdunifvar) else: self.variables[name] = FileAuxAxis1D(self, name, cdunifvar) else: - self.variables[name] = FileVariable(self,name,cdunifvar) + self.variables[name] = FileVariable(self, name, cdunifvar) # Build axis list for name in self._file_.dimensions.keys(): @@ -1044,7 +1113,7 @@ def __init__(self, path, mode, hostObj = None, mpiBarrier=False): cdunifvar = self._file_.variables[name] else: cdunifvar = None - self.axes[name] = FileAxis(self,name,cdunifvar) + self.axes[name] = FileAxis(self, name, cdunifvar) # Attach boundary variables for name in coordsaux: @@ -1052,7 +1121,12 @@ def __init__(self, path, mode, hostObj = None, mpiBarrier=False): bounds = self._convention_.getVariableBounds(self, var) var.setBounds(bounds) - self.dictdict = {'variable':self.variables, 'axis':self.axes, 'rectGrid':self.grids, 'curveGrid':self.grids, 'genericGrid':self.grids} + self.dictdict = { + 'variable': self.variables, + 'axis': self.axes, + 'rectGrid': self.grids, + 'curveGrid': self.grids, + 'genericGrid': self.grids} # Initialize variable domains for var in self.variables.values(): @@ -1072,31 +1146,31 @@ def __init__(self, path, mode, hostObj = None, mpiBarrier=False): grid = self._gridmap_.get(gridkey) if grid is None: - if hasattr(var,'grid_type'): + if hasattr(var, 'grid_type'): gridtype = var.grid_type else: gridtype = "generic" candidateBasename = None if gridkey[4] == 'rectGrid': - gridshape = (len(lat),len(lon)) + gridshape = (len(lat), len(lon)) elif gridkey[4] == 'curveGrid': gridshape = lat.shape elif gridkey[4] == 'genericGrid': gridshape = lat.shape - candidateBasename = 'grid_%d'%gridshape + candidateBasename = 'grid_%d' % gridshape else: - gridshape = (len(lat),len(lon)) + gridshape = (len(lat), len(lon)) if candidateBasename is None: - candidateBasename = 'grid_%dx%d'%gridshape - if not self.grids.has_key(candidateBasename): + candidateBasename = 
'grid_%dx%d' % gridshape + if candidateBasename not in self.grids: gridname = candidateBasename else: foundname = 0 - for i in range(97,123): # Lower-case letters - candidateName = candidateBasename+'_'+chr(i) - if not self.grids.has_key(candidateName): + for i in range(97, 123): # Lower-case letters + candidateName = candidateBasename + '_' + chr(i) + if candidateName not in self.grids: gridname = candidateName foundname = 1 break @@ -1109,11 +1183,11 @@ def __init__(self, path, mode, hostObj = None, mpiBarrier=False): if gridkey[4] == 'rectGrid': grid = FileRectGrid(self, gridname, lat, lon, gridkey[2], gridtype) else: - if gridkey[3]!='': - if self.variables.has_key(gridkey[3]): + if gridkey[3] != '': + if gridkey[3] in self.variables: maskvar = self.variables[gridkey[3]] else: - print 'Warning: mask variable %s not found'%gridkey[3] + print 'Warning: mask variable %s not found' % gridkey[3] maskvar = None else: maskvar = None @@ -1131,45 +1205,36 @@ def __init__(self, path, mode, hostObj = None, mpiBarrier=False): raise # setattr writes external global attributes to the file - def __setattr__ (self, name, value): -## s = self.get_property_s(name) -## if s is not None: -## print '....handler' -## s(self, name, value) -## return - self.__dict__[name] = value #attributes kept in sync w/file - if not name in self.__cdms_internals__ and name[0]!='_': + def __setattr__(self, name, value): + self.__dict__[name] = value # attributes kept in sync w/file + if name not in self.__cdms_internals__ and name[0] != '_': setattr(self._file_, name, value) - self.attributes[name]=value - -## # getattr reads external global attributes from the file -## def __getattr__ (self, name): -## ## g = self.get_property_g(name) -## ## if g is not None: -## ## return g(self, name) -## if name in self.__cdms_internals__: -## try: -## return self.__dict__[name] -## except KeyError: -## raise AttributeError("%s instance has no attribute %s." 
% \ -## (self.__class__.__name__, name)) -## else: -## return getattr(self._file_,name) + self.attributes[name] = value + +# getattr reads external global attributes from the file +# def __getattr__ (self, name): +# g = self.get_property_g(name) +# if g is not None: +# return g(self, name) +# if name in self.__cdms_internals__: +# try: +# return self.__dict__[name] +# except KeyError: +# raise AttributeError("%s instance has no attribute %s." % \ +# (self.__class__.__name__, name)) +# else: +# return getattr(self._file_,name) # delattr deletes external global attributes in the file def __delattr__(self, name): -## d = self.get_property_d(name) -## if d is not None: -## d(self, name) -## return try: del self.__dict__[name] except KeyError: - raise AttributeError("%s instance has no attribute %s." % \ - (self.__class__.__name__, name)) - if not name in self.__cdms_internals__: + raise AttributeError("%s instance has no attribute %s." % + (self.__class__.__name__, name)) + if name not in self.__cdms_internals__: delattr(self._file_, name) - if( name in self.attributes.keys() ): + if(name in self.attributes.keys()): del(self.attributes[name]) def sync(self): @@ -1179,12 +1244,12 @@ def sync(self): None :: (None) (0) yep ::: """ - if self._status_=="closed": + if self._status_ == "closed": raise CDMSError(FileWasClosed + self.id) self._file_.sync() def close(self): - if self._status_=="closed": + if self._status_ == "closed": return if hasattr(self, 'dictdict'): for dict in self.dictdict.values(): @@ -1195,21 +1260,21 @@ def close(self): self._file_.close() self._status_ = 'closed' -## Note: Removed to allow garbage collection of reference cycles -## def __del__(self): -## if cdmsobj._debug==1: -## print 'Deleting file',self.id -## # If the object has been deallocated due to open error, -## # it will not have an attribute .dictdict -## if hasattr(self,"dictdict") and self.dictdict != {}: -## self.close() +# Note: Removed to allow garbage collection of reference cycles 
+# def __del__(self): +# if cdmsobj._debug==1: +# print 'Deleting file',self.id +# If the object has been deallocated due to open error, +# it will not have an attribute .dictdict +# if hasattr(self,"dictdict") and self.dictdict != {}: +# self.close() # Create an axis # 'name' is the string name of the Axis # 'ar' is the 1-D data array, or None for an unlimited axis # Set unlimited to true to designate the axis as unlimited # Return an axis object. - def createAxis(self,name,ar,unlimited=0): + def createAxis(self, name, ar, unlimited=0): """ Create an axis 'name' is the string name of the Axis @@ -1228,36 +1293,36 @@ def createAxis(self,name,ar,unlimited=0): axis :: (cdms2.axis.FileAxis) (0) file axis whose id is name ::: """ - if self._status_=="closed": + if self._status_ == "closed": raise CDMSError(FileWasClosed + self.id) cufile = self._file_ - if ar is None or (unlimited==1 and getNetcdfUseParallelFlag()==0): - cufile.createDimension(name,None) + if ar is None or (unlimited == 1 and getNetcdfUseParallelFlag() == 0): + cufile.createDimension(name, None) if ar is None: typecode = numpy.float else: typecode = ar.dtype.char else: - cufile.createDimension(name,len(ar)) + cufile.createDimension(name, len(ar)) typecode = ar.dtype.char # Compatibility: revert to old typecode for cdunif typecode = typeconv.oldtypecodes[typecode] - cuvar = cufile.createVariable(name,typecode,(name,)) + cuvar = cufile.createVariable(name, typecode, (name,)) # Cdunif should really create this extra dimension info: # (units,typecode,filename,varname_local,dimension_type,ncid) - cufile.dimensioninfo[name] = ('',typecode,name,'','global',-1) + cufile.dimensioninfo[name] = ('', typecode, name, '', 'global', -1) # Note: like netCDF-3, cdunif does not support 64-bit integers. # If ar has dtype int64 on a 64-bit machine, cuvar will be a 32-bit int, # and ar must be downcast. 
if ar is not None: - if ar.dtype.char!='l': + if ar.dtype.char != 'l': cuvar[0:len(ar)] = numpy.ma.filled(ar) else: cuvar[0:len(ar)] = numpy.ma.filled(ar).astype(cuvar.typecode()) - axis = FileAxis(self,name,cuvar) + axis = FileAxis(self, name, cuvar) self.axes[name] = axis return axis @@ -1274,17 +1339,17 @@ def createVirtualAxis(self, name, axislen): ::: Input::: name :: (str) (0) dimension name - axislen :: (int) (1) + axislen :: (int) (1) ::: Output::: axis :: (cdms2.axis.FileVirtualAxis) (0) file axis whose id is name ::: """ - if self._status_=="closed": + if self._status_ == "closed": raise CDMSError(FileWasClosed + self.id) cufile = self._file_ cufile.createDimension(name, axislen) - cufile.dimensioninfo[name] = ('','f',name,'','global',-1) + cufile.dimensioninfo[name] = ('', 'f', name, '', 'global', -1) axis = FileVirtualAxis(self, name, axislen) self.axes[name] = axis return axis @@ -1307,24 +1372,25 @@ def copyAxis(self, axis, newname=None, unlimited=0, index=None, extbounds=None): axis :: (cdms2.axis.FileAxis/cdms2.axis.FileVirtualAxis) (0) copy of input axis ::: """ - if newname is None: newname=axis.id + if newname is None: + newname = axis.id # If the axis already exists and has the same values, return existing - if self.axes.has_key(newname): + if newname in self.axes: newaxis = self.axes[newname] if newaxis.isVirtual(): - if len(axis)!=len(newaxis): - raise DuplicateAxisError(DuplicateAxis+newname) - elif unlimited==0 or (unlimited==1 and getNetcdfUseParallelFlag()!=0): - if len(axis)!=len(newaxis) or numpy.alltrue(numpy.less(numpy.absolute(newaxis[:]-axis[:]),1.e-5))==0: - raise DuplicateAxisError(DuplicateAxis+newname) + if len(axis) != len(newaxis): + raise DuplicateAxisError(DuplicateAxis + newname) + elif unlimited == 0 or (unlimited == 1 and getNetcdfUseParallelFlag() != 0): + if len(axis) != len(newaxis) or numpy.alltrue(numpy.less(numpy.absolute(newaxis[:] - axis[:]), 1.e-5)) == 0: + raise DuplicateAxisError(DuplicateAxis + newname) else: 
if index is None: - isoverlap, index = isOverlapVector(axis[:],newaxis[:]) + isoverlap, index = isOverlapVector(axis[:], newaxis[:]) else: isoverlap = 1 if isoverlap: - newaxis[index:index+len(axis)] = axis[:] + newaxis[index:index + len(axis)] = axis[:] if extbounds is None: axisBounds = axis.getBounds() else: @@ -1332,7 +1398,7 @@ def copyAxis(self, axis, newname=None, unlimited=0, index=None, extbounds=None): if axisBounds is not None: newaxis.setBounds(axisBounds) else: - raise DuplicateAxisError(DuplicateAxis+newname) + raise DuplicateAxisError(DuplicateAxis + newname) elif axis.isVirtual(): newaxis = self.createVirtualAxis(newname, len(axis)) @@ -1347,8 +1413,8 @@ def copyAxis(self, axis, newname=None, unlimited=0, index=None, extbounds=None): else: boundsid = None newaxis.setBounds(bounds, persistent=1, boundsid=boundsid) - for attname,attval in axis.attributes.items(): - if attname not in ["datatype", "id","length","isvar","name_in_file","partition"]: + for attname, attval in axis.attributes.items(): + if attname not in ["datatype", "id", "length", "isvar", "name_in_file", "partition"]: setattr(newaxis, attname, attval) return newaxis @@ -1394,7 +1460,7 @@ def copyGrid(self, grid, newname=None): ::: """ if newname is None: - if hasattr(grid,'id'): + if hasattr(grid, 'id'): newname = grid.id else: newname = 'Grid' @@ -1411,14 +1477,14 @@ def copyGrid(self, grid, newname=None): lon.designateLongitude(persistent=1) # If the grid name already exists, and is the same, just return it - if self.grids.has_key(newname): + if newname in self.grids: newgrid = self.grids[newname] newlat = newgrid.getLatitude() newlon = newgrid.getLongitude() if ((newlat is not lat) or (newlon is not lon) or (newgrid.getOrder() != grid.getOrder()) or - (newgrid.getType() != grid.getType())): + (newgrid.getType() != grid.getType())): raise DuplicateGrid(newname) # else create a new grid and copy metadata @@ -1437,7 +1503,7 @@ def copyGrid(self, grid, newname=None): # 'axesOrGrids' is a 
list of axes, grids. (Note: this should be # generalized to allow subintervals of axes and/or grids) # Return a variable object. - def createVariable(self,name,datatype,axesOrGrids,fill_value=None): + def createVariable(self, name, datatype, axesOrGrids, fill_value=None): """ Create a variable 'name' is the string name of the Variable @@ -1457,7 +1523,7 @@ def createVariable(self,name,datatype,axesOrGrids,fill_value=None): axis :: (cdms2.fvariable.FileVariable) (0) file variable ::: """ - if self._status_=="closed": + if self._status_ == "closed": raise CDMSError(FileWasClosed + self.id) cufile = self._file_ if datatype in CdDatatypes: @@ -1465,7 +1531,6 @@ def createVariable(self,name,datatype,axesOrGrids,fill_value=None): else: numericType = datatype - #dimensions = map(lambda x: x.id, axes) # Make a list of names of axes for _Cdunif dimensions = [] for obj in axesOrGrids: @@ -1479,63 +1544,25 @@ def createVariable(self,name,datatype,axesOrGrids,fill_value=None): try: # Compatibility: revert to old typecode for cdunif numericType = typeconv.oldtypecodes[numericType] - cuvar = cufile.createVariable(name,numericType,tuple(dimensions)) - except Exception,err: + cuvar = cufile.createVariable(name, numericType, tuple(dimensions)) + except Exception as err: print err - raise CDMSError("Creating variable "+name) - var = FileVariable(self,name,cuvar) + raise CDMSError("Creating variable " + name) + var = FileVariable(self, name, cuvar) var.initDomain(self.axes) self.variables[name] = var - if fill_value is not None: var.setMissing(fill_value) + if fill_value is not None: + var.setMissing(fill_value) return var - # Create a variable from an existing variable, and copy the metadata -## def createVariableCopy(self, var, newname=None): - -## if newname is None: newname=var.id -## if self.variables.has_key(newname): -## raise DuplicateVariable(newname) - - -## # Create axes if necessary -## axislist = [] -## for (axis,start,length,true_length) in var.getDomain(): -## try: -## 
newaxis = self.copyAxis(axis) -## except DuplicateAxisError: - -## # Create a unique axis name -## setit = 0 -## for i in range(97,123): # Lower-case letters -## try: -## newaxis = self.copyAxis(axis,axis.id+'_'+chr(i)) -## setit = 1 -## break -## except DuplicateAxisError: -## continue - -## if setit==0: raise DuplicateAxisError(DuplicateAxis+axis.id) - -## axislist.append(newaxis) - -## # Create the new variable -## datatype = cdmsNode.NumericToCdType.get(var.dtype.char) -## newvar = self.createVariable(newname, datatype, axislist) - -## # Copy variable metadata -## for attname in var.attributes.keys(): -## if attname not in ["id", "datatype"]: -## setattr(newvar, attname, getattr(var, attname)) - -## return newvar - # Search for a pattern in a string-valued attribute. If attribute is None, # search all string attributes. If tag is 'cdmsFile', just check the dataset, # else check all nodes in the dataset of class type matching the tag. If tag # is None, search the dataset and all objects contained in it. - def searchPattern(self,pattern,attribute,tag): + def searchPattern(self, pattern, attribute, tag): """ - Search for a pattern in a string-valued attribute. If attribute is None, search all string attributes. If tag is not None, it must match the internal node tag. + Search for a pattern in a string-valued attribute. If attribute is None, + search all string attributes. If tag is not None, it must match the internal node tag. 
::: Input::: pattern :: (str) (0) pattern @@ -1543,26 +1570,26 @@ def searchPattern(self,pattern,attribute,tag): tag :: (str/None) (2) node tag ::: Output::: - result :: (list) (0) + result :: (list) (0) ::: """ resultlist = [] if tag is not None: tag = string.lower(tag) - if tag in ('cdmsFile',None,'dataset'): - if self.searchone(pattern,attribute)==1: + if tag in ('cdmsFile', None, 'dataset'): + if self.searchone(pattern, attribute) == 1: resultlist = [self] else: resultlist = [] if tag is None: for dict in self.dictdict.values(): for obj in dict.values(): - if obj.searchone(pattern,attribute): + if obj.searchone(pattern, attribute): resultlist.append(obj) - elif tag not in ('cdmsFile','dataset'): + elif tag not in ('cdmsFile', 'dataset'): dict = self.dictdict[tag] for obj in dict.values(): - if obj.searchone(pattern,attribute): + if obj.searchone(pattern, attribute): resultlist.append(obj) return resultlist @@ -1570,9 +1597,10 @@ def searchPattern(self,pattern,attribute,tag): # search all string attributes. If tag is 'cdmsFile', just check the dataset, # else check all nodes in the dataset of class type matching the tag. If tag # is None, search the dataset and all objects contained in it. - def matchPattern(self,pattern,attribute,tag): + def matchPattern(self, pattern, attribute, tag): """ - Match for a pattern in a string-valued attribute. If attribute is None, search all string attributes. If tag is not None, it must match the internal node tag. + Match for a pattern in a string-valued attribute. If attribute is None, + search all string attributes. If tag is not None, it must match the internal node tag. 
::: Input::: pattern :: (str) (0) pattern @@ -1580,26 +1608,26 @@ def matchPattern(self,pattern,attribute,tag): tag :: (str/None) (2) node tag ::: Output::: - result :: (list) (0) + result :: (list) (0) ::: """ resultlist = [] if tag is not None: tag = string.lower(tag) - if tag in ('cdmsFile',None,'dataset'): - if self.matchone(pattern,attribute)==1: + if tag in ('cdmsFile', None, 'dataset'): + if self.matchone(pattern, attribute) == 1: resultlist = [self] else: resultlist = [] if tag is None: for dict in self.dictdict.values(): for obj in dict.values(): - if obj.matchone(pattern,attribute): + if obj.matchone(pattern, attribute): resultlist.append(obj) - elif tag not in ('cdmsFile','dataset'): + elif tag not in ('cdmsFile', 'dataset'): dict = self.dictdict[tag] for obj in dict.values(): - if obj.matchone(pattern,attribute): + if obj.matchone(pattern, attribute): resultlist.append(obj) return resultlist @@ -1610,24 +1638,26 @@ def matchPattern(self,pattern,attribute,tag): # If 'variable', 'axis', etc., it is applied only to that type of object # in the dataset. If None, it is applied to all objects, including # the dataset itself. - def searchPredicate(self,predicate,tag): + def searchPredicate(self, predicate, tag): """ - Apply a truth-valued predicate. Return a list containing a single instance: [self] if the predicate is true and either tag is None or matches the object node tag. If the predicate returns false, return an empty list + Apply a truth-valued predicate. Return a list containing a single instance: + [self] if the predicate is true and either tag is None or matches the object node tag. 
+ If the predicate returns false, return an empty list ::: Input::: predicate :: (function) (0) predicate tag :: (str/None) (1) node tag ::: Output::: - result :: (list) (0) + result :: (list) (0) ::: """ resultlist = [] if tag is not None: tag = string.lower(tag) - if tag in ('cdmsFile',None,'dataset'): + if tag in ('cdmsFile', None, 'dataset'): try: - if apply(predicate,(self,))==1: + if predicate(*(self,)) == 1: resultlist.append(self) except AttributeError: pass @@ -1635,15 +1665,15 @@ def searchPredicate(self,predicate,tag): for dict in self.dictdict.values(): for obj in dict.values(): try: - if apply(predicate,(obj,))==1: + if predicate(*(obj,)) == 1: resultlist.append(obj) except AttributeError: pass - elif tag not in ('dataset','cdmsFile'): + elif tag not in ('dataset', 'cdmsFile'): dict = self.dictdict[tag] for obj in dict.values(): try: - if apply(predicate,(obj,))==1: + if predicate(*(obj,)) == 1: resultlist.append(obj) except: pass @@ -1675,8 +1705,12 @@ def createVariableCopy(self, var, id=None, attributes=None, axes=None, extbounds axes :: (None/[cdms2.axis.AbstractAxis]) (None) list of axes to use for the copied variable extbounds :: (None/numpy.ndarray) (None) Bounds of the (portion of) the extended dimension being written extend :: (int) (0) If 1, define the first dimension as the unlimited dimension. If 0, do not define an unlimited dimension. The default is the define the first dimension as unlimited only if it is a time dimension. + extend :: (int) (0) If 1, define the first dimension as the unlimited dimension. + If 0, do not define an unlimited dimension. The default is the define the + first dimension as unlimited only if it is a time dimension. fill_value :: (None/float) (None) the missing value flag - index :: (None/int) the extended dimension index to write to. The default index is determined by lookup relative to the existing extended dimension + index :: (None/int) the extended dimension index to write to. 
The default index is determined + by lookup relative to the existing extended dimension newname :: (str/None) id/newname of new variable grid :: (None/cdms2.grid.AbstractGrid) grid to use ::: @@ -1685,10 +1719,10 @@ def createVariableCopy(self, var, id=None, attributes=None, axes=None, extbounds ::: """ if newname is None: - newname=var.id + newname = var.id if id is not None: newname = id - if self.variables.has_key(newname): + if newname in self.variables: raise DuplicateVariable(newname) # Determine the extended axis name if any @@ -1697,12 +1731,12 @@ def createVariableCopy(self, var, id=None, attributes=None, axes=None, extbounds else: sourceAxislist = axes - if var.rank()==0: # scalars are not extensible + if var.rank() == 0: # scalars are not extensible extend = 0 - - if extend in (1,None): + + if extend in (1, None): firstAxis = sourceAxislist[0] - if firstAxis is not None and (extend==1 or firstAxis.isTime()): + if firstAxis is not None and (extend == 1 or firstAxis.isTime()): extendedAxis = firstAxis.id else: extendedAxis = None @@ -1712,22 +1746,23 @@ def createVariableCopy(self, var, id=None, attributes=None, axes=None, extbounds # Create axes if necessary axislist = [] for axis in sourceAxislist: - if extendedAxis is None or axis.id!=extendedAxis: + if extendedAxis is None or axis.id != extendedAxis: try: newaxis = self.copyAxis(axis) except DuplicateAxisError: # Create a unique axis name setit = 0 - for i in range(97,123): # Lower-case letters + for i in range(97, 123): # Lower-case letters try: - newaxis = self.copyAxis(axis,axis.id+'_'+chr(i)) + newaxis = self.copyAxis(axis, axis.id + '_' + chr(i)) setit = 1 break except DuplicateAxisError: continue - if setit==0: raise DuplicateAxisError(DuplicateAxis+axis.id) + if setit == 0: + raise DuplicateAxisError(DuplicateAxis + axis.id) else: newaxis = self.copyAxis(axis, unlimited=1, index=index, extbounds=extbounds) @@ -1737,25 +1772,25 @@ def createVariableCopy(self, var, id=None, attributes=None, 
axes=None, extbounds if attributes is None: attributes = var.attributes try: - attributes['missing_value']=var.missing_value - except Exception,err: + attributes['missing_value'] = var.missing_value + except Exception as err: print err pass try: if fill_value is None: - if( '_FillValue' in attributes.keys() ): - attributes['_FillValue']=numpy.array(var._FillValue).astype(var.dtype) - attributes['missing_value']=numpy.array(var._FillValue).astype(var.dtype) - if( 'missing_value' in attributes.keys() ): - attributes['_FillValue']=numpy.array(var.missing_value).astype(var.dtype) - attributes['missing_value']=numpy.array(var.missing_value).astype(var.dtype) + if('_FillValue' in attributes.keys()): + attributes['_FillValue'] = numpy.array(var._FillValue).astype(var.dtype) + attributes['missing_value'] = numpy.array(var._FillValue).astype(var.dtype) + if('missing_value' in attributes.keys()): + attributes['_FillValue'] = numpy.array(var.missing_value).astype(var.dtype) + attributes['missing_value'] = numpy.array(var.missing_value).astype(var.dtype) else: - attributes['_FillValue']=fill_value - attributes['missing_value']=fill_value + attributes['_FillValue'] = fill_value + attributes['missing_value'] = fill_value except: pass - if attributes.has_key("name"): - if attributes['name']!=var.id: + if "name" in attributes: + if attributes['name'] != var.id: del(attributes['name']) # Create grid as necessary @@ -1764,7 +1799,7 @@ def createVariableCopy(self, var, id=None, attributes=None, axes=None, extbounds if grid is not None: coords = grid.writeToFile(self) if coords is not None: - coordattr = "%s %s"%(coords[0].id, coords[1].id) + coordattr = "%s %s" % (coords[0].id, coords[1].id) if attributes is None: attributes = {'coordinates': coordattr} else: @@ -1773,19 +1808,19 @@ def createVariableCopy(self, var, id=None, attributes=None, axes=None, extbounds # Create the new variable datatype = cdmsNode.NumericToCdType.get(var.typecode()) newvar = self.createVariable(newname, 
datatype, axislist) - for attname,attval in attributes.items(): + for attname, attval in attributes.items(): if attname not in ["id", "datatype", "parent"]: setattr(newvar, attname, attval) if (attname == "_FillValue") or (attname == "missing_value"): - setattr(newvar, "_FillValue", attval) - setattr(newvar, "missing_value", attval) + setattr(newvar, "_FillValue", attval) + setattr(newvar, "missing_value", attval) if fill_value is not None: newvar.setMissing(fill_value) return newvar - def write(self, var, attributes=None, axes=None, extbounds=None, id=None, \ + def write(self, var, attributes=None, axes=None, extbounds=None, id=None, extend=None, fill_value=None, index=None, typecode=None, dtype=None, pack=False): """Write var to the file. If the variable is not yet defined in the file, a definition is created. By default, the time dimension of the variable is defined as the @@ -1814,9 +1849,12 @@ def write(self, var, attributes=None, axes=None, extbounds=None, id=None, \ axes :: (None/[cdms2.axis.AbstractAxis]) (None) list of axes to use for the copied variable extbounds :: (None/numpy.ndarray) (None) Bounds of the (portion of) the extended dimension being written id :: (str/None) (None) id of copied variable - extend :: (int) (0) If 1, define the first dimension as the unlimited dimension. If 0, do not define an unlimited dimension. The default is the define the first dimension as unlimited only if it is a time dimension. + extend :: (int) (0) If 1, define the first dimension as the unlimited dimension. + If 0, do not define an unlimited dimension. + The default is the define the first dimension as unlimited only if it is a time dimension. fill_value :: (None/float) (None) the missing value flag - index :: (None/int) the extended dimension index to write to. The default index is determined by lookup relative to the existing extended dimension + index :: (None/int) the extended dimension index to write to. 
+ The default index is determined by lookup relative to the existing extended dimension typecode :: (None/str) (None) typdecode to write the variable as dtype :: (None/numpy.dtype) type to write the variable as; overwrites typecode pack :: (False/True/numpy/numpy.int8/numpy.int16/numpy.int32/numpy.int64) pack the data to save up space @@ -1826,15 +1864,30 @@ def write(self, var, attributes=None, axes=None, extbounds=None, id=None, \ ::: """ if _showCompressWarnings: - if (Cdunif.CdunifGetNCFLAGS("shuffle")!=0) or (Cdunif.CdunifGetNCFLAGS("deflate")!=0) or (Cdunif.CdunifGetNCFLAGS("deflate_level")!=0): + if (Cdunif.CdunifGetNCFLAGS("shuffle") != 0) or (Cdunif.CdunifGetNCFLAGS( + "deflate") != 0) or (Cdunif.CdunifGetNCFLAGS("deflate_level") != 0): import warnings - warnings.warn("Files are written with compression and shuffling\nYou can query different values of compression using the functions:\ncdms2.getNetcdfShuffleFlag() returning 1 if shuffling is enabled, 0 otherwise\ncdms2.getNetcdfDeflateFlag() returning 1 if deflate is used, 0 otherwise\ncdms2.getNetcdfDeflateLevelFlag() returning the level of compression for the deflate method\n\nIf you want to turn that off or set different values of compression use the functions:\nvalue = 0\ncdms2.setNetcdfShuffleFlag(value) ## where value is either 0 or 1\ncdms2.setNetcdfDeflateFlag(value) ## where value is either 0 or 1\ncdms2.setNetcdfDeflateLevelFlag(value) ## where value is a integer between 0 and 9 included\n\nTurning all values to 0 will produce NetCDF3 Classic files\nTo Force NetCDF4 output with classic format and no compressing use:\ncdms2.setNetcdf4Flag(1)\nNetCDF4 file with no shuffling or defalte and noclassic will be open for parallel i/o",Warning) - + warnings.warn("Files are written with compression and shuffling\n" + + "You can query different values of compression using the functions:\n" + + "cdms2.getNetcdfShuffleFlag() returning 1 if shuffling is enabled, " + + "0 otherwise\ncdms2.getNetcdfDeflateFlag() 
returning 1 if deflate is used, " + + "0 otherwise\ncdms2.getNetcdfDeflateLevelFlag() " + + "returning the level of compression for the deflate method\n\n" + + "If you want to turn that off or set different values of compression " + + "use the functions:\nvalue = 0\ncdms2.setNetcdfShuffleFlag(value) " + + "## where value is either 0 or 1\ncdms2.setNetcdfDeflateFlag(value) " + + "## where value is either 0 or 1\ncdms2.setNetcdfDeflateLevelFlag(value) " + + "## where value is a integer between 0 and 9 included\n\nTurning all values " + + "to 0 will produce NetCDF3 Classic files\nTo Force NetCDF4 output with " + + "classic format and no compressing use:\ncdms2.setNetcdf4Flag(1)\n" + + "NetCDF4 file with no shuffling or deflate and noclassic will be open " + + "for parallel i/o", Warning) + # Make var an AbstractVariable if dtype is None and typecode is not None: dtype = typeconv.convtypecode2(typecode) typecode = dtype - if typecode is not None and var.dtype.char!=typecode: + if typecode is not None and var.dtype.char != typecode: var = var.astype(typecode) var = asVariable(var, writeable=0) @@ -1843,25 +1896,23 @@ def write(self, var, attributes=None, axes=None, extbounds=None, id=None, \ varid = var.id else: varid = id - if self.variables.has_key(varid): + if varid in self.variables: if pack: - raise CDMSError("You cannot pack an existing variable %s " % varid) + raise CDMSError("You cannot pack an existing variable %s " % varid) v = self.variables[varid] else: - if pack is not False: - typ = numpy.int16 - n = 16 - else: - typ = var.dtype - v = self.createVariableCopy(var.astype(typ), attributes=attributes, axes=axes, extbounds=extbounds, - id=varid, extend=extend, fill_value=fill_value, index=index) - - + if pack is not False: + typ = numpy.int16 + n = 16 + else: + typ = var.dtype + v = self.createVariableCopy(var.astype(typ), attributes=attributes, axes=axes, extbounds=extbounds, + id=varid, extend=extend, fill_value=fill_value, index=index) # If var has typecode 
numpy.int, and v is created from var, then v will have # typecode numpy.int32. (This is a Cdunif 'feature'). This causes a downcast error # for numpy versions 23+, so make the downcast explicit. - if var.typecode()==numpy.int and v.typecode()==numpy.int32 and pack is False: + if var.typecode() == numpy.int and v.typecode() == numpy.int32 and pack is False: var = var.astype(numpy.int32) # Write @@ -1871,17 +1922,17 @@ def write(self, var, attributes=None, axes=None, extbounds=None, id=None, \ sourceAxislist = axes vrank = var.rank() - if vrank==0: # scalars are not extensible + if vrank == 0: # scalars are not extensible extend = 0 else: vec1 = sourceAxislist[0] - - if extend==0 or (extend is None and not vec1.isTime()): - if vrank>0: + + if extend == 0 or (extend is None and not vec1.isTime()): + if vrank > 0: if pack is not False: - v[:] = numpy.zeros(var.shape,typ) + v[:] = numpy.zeros(var.shape, typ) else: - v[:] = var.astype(v.dtype) + v[:] = var.astype(v.dtype) else: v.assignValue(var.getValue()) else: @@ -1895,47 +1946,51 @@ def write(self, var, attributes=None, axes=None, extbounds=None, id=None, \ isoverlap, index = isOverlapVector(vec1[:], vec2[:]) else: isoverlap = 1 - if isoverlap==1: - v[index:index+len(vec1)] = var.astype(v.dtype) - vec2[index:index+len(vec1)] = vec1[:].astype(vec2[:].dtype) + if isoverlap == 1: + v[index:index + len(vec1)] = var.astype(v.dtype) + vec2[index:index + len(vec1)] = vec1[:].astype(vec2[:].dtype) if bounds1 is not None: vec2.setBounds(bounds1, persistent=1, index=index) else: - raise CDMSError('Cannot write variable %s: the values of dimension %s=%s, do not overlap the extended dimension %s values: %s'%(varid, vec1.id,`vec1[:]`,vec2.id,`vec2[:]`)) + raise CDMSError( + 'Cannot write variable %s: the values of dimension %s=%s, do not overlap the ' + + 'extended dimension %s values: %s' % + (varid, vec1.id, repr( + vec1[:]), vec2.id, repr( + vec2[:]))) # pack implementation source: 
https://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html if pack: - M = var.max() - m = var.min() - scale_factor = (M-m)/(pow(2,n)-2) - add_offset = (M+m)/2. - missing = -pow(2,n-1) - v.setMissing(-pow(2,n-1)) - scale_factor = scale_factor.astype(var.dtype) - add_offset = add_offset.astype(var.dtype) - tmp = (var-add_offset)/scale_factor - tmp= numpy.round(tmp) - tmp=tmp.astype(typ) - v[:] = tmp.filled() - v.scale_factor = scale_factor.astype(var.dtype) - v.add_offset = add_offset.astype(var.dtype) - if not hasattr(var,"valid_min"): - v.valid_min = m.astype(var.dtype) - if not hasattr(var,"valid_max"): - v.valid_max = M.astype(var.dtype) + M = var.max() + m = var.min() + scale_factor = (M - m) / (pow(2, n) - 2) + add_offset = (M + m) / 2. + v.setMissing(-pow(2, n - 1)) + scale_factor = scale_factor.astype(var.dtype) + add_offset = add_offset.astype(var.dtype) + tmp = (var - add_offset) / scale_factor + tmp = numpy.round(tmp) + tmp = tmp.astype(typ) + v[:] = tmp.filled() + v.scale_factor = scale_factor.astype(var.dtype) + v.add_offset = add_offset.astype(var.dtype) + if not hasattr(var, "valid_min"): + v.valid_min = m.astype(var.dtype) + if not hasattr(var, "valid_max"): + v.valid_max = M.astype(var.dtype) return v - def write_it_yourself( self, obj ): + def write_it_yourself(self, obj): """Tell obj to write itself to self (already open for writing), using its writeg method (AbstractCurveGrid has such a method, for example). If no such method be available, writeToFile will be used. If that is not available, then self.write(obj) will be called to try to write obj as a variable.""" # This method was formerly called writeg and just wrote an AbstractCurveGrid. 
- if ( hasattr(obj,'writeg') and callable(getattr(obj,'writeg')) ): - obj.writeg( self ) - elif ( hasattr(obj,'writeToFile') and callable(getattr(obj,'writeToFile')) ): - obj.writeToFile( self ) + if (hasattr(obj, 'writeg') and callable(getattr(obj, 'writeg'))): + obj.writeg(self) + elif (hasattr(obj, 'writeToFile') and callable(getattr(obj, 'writeToFile'))): + obj.writeToFile(self) else: self.write(obj) @@ -1948,7 +2003,7 @@ def getVariable(self, id): ::: Output::: variable :: (cdms2.fvariable.FileVariable/None) (0) file variable - ::: + ::: """ return self.variables.get(id) @@ -1957,15 +2012,23 @@ def getVariables(self, spatial=0): axes defined on latitude or longitude, excluding weights and bounds. ::: Options::: - spatial :: (int/True/False) (0) If spatial=1, only return those axes defined on latitude or longitude, excluding weights and bounds + spatial :: (int/True/False) (0) If spatial=1, only return those axes defined on latitude or + longitude, excluding weights and bounds ::: Output::: variables :: ([cdms2.fvariable.FileVariable]) (0) file variables - ::: + ::: """ retval = self.variables.values() if spatial: - retval = filter(lambda x: x.id[0:7]!="bounds_" and x.id[0:8]!="weights_" and ((x.getLatitude() is not None) or (x.getLongitude() is not None) or (x.getLevel() is not None)), retval) + retval = filter( + lambda x: x.id[ + 0:7] != "bounds_" and x.id[ + 0:8] != "weights_" and ( + (x.getLatitude() is not None) or ( + x.getLongitude() is not None) or ( + x.getLevel() is not None)), + retval) return retval def getAxis(self, id): @@ -1976,7 +2039,7 @@ def getAxis(self, id): ::: Output::: axis :: (cdms2.axis.FileAxis/None) (0) file axis - ::: + ::: """ return self.axes.get(id) @@ -1989,11 +2052,11 @@ def getGrid(self, id): ::: Output::: grid :: (cdms2.hgrid.FileCurveGrid/cdms2.gengrid.FileGenericGrid/cdms2.grid.FileRectGrid/None) (0) file axis - ::: + ::: """ return self.grids.get(id) - def getBoundsAxis(self, n,boundid=None): + def getBoundsAxis(self, n, 
boundid=None): """Get a bounds axis of length n. Create the bounds axis if necessary. ::: Input::: @@ -2004,30 +2067,31 @@ def getBoundsAxis(self, n,boundid=None): ::: """ if boundid is None: - if n==2: + if n == 2: boundid = "bound" else: - boundid = "bound_%d"%n - - if self.axes.has_key(boundid): + boundid = "bound_%d" % n + + if boundid in self.axes: boundaxis = self.axes[boundid] else: boundaxis = self.createVirtualAxis(boundid, n) return boundaxis def __repr__(self): - filerep = `self._file_` - loc = string.find(filerep,"file") - if loc==-1: loc=0 - return ""%self._status_ - -## internattr.add_internal_attribute (CdmsFile, 'datapath', -## 'variables', -## 'axes', -## 'grids', -## 'xlinks', -## 'dictdict', -## 'default_variable_name', -## 'id', -## 'parent', -## 'mode') + filerep = repr(self._file_) + loc = string.find(filerep, "file") + if loc == -1: + loc = 0 + return "" % self._status_ + +# internattr.add_internal_attribute (CdmsFile, 'datapath', +# 'variables', +# 'axes', +# 'grids', +# 'xlinks', +# 'dictdict', +# 'default_variable_name', +# 'id', +# 'parent', +# 'mode') diff --git a/Lib/fvariable.py b/Lib/fvariable.py index 4271e12c..555bd849 100644 --- a/Lib/fvariable.py +++ b/Lib/fvariable.py @@ -1,5 +1,5 @@ -## Automatically adapted for numpy.oldnumeric Aug 01, 2007 by -## Further modified to be pure new numpy June 24th 2008 +# Automatically adapted for numpy.oldnumeric Aug 01, 2007 by +# Further modified to be pure new numpy June 24th 2008 "CDMS File-based variables." import numpy @@ -18,6 +18,7 @@ FileClosed = "Cannot read from closed file, variable: " FileClosedWrite = "Cannot write to a closed file, variable: " + class FileVariable(DatasetVariable): "A variable in a single file." 
def __init__(self,parent,varname,cdunifobj=None): @@ -115,7 +116,7 @@ def __setslice__(self, low, high, value): high = min(Max32int, high) if high == Max32int and self.rank()==0: high=1 - + if numpy.ma.isMaskedArray(value): if value.mask is not numpy.ma.nomask and not numpy.ma.allclose(value.mask,0): saveFill = value.fill_value @@ -169,7 +170,7 @@ def getValue(self, squeeze=1): return self.getSlice(Ellipsis, squeeze=squeeze) else: return self._obj_.getValue() - + def __len__(self): " Length of first dimension. " if self.parent is None: diff --git a/Lib/tvariable.py b/Lib/tvariable.py index 152875ad..1f8eb6e3 100644 --- a/Lib/tvariable.py +++ b/Lib/tvariable.py @@ -1,4 +1,4 @@ -# Automatically adapted for numpy.oldnumeric Aug 01, 2007 by +# Automatically adapted for numpy.oldnumeric Aug 01, 2007 by # Further modified to be pure new numpy June 24th 2008 """ @@ -20,7 +20,7 @@ from hgrid import AbstractCurveGrid from gengrid import AbstractGenericGrid -# dist array support +# dist array support HAVE_MPI = False try: from mpi4py import MPI @@ -31,38 +31,39 @@ id_builtin = id # built_in gets clobbered by keyword + def fromJSON(jsn): """ Recreate a TV from a dumped jsn object""" D = json.loads(jsn) - ## First recreates the axes - axes=[] + # First recreates the axes + axes = [] for a in D["_axes"]: - ax = createAxis(numpy.array(a["_values"],dtype=a["_dtype"]),id=a["id"]) - for k,v in a.iteritems(): - if not k in ["_values","id","_dtype"]: - setattr(ax,k,v) + ax = createAxis(numpy.array(a["_values"], dtype=a["_dtype"]), id=a["id"]) + for k, v in a.iteritems(): + if k not in ["_values", "id", "_dtype"]: + setattr(ax, k, v) axes.append(ax) - ## Now prep the variable - V= createVariable(D["_values"],id=D["id"],typecode=D["_dtype"]) + # Now prep the variable + V = createVariable(D["_values"], id=D["id"], typecode=D["_dtype"]) V.setAxisList(axes) - for k,v in D.iteritems(): - if not k in ["id","_values","_axes","_grid","_fill_value","_dtype",]: - setattr(V,k,v) + for k, v in 
D.iteritems(): + if k not in ["id", "_values", "_axes", "_grid", "_fill_value", "_dtype", ]: + setattr(V, k, v) V.set_fill_value(D["_fill_value"]) return V -class TransientVariable(AbstractVariable,numpy.ma.MaskedArray): +class TransientVariable(AbstractVariable, numpy.ma.MaskedArray): "An in-memory variable." variable_count = 0 _missing = numpy.ma.MaskedArray.fill_value - def _getShape(self): return self._data.shape - shape = property(_getShape,None) + shape = property(_getShape, None) + def iscontiguous(self): return self.flags['CONTIGUOUS'] @@ -71,49 +72,51 @@ def ascontiguousarray(self): out = numpy.ascontiguousarray(d) m = numpy.ma.getmask(self) if m is not numpy.ma.nomask: - m= numpy.ascontiguousarray(m) - out = TransientVariable(out,mask=m,attributes=self.attributes) + m = numpy.ascontiguousarray(m) + out = TransientVariable(out, mask=m, attributes=self.attributes) out.setAxisList(self.getAxisList()) out.setMissing(self.getMissing()) return out - + ascontiguous = ascontiguousarray - + def asma(self): - return numpy.ma.array(self._data,mask=self._mask) - - def _update_from(self,obj): - numpy.ma.MaskedArray._update_from(self,obj) - if not hasattr(self,'___cdms_internals__'): - self.__dict__['___cdms_internals__']=['__cdms_internals__','___cdms_internals__','_node_','parent','attributes','shape'] - if not hasattr(self,'attributes'): - self.attributes={} - self._grid_ = getattr(obj,'_grid_',None) + return numpy.ma.array(self._data, mask=self._mask) + + def _update_from(self, obj): + numpy.ma.MaskedArray._update_from(self, obj) + if not hasattr(self, '___cdms_internals__'): + self.__dict__['___cdms_internals__'] = ['__cdms_internals__', + '___cdms_internals__', '_node_', 'parent', 'attributes', 'shape'] + if not hasattr(self, 'attributes'): + self.attributes = {} + self._grid_ = getattr(obj, '_grid_', None) try: - for nm,val in obj.__dict__.items(): - if nm[0]=='_': -## print nm + for nm, val in obj.__dict__.items(): + if nm[0] == '_': + # print nm pass -## 
self.__dict__[nm]=val +# self.__dict__[nm]=val else: - setattr(self,nm,val) - except Exception,err: + setattr(self, nm, val) + except Exception: pass - id = getattr(self,'id',None) + id = getattr(self, 'id', None) if id is None: - TransientVariable.variable_count+=1 - id = 'variable_'+str(TransientVariable.variable_count) - self.id=id - self.name = getattr(obj,'name',id) - if not hasattr(self,'__domain'): + TransientVariable.variable_count += 1 + id = 'variable_' + str(TransientVariable.variable_count) + self.id = id + self.name = getattr(obj, 'name', id) + if not hasattr(self, '__domain'): self.initDomain(axes=None) - - def __array_finalize__(self,obj): - numpy.ma.MaskedArray.__array_finalize__(self,obj) + def __array_finalize__(self, obj): + numpy.ma.MaskedArray.__array_finalize__(self, obj) return - + def __copy__(self): + return numpy.ma.MaskedArray.copy(self) + __mul__ = AbstractVariable.__mul__ __rmul__ = AbstractVariable.__rmul__ __imul__ = AbstractVariable.__imul__ @@ -125,8 +128,8 @@ def __array_finalize__(self,obj): __lshift__ = AbstractVariable.__lshift__ __rshift__ = AbstractVariable.__rshift__ __sub__ = AbstractVariable.__sub__ - __rsub__ = AbstractVariable.__rsub__ - __isub__ = AbstractVariable.__isub__ + __rsub__ = AbstractVariable.__rsub__ + __isub__ = AbstractVariable.__isub__ __div__ = AbstractVariable.__div__ __rdiv__ = AbstractVariable.__rdiv__ __idiv__ = AbstractVariable.__idiv__ @@ -139,81 +142,81 @@ def __array_finalize__(self,obj): __ge__ = AbstractVariable.__ge__ __sqrt__ = AbstractVariable.__sqrt__ - def __init__(self,data, typecode=None, copy=1, savespace=0, + def __init__(self, data, typecode=None, copy=1, savespace=0, mask=numpy.ma.nomask, fill_value=None, grid=None, - axes=None, attributes=None, id=None, copyaxes=1, dtype=None, - order=False, no_update_from=False,**kargs): - """createVariable (self, data, typecode=None, copy=0, savespace=0, + axes=None, attributes=None, id=None, copyaxes=1, dtype=None, + order='C', 
no_update_from=False, **kargs): + """createVariable (self, data, typecode=None, copy=0, savespace=0, mask=None, fill_value=None, grid=None, - axes=None, attributes=None, id=None, dtype=None, order=False) + axes=None, attributes=None, id=None, dtype=None, order='C') The savespace argument is ignored, for backward compatibility only. """ try: if data.fill_value is not None: self._setmissing(data.fill_value) - fill_value=data.fill_value + fill_value = data.fill_value except: pass if fill_value is not None: - self._setmissing(fill_value) + self._setmissing(fill_value) if attributes is not None and "_FillValue" in attributes.keys(): - self._setmissing(attributes["_FillValue"]) + self._setmissing(attributes["_FillValue"]) - # tile index, None means no mosaic + # tile index, None means no mosaic self.tileIndex = None - + # Compatibility: assuming old typecode, map to new if dtype is None and typecode is not None: dtype = typeconv.convtypecode2(typecode) typecode = sctype2char(dtype) - if type(data) is types.TupleType: + if isinstance(data, types.TupleType): data = list(data) - - AbstractVariable.__init__ (self) + + AbstractVariable.__init__(self) if isinstance(data, AbstractVariable): if not isinstance(data, TransientVariable): data = data.subSlice() -## if attributes is None: attributes = data.attributes +# if attributes is None: attributes = data.attributes if axes is None and not no_update_from: axes = map(lambda x: x[0], data.getDomain()) if grid is None and not no_update_from: grid = data.getGrid() if (grid is not None) and (not isinstance(grid, AbstractRectGrid)) \ and (not grid.checkAxes(axes)): - grid = grid.reconcile(axes) # Make sure grid and axes are consistent + grid = grid.reconcile(axes) # Make sure grid and axes are consistent ncopy = (copy!=0) # Initialize the geometry if grid is not None: - copyaxes=0 # Otherwise grid axes won't match domain. + copyaxes = 0 # Otherwise grid axes won't match domain. 
if axes is not None: self.initDomain(axes, copyaxes=copyaxes) # Note: clobbers the grid, so set the grid after. if grid is not None: self.setGrid(grid) - + # Initialize the attributes if attributes is not None: for key, value in attributes.items(): - if (key in ['shape','flat','imaginary','real'] or key[0]=='_') and key not in ['_FillValue']: - raise CDMSError, 'Bad key in attributes: ' + key + if (key in ['shape', 'flat', 'imaginary', 'real'] or key[0] == '_') and key not in ['_FillValue']: + raise CDMSError('Bad key in attributes: ' + key) elif key == 'missing_value': - #ignore if fill value given explicitly + # ignore if fill value given explicitly if fill_value is None: self._setmissing(value) - elif key not in ['scale_factor','add_offset']: + elif key not in ['scale_factor', 'add_offset']: setattr(self, key, value) # Sync up missing_value attribute and the fill value. self.missing_value = self._getmissing() self._FillValue = self._getmissing() if id is not None: - if not isinstance(id,(unicode,str)): - raise CDMSError, 'id must be a string' + if not isinstance(id, (unicode, str)): + raise CDMSError('id must be a string') self.id = id - elif hasattr(data,'id'): + elif hasattr(data, 'id'): self.id = data.id if self.id is None: @@ -228,92 +231,84 @@ def __init__(self,data, typecode=None, copy=1, savespace=0, self.__mpiWindows = {} self.__mpiType = self.__getMPIType() - def _getmissing(self): return self._missing - def _setmissing(self,value): - self._missing=numpy.array(value).astype(self.dtype) + def _setmissing(self, value): + self._missing = numpy.array(value).astype(self.dtype) - missing = property(_getmissing,_setmissing) - fill_value = property(_getmissing,_setmissing) - _FillValue = property(_getmissing,_setmissing) - missing_value = property(_getmissing,_setmissing) + missing = property(_getmissing, _setmissing) + fill_value = property(_getmissing, _setmissing) + _FillValue = property(_getmissing, _setmissing) + missing_value = property(_getmissing, 
_setmissing) - def __new__(cls, data, typecode=None, copy=0, savespace=0, + def __new__(cls, data, typecode=None, copy=0, savespace=0, mask=numpy.ma.nomask, fill_value=None, grid=None, - axes=None, attributes=None, id=None, copyaxes=1, dtype=None, order=False,**kargs): - """createVariable (self, data, typecode=None, copy=0, savespace=0, + axes=None, attributes=None, id=None, copyaxes=1, dtype=None, order='C', **kargs): + """createVariable (self, data, typecode=None, copy=0, savespace=0, mask=None, fill_value=None, grid=None, - axes=None, attributes=None, id=None, dtype=None, order=False) + axes=None, attributes=None, id=None, dtype=None, order='C') The savespace argument is ignored, for backward compatibility only. """ # Compatibility: assuming old typecode, map to new if dtype is None and typecode is not None: dtype = typeconv.convtypecode2(typecode) typecode = sctype2char(dtype) - if type(data) is types.TupleType: + if isinstance(data, types.TupleType): data = list(data) if isinstance(data, AbstractVariable): if not isinstance(data, TransientVariable): data = data.subSlice() if isinstance(data, numpy.ma.MaskedArray): try: - if fill_value is None: fill_value = data.fill_value + if fill_value is None: + fill_value = data.fill_value except: pass - ncopy = (copy!=0) + ncopy = (copy != 0) if mask is None: try: mask = data.mask - except Exception,err: + except Exception: mask = numpy.ma.nomask # Handle the case where ar[i:j] returns a single masked value if data is numpy.ma.masked: - #shape = tuple(len(axes)*[1]) data = numpy.ma.masked.data - #data.shape = shape mask = numpy.ma.masked.mask - #mask.shape = shape -## if data.getattr('mask',None) is not numpy.ma.nomask: -## mask = data.mask -## print 'passing:',mask.shape,data.shape,numpy.shape(cls) + if fill_value is not None: fill_value = numpy.array(fill_value).astype(dtype) else: fill_value = numpy.ma.MaskedArray(1).astype(dtype).item() + self = numpy.ma.MaskedArray.__new__(cls, data, dtype=dtype, + copy=ncopy, + 
mask=mask, + fill_value=fill_value, + subok=False, + order=order) - self = numpy.ma.MaskedArray.__new__(cls, data, dtype = dtype, - copy = ncopy, - mask = mask, - fill_value = fill_value, - subok = False, - order = order) - - - return self # typecode = numpy.ma.array.typecode def typecode(self): return self.dtype.char - def assignValue(self,data): + def assignValue(self, data): self[...] = data def getValue(self, squeeze=1): return self.filled() - def expertSlice (self, slicelist): + def expertSlice(self, slicelist): return numpy.ma.MaskedArray.__getitem__(self, slicelist) - def initDomain (self, axes, copyaxes=1): + def initDomain(self, axes, copyaxes=1): # lazy evaluation via getAxis to avoid creating axes that aren't ever used. newgrid = None - self.__domain = [None]*self.rank() + self.__domain = [None] * self.rank() if axes is not None: flataxes = [] try: @@ -326,22 +321,22 @@ def initDomain (self, axes, copyaxes=1): elif isinstance(item, AbstractRectGrid) or isinstance(item, AbstractCurveGrid): flataxes.append(item.getAxis(0)) flataxes.append(item.getAxis(1)) - copyaxes=0 + copyaxes = 0 newgrid = item elif isinstance(item, AbstractGenericGrid): flataxes.append(item.getAxis(0)) - copyaxes=0 + copyaxes = 0 newgrid = item else: - raise CDMSError, "Invalid item in axis list:\n"+`item` + raise CDMSError("Invalid item in axis list:\n" + repr(item)) if len(flataxes) != self.rank(): - raise CDMSError, "Wrong number of axes to initialize domain." + raise CDMSError("Wrong number of axes to initialize domain.") for i in range(len(flataxes)): if flataxes[i] is not None: - if (not flataxes[i].isVirtual()) and copyaxes==1: + if (not flataxes[i].isVirtual()) and copyaxes == 1: self.copyAxis(i, flataxes[i]) else: - self.setAxis(i, flataxes[i]) # No sense copying a virtual axis. + self.setAxis(i, flataxes[i]) # No sense copying a virtual axis. 
if newgrid is not None: # Do this after setting the axes, so the grid is consistent self.setGrid(newgrid) @@ -351,8 +346,9 @@ def getDomain(self): junk = self.getAxis(i) # will force a fill in return self.__domain - def getAxis (self, n): - if n < 0: n = n + self.rank() + def getAxis(self, n): + if n < 0: + n = n + self.rank() if self.__domain[n] is None: length = numpy.ma.size(self, n) # axis = createAxis(numpy.ma.arange(numpy.ma.size(self, n), typecode=numpy.Float)) @@ -360,16 +356,17 @@ def getAxis (self, n): axis.id = "axis_" + str(n) self.__domain[n] = (axis, 0, length, length) return self.__domain[n][0] - - def setAxis (self, n, axis, savegrid=0): + + def setAxis(self, n, axis, savegrid=0): """Set n axis of self to a copy of axis. (0-based index) """ - if n < 0: n = n + self.rank() + if n < 0: + n = n + self.rank() axislen = self.shape[n] - if len(axis)!=axislen: - raise CDMSError,"axis length %d does not match corresponding dimension %d"%(len(axis),axislen) + if len(axis) != axislen: + raise CDMSError("axis length %d does not match corresponding dimension %d" % (len(axis), axislen)) if not isinstance(axis, AbstractAxis): - raise CDMSError,"copydimension, other not a slab." + raise CDMSError("copydimension, other not a slab.") self.__domain[n] = (axis, 0, len(axis), len(axis)) def setAxisList(self, axislist): @@ -377,27 +374,28 @@ def setAxisList(self, axislist): for i in range(len(axislist)): self.setAxis(i, axislist[i]) - def copyAxis (self, n, axis): + def copyAxis(self, n, axis): """Set n axis of self to a copy of axis. (0-based index) Invalidates grid. """ - if n < 0: n = n + self.rank() + if n < 0: + n = n + self.rank() if not isinstance(axis, AbstractAxis): - raise CDMSError,"copydimension, other not an axis." 
+ raise CDMSError("copydimension, other not an axis.") isGeneric = [False] b = axis.getBounds(isGeneric) mycopy = createAxis(axis[:], b, genericBounds=isGeneric[0]) mycopy.id = axis.id for k, v in axis.attributes.items(): - setattr(mycopy, k, v) - self.setAxis (n, mycopy) - - def copyDomain (self, other): + setattr(mycopy, k, v) + self.setAxis(n, mycopy) + + def copyDomain(self, other): "Set the axes and grid by copying variable other." if not isinstance(other, AbstractVariable): - raise CDMSError,"copyDomain, other not a variable." + raise CDMSError("copyDomain, other not a variable.") if self.rank() != other.rank(): - raise CDMSError, "copyDomain, ranks do not match." + raise CDMSError("copyDomain, ranks do not match.") for i in range(self.rank()): self.copyAxis(i, other.getAxis(i)) self.setGrid(other.getGrid()) @@ -408,20 +406,21 @@ def getGrid(self): for i in range(self.rank()): ax = self.getAxis(i) if ax.isLatitude(): - order = order+'y' + order = order + 'y' lat = ax elif ax.isLongitude(): - order = order+'x' + order = order + 'x' lon = ax - if len(order)==2: break + if len(order) == 2: + break - if order in ['yx','xy']: - self._grid_ = createRectGrid(lat,lon,order) + if order in ['yx', 'xy']: + self._grid_ = createRectGrid(lat, lon, order) return self._grid_ - def astype (self, tc): + def astype(self, tc): "return self as array of given type." 
- maresult = numpy.ma.MaskedArray.astype(self,tc) + maresult = numpy.ma.MaskedArray.astype(self, tc) return TransientVariable(maresult, copy=0, axes=self.getAxisList(), fill_value=self.fill_value, attributes=self.attributes, id=self.id, grid=self.getGrid()) @@ -439,18 +438,18 @@ def setMaskFromGridMask(self, mask, gridindices): shapeprep.append(self.shape[i]) # Broadcast mask - if tprep!=[]: + if tprep != []: newshape = tuple(shapeprep + list(mask.shape)) bigmask = numpy.resize(mask, newshape) # Generate the tranpose vector t = tuple(tprep + list(gridindices)) - tinv = [0]*len(t) + tinv = [0] * len(t) for i in range(len(t)): tinv[t[i]] = i # And reshape to fit the variable - if tinv!=range(len(tinv)): + if tinv != range(len(tinv)): bigmask = numpy.transpose(bigmask, tuple(tinv)) else: @@ -465,25 +464,25 @@ def setMaskFromGridMask(self, mask, gridindices): return result # Old cu interface - def copydimension (self, idim, other, jdim): - """Set idim dimension of self to variable other's jdim'th + def copydimension(self, idim, other, jdim): + """Set idim dimension of self to variable other's jdim'th This is for old cu compatibility. Use copyAxis for new code. """ if not isinstance(other, AbstractVariable): - raise CDMSError,"copydimension, other not a variable." + raise CDMSError("copydimension, other not a variable.") a = other.getAxis(jdim) self.copyAxis(idim, a) def setdimattribute(self, dim, field, value): "Set the attribute named field from the dim'th dimension." if dim < 0 or dim >= self.rank(): - raise CDMSError, "setdimattribute, dim out of bounds." + raise CDMSError("setdimattribute, dim out of bounds.") d = self.getAxis(dim) if field == "name": - if not type(value) == types.StringType: - raise CDMSError, "setdimattribute: name not a string" + if not isinstance(value, types.StringType): + raise CDMSError("setdimattribute: name not a string") d.id = value - + elif field == "values": # note -- invalidates grid, may break old code. 
a = createAxis(numpy.ma.filled(value[:])) @@ -493,29 +492,28 @@ def setdimattribute(self, dim, field, value): self.setAxis(dim, a) elif field == "units": - if not type(value) == types.StringType: - raise CDMSError, "setdimattribute: units not a string" + if not isinstance(value, types.StringType): + raise CDMSError("setdimattribute: units not a string") d.units = value elif field == "weights": # Well, you can't really do this without modifying the grid - raise CDMSError, "setdimattribute weights not implemented." + raise CDMSError("setdimattribute weights not implemented.") elif field == "bounds": if value is None: - d.setBounds(None) + d.setBounds(None) else: - b = numpy.ma.filled(value) - if numpy.ma.rank(b) == 2: - d.setBounds(b) - elif numpy.ma.rank(b) == 1: - b1 = numpy.zeros((len(b)-1,2), b.dtype.char) - b1[:,0] = b[:-1] - b1[:,1] = b[1:] - d.setBounds(b1) - else: - raise CDMSError, \ - "setdimattribute, bounds improper shape: " + b.shape + b = numpy.ma.filled(value) + if numpy.ma.rank(b) == 2: + d.setBounds(b) + elif numpy.ma.rank(b) == 1: + b1 = numpy.zeros((len(b) - 1, 2), b.dtype.char) + b1[:, 0] = b[:-1] + b1[:, 1] = b[1:] + d.setBounds(b1) + else: + raise CDMSError("setdimattribute, bounds improper shape: " + b.shape) else: setattr(d, field, value) @@ -526,63 +524,62 @@ def clone(self, copyData=1): result = createVariable(self, copy=copyData) return result - def dumps(self,*args,**kargs): - ## Probably need something for curv/gen grids + def dumps(self, *args, **kargs): + # Probably need something for curv/gen grids """ Dumps Variable to a jason object, args are passed directly to json.dump""" - J={} - for k,v in self.attributes.iteritems(): - if k=="autoApiInfo": + J = {} + for k, v in self.attributes.iteritems(): + if k == "autoApiInfo": continue - J[k]=v - J['id']=self.id - axes=[] + J[k] = v + J['id'] = self.id + axes = [] for a in self.getAxisList(): - ax={} - for A,v in a.attributes.iteritems(): - ax[A]=v - ax['id']=a.id - 
ax["_values"]=a[:].tolist() - ax["_dtype"]=a[:].dtype.char + ax = {} + for A, v in a.attributes.iteritems(): + ax[A] = v + ax['id'] = a.id + ax["_values"] = a[:].tolist() + ax["_dtype"] = a[:].dtype.char axes.append(ax) - J["_axes"]=axes - J["_values"]=self[:].filled(self.fill_value).tolist() - J["_fill_value"]=float(self.fill_value) - J["_dtype"]=self.typecode() - J["_grid"]=None #self.getGrid() - return json.dumps(J,*args,**kargs) + J["_axes"] = axes + J["_values"] = self[:].filled(self.fill_value).tolist() + J["_fill_value"] = float(self.fill_value) + J["_dtype"] = self.typecode() + J["_grid"] = None # self.getGrid() + return json.dumps(J, *args, **kargs) def isEncoded(self): "Transient variables are not encoded" return 0 - def __len__ (self): + def __len__(self): "Length of first dimension" - if self.rank()>0: - (axis,start,length,true_length) = self.getDomain()[0] + if self.rank() > 0: + (axis, start, length, true_length) = self.getDomain()[0] else: length = 0 return length - def __str__ (self): + def __str__(self): return numpy.ma.MaskedArray.__str__(self) - def __repr__ (self): + def __repr__(self): return self.id + '\n' + numpy.ma.MaskedArray.__repr__(self) + '\n' def set_fill_value(self, value): "Set missing value attribute and fill value" AbstractVariable.setMissing(self, value) - #self.__dict__['_fill_value'] = self.missing_value - ## Fix submitted by Ghislain Picard, this was broken with numpy 1.5 - numpy.ma.MaskedArray.set_fill_value(self,value) + # Fix submitted by Ghislain Picard, this was broken with numpy 1.5 + numpy.ma.MaskedArray.set_fill_value(self, value) - def setMissing (self, value): + def setMissing(self, value): "Set missing value attribute and fill value" self.set_fill_value(value) # For aggregation server interface. Use clone to make a true copy. 
def copy(self): - return self.filled() + return self.__copy__() def setTileIndex(self, index): """ @@ -597,7 +594,7 @@ def getTileIndex(self): """ return self.tileIndex - def toVisit(self, filename, format='Vs', sphereRadius=1.0, + def toVisit(self, filename, format='Vs', sphereRadius=1.0, maxElev=0.1): """ Save data to file for postprocessing by the VisIt visualization tool @@ -613,14 +610,14 @@ def toVisit(self, filename, format='Vs', sphereRadius=1.0, # required by mvVsWriter import tables except: - # fall back + # fall back format = 'VTK' def generateTimeFileName(filename, tIndex, tIndexMax, suffix): - ndigits = len('%d'%tIndexMax) - itdigits = len('%d'%tIndex) - tiStr = '0'*(ndigits-itdigits) + ('%d'%tIndex) - return re.sub(r'\.' + suffix, '_%s.%s' % (tiStr, suffix), + ndigits = len('%d' % tIndexMax) + itdigits = len('%d' % tIndex) + tiStr = '0' * (ndigits - itdigits) + ('%d' % tIndex) + return re.sub(r'\.' + suffix, '_%s.%s' % (tiStr, suffix), filename) # determine whether data are time dependent @@ -634,46 +631,46 @@ def generateTimeFileName(filename, tIndex, tIndexMax, suffix): counter += 1 if axis == 'time': timeIndex = counter - - if timeAxis == None or timeIndex == -1: + + if timeAxis is None or timeIndex == -1: # static data if format == 'VTK': vw = mvVTKSGWriter.VTKSGWriter(self, maxElev) - if filename.find('.vtk') == -1: + if filename.find('.vtk') == -1: filename += '.vtk' vw.write(filename) else: vw = mvVsWriter.VsWriter(self, maxElev) - if filename.find('.vsh5') == -1: + if filename.find('.vsh5') == -1: filename += '.vsh5' vw.write(filename) else: # time dependent data tIndexMax = len(timeAxis) for tIndex in range(tIndexMax): - sliceOp = 'self[' + (':,'*timeIndex) + ('%d,'%tIndex) + '...]' + sliceOp = 'self[' + (':,' * timeIndex) + ('%d,' % tIndex) + '...]' var = eval(sliceOp) if format == 'VTK': if filename.find('.vtk') == -1: filename += '.vtk' - tFilename = generateTimeFileName(filename, + tFilename = generateTimeFileName(filename, tIndex, 
tIndexMax, 'vtk') vw = mvVTKSGWriter.VTKSGWriter(var, maxElev) vw.write(tFilename) else: if filename.find('.h5') == -1: filename += '.h5' - tFilename = generateTimeFileName(filename, + tFilename = generateTimeFileName(filename, tIndex, tIndexMax, 'h5') vw = mvVsWriter.VsWriter(var, maxElev) vw.write(tFilename) - - # Following are distributed array methods, they require mpi4py + + # Following are distributed array methods, they require mpi4py # to be installed def setMPIComm(self, comm): """ - Set the MPI communicator. This is a no-op if MPI + Set the MPI communicator. This is a no-op if MPI is not available. """ if HAVE_MPI: @@ -700,7 +697,7 @@ def getMPISize(self): def exposeHalo(self, ghostWidth=1): """ Expose the halo to other processors. The halo is the region - within the local MPI data domain that is accessible to other + within the local MPI data domain that is accessible to other processors. The halo encompasses the edge of the data region and has thickness ghostWidth. @@ -717,9 +714,8 @@ def exposeHalo(self, ghostWidth=1): # given direction, a 1 represents a layer of # thickness ghostWidth on the high index side, # -1 on the low index side. - winId = tuple( [0 for i in range(dim) ] \ - + [drect] + \ - [0 for i in range(dim+1, ndims) ] ) + winId = tuple([0 for i in range(dim)] + [drect] + + [0 for i in range(dim + 1, ndims)]) slce = slice(0, ghostWidth) if drect == 1: @@ -728,19 +724,19 @@ def exposeHalo(self, ghostWidth=1): slab = self.__getSlab(dim, slce) # create the MPI window - dataSrc = numpy.zeros(self[slab].shape, self.dtype) - dataDst = numpy.zeros(self[slab].shape, self.dtype) + dataSrc = numpy.zeros(self[slab].shape, self.dtype) + dataDst = numpy.zeros(self[slab].shape, self.dtype) self.__mpiWindows[winId] = { 'slab': slab, 'dataSrc': dataSrc, 'dataDst': dataDst, 'window': MPI.Win.Create(dataSrc, comm=self.__mpiComm), - } - + } + def getHaloEllipsis(self, side): """ - Get the ellipsis for a given halo side. 
- + Get the ellipsis for a given halo side. + side - a tuple of zeros and one +1 or -1. To access the "north" side for instance, set side=(1, 0), (-1, 0) to access the south side, (0, 1) the east @@ -748,7 +744,7 @@ def getHaloEllipsis(self, side): Return none if halo was not exposed (see exposeHalo) """ - if HAVE_MPI and self.__mpiWindows.has_key(side): + if HAVE_MPI and side in self.__mpiWindows: return self.__mpiWindows[side]['slab'] else: return None @@ -756,20 +752,20 @@ def getHaloEllipsis(self, side): def fetchHaloData(self, pe, side): """ Fetch the halo data from another processor. The halo side - is a subdomain of the halo that is exposed to other + is a subdomain of the halo that is exposed to other processors. It is an error to call this method when MPI is not enabled. This is a collective method (must be called by all processes), which involves synchronization of data among all processors. pe - processor owning the halo data. This is a no - operation when pe is None. + operation when pe is None. side - a tuple of zeros and one +1 or -1. To access the "north" side for instance, set side=(1, 0), (-1, 0) to access the south side, (0, 1) the east - side, etc. + side, etc. - Note: collective, all procs must invoke this method. If some + Note: collective, all procs must invoke this method. If some processors should not fetch then pass None for pe. """ if HAVE_MPI: @@ -782,37 +778,36 @@ def fetchHaloData(self, pe, side): dataSrc[...] = self[slab] win = iw['window'] - win.Fence() # get the data ready + win.Fence() # get the data ready if pe is not None: - win.Get( [dataDst, self.__mpiType], pe ) - win.Fence() # make sure the communication completed + win.Get([dataDst, self.__mpiType], pe) + win.Fence() # make sure the communication completed return dataDst else: - raise CDMSError, 'Must have MPI to invoke fetchHaloData' + raise CDMSError('Must have MPI to invoke fetchHaloData') def freeHalo(self): """ - Free the MPI windows attached to the halo. 
This must be + Free the MPI windows attached to the halo. This must be called before MPI_Finalize. """ for iw in self.__mpiWindows: - self.__mpiWindows[iw]['window'].Free() + self.__mpiWindows[iw]['window'].Free() def __getSlab(self, dim, slce): """ Get slab. A slab is a multi-dimensional slice extending in all directions except along dim where slce applies - + dim - dimension (0=first index, 1=2nd index...) slce - python slice object along dimension dim - + return slab """ ndims = len(self.shape) - - slab = [ slice(0, None) for i in range(dim) ] \ - + [slce] + \ - [ slice(0, None) for i in range(dim+1, ndims) ] + + slab = [slice(0, None) for i in range(dim)] + [slce] + \ + [slice(0, None) for i in range(dim + 1, ndims)] return tuple(slab) def __getMPIType(self): @@ -836,50 +831,54 @@ def __getMPIType(self): elif dtyp == numpy.int8: typ = MPI.INT8_T else: - return None + return None else: return typ -## PropertiedClasses.set_property(TransientVariable, 'shape', -## nowrite=1, nodelete=1) +# PropertiedClasses.set_property(TransientVariable, 'shape', +# nowrite=1, nodelete=1) + -def createVariable(*args,**kargs): - if kargs.get("fromJSON",False): +def createVariable(*args, **kargs): + if kargs.get("fromJSON", False): return fromJSON(*args) else: - return TransientVariable(*args,**kargs) + return TransientVariable(*args, **kargs) -def isVariable (s): + +def isVariable(s): "Is s a variable?" return isinstance(s, AbstractVariable) + def asVariable(s, writeable=1): - """Returns s if s is a Variable; if writeable is 1, return - s if s is a TransientVariable. If s is not a variable of + """Returns s if s is a Variable; if writeable is 1, return + s if s is a TransientVariable. If s is not a variable of the desired type, attempt to make it so and return that. 
# PropertiedClasses.set_property(TransientVariable, 'shape',
#                                nowrite=1, nodelete=1)


def createVariable(*args, **kargs):
    """Factory for variables.

    When called with fromJSON=True, decode a variable previously
    serialized by dumps(); otherwise construct a TransientVariable,
    forwarding all arguments.
    """
    if kargs.get("fromJSON", False):
        return fromJSON(*args)
    else:
        return TransientVariable(*args, **kargs)


def isVariable(s):
    "Is s a variable?"
    return isinstance(s, AbstractVariable)


def asVariable(s, writeable=1):
    """Returns s if s is a Variable; if writeable is 1, return
    s if s is a TransientVariable. If s is not a variable of
    the desired type, attempt to make it so and return that.
    If we fail raise CDMSError
    """
    target_class = AbstractVariable
    if writeable:
        target_class = TransientVariable
    if isinstance(s, target_class):
        return s
    elif isinstance(s, AbstractVariable):
        return s.subSlice()

    try:
        result = createVariable(s)
    except CDMSError:
        result = None

    # if result.dtype.char == numpy.ma.PyObject:
    # BUGFIX: guard against result being None before touching .dtype —
    # previously a CDMSError from createVariable led to an AttributeError
    # here instead of the documented CDMSError below.
    if result is not None and issubclass(result.dtype.type, numpy.object_):
        result = None
    if result is None:
        raise CDMSError("asVariable could not make a Variable from the input.")
    return result
numpy.ma.allclose(2.0, f[2]) @@ -912,6 +911,5 @@ def asVariable(s, writeable=1): assert t.getdimattribute(0, 'name') == 'fudge' f2b = f2.getdimattribute(0, 'bounds') t.setdimattribute(0, 'bounds', f2b) - assert numpy.ma.allclose(f.getdimattribute(0,'bounds'), f2.getdimattribute(0,'bounds')) + assert numpy.ma.allclose(f.getdimattribute(0, 'bounds'), f2.getdimattribute(0, 'bounds')) print "Transient Variable test passed ok." -