diff --git a/Evaluate/evaluate.py b/Evaluate/evaluate.py
index 42794b65..6a45ac81 100644
--- a/Evaluate/evaluate.py
+++ b/Evaluate/evaluate.py
@@ -36,7 +36,7 @@ from scipy.stats import scoreatpercentile as percentile
 from datetime import datetime
 
-import interpolateTracks
+import Evaluate.interpolateTracks as interpolateTracks
 
 from Utilities.files import flConfigFile, flStartLog
 from Utilities.config import ConfigParser
 from Utilities.loadData import loadTrackFile
@@ -811,6 +811,13 @@ def historic(self):
                          format(self.historicTrackFile))
             return False
         else:
+            lon = []
+            lat = []
+
+            for t in tracks:
+                #if t.inRegion(self.gridLimit):
+                lon = np.append(lon, t.Longitude)
+                lat = np.append(lat, t.Latitude)
             self.hist, x, y = self.calc2DHistogram(lon, lat)
 
         return True
@@ -832,6 +839,13 @@ def synthetic(self):
                              format(trackFile))
                 return False
             else:
+                lon = []
+                lat = []
+
+                for t in tracks:
+                    #if t.inRegion(self.gridLimit):
+                    lon = np.append(lon, t.Longitude)
+                    lat = np.append(lat, t.Latitude)
                 self.synHist[n, :, :], x, y = self.calc2DHistogram(lon, lat)
 
         return True
@@ -1048,6 +1062,8 @@ def findCrossings(self, tracks):
         """
         Given a series of track points and a longitude, calculate
        if the tracks intersect that line of longitude
+
+        :param tracks: list of `Track` objects.
         """
 
         h = np.zeros((len(self.gateLats) - 1, len(self.gateLons)))
@@ -1102,7 +1118,7 @@ def historic(self):
                                                   self.timeStep)
 
             self.lonCrossingHist, self.lonCrossingEWHist, \
-                self.lonCrossingWEHist = self.findCrossings(i, lon, lat)
+                self.lonCrossingWEHist = self.findCrossings(tracks)
 
         return
 
@@ -1124,16 +1140,15 @@ def synthetic(self):
                 return False
             else:
                 self.lonCrossingSyn[n, :], self.lonCrossingSynEW[n, :], \
-                    self.lonCrossingSynWE[
-                        n, :] = self.findCrossings(i, lon, lat)
+                    self.lonCrossingSynWE[n, :] = self.findCrossings(tracks)
 
         return True
 
     def synStats(self):
         """Calculate statistics of synthetic event sets"""
-        log.debug(
-            "Calculating statistics for longitude crossings of synthetic events")
+        log.debug(("Calculating statistics for longitude "
+                   "crossings of synthetic events"))
         if not hasattr(self, 'lonCrossingSyn'):
             log.critical("Synthetic event sets have not been processed!")
             log.critical("Cannot calculate statistics")
diff --git a/Evaluate/genesisDensity.py b/Evaluate/genesisDensity.py
index 826d1d51..7cf32238 100755
--- a/Evaluate/genesisDensity.py
+++ b/Evaluate/genesisDensity.py
@@ -29,16 +29,10 @@ from Utilities.parallel import attemptParallel, disableOnWorkers
 from Utilities import pathLocator
 
-import Utilities.Intersections as Int
-
-from shapely.geometry import Point, LineString, Polygon
-from statsmodels.nonparametric.kernel_density import KDEMultivariate
-
 from PlotInterface.maps import FilledContourMapFigure, saveFigure, levels
-from PlotInterface.tracks import TrackMapFigure
 
-log = logging.getLogger(__name__)
-log.addHandler(logging.NullHandler())
+LOG = logging.getLogger(__name__)
+LOG.addHandler(logging.NullHandler())
 
 def loadTracks(trackfile):
     """
@@ -69,10 +63,17 @@ def loadTracksFromFiles(trackfiles):
             yield track
 
 def loadTracksFromPath(path):
+    """
+    Load a collection of track files from a given path.
+
+    :param str path: Path to load track files from.
+
+    :returns: iterator of :class:`Track` objects.
+    """
     files = os.listdir(path)
     trackfiles = [pjoin(path, f) for f in files if f.startswith('tracks')]
     msg = 'Processing %d track files in %s' % (len(trackfiles), path)
-    log.info(msg)
+    LOG.info(msg)
     return loadTracksFromFiles(sorted(trackfiles))
 
 class GenesisDensity(object):
@@ -130,7 +131,7 @@ def calculate(self, tracks):
         histogram, x, y = np.histogram2d(lon, lat,
                                          [self.lon_range,
                                           self.lat_range],
-                                          normed=False)
+                                         normed=False)
         return histogram
 
     def calculatePDF(self, tracks):
@@ -152,7 +153,7 @@ def calculatePDF(self, tracks):
             lat = np.append(lat, t.Latitude)
 
         xy = np.vstack([self.X.ravel(), self.Y.ravel()])
-        data = np.array([[lon],[lat]])
+        data = np.array([[lon], [lat]])
 
         kde = KDEMultivariate(data, bw='cv_ml', var_type='cc')
         pdf = kde.pdf(data_predict=xy)
@@ -165,12 +166,12 @@ def _calculate(self, tracks):
 
         :param tracks: Collection of :class:`Track` objects.
         """
-        log.debug("Calculating PDF for set of {0:d} tracks".format(len(tracks)))
+        LOG.debug("Calculating PDF for set of {0:d} tracks".format(len(tracks)))
 
         hist = ma.zeros((len(self.lon_range) - 1,
                          len(self.lat_range) - 1))
 
-        xy= np.vstack([self.X.ravel(), self.Y.ravel()])
+        xy = np.vstack([self.X.ravel(), self.Y.ravel()])
 
         x = []
         y = []
@@ -188,9 +189,9 @@ def _calculate(self, tracks):
         xx = np.array(x)
         yy = np.array(y)
         ii = np.where((xx >= self.gridLimit['xMin']) &
-                      (xx <= self.gridLimit['xMax']) & 
+                      (xx <= self.gridLimit['xMax']) &
                       (yy >= self.gridLimit['yMin']) &
-                      (yy <= self.gridLimit['yMax'])) 
+                      (yy <= self.gridLimit['yMax']))
 
         values = np.vstack([xx[ii], yy[ii]])
         kernel = KDEMultivariate(values, bw='cv_ml', var_type='cc')
@@ -215,7 +216,7 @@ def calculateMeans(self):
     @disableOnWorkers
     def historic(self):
         """Load historic data and calculate histogram"""
-        log.info("Processing historic track records")
+        LOG.info("Processing historic track records")
         config = ConfigParser()
         config.read(self.configFile)
         inputFile = config.get('DataProcess', 'InputFile')
@@ -228,7 +229,7 @@ def historic(self):
             tracks = loadTrackFile(self.configFile, inputFile, source)
 
         except (TypeError, IOError, ValueError):
-            log.critical("Cannot load historical track file: {0}".\
+            LOG.critical("Cannot load historical track file: {0}".\
                          format(inputFile))
             raise
         else:
@@ -238,7 +239,7 @@ def historic(self):
                 startYr = min(startYr, min(t.Year))
                 endYr = max(endYr, max(t.Year))
             numYears = endYr - startYr
-            log.info("Range of years: %d - %d" % (startYr, endYr))
+            LOG.info("Range of years: %d - %d" % (startYr, endYr))
             self.hist = self._calculate(tracks)
             #self.hist = self._calculate(tracks) / numYears
@@ -260,12 +261,12 @@ def synthetic(self):
             n = 0
             for d in range(1, pp.size()):
                 pp.send(trackfiles[w], destination=d, tag=work_tag)
-                log.debug("Processing track file {0:d} of {1:d}".\
+                LOG.debug("Processing track file {0:d} of {1:d}".\
                           format(w, len(trackfiles)))
                 w += 1
 
             terminated = 0
-            while (terminated < pp.size() - 1):
+            while terminated < pp.size() - 1:
                 results, status = pp.receive(pp.any_source, tag=result_tag,
                                              return_status=True)
                 self.synHist[n, :, :] = results
@@ -274,7 +275,7 @@ def synthetic(self):
                 d = status.source
                 if w < len(trackfiles):
                     pp.send(trackfiles[w], destination=d, tag=work_tag)
-                    log.debug("Processing track file {0:d} of {1:d}".\
+                    LOG.debug("Processing track file {0:d} of {1:d}".\
                               format(w, len(trackfiles)))
                     w += 1
                 else:
@@ -284,19 +285,19 @@ def synthetic(self):
             self.calculateMeans()
 
         elif (pp.size() > 1) and (pp.rank() != 0):
-            while(True):
+            while True:
                 trackfile = pp.receive(source=0, tag=work_tag)
                 if trackfile is None:
                     break
-                log.debug("Processing %s" % (trackfile))
+                LOG.debug("Processing %s", trackfile)
                 tracks = loadTracks(trackfile)
                 results = self._calculate(tracks) #/ self.synNumYears
                 pp.send(results, destination=0, tag=result_tag)
 
         elif (pp.size() == 1) and (pp.rank() == 0):
             for n, trackfile in enumerate(trackfiles):
-                log.debug("Processing track file {0:d} of {1:d}".\
+                LOG.debug("Processing track file {0:d} of {1:d}".\
                           format(n + 1, len(trackfiles)))
                 tracks = loadTracks(trackfile)
                 self.synHist[n, :, :] = self._calculate(tracks) #/ self.synNumYears
@@ -309,12 +310,12 @@ def save(self):
 
         # Simple sanity check (should also include the synthetic data):
         if not hasattr(self, 'hist'):
-            log.critical("No historical data available!")
-            log.critical(("Check that data has been processed "
+            LOG.critical("No historical data available!")
+            LOG.critical(("Check that data has been processed "
                           "before trying to save data"))
             return
 
-        log.info('Saving genesis density data to {0}'.format(dataFile))
+        LOG.info('Saving genesis density data to {0}'.format(dataFile))
         dimensions = {
             0: {
                 'name': 'lat',
diff --git a/Evaluate/landfallRates.py b/Evaluate/landfallRates.py
index cb6c8632..1d3e7c95 100644
--- a/Evaluate/landfallRates.py
+++ b/Evaluate/landfallRates.py
@@ -30,8 +30,8 @@
 
 from PlotInterface.curves import RangeCompareCurve, saveFigure
 
-log = logging.getLogger(__name__)
-log.addHandler(logging.NullHandler())
+LOG = logging.getLogger(__name__)
+LOG.addHandler(logging.NullHandler())
 
 def loadTracks(trackfile):
     """
@@ -70,8 +70,8 @@ def __init__(self, configFile):
         try:
             gateFile = config.get('Input', 'CoastlineGates')
         except NoOptionError:
-            log.exception(("No coastline gate file specified "
-                           "in configuration file"))
+            LOG.exception(("No coastline gate file specified "
+                           "in configuration file"))
             raise
 
         gateData = np.genfromtxt(gateFile, delimiter=',')
@@ -174,7 +174,7 @@ def setOutput(self, ntracks):
 
     def historic(self):
         """Calculate historical rates of landfall"""
-        log.info("Processing landfall rates of historical tracks")
+        LOG.info("Processing landfall rates of historical tracks")
         config = ConfigParser()
         config.read(self.configFile)
         inputFile = config.get('DataProcess', 'InputFile')
@@ -187,7 +187,7 @@ def historic(self):
         try:
             tracks = loadTrackFile(self.configFile, inputFile, source)
         except (TypeError, IOError, ValueError):
-            log.critical("Cannot load historical track file: {0}".\
+            LOG.critical("Cannot load historical track file: {0}".\
                          format(inputFile))
             raise
         else:
@@ -198,7 +198,7 @@ def historic(self):
 
     def synthetic(self):
         """Load synthetic data and calculate histogram"""
-        log.info("Processing landfall rates of synthetic events")
+        LOG.info("Processing landfall rates of synthetic events")
 
         work_tag = 0
         result_tag = 1
@@ -214,12 +214,12 @@ def synthetic(self):
             n = 0
             for d in range(1, pp.size()):
                 pp.send(trackfiles[w], destination=d, tag=work_tag)
-                log.debug("Processing track file {0:d} of {1:d}".\
+                LOG.debug("Processing track file {0:d} of {1:d}".\
                           format(w + 1, len(trackfiles)))
                 w += 1
 
             terminated = 0
-            while (terminated < pp.size() - 1):
+            while terminated < pp.size() - 1:
                 results, status = pp.receive(pp.any_source,
                                              tag=result_tag,
                                              return_status=True)
@@ -230,7 +230,7 @@ def synthetic(self):
 
                 if w < len(trackfiles):
                     pp.send(trackfiles[w], destination=d, tag=work_tag)
-                    log.debug("Processing track file {0:d} of {1:d}".\
+                    LOG.debug("Processing track file {0:d} of {1:d}".\
                               format(w + 1, len(trackfiles)))
                     w += 1
                 else:
@@ -240,12 +240,12 @@ def synthetic(self):
             self.calculateStats()
 
         elif (pp.size() > 1) and (pp.rank() != 0):
-            while(True):
+            while True:
                 trackfile = pp.receive(source=0, tag=work_tag)
                 if trackfile is None:
                     break
-                log.debug("Processing %s" % (trackfile))
+                LOG.debug("Processing %s", trackfile)
                 tracks = loadTracks(trackfile)
                 results = self.processTracks(tracks)
                 pp.send(results, destination=0, tag=result_tag)
@@ -253,7 +253,7 @@ def synthetic(self):
         elif pp.size() == 1 and pp.rank() == 0:
             # Assumed no Pypar - helps avoid the need to extend DummyPypar()
             for n, trackfile in enumerate(sorted(trackfiles)):
-                log.debug("Processing track file {0:d} of {1:d}".\
+                LOG.debug("Processing track file {0:d} of {1:d}".\
                           format(n + 1, len(trackfiles)))
                 tracks = loadTracks(trackfile)
                 results = self.processTracks(tracks)
diff --git a/Evaluate/longitudeCrossing.py b/Evaluate/longitudeCrossing.py
index 24aa5583..fac040e4 100644
--- a/Evaluate/longitudeCrossing.py
+++ b/Evaluate/longitudeCrossing.py
@@ -24,7 +24,7 @@ from matplotlib.figure import Figure
 from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
 
-import interpolateTracks
+import Evaluate.interpolateTracks as interpolateTracks
 
 from Utilities.config import ConfigParser
 from Utilities.track import ncReadTrackData
@@ -34,8 +34,8 @@
 from Utilities import pathLocator
 import Utilities.Intersections as Int
 
-log = logging.getLogger(__name__)
-log.addHandler(logging.NullHandler())
+LOG = logging.getLogger(__name__)
+LOG.addHandler(logging.NullHandler())
 
 def loadTracks(trackfile):
     """
@@ -90,7 +90,7 @@ def __init__(self, configFile):
         # Add configuration settings to global attributes:
         self.gatts = {'history': "Longitude crossing rates for TCRM simulation",
-                      'version': flProgramVersion() }
+                      'version': flProgramVersion()}
 
         for section in config.sections():
             for option in config.options(section):
@@ -108,7 +108,7 @@ def findCrossings(self, tracks):
         :return: h, ewh, weh, histograms for each line of longitude,
                  recording the rate of crossings
         """
-        log.debug("Processing %d tracks" % (len(tracks)))
+        LOG.debug("Processing %d tracks" % (len(tracks)))
         h = np.zeros((len(self.gateLats) - 1, len(self.gateLons)))
         ewh = np.zeros((len(self.gateLats) - 1, len(self.gateLons)))
         weh = np.zeros((len(self.gateLats) - 1, len(self.gateLons)))
@@ -177,7 +177,7 @@ def calcStats(self, lonCrossHist, lonCrossEW, lonCrossWE):
 
     def historic(self):
         """Calculate historical rates of longitude crossing"""
-        log.info("Processing historical tracks for longitude crossings")
+        LOG.info("Processing historical tracks for longitude crossings")
         config = ConfigParser()
         config.read(self.configFile)
         inputFile = config.get('DataProcess', 'InputFile')
@@ -195,7 +195,7 @@ def historic(self):
                                                   timestep,
                                                   interpolation_type='linear')
         except (TypeError, IOError, ValueError):
-            log.critical("Cannot load historical track file: {0}".\
+            LOG.critical("Cannot load historical track file: {0}".\
                          format(inputFile))
             raise
         else:
@@ -207,7 +207,7 @@ def historic(self):
 
     def synthetic(self):
         """Calculate synthetic rates of longitude crossing"""
-        log.info("Processing synthetic rates of longitude crossing")
+        LOG.info("Processing synthetic rates of longitude crossing")
 
         work_tag = 0
         result_tag = 1
@@ -231,12 +231,12 @@ def synthetic(self):
             n = 0
             for d in range(1, pp.size()):
                 pp.send(trackfiles[w], destination=d, tag=work_tag)
-                log.debug("Processing track file {0:d} of {1:d}".\
+                LOG.debug("Processing track file {0:d} of {1:d}".\
                           format(w + 1, len(trackfiles)))
                 w += 1
 
             terminated = 0
-            while (terminated < pp.size() - 1):
+            while terminated < pp.size() - 1:
                 results, status = pp.receive(pp.any_source,
                                              tag=result_tag,
                                              return_status=True)
@@ -248,7 +248,7 @@ def synthetic(self):
 
                 if w < len(trackfiles):
                     pp.send(trackfiles[w], destination=d, tag=work_tag)
-                    log.debug("Processing track file {0:d} of {1:d}".\
+                    LOG.debug("Processing track file {0:d} of {1:d}".\
                               format(w + 1, len(trackfiles)))
                     w += 1
                 else:
@@ -258,12 +258,12 @@ def synthetic(self):
             self.calcStats(lonCrossHist, lonCrossEW, lonCrossWE)
 
         elif (pp.size() > 1) and (pp.rank() != 0):
-            while(True):
+            while True:
                 trackfile = pp.receive(source=0, tag=work_tag)
                 if trackfile is None:
                     break
-                log.debug("Processing %s" % (trackfile))
+                LOG.debug("Processing %s", trackfile)
                 tracks = loadTracks(trackfile)
                 lonCross, lonCrossEW, lonCrossWE = self.findCrossings(tracks)
                 results = (lonCross, lonCrossEW, lonCrossWE)
@@ -283,7 +283,7 @@ def save(self):
         """Save data to file for archival and/or further processing"""
         dataFile = pjoin(self.dataPath, 'lonCrossings.nc')
 
-        log.debug("Saving longitude crossing data to %s" % dataFile)
+        LOG.debug("Saving longitude crossing data to %s" % dataFile)
 
         dimensions = {
             0: {
@@ -380,7 +380,7 @@ def save(self):
                 'dtype': 'f',
                 'atts': {
                     'long_name': ('Upper percentile synthetic longitudinal ',
-                                  'crossing rate' ),
+                                  'crossing rate'),
                     'units': 'number of crossings per year',
                     'percentile': 90
                 }
@@ -422,28 +422,28 @@ def save(self):
                 }
             },
             10: {
-                 'name': 'syn_lower_ew',
-                 'dims': ('lat', 'lon'),
-                 'values': self.synCrossEWLower,
-                 'dtype': 'f',
-                 'atts': {
+                'name': 'syn_lower_ew',
+                'dims': ('lat', 'lon'),
+                'values': self.synCrossEWLower,
+                'dtype': 'f',
+                'atts': {
                     'long_name':('Lower percentile synthetic longitudinal '
-                                 'crossing rate - east-west crossings'),
+                                  'crossing rate - east-west crossings'),
                     'units':'number of crossings per year',
                     'percentile': 5
                 }
             },
             11: {
-                 'name': 'syn_lower_we',
-                 'dims': ('lat', 'lon'),
-                 'values': self.synCrossWELower,
-                 'dtype': 'f',
-                 'atts': {
+                'name': 'syn_lower_we',
+                'dims': ('lat', 'lon'),
+                'values': self.synCrossWELower,
+                'dtype': 'f',
+                'atts': {
                     'long_name': ('Lower percentile synthetic longitudinal '
-                                  'crossing rate - west-east crossings'),
+                                  'crossing rate - west-east crossings'),
                     'units': 'number of crossings per year',
                     'percentile': 5
-                    }
+                }
             }
         }
@@ -454,7 +454,7 @@ def save(self):
     @disableOnWorkers
     def plotCrossingRates(self):
         """Plot longitude crossing rates"""
-        log.debug("Plotting longitude crossing rates")
+        LOG.debug("Plotting longitude crossing rates")
         fig = Figure()
         ax1 = fig.add_subplot(2, 1, 1)
         for i in range(len(self.gateLons)):
@@ -462,7 +462,7 @@ def plotCrossingRates(self):
                      self.gateLats[:-1], color='r', lw=2)
 
             ax1.plot(2.* self.gateLons[i] - 100. * self.synCrossEW[:, i],
-                     self.gateLats[:-1],color='k',lw=2)
+                     self.gateLats[:-1], color='k', lw=2)
 
             x1 = 2.* self.gateLons[i] - 100. * self.synCrossEWUpper[:, i]
             x2 = 2.* self.gateLons[i] - 100. * self.synCrossEWLower[:, i]
@@ -503,7 +503,7 @@ def plotCrossingRates(self):
 
         fig.tight_layout()
         canvas = FigureCanvas(fig)
-        canvas.print_figure(pjoin(self.plotPath,'lon_crossing_syn.png'))
+        canvas.print_figure(pjoin(self.plotPath, 'lon_crossing_syn.png'))
 
         return
diff --git a/Evaluate/pressureDistribution.py b/Evaluate/pressureDistribution.py
index d3ed18bf..4f18215c 100644
--- a/Evaluate/pressureDistribution.py
+++ b/Evaluate/pressureDistribution.py
@@ -265,7 +265,7 @@ def synthetic(self):
                 w += 1
 
             terminated = 0
-            while (terminated < pp.size() - 1):
+            while terminated < pp.size() - 1:
                 results, status = pp.receive(pp.any_source,
                                              tag=result_tag,
                                              return_status=True)
@@ -292,7 +292,7 @@ def synthetic(self):
             self.calculateMeans(synMean, synMin, synMed, synMax, synMinCPDist)
 
         elif (pp.size() > 1) and (pp.rank() != 0):
-            while(True):
+            while True:
                 trackfile = pp.receive(source=0, tag=work_tag)
                 if trackfile is None:
                     break
diff --git a/StatInterface/GenerateDistributions.py b/StatInterface/GenerateDistributions.py
index 56a16013..1b3d328f 100644
--- a/StatInterface/GenerateDistributions.py
+++ b/StatInterface/GenerateDistributions.py
@@ -134,7 +134,8 @@ def allDistributions(self, lonLat, parameterList, parameterName=None,
         """
 
         if parameterName:
-            self.logger.debug("Running allDistributions for %s"%parameterName)
+            self.logger.debug("Running allDistributions for %s",
+                              parameterName)
         else:
             self.logger.debug("Running allDistributions")
@@ -145,7 +146,7 @@ def allDistributions(self, lonLat, parameterList, parameterName=None,
             self.lonLat = lonLat
 
         if isinstance(parameterList, str):
-            self.logger.debug("Loading parameter data from file: %s" %
+            self.logger.debug("Loading parameter data from file: %s",
                               parameterList)
             self.pList = np.array(flLoadFile(parameterList))
         else:
@@ -160,7 +161,7 @@ def allDistributions(self, lonLat, parameterList, parameterName=None,
                           "cells into files"))
 
         for cellNum in xrange(0, maxCellNum + 1):
-            self.logger.debug("Processing cell number %i"%cellNum)
+            self.logger.debug("Processing cell number %i", cellNum)
 
             # Generate cyclone parameter data for the cell number
             self.extractParameter(cellNum)
@@ -173,8 +174,8 @@ def allDistributions(self, lonLat, parameterList, parameterName=None,
             if plotParam:
                 self._plotParameter(cellNum, kdeStep)
             self.logger.debug(('size of parameter array = %d: '
-                               'size of cdf array = %d') %
-                              (self.parameter.size, cdf.size))
+                               'size of cdf array = %d'),
+                              self.parameter.size, cdf.size)
 
             cellNumlist = []
             for i in range(len(cdf)):
@@ -183,7 +184,7 @@ def allDistributions(self, lonLat, parameterList, parameterName=None,
                 results = np.transpose(np.array([cellNumlist,
                                                  cdf[:, 0], cdf[:, 2]]))
             else:
-                self.logger.debug('size of results = %s'%str(results.size))
+                self.logger.debug('size of results = %s', str(results.size))
                 results = np.concatenate((results,
                                           np.transpose(np.array([cellNumlist,
                                                                  cdf[:, 0],
@@ -270,7 +271,7 @@ def extractParameter(self, cellNum):
             wLon, eLon, nLat, sLat = self._expandCell(lon, lat, wLon, eLon,
                                                       nLat, sLat)
             if ((wLon == wLon_last) & (eLon == eLon_last) &
-                    (nLat == nLat_last) & (sLat == sLat_last)):
+                (nLat == nLat_last) & (sLat == sLat_last)):
                 errMsg = ("Insufficient grid points in selected domain to "
                           "estimate storm statistics - please select a larger "
                           "domain. Samples = %i / %i")%(np.size(self.parameter),
@@ -296,19 +297,19 @@ def extractParameter(self, cellNum):
                 wLon, eLon, nLat, sLat = self._expandCell(lon, lat, wLon, eLon,
                                                           nLat, sLat)
                 if ((wLon == wLon_last) & (eLon == eLon_last) &
-                        (nLat == nLat_last) & (sLat == sLat_last)):
+                    (nLat == nLat_last) & (sLat == sLat_last)):
                     errMsg = ("Insufficient grid points in selected domain "
                               "to estimate storm statistics - "
                               "please select a larger domain.")
                     self.logger.critical(errMsg)
                     raise StopIteration, errMsg
 
        indij = np.where(((lat >= sLat) & (lat < nLat)) &
-                          ((lon >= wLon) & (lon < eLon)))
+                         ((lon >= wLon) & (lon < eLon)))
        parameter_ = self.pList[indij]
        self.parameter = stats.statRemoveNum(np.array(parameter_),
                                             self.missingValue)
-        self.logger.debug("Number of valid observations in cell %s : %s" %
-                          (str(cellNum), str(np.size(self.parameter))))
+        self.logger.debug("Number of valid observations in cell %s : %s",
+                          str(cellNum), str(np.size(self.parameter)))
 
     def _plotParameter(self, cellNum, kdeStep):
diff --git a/StatInterface/KDEParameters.py b/StatInterface/KDEParameters.py
index 6a3337eb..035b35f6 100644
--- a/StatInterface/KDEParameters.py
+++ b/StatInterface/KDEParameters.py
@@ -131,8 +131,8 @@ def generateKDE(self, parameters, kdeStep, kdeParameters=None,
 
         if self.grid.size < 2:
             LOG.critical("Grid for CDF generation is a single value")
-            LOG.critical("xmin=%7.3f, xmax=%7.3f, kdeStep=%7.3f" %
-                         (xmin, xmax, kdeStep))
+            LOG.critical("xmin=%7.3f, xmax=%7.3f, kdeStep=%7.3f",
+                         xmin, xmax, kdeStep)
             raise ValueError
 
         bw = KPDF.UPDFOptimumBandwidth(self.parameters)
diff --git a/Utilities/convolve.py b/Utilities/convolve.py
index 15e3e3d5..c6c8932e 100644
--- a/Utilities/convolve.py
+++ b/Utilities/convolve.py
@@ -96,7 +96,7 @@ def getKernel(d, mtype, res=25., height=5.):
         ii = np.where((bear >= -112.5) & (bear <= -67.5) &
                       (rr <= 20.*height))
     elif d == "SE":
-        ii = np.where((bear >= -67.5) & (bear <= -22.5) & 
+        ii = np.where((bear >= -67.5) & (bear <= -22.5) &
                       (rr <= 20.*height))
 
     g[ii] = 1.
diff --git a/Utilities/error.py b/Utilities/error.py
index 57951452..86d7ce42 100644
--- a/Utilities/error.py
+++ b/Utilities/error.py
@@ -44,11 +44,10 @@ def errDieWithLog(message=None):
         tb = tb.tb_next
 
     for frame in stack:
-        LOG.critical("Frame %s in %s at line %s" % (frame.f_code.co_name,
-                                                    frame.f_code.co_filename,
-                                                    frame.f_lineno))
+        LOG.critical("Frame %s in %s at line %s", frame.f_code.co_name,
+                     frame.f_code.co_filename, frame.f_lineno)
         for key, value in frame.f_locals.items():
-            LOG.critical("%s = %s"%(key, repr(value)))
+            LOG.critical("%s = %s", key, repr(value))
 
     if message:
         LOG.critical(message)
@@ -63,7 +62,7 @@ def __init__(self, fileName):
         Exception.__init__()
         self.fileName = fileName
     def __str__(self):
-        LOG.exception("File open error: cannot open %s"%(repr(self.fileName)))
+        LOG.exception("File open error: cannot open %s", repr(self.fileName))
         return "File open error : cannot open %s"%(repr(self.fileName))
 
 class ErrFileCloseError(Exception):
@@ -75,7 +74,7 @@ def __init__(self, value):
         Exception.__init__()
         self.value = value
     def __str__(self):
-        LOG.exception("File close error: cannot close %s"%(repr(self.fileName)))
+        LOG.exception("File close error: cannot close %s", repr(self.fileName))
         return "File close error: cannot close %s"%(repr(self.fileName))
 
 class ErrNetCDFError(Exception):
@@ -87,7 +86,7 @@ def __init__(self, value):
         Exception.__init__()
         self.value = value
     def __str__(self):
-        LOG.exception("Error in nctools: %s"%repr(self.value))
+        LOG.exception("Error in nctools: %s", repr(self.value))
         return "Error in nctools: %s"%repr(self.value)
 
diff --git a/Utilities/metutils.py b/Utilities/metutils.py
index 1ac2271b..c4481035 100644
--- a/Utilities/metutils.py
+++ b/Utilities/metutils.py
@@ -385,7 +385,7 @@ def convert(value, inunits, outunits):
     """
     startValue = value
-    value = np.array(value)
+    value = np.array(value, dtype=float)
     if inunits == outunits:
         # Do nothing:
         return value
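
For context, a minimal standalone sketch (not part of the patch, and not the TCRM API) of the two patterns applied repeatedly above: deferred logging arguments (passing values to LOG.debug rather than %-formatting the message at the call site) and flattening per-track Longitude/Latitude arrays into a 2D histogram. The Track attribute names follow the patch; track_density, lon_bins and lat_bins are hypothetical names used only for illustration, and tracks is assumed to be a non-empty sequence.

import logging

import numpy as np

LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())

def track_density(tracks, lon_bins, lat_bins):
    """Bin track positions onto a lon/lat grid (illustrative sketch only)."""
    # Deferred formatting: the message is only rendered if DEBUG is enabled.
    LOG.debug("Processing %d tracks", len(tracks))

    # Flatten per-track position arrays into single lon/lat vectors.
    lon = np.concatenate([t.Longitude for t in tracks])
    lat = np.concatenate([t.Latitude for t in tracks])

    # histogram2d returns the 2D counts plus the bin edges actually used.
    hist, x_edges, y_edges = np.histogram2d(lon, lat, [lon_bins, lat_bins])
    return hist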