Skip to content
This repository has been archived by the owner. It is now read-only.
Browse files
Merge branch 'CLIMATE-917' of…
  • Loading branch information
lewismc committed Jan 17, 2018
2 parents e63c5f2 + 13c6dd6 commit 91c00655541a05b7af07e6a837e810e6212c8183
Showing 4 changed files with 27 additions and 18 deletions.
@@ -14,6 +14,11 @@
import sys
import os

# esgf is not currently available for Python 3 and will throw an
# error when building the documents.
if sys.version_info[0] >= 3:
autodoc_mock_imports = ["esgf"]

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
@@ -33,11 +38,15 @@

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.

# Note that 'sphinxcontrib.autohttp.bottle' is currently broken in Sphinx > 1.56
# Remove from the extension list if documentation fails on Sphinx hard failure.
extensions = [

# Add any paths that contain templates here, relative to this directory.
@@ -117,6 +117,7 @@ def spatial_resolution(self):
If self.lats and self.lons are from curvilinear coordinates,
the output resolutions are approximate values.
:returns: The Dataset's latitudinal and longitudinal spatial resolution
as a tuple of the form (lat_resolution, lon_resolution).
:rtype: (:class:`float`, :class:`float`)
@@ -264,7 +265,7 @@ def __init__(self, boundary_type='rectangular',
start=None, end=None):
'''Default Bounds constructor
:param boundary_type: The type of spatial subset boundary.
:type boundary_type: :mod:`string'
:type boundary_type: :mod:`string`
:param lat_min: The minimum latitude bound.
@@ -33,33 +33,31 @@ def __init__(self, *loader_opts):
Each keyword argument can be information for a dataset in dictionary
form. For example:
>>> loader_opt1 = {'loader_name': 'rcmed', 'name': 'cru',
'dataset_id': 10, 'parameter_id': 34}
>>> loader_opt2 = {'path': './data/',
'variable': 'pcp'}
>>> loader = DatasetLoader(loader_opt1, loader_opt2)
Or more conveniently if the loader configuration is defined in a
yaml file named config_file (see RCMES examples):
>>> import yaml
>>> config = yaml.load(open(config_file))
>>> obs_loader_config = config['datasets']['reference']
>>> loader = DatasetLoader(*obs_loader_config)
As shown in the first example, the dictionary for each argument should
contain a loader name and parameters specific to the particular loader.
Once the configuration is entered, the datasets may be loaded using:
>>> loader.load_datasets()
>>> obs_datasets = loader.datasets
Additionally, each dataset must have a ``loader_name`` keyword. This may
be one of the following:
* ``'local'`` - One or multiple dataset files in a local directory
* ``'local_split'`` - A single dataset split across multiple files in a
local directory
@@ -74,6 +72,7 @@ def __init__(self, *loader_opts):
Users who wish to load datasets from loaders not described above may
define their own custom dataset loader function and incorporate it as
>>> loader.add_source_loader('my_loader_name', my_loader_func)
:param loader_opts: Dictionaries containing each dataset loader
@@ -84,7 +83,7 @@ def __init__(self, *loader_opts):
:type loader_opts: :class:`dict`
:raises KeyError: If an invalid argument is passed to a data source
loader function.
loader function.
# dataset loader config
@@ -115,8 +114,8 @@ def add_source_loader(self, loader_name, loader_func):
:type loader_name: :mod:`string`
:param loader_func: Reference to a custom defined function. This should
return an OCW Dataset object, and have an origin which satisfies
origin['source'] == loader_name.
return an OCW Dataset object, and have an origin which satisfies
origin['source'] == loader_name.
:type loader_func: :class:`callable`
self._source_loaders[loader_name] = loader_func
@@ -402,7 +402,7 @@ def trim_dataset(dataset):
''' Trim datasets such that first and last year of data have all 12 months
:param dataset: Dataset object
:type dataset: :class:`dataset.Dataset
:type dataset: :class:`dataset.Dataset`
:returns: Slice index for trimmed dataset
@@ -653,7 +653,7 @@ def _force_unicode(s, encoding='utf-8'):
def calculate_temporal_trends(dataset):
''' Calculate temporal trends in dataset.values
:param dataset: The dataset from which time values should be extracted.
:type dataset: :class:`dataset.Dataset'
:type dataset: :class:`dataset.Dataset`
:returns: Arrays of the temporal trend and standard error
:rtype: :class:`numpy.ndarray`, :class:`numpy.ndarray`
@@ -675,13 +675,13 @@ def calculate_temporal_trends(dataset):
def calculate_ensemble_temporal_trends(timeseries_array, number_of_samples=1000):
''' Calculate temporal trends in an ensemble of time series
:param timeseries_array: Two dimensional array. 1st index: model, 2nd index: time.
:type timeseries_array: :class:`numpy.ndarray'
:type timeseries_array: :class:`numpy.ndarray`
:param sampling: A list whose elements are one-dimensional numpy arrays
:type timeseries_array: :class:`list'
:type timeseries_array: :class:`list`
:returns: temporal trend and estimated error from bootstrapping
:rtype: :float:`float','float'
:rtype: :class:`float`, :class:`float`

nmodels, nt = timeseries_array.shape
@@ -701,13 +701,13 @@ def calculate_ensemble_temporal_trends(timeseries_array, number_of_samples=1000)
def calculate_temporal_trend_of_time_series(x,y):
''' Calculate least-square trends (a) in y = ax+b and a's standard error
:param x: time series
:type x: :class:`numpy.ndarray'
:type x: :class:`numpy.ndarray`
:param x: time series
:type x: :class:`numpy.ndarray'
:type x: :class:`numpy.ndarray`
:returns: temporal trend and standard error
:rtype: :float:`float','float'
:rtype: :class:`float`, :class:`float`
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
return slope, std_err

0 comments on commit 91c0065

Please sign in to comment.