From a0e5d0bd7cfeb05fa82b725fcd5232ac2b043971 Mon Sep 17 00:00:00 2001 From: cgoodale Date: Mon, 23 Jun 2014 12:19:47 -0700 Subject: [PATCH 1/5] Initial script creation --- examples/model_ensemble_to_rcmed.py | 185 ++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) create mode 100644 examples/model_ensemble_to_rcmed.py diff --git a/examples/model_ensemble_to_rcmed.py b/examples/model_ensemble_to_rcmed.py new file mode 100644 index 00000000..33f12191 --- /dev/null +++ b/examples/model_ensemble_to_rcmed.py @@ -0,0 +1,185 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import datetime +import urllib +from os import path + +import numpy as np + +import ocw.data_source.local as local +import ocw.data_source.rcmed as rcmed +from ocw.dataset import Bounds as Bounds +import ocw.dataset_processor as dsp +import ocw.evaluation as evaluation +import ocw.metrics as metrics +import ocw.plotter as plotter + +# File URL leader +FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/" +# This way we can easily adjust the time span of the retrievals +YEARS = 3 +# Two Local Model Files +FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc" +FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc" +# Filename for the output image/plot (without file extension) +OUTPUT_PLOT = "model_ensemble_tasmax_africa_bias_monthly" + +# Download necessary NetCDF file if not present +if path.exists(FILE_1): + pass +else: + urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1) + +if path.exists(FILE_2): + pass +else: + urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2) + + +""" Step 1: Load Local NetCDF File into OCW Dataset Objects """ +# Load local knmi model data +knmi_dataset = local.load_file(FILE_1, "tasmax") +knmi_dataset.name = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax" +print(knmi_dataset) +wrf311_dataset = local.load_file(FILE_2, "tasmax") +wrf311_dataset.name = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax" +print(wrf311_dataset) + + + +""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """ +print("Working with the rcmed interface to get CRU3.1 Daily-Max Temp") +metadata = rcmed.get_parameters_metadata() + +cru_31 = [m for m in metadata if m['parameter_id'] == "39"][0] + +""" The RCMED API uses the following function to query, subset and return the +raw data from the database: + +rcmed.parameter_dataset(dataset_id, parameter_id, min_lat, max_lat, min_lon, + max_lon, start_time, end_time) + +The first two required params are in the cru_31 variable we defined earlier +""" +# Must cast to int since the rcmed api requires ints +dataset_id = int(cru_31['dataset_id']) +parameter_id = int(cru_31['parameter_id']) + +# The spatial_boundaries() function returns the spatial extent of the dataset +min_lat, 
max_lat, min_lon, max_lon = knmi_dataset.spatial_boundaries() + +print("Calculating the Maximum Overlap in Time for the datasets") + +cru_start = datetime.datetime.strptime(cru_31['start_date'], "%Y-%m-%d") +cru_end = datetime.datetime.strptime(cru_31['end_date'], "%Y-%m-%d") +knmi_start, knmi_end = knmi_dataset.time_range() +# Grab the Max Start Time +start_time = max([cru_start, knmi_start]) +# Grab the Min End Time +end_time = min([cru_end, knmi_end]) +print("Overlap computed to be: %s to %s" % (start_time.strftime("%Y-%m-%d"), + end_time.strftime("%Y-%m-%d"))) +print("We are going to grab the first %s year(s) of data" % YEARS) +end_time = datetime.datetime(start_time.year + YEARS, start_time.month, start_time.day) +print("Final Overlap is: %s to %s" % (start_time.strftime("%Y-%m-%d"), + end_time.strftime("%Y-%m-%d"))) + + +print("Fetching data from RCMED...") +cru31_dataset = rcmed.parameter_dataset(dataset_id, + parameter_id, + min_lat, + max_lat, + min_lon, + max_lon, + start_time, + end_time) + +import sys; sys.exit() +""" Step 3: Resample Datasets so they are the same shape """ +print("CRU31_Dataset.values shape: (times, lats, lons) - %s" % (cru31_dataset.values.shape,)) +print("KNMI_Dataset.values shape: (times, lats, lons) - %s" % (knmi_dataset.values.shape,)) +print("Our two datasets have a mis-match in time. We will subset on time to %s years\n" % YEARS) + +# Create a Bounds object to use for subsetting +new_bounds = Bounds(min_lat, max_lat, min_lon, max_lon, start_time, end_time) +knmi_dataset = dsp.subset(new_bounds, knmi_dataset) + +print("CRU31_Dataset.values shape: (times, lats, lons) - %s" % (cru31_dataset.values.shape,)) +print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_dataset.values.shape,)) + +print("Temporally Rebinning the Datasets to a Single Timestep") +# To run FULL temporal Rebinning use a timedelta > 366 days. I used 999 in this example +knmi_dataset = dsp.temporal_rebin(knmi_dataset, datetime.timedelta(days=999)) +cru31_dataset = dsp.temporal_rebin(cru31_dataset, datetime.timedelta(days=999)) + +print("KNMI_Dataset.values shape: %s" % (knmi_dataset.values.shape,)) +print("CRU31_Dataset.values shape: %s \n\n" % (cru31_dataset.values.shape,)) + +""" Spatially Regrid the Dataset Objects to a 1/2 degree grid """ +# Using the bounds we will create a new set of lats and lons on 1 degree step +new_lons = np.arange(min_lon, max_lon, 0.5) +new_lats = np.arange(min_lat, max_lat, 0.5) + +# Spatially regrid datasets using the new_lats, new_lons numpy arrays +print("Spatially Regridding the KNMI_Dataset...") +knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons) +print("Spatially Regridding the CRU31_Dataset...") +cru31_dataset = dsp.spatial_regrid(cru31_dataset, new_lats, new_lons) +print("Final shape of the KNMI_Dataset:%s" % (knmi_dataset.values.shape, )) +print("Final shape of the CRU31_Dataset:%s" % (cru31_dataset.values.shape, )) + +""" Step 4: Build a Metric to use for Evaluation - Bias for this example """ +# You can build your own metrics, but OCW also ships with some common metrics +print("Setting up a Bias metric to use for evaluation") +bias = metrics.Bias() + +""" Step 5: Create an Evaluation Object using Datasets and our Metric """ +# The Evaluation Class Signature is: +# Evaluation(reference, targets, metrics, subregions=None) +# Evaluation can take in multiple targets and metrics, so we need to convert +# our examples into Python lists. 
Evaluation will iterate over the lists +print("Making the Evaluation definition") +bias_evaluation = evaluation.Evaluation(knmi_dataset, [cru31_dataset], [bias]) +print("Executing the Evaluation using the object's run() method") +bias_evaluation.run() + +""" Step 6: Make a Plot from the Evaluation.results """ +# The Evaluation.results are a set of nested lists to support many different +# possible Evaluation scenarios. +# +# The Evaluation results docs say: +# The shape of results is (num_metrics, num_target_datasets) if no subregion +# Accessing the actual results when we have used 1 metric and 1 dataset is +# done this way: +print("Accessing the Results of the Evaluation run") +results = bias_evaluation.results[0][0] + +# From the bias output I want to make a Contour Map of the region +print("Generating a contour map using ocw.plotter.draw_contour_map()") + +lats = new_lats +lons = new_lons +fname = OUTPUT_PLOT +gridshape = (1, 1) # Using a 1 x 1 since we have a single Bias for the full time range +plot_title = "TASMAX Bias of KNMI Compared to CRU 3.1 (%s - %s)" % (start_time.strftime("%Y/%d/%m"), end_time.strftime("%Y/%d/%m")) +sub_titles = ["Full Temporal Range"] + +plotter.draw_contour_map(results, lats, lons, fname, + gridshape=gridshape, ptitle=plot_title, + subtitles=sub_titles) From 4f26ccaefd0dded90b69884129d770c0f52f3627 Mon Sep 17 00:00:00 2001 From: cgoodale Date: Mon, 23 Jun 2014 13:12:08 -0700 Subject: [PATCH 2/5] Raise dataset shapes in _check_dataset_shapes - Include the mismatching shapes in the Error Message --- ocw/dataset_processor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ocw/dataset_processor.py b/ocw/dataset_processor.py index 0d08699f..cd6971c5 100644 --- a/ocw/dataset_processor.py +++ b/ocw/dataset_processor.py @@ -725,7 +725,8 @@ def _check_dataset_shapes(datasets): dataset_shape = dataset.values.shape else: if dataset.values.shape != dataset_shape: - raise ValueError("Input datasets must be the same shape for an ensemble") + msg = "%s != %s" % (dataset.values.shape, dataset_shape) + raise ValueError("Input datasets must be the same shape for an ensemble :: ", msg) else: pass From f4a67429eb3027a2a3217dcceb999814525dd419 Mon Sep 17 00:00:00 2001 From: cgoodale Date: Mon, 7 Jul 2014 15:18:52 -0700 Subject: [PATCH 3/5] Incremental progress on model ensemble example * Dropped extra print statements * Adjusted the existing Evaluation object to run against multiple datasets * Change the output plotter settings to support the new datasets --- examples/model_ensemble_to_rcmed.py | 58 ++++++++++++++--------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/examples/model_ensemble_to_rcmed.py b/examples/model_ensemble_to_rcmed.py index 33f12191..544a6380 100644 --- a/examples/model_ensemble_to_rcmed.py +++ b/examples/model_ensemble_to_rcmed.py @@ -32,7 +32,7 @@ # File URL leader FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/" # This way we can easily adjust the time span of the retrievals -YEARS = 3 +YEARS = 1 # Two Local Model Files FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc" FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc" @@ -55,10 +55,9 @@ # Load local knmi model data knmi_dataset = local.load_file(FILE_1, "tasmax") knmi_dataset.name = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax" -print(knmi_dataset) + wrf311_dataset = local.load_file(FILE_2, "tasmax") wrf311_dataset.name = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax" -print(wrf311_dataset) @@ 
-81,7 +80,7 @@ parameter_id = int(cru_31['parameter_id']) # The spatial_boundaries() function returns the spatial extent of the dataset -min_lat, max_lat, min_lon, max_lon = knmi_dataset.spatial_boundaries() +min_lat, max_lat, min_lon, max_lon = wrf311_dataset.spatial_boundaries() print("Calculating the Maximum Overlap in Time for the datasets") @@ -99,7 +98,6 @@ print("Final Overlap is: %s to %s" % (start_time.strftime("%Y-%m-%d"), end_time.strftime("%Y-%m-%d"))) - print("Fetching data from RCMED...") cru31_dataset = rcmed.parameter_dataset(dataset_id, parameter_id, @@ -110,42 +108,44 @@ start_time, end_time) -import sys; sys.exit() """ Step 3: Resample Datasets so they are the same shape """ -print("CRU31_Dataset.values shape: (times, lats, lons) - %s" % (cru31_dataset.values.shape,)) -print("KNMI_Dataset.values shape: (times, lats, lons) - %s" % (knmi_dataset.values.shape,)) -print("Our two datasets have a mis-match in time. We will subset on time to %s years\n" % YEARS) +print("Temporally Rebinning the Datasets to a Monthly Timestep") +# To run monthly temporal Rebinning use a timedelta of 30 days. +knmi_dataset = dsp.temporal_rebin(knmi_dataset, datetime.timedelta(days=30)) +wrf311_dataset = dsp.temporal_rebin(wrf311_dataset, datetime.timedelta(days=30)) +cru31_dataset = dsp.temporal_rebin(cru31_dataset, datetime.timedelta(days=30)) + +# Running Temporal Rebin early helps negate the issue of datasets being on different +# days of the month (1st vs. 15th) # Create a Bounds object to use for subsetting new_bounds = Bounds(min_lat, max_lat, min_lon, max_lon, start_time, end_time) + +# DEBUG +print new_bounds +print knmi_dataset +print wrf311_dataset + knmi_dataset = dsp.subset(new_bounds, knmi_dataset) +wrf311_dataset = dsp.subset(new_bounds, wrf311_dataset) + -print("CRU31_Dataset.values shape: (times, lats, lons) - %s" % (cru31_dataset.values.shape,)) -print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_dataset.values.shape,)) -print("Temporally Rebinning the Datasets to a Single Timestep") -# To run FULL temporal Rebinning use a timedelta > 366 days. 
I used 999 in this example -knmi_dataset = dsp.temporal_rebin(knmi_dataset, datetime.timedelta(days=999)) -cru31_dataset = dsp.temporal_rebin(cru31_dataset, datetime.timedelta(days=999)) -print("KNMI_Dataset.values shape: %s" % (knmi_dataset.values.shape,)) -print("CRU31_Dataset.values shape: %s \n\n" % (cru31_dataset.values.shape,)) - """ Spatially Regrid the Dataset Objects to a 1/2 degree grid """ -# Using the bounds we will create a new set of lats and lons on 1 degree step +# Using the bounds we will create a new set of lats and lons on 1/2 degree step new_lons = np.arange(min_lon, max_lon, 0.5) new_lats = np.arange(min_lat, max_lat, 0.5) # Spatially regrid datasets using the new_lats, new_lons numpy arrays -print("Spatially Regridding the KNMI_Dataset...") knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons) -print("Spatially Regridding the CRU31_Dataset...") +wrf311_dataset = dsp.spatial_regrid(wrf311_dataset, new_lats, new_lons) cru31_dataset = dsp.spatial_regrid(cru31_dataset, new_lats, new_lons) -print("Final shape of the KNMI_Dataset:%s" % (knmi_dataset.values.shape, )) -print("Final shape of the CRU31_Dataset:%s" % (cru31_dataset.values.shape, )) - + +# Generate an ensemble dataset from knmi and wrf models +ensemble_dataset = dsp.ensemble([knmi_dataset, wrf311_dataset]) + """ Step 4: Build a Metric to use for Evaluation - Bias for this example """ -# You can build your own metrics, but OCW also ships with some common metrics print("Setting up a Bias metric to use for evaluation") bias = metrics.Bias() @@ -155,7 +155,7 @@ # Evaluation can take in multiple targets and metrics, so we need to convert # our examples into Python lists. Evaluation will iterate over the lists print("Making the Evaluation definition") -bias_evaluation = evaluation.Evaluation(knmi_dataset, [cru31_dataset], [bias]) +bias_evaluation = evaluation.Evaluation(cru31_dataset, [knmi_dataset, wrf311_dataset, ensemble_dataset], [bias]) print("Executing the Evaluation using the object's run() method") bias_evaluation.run() @@ -176,9 +176,9 @@ lats = new_lats lons = new_lons fname = OUTPUT_PLOT -gridshape = (1, 1) # Using a 1 x 1 since we have a single Bias for the full time range -plot_title = "TASMAX Bias of KNMI Compared to CRU 3.1 (%s - %s)" % (start_time.strftime("%Y/%d/%m"), end_time.strftime("%Y/%d/%m")) -sub_titles = ["Full Temporal Range"] +gridshape = (3, 12) # Using a 3 x 12 since we have a 1 year of monthly data for 3 models +plot_title = "TASMAX Bias of CRU 3.1 vs. KNMI, WRF311 and ENSEMBLE (%s - %s)" % (start_time.strftime("%Y/%d/%m"), end_time.strftime("%Y/%d/%m")) +sub_titles = ["Monthly Time Step"] plotter.draw_contour_map(results, lats, lons, fname, gridshape=gridshape, ptitle=plot_title, From 3caaa0101460a72a55469638fa14f436cb32c4e6 Mon Sep 17 00:00:00 2001 From: cgoodale Date: Sat, 18 Oct 2014 10:12:26 -0700 Subject: [PATCH 4/5] Incremental progress * Shrink bounds to ensure clean overlap * Convert temporal rebin to Annual for this example * Still working on the final plotting routine --- examples/model_ensemble_to_rcmed.py | 47 ++++++++++++++++------------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/examples/model_ensemble_to_rcmed.py b/examples/model_ensemble_to_rcmed.py index 544a6380..24ecb40b 100644 --- a/examples/model_ensemble_to_rcmed.py +++ b/examples/model_ensemble_to_rcmed.py @@ -16,6 +16,7 @@ # under the License. 
import datetime +import math import urllib from os import path @@ -82,6 +83,14 @@ # The spatial_boundaries() function returns the spatial extent of the dataset min_lat, max_lat, min_lon, max_lon = wrf311_dataset.spatial_boundaries() +# There is a boundry alignment issue with the datasets. To mitigate this +# we will use the math.floor() and math.ceil() functions to shrink the +# boundries slighty. +min_lat = math.ceil(min_lat) +max_lat = math.floor(max_lat) +min_lon = math.ceil(min_lon) +max_lon = math.floor(max_lon) + print("Calculating the Maximum Overlap in Time for the datasets") cru_start = datetime.datetime.strptime(cru_31['start_date'], "%Y-%m-%d") @@ -110,28 +119,21 @@ """ Step 3: Resample Datasets so they are the same shape """ -print("Temporally Rebinning the Datasets to a Monthly Timestep") -# To run monthly temporal Rebinning use a timedelta of 30 days. -knmi_dataset = dsp.temporal_rebin(knmi_dataset, datetime.timedelta(days=30)) -wrf311_dataset = dsp.temporal_rebin(wrf311_dataset, datetime.timedelta(days=30)) -cru31_dataset = dsp.temporal_rebin(cru31_dataset, datetime.timedelta(days=30)) +print("Temporally Rebinning the Datasets to an Annual Timestep") +# To run annual temporal Rebinning use a timedelta of 360 days. +knmi_dataset = dsp.temporal_rebin(knmi_dataset, datetime.timedelta(days=360)) +wrf311_dataset = dsp.temporal_rebin(wrf311_dataset, datetime.timedelta(days=360)) +cru31_dataset = dsp.temporal_rebin(cru31_dataset, datetime.timedelta(days=360)) # Running Temporal Rebin early helps negate the issue of datasets being on different # days of the month (1st vs. 15th) # Create a Bounds object to use for subsetting new_bounds = Bounds(min_lat, max_lat, min_lon, max_lon, start_time, end_time) -# DEBUG -print new_bounds -print knmi_dataset -print wrf311_dataset - +# Subset our model datasets so they are the same size knmi_dataset = dsp.subset(new_bounds, knmi_dataset) wrf311_dataset = dsp.subset(new_bounds, wrf311_dataset) - - - """ Spatially Regrid the Dataset Objects to a 1/2 degree grid """ # Using the bounds we will create a new set of lats and lons on 1/2 degree step new_lons = np.arange(min_lon, max_lon, 0.5) @@ -155,7 +157,9 @@ # Evaluation can take in multiple targets and metrics, so we need to convert # our examples into Python lists. Evaluation will iterate over the lists print("Making the Evaluation definition") -bias_evaluation = evaluation.Evaluation(cru31_dataset, [knmi_dataset, wrf311_dataset, ensemble_dataset], [bias]) +bias_evaluation = evaluation.Evaluation(cru31_dataset, + [knmi_dataset, wrf311_dataset, ensemble_dataset], + [bias]) print("Executing the Evaluation using the object's run() method") bias_evaluation.run() @@ -168,7 +172,7 @@ # Accessing the actual results when we have used 1 metric and 1 dataset is # done this way: print("Accessing the Results of the Evaluation run") -results = bias_evaluation.results[0][0] +results = bias_evaluation.results[0] # From the bias output I want to make a Contour Map of the region print("Generating a contour map using ocw.plotter.draw_contour_map()") @@ -176,10 +180,11 @@ lats = new_lats lons = new_lons fname = OUTPUT_PLOT -gridshape = (3, 12) # Using a 3 x 12 since we have a 1 year of monthly data for 3 models +gridshape = (3, 1) # Using a 3 x 1 since we have a 1 year of data for 3 models plot_title = "TASMAX Bias of CRU 3.1 vs. 
KNMI, WRF311 and ENSEMBLE (%s - %s)" % (start_time.strftime("%Y/%d/%m"), end_time.strftime("%Y/%d/%m")) -sub_titles = ["Monthly Time Step"] - -plotter.draw_contour_map(results, lats, lons, fname, - gridshape=gridshape, ptitle=plot_title, - subtitles=sub_titles) +plotnames = ["KNMI", "WRF311", "ENSEMBLE"] +for i, result in enumerate(results): + output_file = "%s_%s" % (fname, plotnames[i]) + print "creating %s" % (output_file,) + plotter.draw_contour_map(result, lats, lons, output_file, + gridshape=gridshape, ptitle=plot_title) \ No newline at end of file From b857ed98b89bb03fac97168af6f062c97359c4f3 Mon Sep 17 00:00:00 2001 From: cgoodale Date: Mon, 20 Oct 2014 08:23:13 -0700 Subject: [PATCH 5/5] Create Model Ensemble to RCMED Example Script * Set the Time Range to a Fixed 1 Year Range (1989) * Updated the Docs explaining how the Evaluation.results are formed * Adjusted the plotting routine to set plot_title per output plot * Resolves CLIMATE-265 --- examples/model_ensemble_to_rcmed.py | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/examples/model_ensemble_to_rcmed.py b/examples/model_ensemble_to_rcmed.py index 24ecb40b..1f653a18 100644 --- a/examples/model_ensemble_to_rcmed.py +++ b/examples/model_ensemble_to_rcmed.py @@ -38,7 +38,7 @@ FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc" FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc" # Filename for the output image/plot (without file extension) -OUTPUT_PLOT = "model_ensemble_tasmax_africa_bias_monthly" +OUTPUT_PLOT = "tasmax_africa_bias_annual" # Download necessary NetCDF file if not present if path.exists(FILE_1): @@ -96,15 +96,11 @@ cru_start = datetime.datetime.strptime(cru_31['start_date'], "%Y-%m-%d") cru_end = datetime.datetime.strptime(cru_31['end_date'], "%Y-%m-%d") knmi_start, knmi_end = knmi_dataset.time_range() -# Grab the Max Start Time -start_time = max([cru_start, knmi_start]) -# Grab the Min End Time -end_time = min([cru_end, knmi_end]) -print("Overlap computed to be: %s to %s" % (start_time.strftime("%Y-%m-%d"), - end_time.strftime("%Y-%m-%d"))) -print("We are going to grab the first %s year(s) of data" % YEARS) -end_time = datetime.datetime(start_time.year + YEARS, start_time.month, start_time.day) -print("Final Overlap is: %s to %s" % (start_time.strftime("%Y-%m-%d"), +# Set the Time Range to be the year 1989 +start_time = datetime.datetime(1989,1,1) +end_time = datetime.datetime(1989,12,1) + +print("Time Range is: %s to %s" % (start_time.strftime("%Y-%m-%d"), end_time.strftime("%Y-%m-%d"))) print("Fetching data from RCMED...") @@ -168,11 +164,11 @@ # possible Evaluation scenarios. # # The Evaluation results docs say: -# The shape of results is (num_metrics, num_target_datasets) if no subregion -# Accessing the actual results when we have used 1 metric and 1 dataset is +# The shape of results is (num_target_datasets, num_metrics) if no subregion +# Accessing the actual results when we have used 3 datasets and 1 metric is # done this way: print("Accessing the Results of the Evaluation run") -results = bias_evaluation.results[0] +results = bias_evaluation.results # From the bias output I want to make a Contour Map of the region print("Generating a contour map using ocw.plotter.draw_contour_map()") @@ -181,10 +177,10 @@ lons = new_lons fname = OUTPUT_PLOT gridshape = (3, 1) # Using a 3 x 1 since we have a 1 year of data for 3 models -plot_title = "TASMAX Bias of CRU 3.1 vs. 
KNMI, WRF311 and ENSEMBLE (%s - %s)" % (start_time.strftime("%Y/%d/%m"), end_time.strftime("%Y/%d/%m")) plotnames = ["KNMI", "WRF311", "ENSEMBLE"] for i, result in enumerate(results): - output_file = "%s_%s" % (fname, plotnames[i]) + plot_title = "TASMAX Bias of CRU 3.1 vs. %s (%s - %s)" % (plotnames[i], start_time.strftime("%Y/%d/%m"), end_time.strftime("%Y/%d/%m")) + output_file = "%s_%s" % (fname, plotnames[i].lower()) print "creating %s" % (output_file,) - plotter.draw_contour_map(result, lats, lons, output_file, + plotter.draw_contour_map(result[0], lats, lons, output_file, gridshape=gridshape, ptitle=plot_title) \ No newline at end of file
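
A note on the download guard near the top of the example script: the series targets Python 2 (it relies on urllib.urlretrieve and, in the later patches, bare print statements), and the "if path.exists(FILE_1): pass ... else:" pattern can be collapsed into a single negated test. A minimal sketch under that same Python 2 assumption, reusing the FILE_LEADER and FILE_1 values from the script; this is illustrative, not a replacement for the committed hunk:

    import urllib
    from os import path

    FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
    FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"

    # Download the model file only if it is not already on disk.
    if not path.exists(FILE_1):
        urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)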
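
On the _check_dataset_shapes change in PATCH 2/5: as written, the raise statement passes two arguments to ValueError, so the exception message renders as a tuple of the fixed text and the shape string. Folding the mismatching shapes into one formatted string gives a cleaner message. The function below is a simplified stand-in for the OCW helper (the real one is what dsp.ensemble calls internally), shown only to illustrate the message formatting:

    def _check_dataset_shapes(datasets):
        """Raise ValueError if the datasets' value arrays differ in shape."""
        dataset_shape = None
        for dataset in datasets:
            if dataset_shape is None:
                dataset_shape = dataset.values.shape
            elif dataset.values.shape != dataset_shape:
                # Single formatted string instead of a two-argument ValueError.
                raise ValueError(
                    "Input datasets must be the same shape for an ensemble "
                    ":: %s != %s" % (dataset.values.shape, dataset_shape))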
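
On the plotting loop in PATCH 5/5: with one Bias metric and three target datasets (KNMI, WRF311, and the ensemble), Evaluation.results has shape (num_target_datasets, num_metrics), so each result yielded by the loop is a one-element list and result[0] is the bias array for that target. Note also that the title formatting uses strftime("%Y/%d/%m"), which prints day and month swapped; "%Y/%m/%d" is probably what was intended. A short sketch of the loop with that adjustment, reusing the names from the final version of the script:

    results = bias_evaluation.results  # nested lists: results[target_index][metric_index]
    plotnames = ["KNMI", "WRF311", "ENSEMBLE"]
    for i, result in enumerate(results):
        # One contour map per target dataset; result[0] is its Bias output.
        plot_title = "TASMAX Bias of CRU 3.1 vs. %s (%s - %s)" % (
            plotnames[i],
            start_time.strftime("%Y/%m/%d"),
            end_time.strftime("%Y/%m/%d"))
        output_file = "%s_%s" % (OUTPUT_PLOT, plotnames[i].lower())
        plotter.draw_contour_map(result[0], new_lats, new_lons, output_file,
                                 gridshape=(3, 1), ptitle=plot_title)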